1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/types.h>
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/spinlock.h>
24#include <linux/smp.h>
25#include <linux/interrupt.h>
26#include <linux/sched/signal.h>
27#include <linux/sched/debug.h>
28#include <linux/atomic.h>
29#include <linux/bitops.h>
30#include <linux/percpu.h>
31#include <linux/notifier.h>
32#include <linux/cpu.h>
33#include <linux/mutex.h>
34#include <linux/export.h>
35#include <linux/hardirq.h>
36#include <linux/delay.h>
37#include <linux/moduleparam.h>
38#include <linux/kthread.h>
39#include <linux/tick.h>
40#include <linux/rcupdate_wait.h>
41#include <linux/sched/isolation.h>
42#include <linux/kprobes.h>
43#include <linux/slab.h>
44#include <linux/irq_work.h>
45#include <linux/rcupdate_trace.h>
46
47#define CREATE_TRACE_POINTS
48
49#include "rcu.h"
50
51#ifdef MODULE_PARAM_PREFIX
52#undef MODULE_PARAM_PREFIX
53#endif
54#define MODULE_PARAM_PREFIX "rcupdate."
55
56#ifndef CONFIG_TINY_RCU
57module_param(rcu_expedited, int, 0);
58module_param(rcu_normal, int, 0);
59static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
60#ifndef CONFIG_PREEMPT_RT
61module_param(rcu_normal_after_boot, int, 0);
62#endif
63#endif
64
65#ifdef CONFIG_DEBUG_LOCK_ALLOC
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102static bool rcu_read_lock_held_common(bool *ret)
103{
104 if (!debug_lockdep_rcu_enabled()) {
105 *ret = true;
106 return true;
107 }
108 if (!rcu_is_watching()) {
109 *ret = false;
110 return true;
111 }
112 if (!rcu_lockdep_current_cpu_online()) {
113 *ret = false;
114 return true;
115 }
116 return false;
117}
118
119int rcu_read_lock_sched_held(void)
120{
121 bool ret;
122
123 if (rcu_read_lock_held_common(&ret))
124 return ret;
125 return lock_is_held(&rcu_sched_lock_map) || !preemptible();
126}
127EXPORT_SYMBOL(rcu_read_lock_sched_held);
128#endif
129
130#ifndef CONFIG_TINY_RCU
131
132
133
134
135
136
137
138
139
140bool rcu_gp_is_normal(void)
141{
142 return READ_ONCE(rcu_normal) &&
143 rcu_scheduler_active != RCU_SCHEDULER_INIT;
144}
145EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
146
147static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
148
149
150
151
152
153
154
155
156bool rcu_gp_is_expedited(void)
157{
158 return rcu_expedited || atomic_read(&rcu_expedited_nesting);
159}
160EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
161
162
163
164
165
166
167
168
/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * Increment the expediting nesting count; while it is nonzero,
 * rcu_gp_is_expedited() reports true.  Must be balanced by a later
 * call to rcu_unexpedite_gp().
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);
174
175
176
177
178
179
180
181
182
183
/**
 * rcu_unexpedite_gp - Cancel a prior rcu_expedite_gp() invocation
 *
 * Decrement the expediting nesting count.  When it reaches zero (and
 * the rcu_expedited module parameter is not set), grace periods revert
 * to normal (non-expedited) operation.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
189
190static bool rcu_boot_ended __read_mostly;
191
192
193
194
/*
 * Inform RCU that the end of in-kernel boot has been reached: drop the
 * boot-time expediting (balancing rcu_expedited_nesting's initial count
 * of 1) and, when rcu_normal_after_boot is set, force all subsequent
 * grace periods to be normal (non-expedited).
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
	rcu_boot_ended = true;
}
202
203
204
205
/*
 * Has rcu_end_inkernel_boot() been invoked, that is, has in-kernel boot
 * completed from RCU's point of view?
 */
bool rcu_inkernel_boot_has_ended(void)
{
	return rcu_boot_ended;
}
EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);
211
212#endif
213
214
215
216
217
218
219void rcu_test_sync_prims(void)
220{
221 if (!IS_ENABLED(CONFIG_PROVE_RCU))
222 return;
223 synchronize_rcu();
224 synchronize_rcu_expedited();
225}
226
227#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
228
229
230
231
/*
 * Switch RCU from early-boot to full run-time operation.  The
 * synchronous primitives are exercised both before and after the
 * transition so that each mode gets tested (no-op unless
 * CONFIG_PROVE_RCU), and the kfree_rcu() machinery is notified that the
 * scheduler is now fully running.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	kfree_rcu_scheduler_running();
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);
241
242#endif
243
244#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Lockdep map tracking rcu_read_lock() critical sections. */
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map = {
	.name = "rcu_read_lock",
	.key = &rcu_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* NOTE(review): presumably configuration-dependent (PREEMPT_RT) -- confirm */
};
EXPORT_SYMBOL_GPL(rcu_lock_map);
253
/* Lockdep map tracking rcu_read_lock_bh() critical sections. */
static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map = {
	.name = "rcu_read_lock_bh",
	.key = &rcu_bh_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* NOTE(review): presumably configuration-dependent (PREEMPT_RT) -- confirm */
};
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
262
/* Lockdep map tracking rcu_read_lock_sched() critical sections. */
static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map = {
	.name = "rcu_read_lock_sched",
	.key = &rcu_sched_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_SPIN, /* NOTE(review): spinlock-like wait context -- confirm rationale */
};
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
271
272
/* Lockdep map representing RCU callback-execution context. */
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);
277
/*
 * Is lockdep-based RCU debugging currently usable?  Requires that RCU
 * has progressed past the earliest phase of boot, that lockdep itself
 * has not been disabled (debug_locks), and that the current task is not
 * recursing within lockdep.
 */
noinstr int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305int rcu_read_lock_held(void)
306{
307 bool ret;
308
309 if (rcu_read_lock_held_common(&ret))
310 return ret;
311 return lock_is_held(&rcu_lock_map);
312}
313EXPORT_SYMBOL_GPL(rcu_read_lock_held);
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330int rcu_read_lock_bh_held(void)
331{
332 bool ret;
333
334 if (rcu_read_lock_held_common(&ret))
335 return ret;
336 return in_softirq() || irqs_disabled();
337}
338EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
339
340int rcu_read_lock_any_held(void)
341{
342 bool ret;
343
344 if (rcu_read_lock_held_common(&ret))
345 return ret;
346 if (lock_is_held(&rcu_lock_map) ||
347 lock_is_held(&rcu_bh_lock_map) ||
348 lock_is_held(&rcu_sched_lock_map))
349 return 1;
350 return !preemptible();
351}
352EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);
353
354#endif
355
356
357
358
359
360
361
/**
 * wakeme_after_rcu - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task via its rcu_synchronize completion.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);
370
/*
 * Wait for a grace period of each of the n RCU flavors whose call_rcu()
 * style functions appear in crcu_array[], using the caller-supplied
 * on-stack rs_array[] for the heads and completions.  Duplicate entries
 * in crcu_array[] are registered and waited for only once (the inner
 * j-loops skip index i when an identical earlier entry exists).  When
 * checktiny is set, entries equal to call_rcu() are skipped entirely;
 * NOTE(review): presumably because Tiny RCU's grace periods are implied
 * by the context -- confirm; might_sleep() still documents that this
 * function can block.
 */
void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each distinct flavor. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu)) {
			might_sleep();
			continue;
		}
		/* Skip this entry if an earlier duplicate was registered. */
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			init_rcu_head_on_stack(&rs_array[i].head);
			init_completion(&rs_array[i].completion);
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
		}
	}

	/* Wait for each distinct callback to be invoked, then clean up. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			wait_for_completion(&rs_array[i].completion);
			destroy_rcu_head_on_stack(&rs_array[i].head);
		}
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
409
410#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
/* Register a dynamically allocated rcu_head with debugobjects. */
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);
416
/* Unregister a dynamically allocated rcu_head from debugobjects. */
void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);
422
/*
 * debugobjects ->is_static_object() hook: unconditionally claim that
 * untracked rcu_head objects are statically allocated, so debugobjects
 * treats them as valid rather than flagging them.
 */
static bool rcuhead_is_static_object(void *addr)
{
	return true;
}
427
428
429
430
431
432
433
434
435
436
437
/**
 * init_rcu_head_on_stack - Register an on-stack rcu_head with debugobjects
 * @head: Pointer to the on-stack rcu_head structure
 *
 * Pair with destroy_rcu_head_on_stack() before the stack frame exits.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
443
444
445
446
447
448
449
450
451
452
453
454
/**
 * destroy_rcu_head_on_stack - Unregister an on-stack rcu_head
 * @head: Pointer to the on-stack rcu_head structure
 *
 * Balances a prior init_rcu_head_on_stack() on the same structure.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
460
/* debugobjects descriptor for rcu_head structures. */
const struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
466#endif
467
468#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
/*
 * Out-of-line wrapper so that modules (e.g. rcutorture) can fire the
 * rcu_torture_read tracepoint without direct tracepoint access.
 */
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
476#else
477#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
478 do { } while (0)
479#endif
480
481#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
482
/*
 * Torture-test wrapper around sched_setaffinity() that warns (once per
 * callsite) on failure.  Exported for use by the rcutorture modules.
 */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	int ret;

	ret = sched_setaffinity(pid, in_mask);
	WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
492#endif
493
494#ifdef CONFIG_RCU_STALL_COMMON
/* Run-time tunables (0644) controlling RCU CPU stall warnings. */
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly;
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
502#endif
503
504
505
/* Boot-time-only tunable (0444) suppressing stall warnings during boot. */
int rcu_cpu_stall_suppress_at_boot __read_mostly;
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);
509
510#ifdef CONFIG_PROVE_RCU
511
512
513
514
/* Boot-time-only switch (0444) enabling the early-boot RCU self tests. */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

/* Number of self-test callbacks that have executed so far. */
static int rcu_self_test_counter;
519
/* Self-test RCU callback: count the invocation, then log the new count. */
static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}
525
/* SRCU structure and polled grace-period cookie for the early self test. */
DEFINE_STATIC_SRCU(early_srcu);
static unsigned long early_srcu_cookie;

/* Dummy structure so the self test can exercise kfree_rcu(). */
struct early_boot_kfree_rcu {
	struct rcu_head rh;
};
532
/*
 * Queue one self-test callback of each flavor during early boot:
 * call_rcu(), call_srcu() (also recording a polled-grace-period cookie),
 * and kfree_rcu().  rcu_verify_early_boot_tests() later checks that the
 * counted callbacks actually ran.
 */
static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;	/* static: must outlive this function. */
	static struct rcu_head shead;
	struct early_boot_kfree_rcu *rhp;

	call_rcu(&head, test_callback);
	if (IS_ENABLED(CONFIG_SRCU)) {
		early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
		call_srcu(&early_srcu, &shead, test_callback);
	}
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	if (!WARN_ON_ONCE(!rhp))
		kfree_rcu(rhp, rh);	/* Skipped (with warning) on allocation failure. */
}
548
/*
 * Run the early-boot RCU self tests: callback-based tests only when the
 * rcu_self_test module parameter is set, synchronous-primitive tests
 * unconditionally (the latter are no-ops unless CONFIG_PROVE_RCU).
 */
void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
}
557
/*
 * Late initcall verifying the early-boot self tests: wait for all
 * outstanding callbacks via the barrier primitives, check that the SRCU
 * grace period corresponding to early_srcu_cookie has completed, and
 * compare the number of callbacks that ran (rcu_self_test_counter)
 * against the number expected.  Returns 0 on success, -1 (with a
 * warning) on mismatch.
 */
static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;	/* Expected callback count. */

	if (rcu_self_test) {
		early_boot_test_counter++;	/* The call_rcu() callback. */
		rcu_barrier();
		if (IS_ENABLED(CONFIG_SRCU)) {
			early_boot_test_counter++;	/* The call_srcu() callback. */
			srcu_barrier(&early_srcu);
			WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie));
		}
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
579late_initcall(rcu_verify_early_boot_tests);
580#else
581void rcu_early_boot_tests(void) {}
582#endif
583
584#include "tasks.h"
585
586#ifndef CONFIG_TINY_RCU
587
588
589
590
/*
 * Print any boot-time rcupdate settings that differ from the defaults
 * (grace-period expediting policy and stall-warning tunables), then let
 * the RCU-tasks flavors report theirs.  Note the if/else-if ordering:
 * rcu_normal overrides rcu_normal_after_boot, which in turn overrides
 * rcu_expedited.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}
605
606#endif
607