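/*
 * Read-Copy Update mechanism for mutual exclusion: update-side machinery
 * shared by the RCU implementations, including grace-period expediting
 * controls, lockdep helpers, CPU stall-warning parameters, RCU-tasks,
 * and early-boot self tests.
 */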
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
extern int rcu_expedited;
module_param(rcu_expedited, int, 0);
extern int rcu_normal;
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
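/*
 * rcu_read_lock_sched_held(): might the caller be in an RCU-sched
 * read-side critical section?  Returns 1 when lockdep is unavailable or
 * not yet initialized; returns 0 when RCU is not watching this CPU or
 * the CPU is offline; otherwise asks lockdep and, failing that, checks
 * whether preemption is disabled.
 */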
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

#ifndef CONFIG_TINY_RCU
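/*
 * Should expedited grace-period primitives fall back to normal grace
 * periods?  True when the rcu_normal module parameter is set, except
 * during the mid-boot RCU_SCHEDULER_INIT window.
 */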
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
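/*
 * Should normal grace-period primitives be expedited?  True if the
 * rcu_expedited module parameter is set, if rcu_expedite_gp() nesting is
 * nonzero, or during the mid-boot RCU_SCHEDULER_INIT window.
 */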
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

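/*
 * rcu_expedite_gp(): temporarily force normal grace-period primitives to
 * run expedited.  Nests with rcu_unexpedite_gp().
 */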
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

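/*
 * rcu_unexpedite_gp(): undo a prior call to rcu_expedite_gp().
 */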
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);

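/*
 * Inform RCU of the end of the in-kernel boot sequence: drop the initial
 * rcu_expedited_nesting reference and honor rcu_normal_after_boot.
 */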
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif

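/*
 * Test each non-SRCU synchronous grace-period wait API.  This is useful
 * just after a change in mode for these primitives, and during early boot.
 */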
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
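/*
 * Switch to run-time mode once RCU has fully initialized.
 */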
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif

#ifdef CONFIG_PREEMPT_RCU
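/*
 * Preemptible RCU implementation for rcu_read_lock().  Just increment
 * ->rcu_read_lock_nesting; shared state will be updated later if we block.
 */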
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

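/*
 * Preemptible RCU implementation for rcu_read_unlock().  Decrement
 * ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * within the RCU read-side critical section.
 */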
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign nesting value before checking ->rcu_read_unlock_special. */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before nesting reset. */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

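/*
 * rcu_read_lock_held(): might the caller be in an RCU read-side critical
 * section?  Returns 1 when lockdep is unavailable or not yet initialized;
 * returns 0 when RCU is not watching this CPU or the CPU is offline;
 * otherwise reports lockdep's opinion of rcu_lock_map.
 */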
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

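/*
 * rcu_read_lock_bh_held(): might the caller be in an RCU-bh read-side
 * critical section?  Note that softirq context and disabled interrupts
 * also count, because they block invocation of RCU-bh callbacks.
 */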
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif

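/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */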
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

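	/* Initialize and register a callback for each flavor specified. */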
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

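	/* Wait for all callbacks to be invoked. */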
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i)
			wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

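/*
 * init_rcu_head_on_stack(): initialize an on-stack rcu_head for
 * debugobjects so that it can be passed to call_rcu().  Pair with
 * destroy_rcu_head_on_stack() before the rcu_head goes out of scope.
 */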
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

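/*
 * destroy_rcu_head_on_stack(): undo init_rcu_head_on_stack() before the
 * on-stack rcu_head goes out of scope.
 */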
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA 0
#endif

int rcu_cpu_stall_suppress __read_mostly;
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

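	/* Clamp the timeout to the same 3..300 second range as the Kconfig option. */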
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif

#ifdef CONFIG_TASKS_RCU
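/*
 * Simple variant of RCU whose quiescent states are voluntary context
 * switch, user-space execution, and idle.  As such, grace periods can
 * take one good long time.
 *
 * The following form a global singly linked list of callbacks, protected
 * by rcu_tasks_cbs_lock, plus the wait queue used to wake the kthread
 * when callbacks arrive.
 */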
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;

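/**
 * call_rcu_tasks() - Queue a callback for invocation after a tasks-RCU grace period
 * @rhp: structure to be used for queueing the callback.
 * @func: callback function to be invoked after the grace period.
 *
 * The callback will be invoked some time after all currently executing
 * tasks have passed through a voluntary context switch, the idle loop,
 * or user-mode execution, in other words after a tasks-RCU grace period
 * has elapsed.
 */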
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;
	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

	if ((needwake && havetask) ||
	    (!havetask && !irqs_disabled_flags(flags))) {
		rcu_spawn_tasks_kthread();
		wake_up(&rcu_tasks_cbs_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

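/**
 * synchronize_rcu_tasks() - wait until a tasks-RCU grace period has elapsed.
 *
 * Control will return to the caller some time after all currently
 * executing tasks have passed through a voluntary context switch, the
 * idle loop, or user-mode execution.
 */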
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

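/**
 * rcu_barrier_tasks() - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Waiting for a full tasks-RCU grace period suffices here because there
 * is a single callback list and callbacks are invoked in order.
 */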
void rcu_barrier_tasks(void)
{
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

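/* See if tasks are still holding out, complain if so. */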
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

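/* RCU-tasks kthread that detects grace periods and invokes callbacks. */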
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current);

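	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.
	 */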
	for (;;) {
		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}
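
		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_sched()
		 * suffices because all of these transitions occur with
		 * interrupts disabled.
		 */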
		synchronize_sched();
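
		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */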
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

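		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */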
		synchronize_srcu(&tasks_rcu_exit_srcu);

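		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		 */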
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}
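
		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed to have
		 * a full memory barrier prior to them in the schedule()
		 * path, memory reordering on other CPUs could cause their
		 * RCU-tasks read-side critical sections to extend past the
		 * end of the grace period.  However, because these updates
		 * are carried out with interrupts disabled, this
		 * synchronize_sched() forces the needed ordering.
		 */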
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}

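/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */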
static void rcu_spawn_tasks_kthread(void)
{
	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
	struct task_struct *t;

	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
		smp_mb(); /* Ensure caller sees fully initialized kthread. */
		return;
	}
	mutex_lock(&rcu_tasks_kthread_mutex);
	if (rcu_tasks_kthread_ptr) {
		mutex_unlock(&rcu_tasks_kthread_mutex);
		return;
	}
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Order kthread initialization before pointer publication. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	mutex_unlock(&rcu_tasks_kthread_mutex);
}

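/*
 * Open an SRCU read-side critical section on exit from a task, so that
 * the synchronize_srcu(&tasks_rcu_exit_srcu) in rcu_tasks_kthread() can
 * wait for tasks that are in the process of exiting.
 */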
void exit_tasks_rcu_start(void)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Close the SRCU read-side critical section opened by exit_tasks_rcu_start(). */
void exit_tasks_rcu_finish(void)
{
	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
	preempt_enable();
}

#endif

#ifndef CONFIG_TINY_RCU

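/*
 * Print any non-default Tasks-RCU settings during boot.
 */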
static void __init rcu_tasks_bootup_oddness(void)
{
#ifdef CONFIG_TASKS_RCU
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	else
		pr_info("\tTasks RCU enabled.\n");
#endif
}

#endif

#ifdef CONFIG_PROVE_RCU
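/*
 * Early boot self test parameters, one for each RCU flavor.
 */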
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif

#ifndef CONFIG_TINY_RCU

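/*
 * Print any significant non-default RCU-related settings during boot.
 */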
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif