/*
 * Read-Copy Update mechanism for mutual exclusion, hierarchical
 * (tree-based) implementation.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
# define RCU_STATE_NAME(sname) sname##_varname
#else
# define DEFINE_RCU_TPS(sname)
# define RCU_STATE_NAME(sname) __stringify(sname)
#endif

#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
	.level = { &sname##_state.node[0] }, \
	.rda = &sname##_data, \
	.call = cr, \
	.gp_state = RCU_GP_IDLE, \
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
	.orphan_pend = RCU_CBLIST_INITIALIZER(sname##_state.orphan_pend), \
	.orphan_done = RCU_CBLIST_INITIALIZER(sname##_state.orphan_done), \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.name = RCU_STATE_NAME(sname), \
	.abbr = sabbr, \
	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
}

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);

static struct rcu_state *const rcu_state_p;
LIST_HEAD(rcu_struct_flavors);

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_node structures at each level of the combining tree. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total number of rcu_node structures. */
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just
 * before the first task is spawned.  While RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for
 * example) optimize synchronize_rcu() to a simple barrier().  It
 * finally transitions to RCU_SCHEDULER_RUNNING after RCU is fully
 * initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * once the scheduler is capable of creating new tasks, so that RCU can
 * defer spawning its kthreads and related setup until that point.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_state *rsp,
			       struct rcu_data *rdp, bool wake);
static void sync_sched_exp_online_cleanup(int cpu);

/* rcuc/rcub kthread realtime priority */
#ifdef CONFIG_RCU_KTHREAD_PRIO
static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
#else
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
#endif
module_param(kthread_prio, int, 0644);

/* Delays in jiffies for grace-period pre-init, init, and cleanup, debug only. */

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT
static int gp_preinit_delay = CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY;
module_param(gp_preinit_delay, int, 0644);
#else
static const int gp_preinit_delay;
#endif

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
module_param(gp_init_delay, int, 0644);
#else
static const int gp_init_delay;
#endif

#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP
static int gp_cleanup_delay = CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY;
module_param(gp_cleanup_delay, int, 0644);
#else
static const int gp_cleanup_delay;
#endif

/* Number of grace periods between delays, normalized by number of nodes. */
#define PER_RCU_NODE_PERIOD 3

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  These variables enable correlating
 * rcutorture output with the RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
}

/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(void)
{
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_sched_data.gpnum),
			       TPS("cpuqs"));
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(&rcu_sched_data), true);
}

void rcu_bh_qs(void)
{
	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_bh"),
				       __this_cpu_read(rcu_bh_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
	}
}

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
#ifndef rcu_eqs_special_exit
#define rcu_eqs_special_exit() do { } while (0)
#endif

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
	.dynticks_idle = ATOMIC_INIT(1),
#endif
};
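/*
 * This per-CPU flag is raised while a CPU is in the middle of entering
 * an extended quiescent state, during which rcu_irq_enter() must not be
 * invoked; rcu_irq_enter_disabled() exposes it to callers such as the
 * tracing code.  See rcu_eqs_enter_common().
 */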
302static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
303
304bool rcu_irq_enter_disabled(void)
305{
306 return this_cpu_read(disable_rcu_irq_enter);
307}
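/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state.
 */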
313static void rcu_dynticks_eqs_enter(void)
314{
315 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
316 int seq;
317
318
319
320
321
322
323 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
324
325 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
326 (seq & RCU_DYNTICK_CTRL_CTR));
327
328 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
329 (seq & RCU_DYNTICK_CTRL_MASK));
330}
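/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state.
 */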
336static void rcu_dynticks_eqs_exit(void)
337{
338 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
339 int seq;
340
341
342
343
344
345
346 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
347 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
348 !(seq & RCU_DYNTICK_CTRL_CTR));
349 if (seq & RCU_DYNTICK_CTRL_MASK) {
350 atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
351 smp_mb__after_atomic();
352
353 rcu_eqs_special_exit();
354 }
355}
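/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 */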
367static void rcu_dynticks_eqs_online(void)
368{
369 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
370
371 if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
372 return;
373 atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
374}
375
376
377
378
379
380
381bool rcu_dynticks_curr_cpu_in_eqs(void)
382{
383 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
384
385 return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
386}
387
388
389
390
391
392int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
393{
394 int snap = atomic_add_return(0, &rdtp->dynticks);
395
396 return snap & ~RCU_DYNTICK_CTRL_MASK;
397}
398
399
400
401
402
403static bool rcu_dynticks_in_eqs(int snap)
404{
405 return !(snap & RCU_DYNTICK_CTRL_CTR);
406}
407
408
409
410
411
412
413static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
414{
415 return snap != rcu_dynticks_snap(rdtp);
416}
417
418
419
420
421
422static void rcu_dynticks_momentary_idle(void)
423{
424 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
425 int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
426 &rdtp->dynticks);
427
428
429 WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
430}
431
432
433
434
435
436
437
438
439bool rcu_eqs_special_set(int cpu)
440{
441 int old;
442 int new;
443 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
444
445 do {
446 old = atomic_read(&rdtp->dynticks);
447 if (old & RCU_DYNTICK_CTRL_CTR)
448 return false;
449 new = old | RCU_DYNTICK_CTRL_MASK;
450 } while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
451 return true;
452}
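/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of
 * what this CPU might (or might not) do in the near future.
 *
 * The caller must have disabled interrupts.
 */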
465static void rcu_momentary_dyntick_idle(void)
466{
467 raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
468 rcu_dynticks_momentary_idle();
469}
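/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled interrupts.
 */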
476void rcu_note_context_switch(bool preempt)
477{
478 barrier();
479 trace_rcu_utilization(TPS("Start context switch"));
480 rcu_sched_qs();
481 rcu_preempt_note_context_switch();
482
483 if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
484 goto out;
485 this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
486 if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
487 rcu_momentary_dyntick_idle();
488 this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
489 if (!preempt)
490 rcu_note_voluntary_context_switch_lite(current);
491out:
492 trace_rcu_utilization(TPS("End context switch"));
493 barrier();
494}
495EXPORT_SYMBOL_GPL(rcu_note_context_switch);
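/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs; either way, do a
 * lightweight quiescent state for all RCU flavors.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 */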
510void rcu_all_qs(void)
511{
512 unsigned long flags;
513
514 if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
515 return;
516 preempt_disable();
517
518 if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
519 preempt_enable();
520 return;
521 }
522 this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
523 barrier();
524 if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
525 local_irq_save(flags);
526 rcu_momentary_dyntick_idle();
527 local_irq_restore(flags);
528 }
529 if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
530 rcu_sched_qs();
531 this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
532 barrier();
533 preempt_enable();
534}
535EXPORT_SYMBOL_GPL(rcu_all_qs);
536
537static long blimit = 10;
538static long qhimark = 10000;
539static long qlowmark = 100;
540
541module_param(blimit, long, 0444);
542module_param(qhimark, long, 0444);
543module_param(qlowmark, long, 0444);
544
545static ulong jiffies_till_first_fqs = ULONG_MAX;
546static ulong jiffies_till_next_fqs = ULONG_MAX;
547static bool rcu_kick_kthreads;
548
549module_param(jiffies_till_first_fqs, ulong, 0644);
550module_param(jiffies_till_next_fqs, ulong, 0644);
551module_param(rcu_kick_kthreads, bool, 0644);
552
553
554
555
556
557static ulong jiffies_till_sched_qs = HZ / 20;
558module_param(jiffies_till_sched_qs, ulong, 0644);
559
560static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
561 struct rcu_data *rdp);
562static void force_qs_rnp(struct rcu_state *rsp,
563 int (*f)(struct rcu_data *rsp, bool *isidle,
564 unsigned long *maxj),
565 bool *isidle, unsigned long *maxj);
566static void force_quiescent_state(struct rcu_state *rsp);
567static int rcu_pending(void);
568
569
570
571
572unsigned long rcu_batches_started(void)
573{
574 return rcu_state_p->gpnum;
575}
576EXPORT_SYMBOL_GPL(rcu_batches_started);
577
578
579
580
581unsigned long rcu_batches_started_sched(void)
582{
583 return rcu_sched_state.gpnum;
584}
585EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
586
587
588
589
590unsigned long rcu_batches_started_bh(void)
591{
592 return rcu_bh_state.gpnum;
593}
594EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
595
596
597
598
599unsigned long rcu_batches_completed(void)
600{
601 return rcu_state_p->completed;
602}
603EXPORT_SYMBOL_GPL(rcu_batches_completed);
604
605
606
607
608unsigned long rcu_batches_completed_sched(void)
609{
610 return rcu_sched_state.completed;
611}
612EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
613
614
615
616
617unsigned long rcu_batches_completed_bh(void)
618{
619 return rcu_bh_state.completed;
620}
621EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
622
623
624
625
626
627
628
629unsigned long rcu_exp_batches_completed(void)
630{
631 return rcu_state_p->expedited_sequence;
632}
633EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
634
635
636
637
638
639unsigned long rcu_exp_batches_completed_sched(void)
640{
641 return rcu_sched_state.expedited_sequence;
642}
643EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
644
645
646
647
648void rcu_force_quiescent_state(void)
649{
650 force_quiescent_state(rcu_state_p);
651}
652EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
653
654
655
656
657void rcu_bh_force_quiescent_state(void)
658{
659 force_quiescent_state(&rcu_bh_state);
660}
661EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
662
663
664
665
666void rcu_sched_force_quiescent_state(void)
667{
668 force_quiescent_state(&rcu_sched_state);
669}
670EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
671
672
673
674
675void show_rcu_gp_kthreads(void)
676{
677 struct rcu_state *rsp;
678
679 for_each_rcu_flavor(rsp) {
680 pr_info("%s: wait state: %d ->state: %#lx\n",
681 rsp->name, rsp->gp_state, rsp->gp_kthread->state);
682
683 }
684}
685EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
686
687
688
689
690
691
692
693
694void rcutorture_record_test_transition(void)
695{
696 rcutorture_testseq++;
697 rcutorture_vernum = 0;
698}
699EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
700
701
702
703
704void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
705 unsigned long *gpnum, unsigned long *completed)
706{
707 struct rcu_state *rsp = NULL;
708
709 switch (test_type) {
710 case RCU_FLAVOR:
711 rsp = rcu_state_p;
712 break;
713 case RCU_BH_FLAVOR:
714 rsp = &rcu_bh_state;
715 break;
716 case RCU_SCHED_FLAVOR:
717 rsp = &rcu_sched_state;
718 break;
719 default:
720 break;
721 }
722 if (rsp == NULL)
723 return;
724 *flags = READ_ONCE(rsp->gp_flags);
725 *gpnum = READ_ONCE(rsp->gpnum);
726 *completed = READ_ONCE(rsp->completed);
727}
728EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
729
730
731
732
733
734
735void rcutorture_record_progress(unsigned long vernum)
736{
737 rcutorture_vernum++;
738}
739EXPORT_SYMBOL_GPL(rcutorture_record_progress);
740
741
742
743
744static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
745{
746 return &rsp->node[0];
747}
748
749
750
751
752
753
754static int rcu_future_needs_gp(struct rcu_state *rsp)
755{
756 struct rcu_node *rnp = rcu_get_root(rsp);
757 int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
758 int *fp = &rnp->need_future_gp[idx];
759
760 return READ_ONCE(*fp);
761}
762
763
764
765
766
767
768static bool
769cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
770{
771 if (rcu_gp_in_progress(rsp))
772 return false;
773 if (rcu_future_needs_gp(rsp))
774 return true;
775 if (!rcu_segcblist_is_enabled(&rdp->cblist))
776 return false;
777 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
778 return true;
779 if (rcu_segcblist_future_gp_needed(&rdp->cblist,
780 READ_ONCE(rsp->completed)))
781 return true;
782 return false;
783}
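/*
 * rcu_eqs_enter_common - current CPU is entering an extended quiescent state
 *
 * Enter idle, doing appropriate accounting.  The caller must have
 * disabled interrupts.
 */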
791static void rcu_eqs_enter_common(bool user)
792{
793 struct rcu_state *rsp;
794 struct rcu_data *rdp;
795 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
796
797 trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
798 if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
799 !user && !is_idle_task(current)) {
800 struct task_struct *idle __maybe_unused =
801 idle_task(smp_processor_id());
802
803 trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
804 rcu_ftrace_dump(DUMP_ORIG);
805 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
806 current->pid, current->comm,
807 idle->pid, idle->comm);
808 }
809 for_each_rcu_flavor(rsp) {
810 rdp = this_cpu_ptr(rsp->rda);
811 do_nocb_deferred_wakeup(rdp);
812 }
813 rcu_prepare_for_idle();
814 __this_cpu_inc(disable_rcu_irq_enter);
815 rdtp->dynticks_nesting = 0;
816 rcu_dynticks_eqs_enter();
817 __this_cpu_dec(disable_rcu_irq_enter);
818 rcu_dynticks_task_enter();
819
820
821
822
823
824 RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
825 "Illegal idle entry in RCU read-side critical section.");
826 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
827 "Illegal idle entry in RCU-bh read-side critical section.");
828 RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
829 "Illegal idle entry in RCU-sched read-side critical section.");
830}
831
832
833
834
835
836static void rcu_eqs_enter(bool user)
837{
838 struct rcu_dynticks *rdtp;
839
840 rdtp = this_cpu_ptr(&rcu_dynticks);
841 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
842 (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
843 if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
844 rcu_eqs_enter_common(user);
845 else
846 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
847}
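/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 */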
861void rcu_idle_enter(void)
862{
863 unsigned long flags;
864
865 local_irq_save(flags);
866 rcu_eqs_enter(false);
867 rcu_sysidle_enter(0);
868 local_irq_restore(flags);
869}
870EXPORT_SYMBOL_GPL(rcu_idle_enter);
871
872#ifdef CONFIG_NO_HZ_FULL
873
874
875
876
877
878
879
880
881void rcu_user_enter(void)
882{
883 rcu_eqs_enter(1);
884}
885#endif
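/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 */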
903void rcu_irq_exit(void)
904{
905 struct rcu_dynticks *rdtp;
906
907 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
908 rdtp = this_cpu_ptr(&rcu_dynticks);
909 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
910 rdtp->dynticks_nesting < 1);
911 if (rdtp->dynticks_nesting <= 1) {
912 rcu_eqs_enter_common(true);
913 } else {
914 trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
915 rdtp->dynticks_nesting--;
916 }
917 rcu_sysidle_enter(1);
918}
919
920
921
922
923void rcu_irq_exit_irqson(void)
924{
925 unsigned long flags;
926
927 local_irq_save(flags);
928 rcu_irq_exit();
929 local_irq_restore(flags);
930}
931
932
933
934
935
936
937
938
939static void rcu_eqs_exit_common(long long oldval, int user)
940{
941 RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
942
943 rcu_dynticks_task_exit();
944 rcu_dynticks_eqs_exit();
945 rcu_cleanup_after_idle();
946 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
947 if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
948 !user && !is_idle_task(current)) {
949 struct task_struct *idle __maybe_unused =
950 idle_task(smp_processor_id());
951
952 trace_rcu_dyntick(TPS("Error on exit: not idle task"),
953 oldval, rdtp->dynticks_nesting);
954 rcu_ftrace_dump(DUMP_ORIG);
955 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
956 current->pid, current->comm,
957 idle->pid, idle->comm);
958 }
959}
960
961
962
963
964
965static void rcu_eqs_exit(bool user)
966{
967 struct rcu_dynticks *rdtp;
968 long long oldval;
969
970 rdtp = this_cpu_ptr(&rcu_dynticks);
971 oldval = rdtp->dynticks_nesting;
972 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
973 if (oldval & DYNTICK_TASK_NEST_MASK) {
974 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
975 } else {
976 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
977 rcu_eqs_exit_common(oldval, user);
978 }
979}
980
981
982
983
984
985
986
987
988
989
990
991
992void rcu_idle_exit(void)
993{
994 unsigned long flags;
995
996 local_irq_save(flags);
997 rcu_eqs_exit(false);
998 rcu_sysidle_exit(0);
999 local_irq_restore(flags);
1000}
1001EXPORT_SYMBOL_GPL(rcu_idle_exit);
1002
1003#ifdef CONFIG_NO_HZ_FULL
1004
1005
1006
1007
1008
1009
1010void rcu_user_exit(void)
1011{
1012 rcu_eqs_exit(1);
1013}
1014#endif
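/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 */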
1035void rcu_irq_enter(void)
1036{
1037 struct rcu_dynticks *rdtp;
1038 long long oldval;
1039
1040 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
1041 rdtp = this_cpu_ptr(&rcu_dynticks);
1042 oldval = rdtp->dynticks_nesting;
1043 rdtp->dynticks_nesting++;
1044 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
1045 rdtp->dynticks_nesting == 0);
1046 if (oldval)
1047 trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
1048 else
1049 rcu_eqs_exit_common(oldval, true);
1050 rcu_sysidle_exit(1);
1051}
1052
1053
1054
1055
1056void rcu_irq_enter_irqson(void)
1057{
1058 unsigned long flags;
1059
1060 local_irq_save(flags);
1061 rcu_irq_enter();
1062 local_irq_restore(flags);
1063}
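/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.
 */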
1074void rcu_nmi_enter(void)
1075{
1076 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1077 int incby = 2;
1078
1079
1080 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090 if (rcu_dynticks_curr_cpu_in_eqs()) {
1091 rcu_dynticks_eqs_exit();
1092 incby = 1;
1093 }
1094 rdtp->dynticks_nmi_nesting += incby;
1095 barrier();
1096}
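/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 */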
1106void rcu_nmi_exit(void)
1107{
1108 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1109
1110
1111
1112
1113
1114
1115 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
1116 WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
1117
1118
1119
1120
1121
1122 if (rdtp->dynticks_nmi_nesting != 1) {
1123 rdtp->dynticks_nmi_nesting -= 2;
1124 return;
1125 }
1126
1127
1128 rdtp->dynticks_nmi_nesting = 0;
1129 rcu_dynticks_eqs_enter();
1130}
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140bool notrace __rcu_is_watching(void)
1141{
1142 return !rcu_dynticks_curr_cpu_in_eqs();
1143}
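/**
 * rcu_is_watching - see if RCU thinks that the current CPU is idle
 *
 * If the current CPU is in its idle loop and is neither in an interrupt
 * nor in an NMI handler, return true.
 */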
1151bool notrace rcu_is_watching(void)
1152{
1153 bool ret;
1154
1155 preempt_disable_notrace();
1156 ret = __rcu_is_watching();
1157 preempt_enable_notrace();
1158 return ret;
1159}
1160EXPORT_SYMBOL_GPL(rcu_is_watching);
1161
1162
1163
1164
1165
1166
1167
1168
1169void rcu_request_urgent_qs_task(struct task_struct *t)
1170{
1171 int cpu;
1172
1173 barrier();
1174 cpu = task_cpu(t);
1175 if (!task_curr(t))
1176 return;
1177 smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
1178}
1179
1180#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
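/*
 * Is the current CPU online?  Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
 * the check for rcu_scheduler_fully_active.
 */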
1203bool rcu_lockdep_current_cpu_online(void)
1204{
1205 struct rcu_data *rdp;
1206 struct rcu_node *rnp;
1207 bool ret;
1208
1209 if (in_nmi())
1210 return true;
1211 preempt_disable();
1212 rdp = this_cpu_ptr(&rcu_sched_data);
1213 rnp = rdp->mynode;
1214 ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
1215 !rcu_scheduler_fully_active;
1216 preempt_enable();
1217 return ret;
1218}
1219EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1220
1221#endif
1222
1223
1224
1225
1226
1227
1228
1229
1230static int rcu_is_cpu_rrupt_from_idle(void)
1231{
1232 return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
1233}
1234
1235
1236
1237
1238
1239
1240static int dyntick_save_progress_counter(struct rcu_data *rdp,
1241 bool *isidle, unsigned long *maxj)
1242{
1243 rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
1244 rcu_sysidle_check_cpu(rdp, isidle, maxj);
1245 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1246 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1247 if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
1248 rdp->mynode->gpnum))
1249 WRITE_ONCE(rdp->gpwrap, true);
1250 return 1;
1251 }
1252 return 0;
1253}
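/*
 * Return a positive value if the specified CPU has passed through a
 * quiescent state by virtue of being in or having passed through a
 * dynticks-idle state since the last call to
 * dyntick_save_progress_counter() for this same CPU, or by virtue of
 * having been offline.
 */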
1261static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
1262 bool *isidle, unsigned long *maxj)
1263{
1264 unsigned long jtsq;
1265 bool *rnhqp;
1266 bool *ruqp;
1267 unsigned long rjtsc;
1268 struct rcu_node *rnp;
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278 if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
1279 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1280 rdp->dynticks_fqs++;
1281 return 1;
1282 }
1283
1284
1285 jtsq = jiffies_till_sched_qs;
1286 rjtsc = rcu_jiffies_till_stall_check();
1287 if (jtsq > rjtsc / 2) {
1288 WRITE_ONCE(jiffies_till_sched_qs, rjtsc);
1289 jtsq = rjtsc / 2;
1290 } else if (jtsq < 1) {
1291 WRITE_ONCE(jiffies_till_sched_qs, 1);
1292 jtsq = 1;
1293 }
1294
1295
1296
1297
1298
1299
1300
1301 rnp = rdp->mynode;
1302 ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
1303 if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
1304 READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
1305 READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
1306 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
1307 return 1;
1308 } else {
1309
1310 smp_store_release(ruqp, true);
1311 }
1312
1313
1314 if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
1315 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
1316 rdp->offline_fqs++;
1317 return 1;
1318 }
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341 rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
1342 if (!READ_ONCE(*rnhqp) &&
1343 (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
1344 time_after(jiffies, rdp->rsp->jiffies_resched))) {
1345 WRITE_ONCE(*rnhqp, true);
1346
1347 smp_store_release(ruqp, true);
1348 rdp->rsp->jiffies_resched += 5;
1349 }
1350
1351
1352
1353
1354
1355 if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2)
1356 resched_cpu(rdp->cpu);
1357
1358 return 0;
1359}
1360
1361static void record_gp_stall_check_time(struct rcu_state *rsp)
1362{
1363 unsigned long j = jiffies;
1364 unsigned long j1;
1365
1366 rsp->gp_start = j;
1367 smp_wmb();
1368 j1 = rcu_jiffies_till_stall_check();
1369 WRITE_ONCE(rsp->jiffies_stall, j + j1);
1370 rsp->jiffies_resched = j + j1 / 2;
1371 rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
1372}
1373
1374
1375
1376
1377static const char *gp_state_getname(short gs)
1378{
1379 if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
1380 return "???";
1381 return gp_state_names[gs];
1382}
1383
1384
1385
1386
1387static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
1388{
1389 unsigned long gpa;
1390 unsigned long j;
1391
1392 j = jiffies;
1393 gpa = READ_ONCE(rsp->gp_activity);
1394 if (j - gpa > 2 * HZ) {
1395 pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx\n",
1396 rsp->name, j - gpa,
1397 rsp->gpnum, rsp->completed,
1398 rsp->gp_flags,
1399 gp_state_getname(rsp->gp_state), rsp->gp_state,
1400 rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
1401 if (rsp->gp_kthread) {
1402 sched_show_task(rsp->gp_kthread);
1403 wake_up_process(rsp->gp_kthread);
1404 }
1405 }
1406}
1407
1408
1409
1410
1411
1412
1413
1414static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
1415{
1416 int cpu;
1417 unsigned long flags;
1418 struct rcu_node *rnp;
1419
1420 rcu_for_each_leaf_node(rsp, rnp) {
1421 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1422 for_each_leaf_node_possible_cpu(rnp, cpu)
1423 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
1424 if (!trigger_single_cpu_backtrace(cpu))
1425 dump_cpu_task(cpu);
1426 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1427 }
1428}
1429
1430
1431
1432
1433
1434static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
1435{
1436 unsigned long j;
1437
1438 if (!rcu_kick_kthreads)
1439 return;
1440 j = READ_ONCE(rsp->jiffies_kick_kthreads);
1441 if (time_after(jiffies, j) && rsp->gp_kthread &&
1442 (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
1443 WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
1444 rcu_ftrace_dump(DUMP_ALL);
1445 wake_up_process(rsp->gp_kthread);
1446 WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
1447 }
1448}
1449
1450static inline void panic_on_rcu_stall(void)
1451{
1452 if (sysctl_panic_on_rcu_stall)
1453 panic("RCU Stall\n");
1454}
1455
1456static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
1457{
1458 int cpu;
1459 long delta;
1460 unsigned long flags;
1461 unsigned long gpa;
1462 unsigned long j;
1463 int ndetected = 0;
1464 struct rcu_node *rnp = rcu_get_root(rsp);
1465 long totqlen = 0;
1466
1467
1468 rcu_stall_kick_kthreads(rsp);
1469 if (rcu_cpu_stall_suppress)
1470 return;
1471
1472
1473
1474 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1475 delta = jiffies - READ_ONCE(rsp->jiffies_stall);
1476 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
1477 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1478 return;
1479 }
1480 WRITE_ONCE(rsp->jiffies_stall,
1481 jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
1482 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1483
1484
1485
1486
1487
1488
1489 pr_err("INFO: %s detected stalls on CPUs/tasks:",
1490 rsp->name);
1491 print_cpu_stall_info_begin();
1492 rcu_for_each_leaf_node(rsp, rnp) {
1493 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1494 ndetected += rcu_print_task_stall(rnp);
1495 if (rnp->qsmask != 0) {
1496 for_each_leaf_node_possible_cpu(rnp, cpu)
1497 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
1498 print_cpu_stall_info(rsp, cpu);
1499 ndetected++;
1500 }
1501 }
1502 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1503 }
1504
1505 print_cpu_stall_info_end();
1506 for_each_possible_cpu(cpu)
1507 totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
1508 cpu)->cblist);
1509 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1510 smp_processor_id(), (long)(jiffies - rsp->gp_start),
1511 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1512 if (ndetected) {
1513 rcu_dump_cpu_stacks(rsp);
1514
1515
1516 rcu_print_detail_task_stall(rsp);
1517 } else {
1518 if (READ_ONCE(rsp->gpnum) != gpnum ||
1519 READ_ONCE(rsp->completed) == gpnum) {
1520 pr_err("INFO: Stall ended before state dump start\n");
1521 } else {
1522 j = jiffies;
1523 gpa = READ_ONCE(rsp->gp_activity);
1524 pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
1525 rsp->name, j - gpa, j, gpa,
1526 jiffies_till_next_fqs,
1527 rcu_get_root(rsp)->qsmask);
1528
1529 sched_show_task(current);
1530 }
1531 }
1532
1533 rcu_check_gp_kthread_starvation(rsp);
1534
1535 panic_on_rcu_stall();
1536
1537 force_quiescent_state(rsp);
1538}
1539
1540static void print_cpu_stall(struct rcu_state *rsp)
1541{
1542 int cpu;
1543 unsigned long flags;
1544 struct rcu_node *rnp = rcu_get_root(rsp);
1545 long totqlen = 0;
1546
1547
1548 rcu_stall_kick_kthreads(rsp);
1549 if (rcu_cpu_stall_suppress)
1550 return;
1551
1552
1553
1554
1555
1556
1557 pr_err("INFO: %s self-detected stall on CPU", rsp->name);
1558 print_cpu_stall_info_begin();
1559 print_cpu_stall_info(rsp, smp_processor_id());
1560 print_cpu_stall_info_end();
1561 for_each_possible_cpu(cpu)
1562 totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
1563 cpu)->cblist);
1564 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1565 jiffies - rsp->gp_start,
1566 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1567
1568 rcu_check_gp_kthread_starvation(rsp);
1569
1570 rcu_dump_cpu_stacks(rsp);
1571
1572 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1573 if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
1574 WRITE_ONCE(rsp->jiffies_stall,
1575 jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
1576 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1577
1578 panic_on_rcu_stall();
1579
1580
1581
1582
1583
1584
1585
1586
1587 resched_cpu(smp_processor_id());
1588}
1589
1590static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1591{
1592 unsigned long completed;
1593 unsigned long gpnum;
1594 unsigned long gps;
1595 unsigned long j;
1596 unsigned long js;
1597 struct rcu_node *rnp;
1598
1599 if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
1600 !rcu_gp_in_progress(rsp))
1601 return;
1602 rcu_stall_kick_kthreads(rsp);
1603 j = jiffies;
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622 gpnum = READ_ONCE(rsp->gpnum);
1623 smp_rmb();
1624 js = READ_ONCE(rsp->jiffies_stall);
1625 smp_rmb();
1626 gps = READ_ONCE(rsp->gp_start);
1627 smp_rmb();
1628 completed = READ_ONCE(rsp->completed);
1629 if (ULONG_CMP_GE(completed, gpnum) ||
1630 ULONG_CMP_LT(j, js) ||
1631 ULONG_CMP_GE(gps, js))
1632 return;
1633 rnp = rdp->mynode;
1634 if (rcu_gp_in_progress(rsp) &&
1635 (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
1636
1637
1638 print_cpu_stall(rsp);
1639
1640 } else if (rcu_gp_in_progress(rsp) &&
1641 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1642
1643
1644 print_other_cpu_stall(rsp, gpnum);
1645 }
1646}
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657void rcu_cpu_stall_reset(void)
1658{
1659 struct rcu_state *rsp;
1660
1661 for_each_rcu_flavor(rsp)
1662 WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
1663}
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1675 struct rcu_node *rnp)
1676{
1677
1678
1679
1680
1681
1682
1683
1684 if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1685 return rnp->completed + 1;
1686
1687
1688
1689
1690
1691 return rnp->completed + 2;
1692}
1693
1694
1695
1696
1697
1698static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1699 unsigned long c, const char *s)
1700{
1701 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1702 rnp->completed, c, rnp->level,
1703 rnp->grplo, rnp->grphi, s);
1704}
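/*
 * Start some future grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->need_future_gp field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock.
 */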
1714static bool __maybe_unused
1715rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1716 unsigned long *c_out)
1717{
1718 unsigned long c;
1719 bool ret = false;
1720 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1721
1722
1723
1724
1725
1726 c = rcu_cbs_completed(rdp->rsp, rnp);
1727 trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
1728 if (rnp->need_future_gp[c & 0x1]) {
1729 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
1730 goto out;
1731 }
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746 if (rnp->gpnum != rnp->completed ||
1747 READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
1748 rnp->need_future_gp[c & 0x1]++;
1749 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
1750 goto out;
1751 }
1752
1753
1754
1755
1756
1757
1758 if (rnp != rnp_root)
1759 raw_spin_lock_rcu_node(rnp_root);
1760
1761
1762
1763
1764
1765
1766 c = rcu_cbs_completed(rdp->rsp, rnp_root);
1767 if (!rcu_is_nocb_cpu(rdp->cpu))
1768 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1769
1770
1771
1772
1773
1774 if (rnp_root->need_future_gp[c & 0x1]) {
1775 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
1776 goto unlock_out;
1777 }
1778
1779
1780 rnp_root->need_future_gp[c & 0x1]++;
1781
1782
1783 if (rnp_root->gpnum != rnp_root->completed) {
1784 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
1785 } else {
1786 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
1787 ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
1788 }
1789unlock_out:
1790 if (rnp != rnp_root)
1791 raw_spin_unlock_rcu_node(rnp_root);
1792out:
1793 if (c_out != NULL)
1794 *c_out = c;
1795 return ret;
1796}
1797
1798
1799
1800
1801
1802static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1803{
1804 int c = rnp->completed;
1805 int needmore;
1806 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1807
1808 rnp->need_future_gp[c & 0x1] = 0;
1809 needmore = rnp->need_future_gp[(c + 1) & 0x1];
1810 trace_rcu_future_gp(rnp, rdp, c,
1811 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1812 return needmore;
1813}
1814
1815
1816
1817
1818
1819
1820
1821
1822static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1823{
1824 if (current == rsp->gp_kthread ||
1825 !READ_ONCE(rsp->gp_flags) ||
1826 !rsp->gp_kthread)
1827 return;
1828 swake_up(&rsp->gp_wq);
1829}
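/*
 * If there is room, assign a ->completed number to any callbacks on
 * this CPU that have not already been assigned.  Also accelerate any
 * callbacks that were previously assigned a ->completed number that has
 * since proven to be too conservative.  This function is idempotent, so
 * it does not hurt to call it repeatedly.  Returns a flag saying that
 * the grace-period kthread should be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */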
1843static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1844 struct rcu_data *rdp)
1845{
1846 bool ret = false;
1847
1848
1849 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1850 return false;
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862 if (rcu_segcblist_accelerate(&rdp->cblist, rcu_cbs_completed(rsp, rnp)))
1863 ret = rcu_start_future_gp(rnp, rdp, NULL);
1864
1865
1866 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1867 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1868 else
1869 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1870 return ret;
1871}
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1884 struct rcu_data *rdp)
1885{
1886
1887 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1888 return false;
1889
1890
1891
1892
1893
1894 rcu_segcblist_advance(&rdp->cblist, rnp->completed);
1895
1896
1897 return rcu_accelerate_cbs(rsp, rnp, rdp);
1898}
1899
1900
1901
1902
1903
1904
1905
1906static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1907 struct rcu_data *rdp)
1908{
1909 bool ret;
1910 bool need_gp;
1911
1912
1913 if (rdp->completed == rnp->completed &&
1914 !unlikely(READ_ONCE(rdp->gpwrap))) {
1915
1916
1917 ret = rcu_accelerate_cbs(rsp, rnp, rdp);
1918
1919 } else {
1920
1921
1922 ret = rcu_advance_cbs(rsp, rnp, rdp);
1923
1924
1925 rdp->completed = rnp->completed;
1926 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1927 }
1928
1929 if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
1930
1931
1932
1933
1934
1935 rdp->gpnum = rnp->gpnum;
1936 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1937 need_gp = !!(rnp->qsmask & rdp->grpmask);
1938 rdp->cpu_no_qs.b.norm = need_gp;
1939 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
1940 rdp->core_needs_qs = need_gp;
1941 zero_cpu_stall_ticks(rdp);
1942 WRITE_ONCE(rdp->gpwrap, false);
1943 }
1944 return ret;
1945}
1946
1947static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1948{
1949 unsigned long flags;
1950 bool needwake;
1951 struct rcu_node *rnp;
1952
1953 local_irq_save(flags);
1954 rnp = rdp->mynode;
1955 if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
1956 rdp->completed == READ_ONCE(rnp->completed) &&
1957 !unlikely(READ_ONCE(rdp->gpwrap))) ||
1958 !raw_spin_trylock_rcu_node(rnp)) {
1959 local_irq_restore(flags);
1960 return;
1961 }
1962 needwake = __note_gp_changes(rsp, rnp, rdp);
1963 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1964 if (needwake)
1965 rcu_gp_kthread_wake(rsp);
1966}
1967
1968static void rcu_gp_slow(struct rcu_state *rsp, int delay)
1969{
1970 if (delay > 0 &&
1971 !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1972 schedule_timeout_uninterruptible(delay);
1973}
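/*
 * Initialize a new grace period.  Return false if no grace period required.
 */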
1978static bool rcu_gp_init(struct rcu_state *rsp)
1979{
1980 unsigned long oldmask;
1981 struct rcu_data *rdp;
1982 struct rcu_node *rnp = rcu_get_root(rsp);
1983
1984 WRITE_ONCE(rsp->gp_activity, jiffies);
1985 raw_spin_lock_irq_rcu_node(rnp);
1986 if (!READ_ONCE(rsp->gp_flags)) {
1987
1988 raw_spin_unlock_irq_rcu_node(rnp);
1989 return false;
1990 }
1991 WRITE_ONCE(rsp->gp_flags, 0);
1992
1993 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1994
1995
1996
1997
1998 raw_spin_unlock_irq_rcu_node(rnp);
1999 return false;
2000 }
2001
2002
2003 record_gp_stall_check_time(rsp);
2004
2005 smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
2006 trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
2007 raw_spin_unlock_irq_rcu_node(rnp);
2008
2009
2010
2011
2012
2013
2014
2015 rcu_for_each_leaf_node(rsp, rnp) {
2016 rcu_gp_slow(rsp, gp_preinit_delay);
2017 raw_spin_lock_irq_rcu_node(rnp);
2018 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
2019 !rnp->wait_blkd_tasks) {
2020
2021 raw_spin_unlock_irq_rcu_node(rnp);
2022 continue;
2023 }
2024
2025
2026 oldmask = rnp->qsmaskinit;
2027 rnp->qsmaskinit = rnp->qsmaskinitnext;
2028
2029
2030 if (!oldmask != !rnp->qsmaskinit) {
2031 if (!oldmask)
2032 rcu_init_new_rnp(rnp);
2033 else if (rcu_preempt_has_tasks(rnp))
2034 rnp->wait_blkd_tasks = true;
2035 else
2036 rcu_cleanup_dead_rnp(rnp);
2037 }
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048 if (rnp->wait_blkd_tasks &&
2049 (!rcu_preempt_has_tasks(rnp) ||
2050 rnp->qsmaskinit)) {
2051 rnp->wait_blkd_tasks = false;
2052 rcu_cleanup_dead_rnp(rnp);
2053 }
2054
2055 raw_spin_unlock_irq_rcu_node(rnp);
2056 }
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070 rcu_for_each_node_breadth_first(rsp, rnp) {
2071 rcu_gp_slow(rsp, gp_init_delay);
2072 raw_spin_lock_irq_rcu_node(rnp);
2073 rdp = this_cpu_ptr(rsp->rda);
2074 rcu_preempt_check_blocked_tasks(rnp);
2075 rnp->qsmask = rnp->qsmaskinit;
2076 WRITE_ONCE(rnp->gpnum, rsp->gpnum);
2077 if (WARN_ON_ONCE(rnp->completed != rsp->completed))
2078 WRITE_ONCE(rnp->completed, rsp->completed);
2079 if (rnp == rdp->mynode)
2080 (void)__note_gp_changes(rsp, rnp, rdp);
2081 rcu_preempt_boost_start_gp(rnp);
2082 trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
2083 rnp->level, rnp->grplo,
2084 rnp->grphi, rnp->qsmask);
2085 raw_spin_unlock_irq_rcu_node(rnp);
2086 cond_resched_rcu_qs();
2087 WRITE_ONCE(rsp->gp_activity, jiffies);
2088 }
2089
2090 return true;
2091}
2092
2093
2094
2095
2096
2097static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
2098{
2099 struct rcu_node *rnp = rcu_get_root(rsp);
2100
2101
2102 *gfp = READ_ONCE(rsp->gp_flags);
2103 if (*gfp & RCU_GP_FLAG_FQS)
2104 return true;
2105
2106
2107 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
2108 return true;
2109
2110 return false;
2111}
2112
2113
2114
2115
2116static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
2117{
2118 bool isidle = false;
2119 unsigned long maxj;
2120 struct rcu_node *rnp = rcu_get_root(rsp);
2121
2122 WRITE_ONCE(rsp->gp_activity, jiffies);
2123 rsp->n_force_qs++;
2124 if (first_time) {
2125
2126 if (is_sysidle_rcu_state(rsp)) {
2127 isidle = true;
2128 maxj = jiffies - ULONG_MAX / 4;
2129 }
2130 force_qs_rnp(rsp, dyntick_save_progress_counter,
2131 &isidle, &maxj);
2132 rcu_sysidle_report_gp(rsp, isidle, maxj);
2133 } else {
2134
2135 isidle = true;
2136 force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
2137 }
2138
2139 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2140 raw_spin_lock_irq_rcu_node(rnp);
2141 WRITE_ONCE(rsp->gp_flags,
2142 READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
2143 raw_spin_unlock_irq_rcu_node(rnp);
2144 }
2145}
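/*
 * Clean up after the old grace period.
 */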
2150static void rcu_gp_cleanup(struct rcu_state *rsp)
2151{
2152 unsigned long gp_duration;
2153 bool needgp = false;
2154 int nocb = 0;
2155 struct rcu_data *rdp;
2156 struct rcu_node *rnp = rcu_get_root(rsp);
2157 struct swait_queue_head *sq;
2158
2159 WRITE_ONCE(rsp->gp_activity, jiffies);
2160 raw_spin_lock_irq_rcu_node(rnp);
2161 gp_duration = jiffies - rsp->gp_start;
2162 if (gp_duration > rsp->gp_max)
2163 rsp->gp_max = gp_duration;
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173 raw_spin_unlock_irq_rcu_node(rnp);
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184 rcu_for_each_node_breadth_first(rsp, rnp) {
2185 raw_spin_lock_irq_rcu_node(rnp);
2186 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
2187 WARN_ON_ONCE(rnp->qsmask);
2188 WRITE_ONCE(rnp->completed, rsp->gpnum);
2189 rdp = this_cpu_ptr(rsp->rda);
2190 if (rnp == rdp->mynode)
2191 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
2192
2193 nocb += rcu_future_gp_cleanup(rsp, rnp);
2194 sq = rcu_nocb_gp_get(rnp);
2195 raw_spin_unlock_irq_rcu_node(rnp);
2196 rcu_nocb_gp_cleanup(sq);
2197 cond_resched_rcu_qs();
2198 WRITE_ONCE(rsp->gp_activity, jiffies);
2199 rcu_gp_slow(rsp, gp_cleanup_delay);
2200 }
2201 rnp = rcu_get_root(rsp);
2202 raw_spin_lock_irq_rcu_node(rnp);
2203 rcu_nocb_gp_set(rnp, nocb);
2204
2205
2206 WRITE_ONCE(rsp->completed, rsp->gpnum);
2207 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
2208 rsp->gp_state = RCU_GP_IDLE;
2209 rdp = this_cpu_ptr(rsp->rda);
2210
2211 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
2212 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
2213 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
2214 trace_rcu_grace_period(rsp->name,
2215 READ_ONCE(rsp->gpnum),
2216 TPS("newreq"));
2217 }
2218 raw_spin_unlock_irq_rcu_node(rnp);
2219}
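/*
 * Body of kthread that handles grace periods.
 */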
2224static int __noreturn rcu_gp_kthread(void *arg)
2225{
2226 bool first_gp_fqs;
2227 int gf;
2228 unsigned long j;
2229 int ret;
2230 struct rcu_state *rsp = arg;
2231 struct rcu_node *rnp = rcu_get_root(rsp);
2232
2233 rcu_bind_gp_kthread();
2234 for (;;) {
2235
2236
2237 for (;;) {
2238 trace_rcu_grace_period(rsp->name,
2239 READ_ONCE(rsp->gpnum),
2240 TPS("reqwait"));
2241 rsp->gp_state = RCU_GP_WAIT_GPS;
2242 swait_event_interruptible(rsp->gp_wq,
2243 READ_ONCE(rsp->gp_flags) &
2244 RCU_GP_FLAG_INIT);
2245 rsp->gp_state = RCU_GP_DONE_GPS;
2246
2247 if (rcu_gp_init(rsp))
2248 break;
2249 cond_resched_rcu_qs();
2250 WRITE_ONCE(rsp->gp_activity, jiffies);
2251 WARN_ON(signal_pending(current));
2252 trace_rcu_grace_period(rsp->name,
2253 READ_ONCE(rsp->gpnum),
2254 TPS("reqwaitsig"));
2255 }
2256
2257
2258 first_gp_fqs = true;
2259 j = jiffies_till_first_fqs;
2260 if (j > HZ) {
2261 j = HZ;
2262 jiffies_till_first_fqs = HZ;
2263 }
2264 ret = 0;
2265 for (;;) {
2266 if (!ret) {
2267 rsp->jiffies_force_qs = jiffies + j;
2268 WRITE_ONCE(rsp->jiffies_kick_kthreads,
2269 jiffies + 3 * j);
2270 }
2271 trace_rcu_grace_period(rsp->name,
2272 READ_ONCE(rsp->gpnum),
2273 TPS("fqswait"));
2274 rsp->gp_state = RCU_GP_WAIT_FQS;
2275 ret = swait_event_interruptible_timeout(rsp->gp_wq,
2276 rcu_gp_fqs_check_wake(rsp, &gf), j);
2277 rsp->gp_state = RCU_GP_DOING_FQS;
2278
2279
2280 if (!READ_ONCE(rnp->qsmask) &&
2281 !rcu_preempt_blocked_readers_cgp(rnp))
2282 break;
2283
2284 if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
2285 (gf & RCU_GP_FLAG_FQS)) {
2286 trace_rcu_grace_period(rsp->name,
2287 READ_ONCE(rsp->gpnum),
2288 TPS("fqsstart"));
2289 rcu_gp_fqs(rsp, first_gp_fqs);
2290 first_gp_fqs = false;
2291 trace_rcu_grace_period(rsp->name,
2292 READ_ONCE(rsp->gpnum),
2293 TPS("fqsend"));
2294 cond_resched_rcu_qs();
2295 WRITE_ONCE(rsp->gp_activity, jiffies);
2296 ret = 0;
2297 j = jiffies_till_next_fqs;
2298 if (j > HZ) {
2299 j = HZ;
2300 jiffies_till_next_fqs = HZ;
2301 } else if (j < 1) {
2302 j = 1;
2303 jiffies_till_next_fqs = 1;
2304 }
2305 } else {
2306
2307 cond_resched_rcu_qs();
2308 WRITE_ONCE(rsp->gp_activity, jiffies);
2309 WARN_ON(signal_pending(current));
2310 trace_rcu_grace_period(rsp->name,
2311 READ_ONCE(rsp->gpnum),
2312 TPS("fqswaitsig"));
2313 ret = 1;
2314 j = jiffies;
2315 if (time_after(jiffies, rsp->jiffies_force_qs))
2316 j = 1;
2317 else
2318 j = rsp->jiffies_force_qs - j;
2319 }
2320 }
2321
2322
2323 rsp->gp_state = RCU_GP_CLEANUP;
2324 rcu_gp_cleanup(rsp);
2325 rsp->gp_state = RCU_GP_CLEANED;
2326 }
2327}
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340static bool
2341rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
2342 struct rcu_data *rdp)
2343{
2344 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
2345
2346
2347
2348
2349
2350
2351 return false;
2352 }
2353 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
2354 trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
2355 TPS("newreq"));
2356
2357
2358
2359
2360
2361
2362 return true;
2363}
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374static bool rcu_start_gp(struct rcu_state *rsp)
2375{
2376 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
2377 struct rcu_node *rnp = rcu_get_root(rsp);
2378 bool ret = false;
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
2389 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
2390 return ret;
2391}
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
2403 __releases(rcu_get_root(rsp)->lock)
2404{
2405 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
2406 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2407 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
2408 rcu_gp_kthread_wake(rsp);
2409}
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421static void
2422rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
2423 struct rcu_node *rnp, unsigned long gps, unsigned long flags)
2424 __releases(rnp->lock)
2425{
2426 unsigned long oldmask = 0;
2427 struct rcu_node *rnp_c;
2428
2429
2430 for (;;) {
2431 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
2432
2433
2434
2435
2436
2437 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2438 return;
2439 }
2440 WARN_ON_ONCE(oldmask);
2441 rnp->qsmask &= ~mask;
2442 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
2443 mask, rnp->qsmask, rnp->level,
2444 rnp->grplo, rnp->grphi,
2445 !!rnp->gp_tasks);
2446 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2447
2448
2449 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2450 return;
2451 }
2452 mask = rnp->grpmask;
2453 if (rnp->parent == NULL) {
2454
2455
2456
2457 break;
2458 }
2459 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2460 rnp_c = rnp;
2461 rnp = rnp->parent;
2462 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2463 oldmask = rnp_c->qsmask;
2464 }
2465
2466
2467
2468
2469
2470
2471 rcu_report_qs_rsp(rsp, flags);
2472}
2473
2474
2475
2476
2477
2478
2479
2480
2481static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
2482 struct rcu_node *rnp, unsigned long flags)
2483 __releases(rnp->lock)
2484{
2485 unsigned long gps;
2486 unsigned long mask;
2487 struct rcu_node *rnp_p;
2488
2489 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
2490 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2491 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2492 return;
2493 }
2494
2495 rnp_p = rnp->parent;
2496 if (rnp_p == NULL) {
2497
2498
2499
2500
2501 rcu_report_qs_rsp(rsp, flags);
2502 return;
2503 }
2504
2505
2506 gps = rnp->gpnum;
2507 mask = rnp->grpmask;
2508 raw_spin_unlock_rcu_node(rnp);
2509 raw_spin_lock_rcu_node(rnp_p);
2510 rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
2511}
2512
2513
2514
2515
2516
2517static void
2518rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2519{
2520 unsigned long flags;
2521 unsigned long mask;
2522 bool needwake;
2523 struct rcu_node *rnp;
2524
2525 rnp = rdp->mynode;
2526 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2527 if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
2528 rnp->completed == rnp->gpnum || rdp->gpwrap) {
2529
2530
2531
2532
2533
2534
2535
2536 rdp->cpu_no_qs.b.norm = true;
2537 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
2538 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2539 return;
2540 }
2541 mask = rdp->grpmask;
2542 if ((rnp->qsmask & mask) == 0) {
2543 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2544 } else {
2545 rdp->core_needs_qs = false;
2546
2547
2548
2549
2550
2551 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2552
2553 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2554
2555 if (needwake)
2556 rcu_gp_kthread_wake(rsp);
2557 }
2558}
2559
2560
2561
2562
2563
2564
2565
2566static void
2567rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2568{
2569
2570 note_gp_changes(rsp, rdp);
2571
2572
2573
2574
2575
2576 if (!rdp->core_needs_qs)
2577 return;
2578
2579
2580
2581
2582
2583 if (rdp->cpu_no_qs.b.norm)
2584 return;
2585
2586
2587
2588
2589
2590 rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
2591}
2592
2593
2594
2595
2596
2597
2598static void
2599rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
2600 struct rcu_node *rnp, struct rcu_data *rdp)
2601{
2602
2603 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
2604 return;
2605
2606
2607
2608
2609
2610
2611 rdp->n_cbs_orphaned += rcu_segcblist_n_cbs(&rdp->cblist);
2612 rcu_segcblist_extract_count(&rdp->cblist, &rsp->orphan_done);
2613
2614
2615
2616
2617
2618
2619
2620
2621 rcu_segcblist_extract_pend_cbs(&rdp->cblist, &rsp->orphan_pend);
2622
2623
2624
2625
2626
2627
2628 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rsp->orphan_done);
2629
2630
2631 rcu_segcblist_disable(&rdp->cblist);
2632}
2633
2634
2635
2636
2637
2638static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
2639{
2640 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2641
2642
2643 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2644 rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
2645 return;
2646
2647
2648 rdp->n_cbs_adopted += rsp->orphan_done.len;
2649 if (rsp->orphan_done.len_lazy != rsp->orphan_done.len)
2650 rcu_idle_count_callbacks_posted();
2651 rcu_segcblist_insert_count(&rdp->cblist, &rsp->orphan_done);
2652
2653
2654
2655
2656
2657
2658
2659
2660 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rsp->orphan_done);
2661 WARN_ON_ONCE(rsp->orphan_done.head);
2662 rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rsp->orphan_pend);
2663 WARN_ON_ONCE(rsp->orphan_pend.head);
2664 WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) !=
2665 !rcu_segcblist_n_cbs(&rdp->cblist));
2666}
2667
2668
2669
2670
2671static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2672{
2673 RCU_TRACE(unsigned long mask;)
2674 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
2675 RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
2676
2677 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2678 return;
2679
2680 RCU_TRACE(mask = rdp->grpmask;)
2681 trace_rcu_grace_period(rsp->name,
2682 rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2683 TPS("cpuofl"));
2684}
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2704{
2705 long mask;
2706 struct rcu_node *rnp = rnp_leaf;
2707
2708 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2709 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2710 return;
2711 for (;;) {
2712 mask = rnp->grpmask;
2713 rnp = rnp->parent;
2714 if (!rnp)
2715 break;
2716 raw_spin_lock_rcu_node(rnp);
2717 rnp->qsmaskinit &= ~mask;
2718 rnp->qsmask &= ~mask;
2719 if (rnp->qsmaskinit) {
2720 raw_spin_unlock_rcu_node(rnp);
2721
2722 return;
2723 }
2724 raw_spin_unlock_rcu_node(rnp);
2725 }
2726}
2727
2728
2729
2730
2731
2732
2733
2734
2735static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2736{
2737 unsigned long flags;
2738 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2739 struct rcu_node *rnp = rdp->mynode;
2740
2741 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2742 return;
2743
2744
2745 rcu_boost_kthread_setaffinity(rnp, -1);
2746
2747
2748 raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
2749 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2750 rcu_adopt_orphan_cbs(rsp, flags);
2751 raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
2752
2753 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
2754 !rcu_segcblist_empty(&rdp->cblist),
2755 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
2756 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
2757 rcu_segcblist_first_cb(&rdp->cblist));
2758}
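/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */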
2764static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2765{
2766 unsigned long flags;
2767 struct rcu_head *rhp;
2768 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2769 long bl, count;
2770
2771
2772 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2773 trace_rcu_batch_start(rsp->name,
2774 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2775 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2776 trace_rcu_batch_end(rsp->name, 0,
2777 !rcu_segcblist_empty(&rdp->cblist),
2778 need_resched(), is_idle_task(current),
2779 rcu_is_callbacks_kthread());
2780 return;
2781 }
2782
2783
2784
2785
2786
2787
2788 local_irq_save(flags);
2789 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2790 bl = rdp->blimit;
2791 trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2792 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2793 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2794 local_irq_restore(flags);
2795
2796
2797 rhp = rcu_cblist_dequeue(&rcl);
2798 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2799 debug_rcu_head_unqueue(rhp);
2800 if (__rcu_reclaim(rsp->name, rhp))
2801 rcu_cblist_dequeued_lazy(&rcl);
2802
2803
2804
2805
2806 if (-rcl.len >= bl &&
2807 (need_resched() ||
2808 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2809 break;
2810 }
2811
2812 local_irq_save(flags);
2813 count = -rcl.len;
2814 trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
2815 is_idle_task(current), rcu_is_callbacks_kthread());
2816
2817
2818 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2819 smp_mb(); /* List handling before counting for rcu_barrier(). */
2820 rdp->n_cbs_invoked += count;
2821 rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2822
2823
2824 count = rcu_segcblist_n_cbs(&rdp->cblist);
2825 if (rdp->blimit == LONG_MAX && count <= qlowmark)
2826 rdp->blimit = blimit;
2827
2828
2829 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2830 rdp->qlen_last_fqs_check = 0;
2831 rdp->n_force_qs_snap = rsp->n_force_qs;
2832 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2833 rdp->qlen_last_fqs_check = count;
2834 WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));
2835
2836 local_irq_restore(flags);
2837
2838
2839 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2840 invoke_rcu_core();
2841}
2842
2843
2844
2845
2846
2847
2848
2849
2850
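/*
 * Invoked from the scheduling-clock interrupt.  Check for quiescent
 * states that the tick can observe directly (user execution or the idle
 * loop, and non-softirq execution for RCU-bh), kick off RCU core
 * processing if anything is pending, and keep the stall-warning ticks
 * current.  @user is nonzero if the interrupt arrived from user mode.
 */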
2851void rcu_check_callbacks(int user)
2852{
2853 trace_rcu_utilization(TPS("Start scheduler-tick"));
2854 increment_cpu_stall_ticks();
2855 if (user || rcu_is_cpu_rrupt_from_idle()) {
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
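 /*
  * A CPU interrupted from user mode or from the idle loop cannot be
  * inside an rcu_read_lock_sched() or rcu_read_lock_bh() critical
  * section, so record a quiescent state for both RCU-sched and RCU-bh.
  */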
2869 rcu_sched_qs();
2870 rcu_bh_qs();
2871
2872 } else if (!in_softirq()) {
2873
2874
2875
2876
2877
2878
2879
2880
2881 rcu_bh_qs();
2882 }
2883 rcu_preempt_check_callbacks();
2884 if (rcu_pending())
2885 invoke_rcu_core();
2886 if (user)
2887 rcu_note_voluntary_context_switch(current);
2888 trace_rcu_utilization(TPS("End scheduler-tick"));
2889}
2890
2891
2892
2893
2894
2895
2896
2897
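/*
 * Scan the leaf rcu_node structures, invoking f() on every CPU that has
 * not yet reported a quiescent state for the current grace period and
 * reporting any quiescent states that f() discovers (for example, CPUs
 * sitting in dyntick-idle).  Also initiates priority boosting or reports
 * already-satisfied quiescent states where appropriate.
 */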
2898static void force_qs_rnp(struct rcu_state *rsp,
2899 int (*f)(struct rcu_data *rdp, bool *isidle,
2900 unsigned long *maxj),
2901 bool *isidle, unsigned long *maxj)
2902{
2903 int cpu;
2904 unsigned long flags;
2905 unsigned long mask;
2906 struct rcu_node *rnp;
2907
2908 rcu_for_each_leaf_node(rsp, rnp) {
2909 cond_resched_rcu_qs();
2910 mask = 0;
2911 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2912 if (rnp->qsmask == 0) {
2913 if (rcu_state_p == &rcu_sched_state ||
2914 rsp != rcu_state_p ||
2915 rcu_preempt_blocked_readers_cgp(rnp)) {
2916
2917
2918
2919
2920
2921 rcu_initiate_boost(rnp, flags);
2922
2923 continue;
2924 }
2925 if (rnp->parent &&
2926 (rnp->parent->qsmask & rnp->grpmask)) {
2927
2928
2929
2930
2931
2932 rcu_report_unblock_qs_rnp(rsp, rnp, flags);
2933
2934 continue;
2935 }
2936 }
2937 for_each_leaf_node_possible_cpu(rnp, cpu) {
2938 unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
2939 if ((rnp->qsmask & bit) != 0) {
2940 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2941 mask |= bit;
2942 }
2943 }
2944 if (mask != 0) {
2945
2946 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2947 } else {
2948
2949 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2950 }
2951 }
2952}
2953
2954
2955
2956
2957
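/*
 * Ask the grace-period kthread to force quiescent states on reluctant
 * CPUs.  Uses a funnel of ->fqslock trylocks up the rcu_node tree so
 * that only one caller at a time sets RCU_GP_FLAG_FQS and wakes the
 * kthread; the other callers back off early.
 */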
2958static void force_quiescent_state(struct rcu_state *rsp)
2959{
2960 unsigned long flags;
2961 bool ret;
2962 struct rcu_node *rnp;
2963 struct rcu_node *rnp_old = NULL;
2964
2965
2966 rnp = __this_cpu_read(rsp->rda->mynode);
2967 for (; rnp != NULL; rnp = rnp->parent) {
2968 ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2969 !raw_spin_trylock(&rnp->fqslock);
2970 if (rnp_old != NULL)
2971 raw_spin_unlock(&rnp_old->fqslock);
2972 if (ret) {
2973 rsp->n_force_qs_lh++;
2974 return;
2975 }
2976 rnp_old = rnp;
2977 }
2978
2979
2980
2981 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2982 raw_spin_unlock(&rnp_old->fqslock);
2983 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2984 rsp->n_force_qs_lh++;
2985 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2986 return;
2987 }
2988 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2989 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2990 rcu_gp_kthread_wake(rsp);
2991}
2992
2993
2994
2995
2996
2997
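/*
 * Per-flavor RCU core processing for the current CPU: note quiescent
 * states, start a new grace period if this CPU needs one, invoke any
 * ready callbacks, and handle deferred no-CBs wakeups.
 */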
2998static void
2999__rcu_process_callbacks(struct rcu_state *rsp)
3000{
3001 unsigned long flags;
3002 bool needwake;
3003 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
3004
3005 WARN_ON_ONCE(!rdp->beenonline);
3006
3007
3008 rcu_check_quiescent_state(rsp, rdp);
3009
3010
3011 local_irq_save(flags);
3012 if (cpu_needs_another_gp(rsp, rdp)) {
3013 raw_spin_lock_rcu_node(rcu_get_root(rsp));
3014 needwake = rcu_start_gp(rsp);
3015 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
3016 if (needwake)
3017 rcu_gp_kthread_wake(rsp);
3018 } else {
3019 local_irq_restore(flags);
3020 }
3021
3022
3023 if (rcu_segcblist_ready_cbs(&rdp->cblist))
3024 invoke_rcu_callbacks(rsp, rdp);
3025
3026
3027 do_nocb_deferred_wakeup(rdp);
3028}
3029
3030
3031
3032
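/*
 * RCU_SOFTIRQ handler: do RCU core processing for each flavor on the
 * current CPU.
 */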
3033static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
3034{
3035 struct rcu_state *rsp;
3036
3037 if (cpu_is_offline(smp_processor_id()))
3038 return;
3039 trace_rcu_utilization(TPS("Start RCU core"));
3040 for_each_rcu_flavor(rsp)
3041 __rcu_process_callbacks(rsp);
3042 trace_rcu_utilization(TPS("End RCU core"));
3043}
3044
3045
3046
3047
3048
3049
3050
3051
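/*
 * Schedule callback invocation.  Directly invoke rcu_do_batch() unless
 * RCU priority boosting is in effect for this flavor, in which case
 * defer to the per-CPU callbacks kthread.  Does nothing until the
 * scheduler is fully active.
 */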
3052static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
3053{
3054 if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
3055 return;
3056 if (likely(!rsp->boost)) {
3057 rcu_do_batch(rsp, rdp);
3058 return;
3059 }
3060 invoke_rcu_callbacks_kthread();
3061}
3062
3063static void invoke_rcu_core(void)
3064{
3065 if (cpu_online(smp_processor_id()))
3066 raise_softirq(RCU_SOFTIRQ);
3067}
3068
3069
3070
3071
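/*
 * Handle any core-RCU processing required by the enqueuing of a new
 * callback: kick the core if RCU is not watching this CPU, and if the
 * callback queue has grown excessively, try to start a grace period or
 * force quiescent states.
 */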
3072static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
3073 struct rcu_head *head, unsigned long flags)
3074{
3075 bool needwake;
3076
3077
3078
3079
3080
3081 if (!rcu_is_watching())
3082 invoke_rcu_core();
3083
3084
3085 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
3086 return;
3087
3088
3089
3090
3091
3092
3093
3094
3095 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
3096 rdp->qlen_last_fqs_check + qhimark)) {
3097
3098
3099 note_gp_changes(rsp, rdp);
3100
3101
3102 if (!rcu_gp_in_progress(rsp)) {
3103 struct rcu_node *rnp_root = rcu_get_root(rsp);
3104
3105 raw_spin_lock_rcu_node(rnp_root);
3106 needwake = rcu_start_gp(rsp);
3107 raw_spin_unlock_rcu_node(rnp_root);
3108 if (needwake)
3109 rcu_gp_kthread_wake(rsp);
3110 } else {
3111
3112 rdp->blimit = LONG_MAX;
3113 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
3114 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
3115 force_quiescent_state(rsp);
3116 rdp->n_force_qs_snap = rsp->n_force_qs;
3117 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
3118 }
3119 }
3120}
3121
3122
3123
3124
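/*
 * Do-nothing callback used to "leak" a callback that was passed to
 * call_rcu() twice; see the debug_rcu_head_queue() check in __call_rcu().
 */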
3125static void rcu_leak_callback(struct rcu_head *rhp)
3126{
3127}
3128
3129
3130
3131
3132
3133
3134
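/*
 * Helper for call_rcu_sched(), call_rcu_bh(), and friends.  @cpu is
 * normally -1 ("use the current CPU"); in this file only _rcu_barrier()
 * passes a specific CPU, and then only for no-CBs CPUs.  @lazy marks
 * callbacks that merely free memory and can therefore be deferred
 * longer.
 */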
3135static void
3136__call_rcu(struct rcu_head *head, rcu_callback_t func,
3137 struct rcu_state *rsp, int cpu, bool lazy)
3138{
3139 unsigned long flags;
3140 struct rcu_data *rdp;
3141
3142
3143 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3144
3145 if (debug_rcu_head_queue(head)) {
3146
3147 WRITE_ONCE(head->func, rcu_leak_callback);
3148 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
3149 return;
3150 }
3151 head->func = func;
3152 head->next = NULL;
3153 local_irq_save(flags);
3154 rdp = this_cpu_ptr(rsp->rda);
3155
3156
3157 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
3158 int offline;
3159
3160 if (cpu != -1)
3161 rdp = per_cpu_ptr(rsp->rda, cpu);
3162 if (likely(rdp->mynode)) {
3163
3164 offline = !__call_rcu_nocb(rdp, head, lazy, flags);
3165 WARN_ON_ONCE(offline);
3166
3167 local_irq_restore(flags);
3168 return;
3169 }
3170
3171
3172
3173
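 /*
  * rdp->mynode is not yet set up, so this is very early boot, before
  * rcu_init().  Initialize the callback list if needed and fall
  * through to enqueue locally.
  */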
3174 BUG_ON(cpu != -1);
3175 WARN_ON_ONCE(!rcu_is_watching());
3176 if (rcu_segcblist_empty(&rdp->cblist))
3177 rcu_segcblist_init(&rdp->cblist);
3178 }
3179 rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
3180 if (!lazy)
3181 rcu_idle_count_callbacks_posted();
3182
3183 if (__is_kfree_rcu_offset((unsigned long)func))
3184 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
3185 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
3186 rcu_segcblist_n_cbs(&rdp->cblist));
3187 else
3188 trace_rcu_callback(rsp->name, head,
3189 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
3190 rcu_segcblist_n_cbs(&rdp->cblist));
3191
3192
3193 __call_rcu_core(rsp, rdp, head, flags);
3194 local_irq_restore(flags);
3195}
3196
3197
3198
3199
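/**
 * call_rcu_sched() - Queue an RCU callback for invocation after an
 * RCU-sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: callback function to be invoked after the grace period.
 *
 * The callback runs after all pre-existing preemption-disabled regions
 * of code, including hardirq and NMI handlers, have completed.
 */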
3200void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
3201{
3202 __call_rcu(head, func, &rcu_sched_state, -1, 0);
3203}
3204EXPORT_SYMBOL_GPL(call_rcu_sched);
3205
3206
3207
3208
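/**
 * call_rcu_bh() - Queue an RCU callback for invocation after an RCU-bh
 * grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: callback function to be invoked after the grace period.
 *
 * The callback runs after all pre-existing softirq handlers and
 * rcu_read_lock_bh() critical sections have completed.
 */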
3209void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
3210{
3211 __call_rcu(head, func, &rcu_bh_state, -1, 0);
3212}
3213EXPORT_SYMBOL_GPL(call_rcu_bh);
3214
3215
3216
3217
3218
3219
3220
3221
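/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * Lazy callbacks are assumed to do nothing but free memory, which lets
 * the grace-period machinery defer and batch them more aggressively.
 * Uses the default flavor selected by rcu_state_p.
 */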
3222void kfree_call_rcu(struct rcu_head *head,
3223 rcu_callback_t func)
3224{
3225 __call_rcu(head, func, rcu_state_p, -1, 1);
3226}
3227EXPORT_SYMBOL_GPL(kfree_call_rcu);
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
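/*
 * Return true if a blocking grace-period wait is trivially a grace
 * period, which is the case when at most one CPU is online: every
 * other CPU is then necessarily quiescent for RCU-sched and RCU-bh.
 */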
3238static inline int rcu_blocking_is_gp(void)
3239{
3240 int ret;
3241
3242 might_sleep();
3243 preempt_disable();
3244 ret = num_online_cpus() <= 1;
3245 preempt_enable();
3246 return ret;
3247}
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
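/**
 * synchronize_sched - wait until an RCU-sched grace period has elapsed.
 *
 * Blocks until all pre-existing RCU-sched read-side critical sections
 * (preemption-disabled regions, hardirq and NMI handlers) have
 * completed.  Note that this does not wait for callbacks queued by
 * call_rcu_sched() to be invoked; use rcu_barrier_sched() for that.
 * Illegal from within an RCU-sched read-side critical section, as the
 * lockdep check below enforces.
 */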
3290void synchronize_sched(void)
3291{
3292 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3293 lock_is_held(&rcu_lock_map) ||
3294 lock_is_held(&rcu_sched_lock_map),
3295 "Illegal synchronize_sched() in RCU-sched read-side critical section");
3296 if (rcu_blocking_is_gp())
3297 return;
3298 if (rcu_gp_is_expedited())
3299 synchronize_sched_expedited();
3300 else
3301 wait_rcu_gp(call_rcu_sched);
3302}
3303EXPORT_SYMBOL_GPL(synchronize_sched);
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
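/**
 * synchronize_rcu_bh - wait until an RCU-bh grace period has elapsed.
 *
 * Blocks until all pre-existing rcu_read_lock_bh() critical sections
 * and softirq handlers have completed.  Does not wait for pending
 * call_rcu_bh() callbacks; use rcu_barrier_bh() for that.
 */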
3317void synchronize_rcu_bh(void)
3318{
3319 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3320 lock_is_held(&rcu_lock_map) ||
3321 lock_is_held(&rcu_sched_lock_map),
3322 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
3323 if (rcu_blocking_is_gp())
3324 return;
3325 if (rcu_gp_is_expedited())
3326 synchronize_rcu_bh_expedited();
3327 else
3328 wait_rcu_gp(call_rcu_bh);
3329}
3330EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
3331
3332
3333
3334
3335
3336
3337
3338
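/**
 * get_state_synchronize_rcu - Snapshot current RCU state
 *
 * Returns a cookie capturing the current grace-period number, for
 * later use by cond_synchronize_rcu().
 */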
3339unsigned long get_state_synchronize_rcu(void)
3340{
3341
3342
3343
3344
3345 smp_mb(); /* Order prior updates to RCU-protected data before the ->gpnum load below. */
3346
3347
3348
3349
3350
3351
3352 return smp_load_acquire(&rcu_state_p->gpnum);
3353}
3354EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
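/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 * @oldstate: value from an earlier call to get_state_synchronize_rcu()
 *
 * If a full grace period has elapsed since the snapshot was taken,
 * return immediately; otherwise invoke synchronize_rcu().  Illustrative
 * usage (not taken from this file):
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *	do_something_else();
 *	cond_synchronize_rcu(cookie);
 */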
3370void cond_synchronize_rcu(unsigned long oldstate)
3371{
3372 unsigned long newstate;
3373
3374
3375
3376
3377
3378 newstate = smp_load_acquire(&rcu_state_p->completed);
3379 if (ULONG_CMP_GE(oldstate, newstate))
3380 synchronize_rcu();
3381}
3382EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3383
3384
3385
3386
3387
3388
3389
3390
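/*
 * RCU-sched counterpart of get_state_synchronize_rcu(): snapshot the
 * RCU-sched grace-period number for later use by
 * cond_synchronize_sched().
 */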
3391unsigned long get_state_synchronize_sched(void)
3392{
3393
3394
3395
3396
3397 smp_mb(); /* Order prior updates to RCU-protected data before the ->gpnum load below. */
3398
3399
3400
3401
3402
3403
3404 return smp_load_acquire(&rcu_sched_state.gpnum);
3405}
3406EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
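/*
 * RCU-sched counterpart of cond_synchronize_rcu(): wait via
 * synchronize_sched() only if a full RCU-sched grace period has not
 * elapsed since the corresponding get_state_synchronize_sched().
 */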
3422void cond_synchronize_sched(unsigned long oldstate)
3423{
3424 unsigned long newstate;
3425
3426
3427
3428
3429
3430 newstate = smp_load_acquire(&rcu_sched_state.completed);
3431 if (ULONG_CMP_GE(oldstate, newstate))
3432 synchronize_sched();
3433}
3434EXPORT_SYMBOL_GPL(cond_synchronize_sched);
3435
3436
3437
3438
3439
3440
3441
3442
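/*
 * Check whether this CPU has any immediate RCU-related work to do for
 * the specified flavor, returning 1 if so.  The checks run roughly in
 * order of increasing cost, and the per-CPU n_rp_* counters record
 * which check fired, for statistics.
 */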
3443static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3444{
3445 struct rcu_node *rnp = rdp->mynode;
3446
3447 rdp->n_rcu_pending++;
3448
3449
3450 check_cpu_stall(rsp, rdp);
3451
3452
3453 if (rcu_nohz_full_cpu(rsp))
3454 return 0;
3455
3456
3457 if (rcu_scheduler_fully_active &&
3458 rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
3459 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) {
3460 rdp->n_rp_core_needs_qs++;
3461 } else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
3462 rdp->n_rp_report_qs++;
3463 return 1;
3464 }
3465
3466
3467 if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
3468 rdp->n_rp_cb_ready++;
3469 return 1;
3470 }
3471
3472
3473 if (cpu_needs_another_gp(rsp, rdp)) {
3474 rdp->n_rp_cpu_needs_gp++;
3475 return 1;
3476 }
3477
3478
3479 if (READ_ONCE(rnp->completed) != rdp->completed) {
3480 rdp->n_rp_gp_completed++;
3481 return 1;
3482 }
3483
3484
3485 if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
3486 unlikely(READ_ONCE(rdp->gpwrap))) {
3487 rdp->n_rp_gp_started++;
3488 return 1;
3489 }
3490
3491
3492 if (rcu_nocb_need_deferred_wakeup(rdp)) {
3493 rdp->n_rp_nocb_defer_wakeup++;
3494 return 1;
3495 }
3496
3497
3498 rdp->n_rp_need_nothing++;
3499 return 0;
3500}
3501
3502
3503
3504
3505
3506
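/*
 * Return nonzero if this CPU has any immediate RCU-related work to do
 * for any flavor.
 */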
3507static int rcu_pending(void)
3508{
3509 struct rcu_state *rsp;
3510
3511 for_each_rcu_flavor(rsp)
3512 if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
3513 return 1;
3514 return 0;
3515}
3516
3517
3518
3519
3520
3521
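/*
 * Return true if this CPU has any callback queued for any flavor.  If
 * @all_lazy is non-NULL, also report whether every queued callback is
 * lazy (kfree-only).
 */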
3522static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
3523{
3524 bool al = true;
3525 bool hc = false;
3526 struct rcu_data *rdp;
3527 struct rcu_state *rsp;
3528
3529 for_each_rcu_flavor(rsp) {
3530 rdp = this_cpu_ptr(rsp->rda);
3531 if (rcu_segcblist_empty(&rdp->cblist))
3532 continue;
3533 hc = true;
3534 if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
3535 al = false;
3536 break;
3537 }
3538 }
3539 if (all_lazy)
3540 *all_lazy = al;
3541 return hc;
3542}
3543
3544
3545
3546
3547
3548static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
3549 int cpu, unsigned long done)
3550{
3551 trace_rcu_barrier(rsp->name, s, cpu,
3552 atomic_read(&rsp->barrier_cpu_count), done);
3553}
3554
3555
3556
3557
3558
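/*
 * RCU callback used by _rcu_barrier().  The last such callback to be
 * invoked completes ->barrier_completion, waking up _rcu_barrier().
 */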
3559static void rcu_barrier_callback(struct rcu_head *rhp)
3560{
3561 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
3562 struct rcu_state *rsp = rdp->rsp;
3563
3564 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
3565 _rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence);
3566 complete(&rsp->barrier_completion);
3567 } else {
3568 _rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence);
3569 }
3570}
3571
3572
3573
3574
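/*
 * Called on each online CPU with callbacks via smp_call_function_single()
 * to enqueue that CPU's rcu_barrier() marker callback.
 */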
3575static void rcu_barrier_func(void *type)
3576{
3577 struct rcu_state *rsp = type;
3578 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
3579
3580 _rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
3581 atomic_inc(&rsp->barrier_cpu_count);
3582 rsp->call(&rdp->barrier_head, rcu_barrier_callback);
3583}
3584
3585
3586
3587
3588
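/*
 * Wait until all callbacks of the specified flavor that were queued
 * before this call have been invoked.  Uses ->barrier_sequence so that
 * concurrent callers can piggyback on one another's work, posts a
 * marker callback on every CPU that needs one, and waits for the last
 * marker to complete ->barrier_completion.
 */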
3589static void _rcu_barrier(struct rcu_state *rsp)
3590{
3591 int cpu;
3592 struct rcu_data *rdp;
3593 unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
3594
3595 _rcu_barrier_trace(rsp, "Begin", -1, s);
3596
3597
3598 mutex_lock(&rsp->barrier_mutex);
3599
3600
3601 if (rcu_seq_done(&rsp->barrier_sequence, s)) {
3602 _rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence);
3603 smp_mb(); /* Order the caller's subsequent accesses after the above check. */
3604 mutex_unlock(&rsp->barrier_mutex);
3605 return;
3606 }
3607
3608
3609 rcu_seq_start(&rsp->barrier_sequence);
3610 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence);
3611
3612
3613
3614
3615
3616
3617
3618 init_completion(&rsp->barrier_completion);
3619 atomic_set(&rsp->barrier_cpu_count, 1);
3620 get_online_cpus();
3621
3622
3623
3624
3625
3626
3627 for_each_possible_cpu(cpu) {
3628 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3629 continue;
3630 rdp = per_cpu_ptr(rsp->rda, cpu);
3631 if (rcu_is_nocb_cpu(cpu)) {
3632 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
3633 _rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
3634 rsp->barrier_sequence);
3635 } else {
3636 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3637 rsp->barrier_sequence);
3638 smp_mb__before_atomic();
3639 atomic_inc(&rsp->barrier_cpu_count);
3640 __call_rcu(&rdp->barrier_head,
3641 rcu_barrier_callback, rsp, cpu, 0);
3642 }
3643 } else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
3644 _rcu_barrier_trace(rsp, "OnlineQ", cpu,
3645 rsp->barrier_sequence);
3646 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3647 } else {
3648 _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
3649 rsp->barrier_sequence);
3650 }
3651 }
3652 put_online_cpus();
3653
3654
3655
3656
3657
3658 if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3659 complete(&rsp->barrier_completion);
3660
3661
3662 wait_for_completion(&rsp->barrier_completion);
3663
3664
3665 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence);
3666 rcu_seq_end(&rsp->barrier_sequence);
3667
3668
3669 mutex_unlock(&rsp->barrier_mutex);
3670}
3671
3672
3673
3674
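/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks
 * have been invoked.
 */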
3675void rcu_barrier_bh(void)
3676{
3677 _rcu_barrier(&rcu_bh_state);
3678}
3679EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3680
3681
3682
3683
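/**
 * rcu_barrier_sched - Wait until all in-flight call_rcu_sched()
 * callbacks have been invoked.
 */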
3684void rcu_barrier_sched(void)
3685{
3686 _rcu_barrier(&rcu_sched_state);
3687}
3688EXPORT_SYMBOL_GPL(rcu_barrier_sched);
3689
3690
3691
3692
3693
3694
3695
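/*
 * The first CPU in a given leaf rcu_node structure is coming online:
 * propagate that fact up the tree by setting the leaf's bit in each
 * ancestor's ->qsmaskinit.
 */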
3696static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3697{
3698 long mask;
3699 struct rcu_node *rnp = rnp_leaf;
3700
3701 for (;;) {
3702 mask = rnp->grpmask;
3703 rnp = rnp->parent;
3704 if (rnp == NULL)
3705 return;
3706 raw_spin_lock_rcu_node(rnp);
3707 rnp->qsmaskinit |= mask;
3708 raw_spin_unlock_rcu_node(rnp);
3709 }
3710}
3711
3712
3713
3714
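/*
 * Boot-time per-CPU initialization of the specified flavor's rcu_data.
 */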
3715static void __init
3716rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3717{
3718 unsigned long flags;
3719 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3720 struct rcu_node *rnp = rcu_get_root(rsp);
3721
3722
3723 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3724 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
3725 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3726 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3727 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
3728 rdp->cpu = cpu;
3729 rdp->rsp = rsp;
3730 rcu_boot_init_nocb_percpu_data(rdp);
3731 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3732}
3733
3734
3735
3736
3737
3738
3739
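/*
 * Initialize the incoming CPU's rcu_data for the specified flavor:
 * reset batching and force-quiescent-state bookkeeping, count the CPU
 * in ->ncpus the first time it comes up, and set ->gpnum and
 * ->completed so the CPU will later notice any new grace period.
 */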
3740static void
3741rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3742{
3743 unsigned long flags;
3744 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3745 struct rcu_node *rnp = rcu_get_root(rsp);
3746
3747
3748 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3749 rdp->qlen_last_fqs_check = 0;
3750 rdp->n_force_qs_snap = rsp->n_force_qs;
3751 rdp->blimit = blimit;
3752 if (rcu_segcblist_empty(&rdp->cblist) &&
3753 !init_nocb_callback_list(rdp))
3754 rcu_segcblist_init(&rdp->cblist);
3755 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
3756 rcu_sysidle_init_percpu_data(rdp->dynticks);
3757 rcu_dynticks_eqs_online();
3758 raw_spin_unlock_rcu_node(rnp);
3759
3760
3761
3762
3763
3764
3765 rnp = rdp->mynode;
3766 raw_spin_lock_rcu_node(rnp);
3767 if (!rdp->beenonline)
3768 WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
3769 rdp->beenonline = true;
3770 rdp->gpnum = rnp->completed;
3771 rdp->completed = rnp->completed;
3772 rdp->cpu_no_qs.b.norm = true;
3773 rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
3774 rdp->core_needs_qs = false;
3775 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3776 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3777}
3778
3779
3780
3781
3782
3783int rcutree_prepare_cpu(unsigned int cpu)
3784{
3785 struct rcu_state *rsp;
3786
3787 for_each_rcu_flavor(rsp)
3788 rcu_init_percpu_data(cpu, rsp);
3789
3790 rcu_prepare_kthreads(cpu);
3791 rcu_spawn_all_nocb_kthreads(cpu);
3792
3793 return 0;
3794}
3795
3796
3797
3798
3799static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3800{
3801 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3802
3803 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3804}
3805
3806
3807
3808
3809
3810int rcutree_online_cpu(unsigned int cpu)
3811{
3812 sync_sched_exp_online_cleanup(cpu);
3813 rcutree_affinity_setting(cpu, -1);
3814 if (IS_ENABLED(CONFIG_TREE_SRCU))
3815 srcu_online_cpu(cpu);
3816 return 0;
3817}
3818
3819
3820
3821
3822
3823int rcutree_offline_cpu(unsigned int cpu)
3824{
3825 rcutree_affinity_setting(cpu, cpu);
3826 if (IS_ENABLED(CONFIG_TREE_SRCU))
3827 srcu_offline_cpu(cpu);
3828 return 0;
3829}
3830
3831
3832
3833
3834int rcutree_dying_cpu(unsigned int cpu)
3835{
3836 struct rcu_state *rsp;
3837
3838 for_each_rcu_flavor(rsp)
3839 rcu_cleanup_dying_cpu(rsp);
3840 return 0;
3841}
3842
3843
3844
3845
3846int rcutree_dead_cpu(unsigned int cpu)
3847{
3848 struct rcu_state *rsp;
3849
3850 for_each_rcu_flavor(rsp) {
3851 rcu_cleanup_dead_cpu(cpu, rsp);
3852 do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
3853 }
3854 return 0;
3855}
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
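/*
 * Invoked on the incoming CPU early during bringup: mark it online in
 * each flavor's ->qsmaskinitnext and ->expmaskinitnext so that both
 * normal and expedited grace periods will henceforth wait on it.
 */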
3868void rcu_cpu_starting(unsigned int cpu)
3869{
3870 unsigned long flags;
3871 unsigned long mask;
3872 struct rcu_data *rdp;
3873 struct rcu_node *rnp;
3874 struct rcu_state *rsp;
3875
3876 for_each_rcu_flavor(rsp) {
3877 rdp = per_cpu_ptr(rsp->rda, cpu);
3878 rnp = rdp->mynode;
3879 mask = rdp->grpmask;
3880 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3881 rnp->qsmaskinitnext |= mask;
3882 rnp->expmaskinitnext |= mask;
3883 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3884 }
3885}
3886
3887#ifdef CONFIG_HOTPLUG_CPU
3888
3889
3890
3891
3892
3893static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
3894{
3895 unsigned long flags;
3896 unsigned long mask;
3897 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3898 struct rcu_node *rnp = rdp->mynode;
3899
3900
3901 mask = rdp->grpmask;
3902 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3903 rnp->qsmaskinitnext &= ~mask;
3904 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3905}
3906
3907
3908
3909
3910
3911
3912
3913
3914
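/*
 * Invoked near the end of the offline sequence: report an expedited
 * quiescent state on behalf of the outgoing CPU, then clear it from
 * each flavor's ->qsmaskinitnext.
 */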
3915void rcu_report_dead(unsigned int cpu)
3916{
3917 struct rcu_state *rsp;
3918
3919
3920 preempt_disable();
3921 rcu_report_exp_rdp(&rcu_sched_state,
3922 this_cpu_ptr(rcu_sched_state.rda), true);
3923 preempt_enable();
3924 for_each_rcu_flavor(rsp)
3925 rcu_cleanup_dying_idle_cpu(cpu, rsp);
3926}
3927#endif /* CONFIG_HOTPLUG_CPU */
3928
3929
3930
3931
3932
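/*
 * PM notifier: on systems with at most 256 CPUs, use expedited grace
 * periods while suspend or hibernation is in progress and revert to
 * normal grace periods afterwards.
 */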
3933static int rcu_pm_notify(struct notifier_block *self,
3934 unsigned long action, void *hcpu)
3935{
3936 switch (action) {
3937 case PM_HIBERNATION_PREPARE:
3938 case PM_SUSPEND_PREPARE:
3939 if (nr_cpu_ids <= 256) /* Expediting scales poorly on large systems. */
3940 rcu_expedite_gp();
3941 break;
3942 case PM_POST_HIBERNATION:
3943 case PM_POST_SUSPEND:
3944 if (nr_cpu_ids <= 256)
3945 rcu_unexpedite_gp();
3946 break;
3947 default:
3948 break;
3949 }
3950 return NOTIFY_OK;
3951}
3952
3953
3954
3955
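/*
 * Spawn the kthreads that handle each flavor's grace periods, along
 * with the no-CBs and boost kthreads, clamping kthread_prio to a
 * usable SCHED_FIFO priority when one is requested.
 */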
3956static int __init rcu_spawn_gp_kthread(void)
3957{
3958 unsigned long flags;
3959 int kthread_prio_in = kthread_prio;
3960 struct rcu_node *rnp;
3961 struct rcu_state *rsp;
3962 struct sched_param sp;
3963 struct task_struct *t;
3964
3965
3966 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3967 kthread_prio = 1;
3968 else if (kthread_prio < 0)
3969 kthread_prio = 0;
3970 else if (kthread_prio > 99)
3971 kthread_prio = 99;
3972 if (kthread_prio != kthread_prio_in)
3973 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3974 kthread_prio, kthread_prio_in);
3975
3976 rcu_scheduler_fully_active = 1;
3977 for_each_rcu_flavor(rsp) {
3978 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
3979 BUG_ON(IS_ERR(t));
3980 rnp = rcu_get_root(rsp);
3981 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3982 rsp->gp_kthread = t;
3983 if (kthread_prio) {
3984 sp.sched_priority = kthread_prio;
3985 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3986 }
3987 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3988 wake_up_process(t);
3989 }
3990 rcu_spawn_nocb_kthreads();
3991 rcu_spawn_boost_kthreads();
3992 return 0;
3993}
3994early_initcall(rcu_spawn_gp_kthread);
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
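/*
 * Invoked during early boot, while still running on a single CPU with
 * no context switches having occurred: advance rcu_scheduler_active so
 * that the synchronous grace-period primitives switch away from their
 * early-boot implementations.
 */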
4006void rcu_scheduler_starting(void)
4007{
4008 WARN_ON(num_online_cpus() != 1);
4009 WARN_ON(nr_context_switches() > 0);
4010 rcu_test_sync_prims();
4011 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4012 rcu_test_sync_prims();
4013}
4014
4015
4016
4017
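/*
 * Helper for rcu_init(): initialize one rcu_state structure, its
 * rcu_node combining tree, and the boot-time per-CPU data hanging
 * off it.
 */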
4018static void __init rcu_init_one(struct rcu_state *rsp)
4019{
4020 static const char * const buf[] = RCU_NODE_NAME_INIT;
4021 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4022 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4023 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4024
4025 int levelspread[RCU_NUM_LVLS];
4026 int cpustride = 1;
4027 int i;
4028 int j;
4029 struct rcu_node *rnp;
4030
4031 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));
4032
4033
4034 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4035 panic("rcu_init_one: rcu_num_lvls out of range");
4036
4037
4038
4039 for (i = 1; i < rcu_num_lvls; i++)
4040 rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
4041 rcu_init_levelspread(levelspread, num_rcu_lvl);
4042
4043
4044
4045 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4046 cpustride *= levelspread[i];
4047 rnp = rsp->level[i];
4048 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4049 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4050 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4051 &rcu_node_class[i], buf[i]);
4052 raw_spin_lock_init(&rnp->fqslock);
4053 lockdep_set_class_and_name(&rnp->fqslock,
4054 &rcu_fqs_class[i], fqs[i]);
4055 rnp->gpnum = rsp->gpnum;
4056 rnp->completed = rsp->completed;
4057 rnp->qsmask = 0;
4058 rnp->qsmaskinit = 0;
4059 rnp->grplo = j * cpustride;
4060 rnp->grphi = (j + 1) * cpustride - 1;
4061 if (rnp->grphi >= nr_cpu_ids)
4062 rnp->grphi = nr_cpu_ids - 1;
4063 if (i == 0) {
4064 rnp->grpnum = 0;
4065 rnp->grpmask = 0;
4066 rnp->parent = NULL;
4067 } else {
4068 rnp->grpnum = j % levelspread[i - 1];
4069 rnp->grpmask = 1UL << rnp->grpnum;
4070 rnp->parent = rsp->level[i - 1] +
4071 j / levelspread[i - 1];
4072 }
4073 rnp->level = i;
4074 INIT_LIST_HEAD(&rnp->blkd_tasks);
4075 rcu_init_one_nocb(rnp);
4076 init_waitqueue_head(&rnp->exp_wq[0]);
4077 init_waitqueue_head(&rnp->exp_wq[1]);
4078 init_waitqueue_head(&rnp->exp_wq[2]);
4079 init_waitqueue_head(&rnp->exp_wq[3]);
4080 spin_lock_init(&rnp->exp_lock);
4081 }
4082 }
4083
4084 init_swait_queue_head(&rsp->gp_wq);
4085 init_swait_queue_head(&rsp->expedited_wq);
4086 rnp = rsp->level[rcu_num_lvls - 1];
4087 for_each_possible_cpu(i) {
4088 while (i > rnp->grphi)
4089 rnp++;
4090 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
4091 rcu_boot_init_percpu_data(i, rsp);
4092 }
4093 list_add(&rsp->flavors, &rcu_struct_flavors);
4094}
4095
4096
4097
4098
4099
4100
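/*
 * Compute the rcu_node tree geometry from the run-time values of
 * nr_cpu_ids and rcu_fanout_leaf, and pick defaults for the
 * force-quiescent-state delays.
 */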
4101static void __init rcu_init_geometry(void)
4102{
4103 ulong d;
4104 int i;
4105 int rcu_capacity[RCU_NUM_LVLS];
4106
4107
4108
4109
4110
4111
4112
4113
4114 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4115 if (jiffies_till_first_fqs == ULONG_MAX)
4116 jiffies_till_first_fqs = d;
4117 if (jiffies_till_next_fqs == ULONG_MAX)
4118 jiffies_till_next_fqs = d;
4119
4120
4121 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4122 nr_cpu_ids == NR_CPUS)
4123 return;
4124 pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
4125 rcu_fanout_leaf, nr_cpu_ids);
4126
4127
4128
4129
4130
4131
4132
4133 if (rcu_fanout_leaf < 2 ||
4134 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4135 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4136 WARN_ON(1);
4137 return;
4138 }
4139
4140
4141
4142
4143
4144 rcu_capacity[0] = rcu_fanout_leaf;
4145 for (i = 1; i < RCU_NUM_LVLS; i++)
4146 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4147
4148
4149
4150
4151
4152 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4153 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4154 WARN_ON(1);
4155 return;
4156 }
4157
4158
4159 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4160 }
4161 rcu_num_lvls = i + 1;
4162
4163
4164 for (i = 0; i < rcu_num_lvls; i++) {
4165 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4166 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4167 }
4168
4169
4170 rcu_num_nodes = 0;
4171 for (i = 0; i < rcu_num_lvls; i++)
4172 rcu_num_nodes += num_rcu_lvl[i];
4173}
4174
4175
4176
4177
4178
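/*
 * Dump the layout of the rcu_node combining tree; enabled by the
 * dump_tree module parameter.
 */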
4179static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
4180{
4181 int level = 0;
4182 struct rcu_node *rnp;
4183
4184 pr_info("rcu_node tree layout dump\n");
4185 pr_info(" ");
4186 rcu_for_each_node_breadth_first(rsp, rnp) {
4187 if (rnp->level != level) {
4188 pr_cont("\n");
4189 pr_info(" ");
4190 level = rnp->level;
4191 }
4192 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4193 }
4194 pr_cont("\n");
4195}
4196
4197void __init rcu_init(void)
4198{
4199 int cpu;
4200
4201 rcu_early_boot_tests();
4202
4203 rcu_bootup_announce();
4204 rcu_init_geometry();
4205 rcu_init_one(&rcu_bh_state);
4206 rcu_init_one(&rcu_sched_state);
4207 if (dump_tree)
4208 rcu_dump_rcu_node_tree(&rcu_sched_state);
4209 __rcu_init_preempt();
4210 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
4211
4212
4213
4214
4215
4216
4217 pm_notifier(rcu_pm_notify, 0);
4218 for_each_online_cpu(cpu) {
4219 rcutree_prepare_cpu(cpu);
4220 rcu_cpu_starting(cpu);
4221 if (IS_ENABLED(CONFIG_TREE_SRCU))
4222 srcu_online_cpu(cpu);
4223 }
4224}
4225
4226#include "tree_exp.h"
4227#include "tree_plugin.h"
4228