/*
 * Read-Copy Update mechanism for mutual exclusion, tree-based
 * (hierarchical) variant.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
# define RCU_STATE_NAME(sname) sname##_varname
#else
# define DEFINE_RCU_TPS(sname)
# define RCU_STATE_NAME(sname) __stringify(sname)
#endif

#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
	.level = { &sname##_state.node[0] }, \
	.rda = &sname##_data, \
	.call = cr, \
	.gp_state = RCU_GP_IDLE, \
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
	.orphan_pend = RCU_CBLIST_INITIALIZER(sname##_state.orphan_pend), \
	.orphan_done = RCU_CBLIST_INITIALIZER(sname##_state.orphan_done), \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.name = RCU_STATE_NAME(sname), \
	.abbr = sabbr, \
	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
}

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);

static struct rcu_state *const rcu_state_p;
LIST_HEAD(rcu_struct_flavors);

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_node structures at each level of the combining tree. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

/*
 * The rcu_scheduler_active variable starts out RCU_SCHEDULER_INACTIVE,
 * during which time RCU may assume that there is but one task, allowing
 * RCU to (for example) optimize synchronize_rcu() to a simple barrier().
 * Once it advances, RCU must actually do all the hard work required to
 * detect real grace periods.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during early_initcall() processing, after the scheduler is capable of
 * creating new tasks.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_state *rsp,
			       struct rcu_data *rdp, bool wake);
static void sync_sched_exp_online_cleanup(int cpu);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */
static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
}

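/*
 * Note a quiescent state.  Because we do not need to know how many
 * quiescent states passed, just if there was at least one since the
 * start of the grace period, this just sets a flag.  The caller must
 * have disabled preemption.
 */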
void rcu_sched_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_sched_data.gpnum),
			       TPS("cpuqs"));
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(&rcu_sched_data), true);
}

void rcu_bh_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_bh"),
				       __this_cpu_read(rcu_bh_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
	}
}

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
#ifndef rcu_eqs_special_exit
#define rcu_eqs_special_exit() do { } while (0)
#endif

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};

/*
 * There are some places, currently just in the tracing infrastructure,
 * that use rcu_irq_enter() to make sure RCU is watching.  But there are
 * small locations where that will not even work.  In those cases
 * rcu_irq_enter_disabled() needs to return true.
 */
static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);

bool rcu_irq_enter_disabled(void)
{
	return this_cpu_read(disable_rcu_irq_enter);
}

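/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state.
 */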
static void rcu_dynticks_eqs_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
	/* Better be in an extended quiescent state! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state.
 */
static void rcu_dynticks_eqs_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
		/* Prefer duplicate flushes to losing a flush. */
		rcu_eqs_special_exit();
	}
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	int snap = atomic_add_return(0, &rdtp->dynticks);

	return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Return true if the CPU corresponding to the specified rcu_dynticks
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
{
	return snap != rcu_dynticks_snap(rdtp);
}

/*
 * Do a double-increment of the ->dynticks counter to emulate a
 * momentary idle sojourn.
 */
static void rcu_dynticks_momentary_idle(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
					&rdtp->dynticks);

	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
}

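/*
 * Set the special (bottom) bit of the specified CPU so that it will
 * take special action (such as flushing its TLB) on the next exit from
 * an extended quiescent state.  Returns true if the bit was set, or
 * false if the CPU was not in an extended quiescent state.
 */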
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	do {
		old = atomic_read(&rdtp->dynticks);
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
	} while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
	return true;
}

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of
 * what this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle
 * period.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_momentary_dyntick_idle(void)
{
	raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
	rcu_dynticks_momentary_idle();
}

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs();
	rcu_preempt_note_context_switch(preempt);
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
		rcu_momentary_dyntick_idle();
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
	if (!preempt)
		rcu_note_voluntary_context_switch_lite(current);
out:
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

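/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs, but only for those
 * RCU flavors in desperate need of a quiescent state.  Either way, do a
 * lightweight quiescent state for all RCU flavors.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 */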
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
		return;
	preempt_disable();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
		rcu_sched_qs();
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = HZ / 20;
module_param(jiffies_till_sched_qs, ulong, 0644);

static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
				  struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);

/*
 * Return the number of RCU batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started(void)
{
	return rcu_state_p->gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started);

/*
 * Return the number of RCU-sched batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_sched(void)
{
	return rcu_sched_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_sched);

/*
 * Return the number of RCU-bh batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_bh(void)
{
	return rcu_bh_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_bh);

/*
 * Return the number of RCU batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed(void)
{
	return rcu_state_p->completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-sched batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU-bh batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state_p->expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the number of RCU-sched expedited batches completed thus far
 * for debug & stats.  Similar to rcu_exp_batches_completed().
 */
unsigned long rcu_exp_batches_completed_sched(void)
{
	return rcu_sched_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);

/*
 * Force a quiescent state.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		pr_info("%s: wait state: %d ->state: %#lx\n",
			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
	}
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed)
{
	struct rcu_state *rsp = NULL;

	switch (test_type) {
	case RCU_FLAVOR:
		rsp = rcu_state_p;
		break;
	case RCU_BH_FLAVOR:
		rsp = &rcu_bh_state;
		break;
	case RCU_SCHED_FLAVOR:
		rsp = &rcu_sched_state;
		break;
	default:
		break;
	}
	if (rsp == NULL)
		return;
	*flags = READ_ONCE(rsp->gp_flags);
	*gpnum = READ_ONCE(rsp->gpnum);
	*completed = READ_ONCE(rsp->completed);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}

/*
 * Is there any need for future grace periods?
 * Interrupts must be disabled.  If the caller does not hold the root
 * rcu_node structure's ->lock, the results are advisory only.
 */
static int rcu_future_needs_gp(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);
	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
	int *fp = &rnp->need_future_gp[idx];

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!");
	return READ_ONCE(*fp);
}

/*
 * Does the current CPU require a not-yet-started grace period?
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
static bool
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!");
	if (rcu_gp_in_progress(rsp))
		return false;	/* No, a grace period is already in progress. */
	if (rcu_future_needs_gp(rsp))
		return true;	/* Yes, a no-CBs CPU needs one. */
	if (!rcu_segcblist_is_enabled(&rdp->cblist))
		return false;	/* No, this is a no-CBs (or offline) CPU. */
	if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
		return true;	/* Yes, CPU has newly registered callbacks. */
	if (rcu_segcblist_future_gp_needed(&rdp->cblist,
					   READ_ONCE(rsp->completed)))
		return true;	/* Yes, CBs for future grace period. */
	return false;		/* No grace period needed. */
}

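/*
 * rcu_eqs_enter_common - current CPU is entering an extended quiescent state
 *
 * Enter idle, doing appropriate accounting.  The caller must have
 * disabled interrupts.
 */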
static void rcu_eqs_enter_common(bool user)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!");
	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
	    !user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
		rcu_ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	for (each = rsp) below: do any deferred no-CBs wakeups before idling. */
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		do_nocb_deferred_wakeup(rdp);
	}
	rcu_prepare_for_idle();
	__this_cpu_inc(disable_rcu_irq_enter);
	rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
	rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
	__this_cpu_dec(disable_rcu_irq_enter);
	rcu_dynticks_task_enter();

	/*
	 * It is illegal to enter an extended quiescent state while
	 * in an RCU read-side critical section.
	 */
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal idle entry in RCU read-side critical section.");
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
			 "Illegal idle entry in RCU-bh read-side critical section.");
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
			 "Illegal idle entry in RCU-sched read-side critical section.");
}

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_enter(bool user)
{
	struct rcu_dynticks *rdtp;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
	if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
		rcu_eqs_enter_common(user);
	else
		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 */
void rcu_idle_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_enter(false);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit().  This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 */
void rcu_user_enter(void)
{
	rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture violates this assumption, RCU will give you what you
 * deserve, good and hard.  But very infrequently and irreproducibly.
 */
void rcu_irq_exit(void)
{
	struct rcu_dynticks *rdtp;

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
	rdtp = this_cpu_ptr(&rcu_dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting < 1);
	if (rdtp->dynticks_nesting <= 1) {
		rcu_eqs_enter_common(true);
	} else {
		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
		rdtp->dynticks_nesting--;
	}
}

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter was previously zero,
 * we really have exited idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_exit_common(long long oldval, int user)
{
	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)

	rcu_dynticks_task_exit();
	rcu_dynticks_eqs_exit();
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
	    !user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
				  oldval, rdtp->dynticks_nesting);
		rcu_ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm);
	}
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_exit(bool user)
{
	struct rcu_dynticks *rdtp;
	long long oldval;

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!");
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval & DYNTICK_TASK_NEST_MASK) {
		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	} else {
		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
		rcu_eqs_exit_common(oldval, user);
	}
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run a RCU read side critical section anytime.
 */
void rcu_user_exit(void)
{
	rcu_eqs_exit(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does upcalls to user mode,
 * or anything else that results in unbalanced calls to the irq_enter()
 * and irq_exit() functions.
 */
void rcu_irq_enter(void)
{
	struct rcu_dynticks *rdtp;
	long long oldval;

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting++;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting == 0);
	if (oldval)
		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
	else
		rcu_eqs_exit_common(oldval, true);
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

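/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update ->dynticks to let
 * RCU know that the CPU is active.  This implementation permits nested
 * NMIs, as long as the nesting level does not overflow an int.
 */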
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int incby = 2;

	/* Complain about underflow. */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * that if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period.
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {
		rcu_dynticks_eqs_exit();
		incby = 1;
	}
	rdtp->dynticks_nmi_nesting += incby;
	barrier();
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update ->dynticks to let the RCU grace-period handling
 * know that the CPU is back to being RCU-idle.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdtp->dynticks_nmi_nesting != 1) {
		rdtp->dynticks_nmi_nesting -= 2;
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	rdtp->dynticks_nmi_nesting = 0;
	rcu_dynticks_eqs_enter();
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.
 */
bool notrace rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU, but the wrong CPU will file a
 * quiescent state eventually anyway.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online?  Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
 * the check for rcu_scheduler_fully_active.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret;

	if (in_nmi())
		return true;
	preempt_disable();
	rdp = this_cpu_ptr(&rcu_sched_data);
	rnp = rdp->mynode;
	ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
	      !rcu_scheduler_fully_active;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * Is the current CPU idle from RCU's perspective?  Called from the
 * scheduling-clock interrupt, so the result depends on whether that
 * interrupt arrived in process context or interrupted the idle loop.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit it with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
				 rdp->mynode->gpnum))
			WRITE_ONCE(rdp->gpwrap, true);
		return 1;
	}
	return 0;
}

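/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */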
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned long jtsq;
	bool *rnhqp;
	bool *ruqp;
	unsigned long rjtsc;
	struct rcu_node *rnp;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the
	 * CPU already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		rdp->dynticks_fqs++;
		return 1;
	}

	/* Compute and saturate jiffies_till_sched_qs. */
	jtsq = jiffies_till_sched_qs;
	rjtsc = rcu_jiffies_till_stall_check();
	if (jtsq > rjtsc / 2) {
		WRITE_ONCE(jiffies_till_sched_qs, rjtsc);
		jtsq = rjtsc / 2;
	} else if (jtsq < 1) {
		WRITE_ONCE(jiffies_till_sched_qs, 1);
		jtsq = 1;
	}

	/*
	 * Has this CPU encountered a cond_resched_rcu_qs() since the
	 * beginning of the grace period?  For this to be the case,
	 * the CPU has to have noticed the current grace period.  This
	 * might not be the case for nohz_full CPUs looping in the kernel.
	 */
	rnp = rdp->mynode;
	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
	    READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
	    READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
		return 1;
	} else {
		/* Load rcu_qs_ctr before store to rcu_urgent_qs. */
		smp_store_release(ruqp, true);
	}

	/* Check for the CPU being offline. */
	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
		rdp->offline_fqs++;
		return 1;
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
	 * even context-switching back and forth between a pair of
	 * in-kernel CPU-bound tasks cannot advance grace periods.
	 * So if the grace period is old enough, make the CPU pay attention.
	 * Note that the unsynchronized assignments to the per-CPU
	 * rcu_need_heavy_qs variable are safe.  Yes, setting of bits can
	 * be lost, but they will be set again on the next
	 * force-quiescent-state pass.  So lost bit sets do not result
	 * in incorrect behavior, merely in a grace period lasting
	 * a few jiffies longer than it might otherwise.
	 */
	rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
	if (!READ_ONCE(*rnhqp) &&
	    (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
	     time_after(jiffies, rdp->rsp->jiffies_resched))) {
		WRITE_ONCE(*rnhqp, true);
		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
		smp_store_release(ruqp, true);
		rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
	}

	/*
	 * If more than halfway to RCU CPU stall-warning time, do
	 * a resched_cpu() to try to loosen things up a bit.
	 */
	if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2)
		resched_cpu(rdp->cpu);

	return 0;
}

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rsp->gp_start = j;
	smp_wmb(); /* Record start time before stall time. */
	j1 = rcu_jiffies_till_stall_check();
	WRITE_ONCE(rsp->jiffies_stall, j + j1);
	rsp->jiffies_resched = j + j1 / 2;
	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
}

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/*
 * Complain about starvation of the grace-period kthread.
 */
static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
{
	unsigned long gpa;
	unsigned long j;

	j = jiffies;
	gpa = READ_ONCE(rsp->gp_activity);
	if (j - gpa > 2 * HZ) {
		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx\n",
		       rsp->name, j - gpa,
		       rsp->gpnum, rsp->completed,
		       rsp->gp_flags,
		       gp_state_getname(rsp->gp_state), rsp->gp_state,
		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
		if (rsp->gp_kthread) {
			sched_show_task(rsp->gp_kthread);
			wake_up_process(rsp->gp_kthread);
		}
	}
}

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that lack NMI-triggered stack dumps.  Exclude CPUs without pending
 * RCU grace periods.
 */
static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
{
	unsigned long j;

	if (!rcu_kick_kthreads)
		return;
	j = READ_ONCE(rsp->jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rsp->gp_kthread &&
	    (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rsp->gp_kthread);
		WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
	}
}

static inline void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
{
	int cpu;
	long delta;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads(rsp);
	if (rcu_cpu_stall_suppress)
		return;

	/* Only let one CPU complain about others per time interval. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rsp->jiffies_stall,
		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:",
	       rsp->name);
	print_cpu_stall_info_begin();
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(rsp, cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
							    cpu)->cblist);
	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks(rsp);

		/* Complain about tasks blocking the grace period. */
		rcu_print_detail_task_stall(rsp);
	} else {
		if (READ_ONCE(rsp->gpnum) != gpnum ||
		    READ_ONCE(rsp->completed) == gpnum) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = READ_ONCE(rsp->gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rsp->name, j - gpa, j, gpa,
			       jiffies_till_next_fqs,
			       rcu_get_root(rsp)->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}

	rcu_check_gp_kthread_starvation(rsp);

	panic_on_rcu_stall();

	force_quiescent_state(rsp);  /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads(rsp);
	if (rcu_cpu_stall_suppress)
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
	print_cpu_stall_info_begin();
	print_cpu_stall_info(rsp, smp_processor_id());
	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
							    cpu)->cblist);
	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
		jiffies - rsp->gp_start,
		(long)rsp->gpnum, (long)rsp->completed, totqlen);

	rcu_check_gp_kthread_starvation(rsp);

	rcu_dump_cpu_stacks(rsp);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
		WRITE_ONCE(rsp->jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to
	 * make progress and it could be we're stuck in kernel space without
	 * context switches for an entirely unreasonable amount of time.
	 */
	resched_cpu(smp_processor_id());
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long completed;
	unsigned long gpnum;
	unsigned long gps;
	unsigned long j;
	unsigned long js;
	struct rcu_node *rnp;

	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
	    !rcu_gp_in_progress(rsp))
		return;
	rcu_stall_kick_kthreads(rsp);
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
	 * then rsp->gp_start, and finally rsp->completed.  These values
	 * are updated in the opposite order with memory barriers (or
	 * equivalent) during grace-period initialization and cleanup.
	 * Now, a false positive can occur if we get a new value of
	 * rsp->gp_start and an old value of rsp->jiffies_stall.  But
	 * given the memory barriers, the only way that this can happen
	 * is if one grace period ends and another starts between these
	 * two fetches.  Detect this by comparing rsp->completed with the
	 * previous fetch from rsp->gpnum.
	 *
	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
	 * and rsp->gp_start suffice to forestall false positives.
	 */
	gpnum = READ_ONCE(rsp->gpnum);
	smp_rmb(); /* Pick up ->gpnum first... */
	js = READ_ONCE(rsp->jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rsp->gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
	completed = READ_ONCE(rsp->completed);
	if (ULONG_CMP_GE(completed, gpnum) ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	if (rcu_gp_in_progress(rsp) &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

	} else if (rcu_gp_in_progress(rsp) &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(rsp, gpnum);
	}
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 */
void rcu_cpu_stall_reset(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
}

/*
 * Determine the value that ->completed will have at the end of the
 * next subsequent grace period.  This is used to tag callbacks so that
 * a CPU can invoke callbacks in a timely fashion even if that CPU has
 * been delayed from participating in future grace periods.
 *
 * The caller must hold rnp->lock.
 */
static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	lockdep_assert_held(&rnp->lock);

	/*
	 * If RCU is idle, we just wait for the next grace period.
	 * But we can only be sure that RCU is idle if we are looking
	 * at the root rcu_node structure -- otherwise, a new grace
	 * period might have started, but just not yet gotten around
	 * to initializing the current non-root rcu_node structure.
	 */
	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
		return rnp->completed + 1;

	/*
	 * Otherwise, wait for a possible partial grace period and
	 * then the subsequent full grace period.
	 */
	return rnp->completed + 2;
}

/*
 * Trace-event helper function for rcu_start_future_gp() and
 * rcu_nocb_wait_gp().
 */
static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
				unsigned long c, const char *s)
{
	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
				      rnp->completed, c, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

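/*
 * Start some future grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->need_future_gp field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock.
 */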
static bool __maybe_unused
rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
		    unsigned long *c_out)
{
	unsigned long c;
	bool ret = false;
	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);

	lockdep_assert_held(&rnp->lock);

	/*
	 * Pick up grace-period number for new callbacks.  If this
	 * grace period is already marked as needed, return to the caller.
	 */
	c = rcu_cbs_completed(rdp->rsp, rnp);
	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
	if (rnp->need_future_gp[c & 0x1]) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
		goto out;
	}

	/*
	 * If either this rcu_node structure or the root rcu_node structure
	 * believes that a grace period is in progress, then we must wait
	 * for the one following, which is in "c".  Because our request
	 * will be noticed at the end of the current grace period, we don't
	 * need to explicitly start one.  We only do the lockless check
	 * of rnp_root's fields if the current rcu_node structure thinks
	 * there is no grace period in flight, and because we hold rnp->lock,
	 * the only possible change is when rnp_root's two fields are
	 * equal, in which case rnp_root->gpnum might be concurrently
	 * incremented.  But that is OK, as it will just result in our
	 * doing some extra useless work.
	 */
	if (rnp->gpnum != rnp->completed ||
	    READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
		rnp->need_future_gp[c & 0x1]++;
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
		goto out;
	}

	/*
	 * There might be no grace period in progress.  If we don't already
	 * hold it, acquire the root rcu_node structure's lock in order to
	 * start one (if needed).
	 */
	if (rnp != rnp_root)
		raw_spin_lock_rcu_node(rnp_root);

	/*
	 * Get a new grace-period number.  If there really is no grace
	 * period in progress, it will be smaller than the one we obtained
	 * earlier.  Adjust callbacks as needed.
	 */
	c = rcu_cbs_completed(rdp->rsp, rnp_root);
	if (!rcu_is_nocb_cpu(rdp->cpu))
		(void)rcu_segcblist_accelerate(&rdp->cblist, c);

	/*
	 * If the need for the required grace period is already
	 * recorded, trace and leave.
	 */
	if (rnp_root->need_future_gp[c & 0x1]) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
		goto unlock_out;
	}

	/* Record the need for the future grace period. */
	rnp_root->need_future_gp[c & 0x1]++;

	/* If a grace period is not already in progress, start one. */
	if (rnp_root->gpnum != rnp_root->completed) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
	} else {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
		ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
	}
unlock_out:
	if (rnp != rnp_root)
		raw_spin_unlock_rcu_node(rnp_root);
out:
	if (c_out != NULL)
		*c_out = c;
	return ret;
}

/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
	int c = rnp->completed;
	int needmore;
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);

	rnp->need_future_gp[c & 0x1] = 0;
	needmore = rnp->need_future_gp[(c + 1) & 0x1];
	trace_rcu_future_gp(rnp, rdp, c,
			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread for the specified flavor of RCU.
 * Don't do a self-awaken, and don't bother awakening when there is
 * nothing for the grace-period kthread to do (as in several CPUs
 * raced to awaken, and we lost), and finally don't try to awaken
 * a kthread that has not yet been created.
 */
static void rcu_gp_kthread_wake(struct rcu_state *rsp)
{
	if (current == rsp->gp_kthread ||
	    !READ_ONCE(rsp->gp_flags) ||
	    !rsp->gp_kthread)
		return;
	swake_up(&rsp->gp_wq);
}

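/*
 * If there is room, assign a ->completed number to any callbacks on
 * this CPU that have not already been assigned.  Also accelerate any
 * callbacks that were previously assigned a ->completed number that has
 * since proven to be too conservative.  This function is idempotent, so
 * it does not hurt to call it repeatedly.  Returns true if there is
 * reason to awaken the grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */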
static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
			       struct rcu_data *rdp)
{
	bool ret = false;

	lockdep_assert_held(&rnp->lock);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Callbacks are often registered with incomplete grace-period
	 * information.  RCU therefore makes a conservative estimate of
	 * the grace-period number at which a given callback will become
	 * ready to invoke.  The following code checks this estimate and
	 * improves it when possible, thus accelerating callback invocation
	 * to an earlier grace-period number.
	 */
	if (rcu_segcblist_accelerate(&rdp->cblist, rcu_cbs_completed(rsp, rnp)))
		ret = rcu_start_future_gp(rnp, rdp, NULL);

	/* Trace depending on how much we were able to accelerate. */
	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
	return ret;
}

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist.  This function is idempotent, so it does not hurt to
 * invoke it repeatedly.  Returns true if the grace-period kthread
 * needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
			    struct rcu_data *rdp)
{
	lockdep_assert_held(&rnp->lock);

	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
		return false;

	/*
	 * Find all callbacks whose ->completed numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
	 */
	rcu_segcblist_advance(&rdp->cblist, rnp->completed);

	/* Classify any remaining callbacks. */
	return rcu_accelerate_cbs(rsp, rnp, rdp);
}

/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
			      struct rcu_data *rdp)
{
	bool ret;
	bool need_gp;

	lockdep_assert_held(&rnp->lock);

	/* Handle the ends of any preceding grace periods first. */
	if (rdp->completed == rnp->completed &&
	    !unlikely(READ_ONCE(rdp->gpwrap))) {

		/* No grace period end, so just accelerate recent callbacks. */
		ret = rcu_accelerate_cbs(rsp, rnp, rdp);

	} else {

		/* Advance callbacks. */
		ret = rcu_advance_cbs(rsp, rnp, rdp);

		/* Remember that we saw this grace-period completion. */
		rdp->completed = rnp->completed;
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
	}

	if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		rdp->gpnum = rnp->gpnum;
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
		need_gp = !!(rnp->qsmask & rdp->grpmask);
		rdp->cpu_no_qs.b.norm = need_gp;
		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
		rdp->core_needs_qs = need_gp;
		zero_cpu_stall_ticks(rdp);
		WRITE_ONCE(rdp->gpwrap, false);
	}
	return ret;
}

static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
	     rdp->completed == READ_ONCE(rnp->completed) &&
	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	needwake = __note_gp_changes(rsp, rnp, rdp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	if (needwake)
		rcu_gp_kthread_wake(rsp);
}

static void rcu_gp_slow(struct rcu_state *rsp, int delay)
{
	if (delay > 0 &&
	    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
		schedule_timeout_uninterruptible(delay);
}

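/*
 * Initialize a new grace period.  Return false if no grace period required.
 */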
static bool rcu_gp_init(struct rcu_state *rsp)
{
	unsigned long oldmask;
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root(rsp);

	WRITE_ONCE(rsp->gp_activity, jiffies);
	raw_spin_lock_irq_rcu_node(rnp);
	if (!READ_ONCE(rsp->gp_flags)) {
		/* Spurious wakeup, tell caller to go back to sleep.  */
		raw_spin_unlock_irq_rcu_node(rnp);
		return false;
	}
	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */

	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
		/*
		 * Grace period already in progress, don't start another.
		 * Not supposed to be able to happen.
		 */
		raw_spin_unlock_irq_rcu_node(rnp);
		return false;
	}

	/* Advance to a new grace period and initialize state. */
	record_gp_stall_check_time(rsp);
	/* Record GP times before starting GP, hence smp_store_release(). */
	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
	raw_spin_unlock_irq_rcu_node(rnp);

	/*
	 * Apply per-leaf buffered online and offline operations to the
	 * rcu_node tree.  Note that this new grace period need not wait
	 * for subsequent online CPUs, and that quiescent-state forcing
	 * will handle subsequent offline CPUs.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		rcu_gp_slow(rsp, gp_preinit_delay);
		raw_spin_lock_irq_rcu_node(rnp);
		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
		    !rnp->wait_blkd_tasks) {
			/* Nothing to do on this leaf rcu_node structure. */
			raw_spin_unlock_irq_rcu_node(rnp);
			continue;
		}

		/* Record old state, apply changes to ->qsmaskinit field. */
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit = rnp->qsmaskinitnext;

		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
		if (!oldmask != !rnp->qsmaskinit) {
			if (!oldmask) /* First online CPU for this rcu_node. */
				rcu_init_new_rnp(rnp);
			else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
				rnp->wait_blkd_tasks = true;
			else /* Last offline CPU and can propagate. */
				rcu_cleanup_dead_rnp(rnp);
		}

		/*
		 * If all waited-on tasks from prior grace period are
		 * done, and if all this rcu_node structure's CPUs are
		 * still offline, propagate up the rcu_node tree and
		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
		 * rcu_node structure's CPUs has since come back online,
		 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
		 * checks for this, so just call it unconditionally).
		 */
		if (rnp->wait_blkd_tasks &&
		    (!rcu_preempt_has_tasks(rnp) ||
		     rnp->qsmaskinit)) {
			rnp->wait_blkd_tasks = false;
			rcu_cleanup_dead_rnp(rnp);
		}

		raw_spin_unlock_irq_rcu_node(rnp);
	}

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first order,
	 * starting from the root rcu_node structure, relying on the layout
	 * of the tree within the rsp->node[] array.  Note that other CPUs
	 * will access only the leaves of the hierarchy, thus seeing that no
	 * grace period is in progress, at least until the corresponding
	 * leaf node has been initialized.
	 *
	 * The grace period cannot complete until the initialization
	 * process finishes, because this kthread handles both.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		rcu_gp_slow(rsp, gp_init_delay);
		raw_spin_lock_irq_rcu_node(rnp);
		rdp = this_cpu_ptr(rsp->rda);
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		WRITE_ONCE(rnp->gpnum, rsp->gpnum);
		if (WARN_ON_ONCE(rnp->completed != rsp->completed))
			WRITE_ONCE(rnp->completed, rsp->completed);
		if (rnp == rdp->mynode)
			(void)__note_gp_changes(rsp, rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
		raw_spin_unlock_irq_rcu_node(rnp);
		cond_resched_rcu_qs();
		WRITE_ONCE(rsp->gp_activity, jiffies);
	}

	return true;
}

/*
 * Helper function for swait_event_interruptible_timeout() wakeup
 * at force-quiescent-state time.
 */
static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Someone like call_rcu() requested a force-quiescent-state scan. */
	*gfp = READ_ONCE(rsp->gp_flags);
	if (*gfp & RCU_GP_FLAG_FQS)
		return true;

	/* The current grace period has completed. */
	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
		return true;

	return false;
}

/*
 * Do one round of quiescent-state forcing.
 */
static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	WRITE_ONCE(rsp->gp_activity, jiffies);
	rsp->n_force_qs++;
	if (first_time) {
		/* Collect dyntick-idle snapshots. */
		force_qs_rnp(rsp, dyntick_save_progress_counter);
	} else {
		/* Handle dyntick-idle and offline CPUs. */
		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
	}
	/* Clear flag to prevent immediate re-entry. */
	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
		raw_spin_lock_irq_rcu_node(rnp);
		WRITE_ONCE(rsp->gp_flags,
			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
		raw_spin_unlock_irq_rcu_node(rnp);
	}
}

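/*
 * Clean up after the old grace period.
 */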
static void rcu_gp_cleanup(struct rcu_state *rsp)
{
	unsigned long gp_duration;
	bool needgp = false;
	int nocb = 0;
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root(rsp);
	struct swait_queue_head *sq;

	WRITE_ONCE(rsp->gp_activity, jiffies);
	raw_spin_lock_irq_rcu_node(rnp);
	gp_duration = jiffies - rsp->gp_start;
	if (gp_duration > rsp->gp_max)
		rsp->gp_max = gp_duration;

	/*
	 * We know the grace period is complete, but to everyone else
	 * it appears to still be ongoing.  But it is also the case
	 * that to everyone else it looks like there is nothing that
	 * they can do to advance the grace period.  It is therefore
	 * safe for us to drop the lock in order to mark the grace
	 * period as completed in all of the rcu_node structures.
	 */
	raw_spin_unlock_irq_rcu_node(rnp);

	/*
	 * Propagate new ->completed value to rcu_node structures so
	 * that other CPUs don't have to wait until the start of the next
	 * grace period to process their callbacks.  This also avoids
	 * some nasty RCU grace-period initialization races by forcing
	 * the end of the current grace period to be completely recorded in
	 * all of the rcu_node structures before the beginning of the next
	 * grace period is recorded in any of the rcu_node structures.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irq_rcu_node(rnp);
		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
		WARN_ON_ONCE(rnp->qsmask);
		WRITE_ONCE(rnp->completed, rsp->gpnum);
		rdp = this_cpu_ptr(rsp->rda);
		if (rnp == rdp->mynode)
			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
		/* smp_mb() provided by prior unlock-lock pair. */
		nocb += rcu_future_gp_cleanup(rsp, rnp);
		sq = rcu_nocb_gp_get(rnp);
		raw_spin_unlock_irq_rcu_node(rnp);
		rcu_nocb_gp_cleanup(sq);
		cond_resched_rcu_qs();
		WRITE_ONCE(rsp->gp_activity, jiffies);
		rcu_gp_slow(rsp, gp_cleanup_delay);
	}
	rnp = rcu_get_root(rsp);
	raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
	rcu_nocb_gp_set(rnp, nocb);

	/* Declare grace period done. */
	WRITE_ONCE(rsp->completed, rsp->gpnum);
	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
	rsp->gp_state = RCU_GP_IDLE;
	rdp = this_cpu_ptr(rsp->rda);
	/* Advance CBs to reduce false positives below. */
	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
	if (needgp || cpu_needs_another_gp(rsp, rdp)) {
		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
		trace_rcu_grace_period(rsp->name,
				       READ_ONCE(rsp->gpnum),
				       TPS("newreq"));
	}
	raw_spin_unlock_irq_rcu_node(rnp);
}

/*
 * Body of kthread that handles grace periods.
 */
static int __noreturn rcu_gp_kthread(void *arg)
{
	bool first_gp_fqs;
	int gf;
	unsigned long j;
	int ret;
	struct rcu_state *rsp = arg;
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_bind_gp_kthread();
	for (;;) {

		/* Handle grace-period start. */
		for (;;) {
			trace_rcu_grace_period(rsp->name,
					       READ_ONCE(rsp->gpnum),
					       TPS("reqwait"));
			rsp->gp_state = RCU_GP_WAIT_GPS;
			swait_event_interruptible(rsp->gp_wq,
						 READ_ONCE(rsp->gp_flags) &
						 RCU_GP_FLAG_INIT);
			rsp->gp_state = RCU_GP_DONE_GPS;
			/* Locking provides needed memory barrier. */
			if (rcu_gp_init(rsp))
				break;
			cond_resched_rcu_qs();
			WRITE_ONCE(rsp->gp_activity, jiffies);
			WARN_ON(signal_pending(current));
			trace_rcu_grace_period(rsp->name,
					       READ_ONCE(rsp->gpnum),
					       TPS("reqwaitsig"));
		}

		/* Handle quiescent-state forcing. */
		first_gp_fqs = true;
		j = jiffies_till_first_fqs;
		if (j > HZ) {
			j = HZ;
			jiffies_till_first_fqs = HZ;
		}
		ret = 0;
		for (;;) {
			if (!ret) {
				rsp->jiffies_force_qs = jiffies + j;
				WRITE_ONCE(rsp->jiffies_kick_kthreads,
					   jiffies + 3 * j);
			}
			trace_rcu_grace_period(rsp->name,
					       READ_ONCE(rsp->gpnum),
					       TPS("fqswait"));
			rsp->gp_state = RCU_GP_WAIT_FQS;
			ret = swait_event_interruptible_timeout(rsp->gp_wq,
					rcu_gp_fqs_check_wake(rsp, &gf), j);
			rsp->gp_state = RCU_GP_DOING_FQS;
			/* Locking provides needed memory barriers. */
			/* If grace period done, leave loop. */
			if (!READ_ONCE(rnp->qsmask) &&
			    !rcu_preempt_blocked_readers_cgp(rnp))
				break;
			/* If time for quiescent-state forcing, do it. */
			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
			    (gf & RCU_GP_FLAG_FQS)) {
				trace_rcu_grace_period(rsp->name,
						       READ_ONCE(rsp->gpnum),
						       TPS("fqsstart"));
				rcu_gp_fqs(rsp, first_gp_fqs);
				first_gp_fqs = false;
				trace_rcu_grace_period(rsp->name,
						       READ_ONCE(rsp->gpnum),
						       TPS("fqsend"));
				cond_resched_rcu_qs();
				WRITE_ONCE(rsp->gp_activity, jiffies);
				ret = 0; /* Force full wait till next FQS. */
				j = jiffies_till_next_fqs;
				if (j > HZ) {
					j = HZ;
					jiffies_till_next_fqs = HZ;
				} else if (j < 1) {
					j = 1;
					jiffies_till_next_fqs = 1;
				}
			} else {
				/* Deal with stray signal. */
				cond_resched_rcu_qs();
				WRITE_ONCE(rsp->gp_activity, jiffies);
				WARN_ON(signal_pending(current));
				trace_rcu_grace_period(rsp->name,
						       READ_ONCE(rsp->gpnum),
						       TPS("fqswaitsig"));
				ret = 1; /* Keep old FQS timing. */
				j = jiffies;
				if (time_after(jiffies, rsp->jiffies_force_qs))
					j = 1;
				else
					j = rsp->jiffies_force_qs - j;
			}
		}

		/* Handle grace-period end. */
		rsp->gp_state = RCU_GP_CLEANUP;
		rcu_gp_cleanup(rsp);
		rsp->gp_state = RCU_GP_CLEANED;
	}
}

/*
 * Start a new RCU grace period if warranted, re-initializing the hierarchy
 * in preparation for detecting the next grace period.  The caller must hold
 * the root node's ->lock and hard irqs must be disabled.
 *
 * Note that it is legal for a dying CPU (which is marked as offline) to
 * invoke this function.  This can happen when the dying CPU reports its
 * quiescent state.
 *
 * Returns true if the grace-period kthread must be awakened.
 */
static bool
rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
		      struct rcu_data *rdp)
{
	lockdep_assert_held(&rnp->lock);
	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
		/*
		 * Either we have not yet spawned the grace-period
		 * task, this CPU does not need another grace period,
		 * or a grace period is already in progress.
		 * Either way, don't start a new grace period.
		 */
		return false;
	}
	WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
			       TPS("newreq"));

	/*
	 * We can't do wakeups while holding the rnp->lock, as that
	 * could cause possible deadlocks with the rq->lock.  Defer
	 * the wakeup to our caller.
	 */
	return true;
}

/*
 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
 * callbacks.  Note that rcu_start_gp_advanced() cannot do this because
 * it is invoked indirectly from rcu_advance_cbs(), which would result in
 * endless recursion.  Returns true if the grace-period kthread must be
 * awakened.
 */
static bool rcu_start_gp(struct rcu_state *rsp)
{
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
	struct rcu_node *rnp = rcu_get_root(rsp);
	bool ret = false;

	/*
	 * If there is no grace period in progress right now, any
	 * callbacks we have up to this point will be satisfied by the
	 * next grace period.  Also, advancing the callbacks reduces the
	 * probability of false positives from cpu_needs_another_gp()
	 * resulting in pointless grace periods.  So, advance callbacks
	 * then start the grace period!
	 */
	ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
	ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
	return ret;
}

/*
 * Report a full set of quiescent states to the specified rcu_state data
 * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
 * kthread if another grace period is required.  Whether we wake the
 * grace-period kthread or it awakens itself for the next round of
 * quiescent-state forcing, that kthread will clean up after the
 * just-completed grace period.  Note that the caller must hold the root
 * rcu_node structure's ->lock and that irqs must be disabled.
 */
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	lockdep_assert_held(&rcu_get_root(rsp)->lock);
	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
	rcu_gp_kthread_wake(rsp);
}

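/*
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
 * must be represented by the same rcu_node structure (which need not be
 * a leaf rcu_node structure, though it often will be).  The gps parameter
 * is the grace-period snapshot, which means that the quiescent states
 * are valid only if rnp->gpnum is equal to gps.  That structure's lock
 * is released before return.
 */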
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
		  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long oldmask = 0;
	struct rcu_node *rnp_c;

	lockdep_assert_held(&rnp->lock);

	/* Walk up the rcu_node hierarchy. */
	for (;;) {
		if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {

			/*
			 * Our bit has already been cleared, or the
			 * relevant grace period is already over, so done.
			 */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			return;
		}
		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
		rnp->qsmask &= ~mask;
		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
						 mask, rnp->qsmask, rnp->level,
						 rnp->grplo, rnp->grphi,
						 !!rnp->gp_tasks);
		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {

			/* Other bits still set at this level, so done. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			return;
		}
		mask = rnp->grpmask;
		if (rnp->parent == NULL) {

			/* No more levels.  Exit loop holding root lock. */
			break;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		rnp_c = rnp;
		rnp = rnp->parent;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		oldmask = rnp_c->qsmask;
	}

	/*
	 * Get here if we are the last CPU to pass through a quiescent
	 * state for this grace period.  Invoke rcu_report_qs_rsp()
	 * to clean up and start the next grace period if one is needed.
	 */
	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
				      struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long gps;
	unsigned long mask;
	struct rcu_node *rnp_p;

	lockdep_assert_held(&rnp->lock);
	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Only one rcu_node structure in the tree, so don't
		 * try to report up to its nonexistent parent!
		 */
		rcu_report_qs_rsp(rsp, flags);
		return;
	}

	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
	gps = rnp->gpnum;
	mask = rnp->grpmask;
	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
}

/*
 * Record a quiescent state for the specified CPU to that CPU's rcu_data
 * structure.  This must be called from the specified CPU.
 */
static void
rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	unsigned long mask;
	bool needwake;
	struct rcu_node *rnp;

	rnp = rdp->mynode;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
	    rnp->completed == rnp->gpnum || rdp->gpwrap) {

		/*
		 * The grace period in which this quiescent state was
		 * recorded has ended, so don't report it upwards.
		 * We will instead need a new quiescent state that lies
		 * within the current grace period.
		 */
		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	mask = rdp->grpmask;
	if ((rnp->qsmask & mask) == 0) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	} else {
		rdp->core_needs_qs = false;

		/*
		 * This GP can't end until cpu checks in, so all of our
		 * callbacks can be processed during the next GP.
		 */
		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);

		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
		/* ^^^ Released rnp->lock */
		if (needwake)
			rcu_gp_kthread_wake(rsp);
	}
}

/*
 * Check to see if there is a new grace period of which this CPU
 * is not yet aware, and if so, set up local rcu_data state for it.
 * Otherwise, see if this CPU has just passed through its first
 * quiescent state for this grace period, and record that fact if so.
 */
static void
rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
{
	/* Check for grace-period ends and beginnings. */
	note_gp_changes(rsp, rdp);

	/*
	 * Does this CPU still need to do its part for current grace period?
	 * If no, return and let the other CPUs do their part as well.
	 */
	if (!rdp->core_needs_qs)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (rdp->cpu_no_qs.b.norm)
		return;

	/*
	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
	 * judge of that).
	 */
	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
}

/*
 * Send the specified CPU's RCU callbacks to the orphanage.  The
 * specified CPU must be offline, and the caller must hold the
 * ->orphan_lock.
 */
static void
rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
			  struct rcu_node *rnp, struct rcu_data *rdp)
{
	lockdep_assert_held(&rsp->orphan_lock);

	/* No-CBs CPUs do not have orphanable callbacks. */
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
		return;

	/*
	 * Orphan the callbacks.  First adjust the counts.  This is safe
	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
	 * cannot be running now.  Thus no memory barrier is required.
	 */
	rdp->n_cbs_orphaned += rcu_segcblist_n_cbs(&rdp->cblist);
	rcu_segcblist_extract_count(&rdp->cblist, &rsp->orphan_done);

	/*
	 * Next, move those callbacks still needing a grace period to
	 * the orphanage, where some other CPU will pick them up.
	 * Some of the callbacks might have gone partway through a grace
	 * period, but that is too bad.  They get to start over because we
	 * cannot assume that grace periods are synchronized across CPUs.
	 */
	rcu_segcblist_extract_pend_cbs(&rdp->cblist, &rsp->orphan_pend);

	/*
	 * Then move the ready-to-invoke callbacks to the orphanage,
	 * where some other CPU will pick them up.  These will not be
	 * required to pass through another grace period: They are done.
	 */
	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rsp->orphan_done);

	/* Finally, disallow further callbacks on this CPU. */
	rcu_segcblist_disable(&rdp->cblist);
}

/*
 * Adopt the RCU callbacks from the specified rcu_state structure's
 * orphanage.  The caller must hold the ->orphan_lock.
 */
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
{
	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);

	lockdep_assert_held(&rsp->orphan_lock);

	/* No-CBs CPUs are handled specially. */
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
	    rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
		return;

	/* Do the accounting first. */
	rdp->n_cbs_adopted += rsp->orphan_done.len;
	if (rsp->orphan_done.len_lazy != rsp->orphan_done.len)
		rcu_idle_count_callbacks_posted();
	rcu_segcblist_insert_count(&rdp->cblist, &rsp->orphan_done);

	/*
	 * We do not need a memory barrier here because the only way we
	 * can get here if there is an rcu_barrier() in flight is if
	 * we are the task doing the rcu_barrier().
	 */

	/* First adopt the ready-to-invoke callbacks, then the done ones. */
	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rsp->orphan_done);
	WARN_ON_ONCE(rsp->orphan_done.head);
	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rsp->orphan_pend);
	WARN_ON_ONCE(rsp->orphan_pend.head);
	WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) !=
		     !rcu_segcblist_n_cbs(&rdp->cblist));
}

/*
 * Trace the fact that this CPU is going offline.
 */
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
{
	RCU_TRACE(unsigned long mask;)
	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
	RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)

	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return;

	RCU_TRACE(mask = rdp->grpmask;)
	trace_rcu_grace_period(rsp->name,
			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
			       TPS("cpuofl"));
}

/*
 * All CPUs for the specified rcu_node structure have gone offline,
 * and all tasks that were preempted within an RCU read-side critical
 * section while running on one of those CPUs have since exited their RCU
 * read-side critical section.  Some other CPU is reporting this fact with
 * the specified rcu_node structure's ->lock held and interrupts disabled.
 * This function therefore goes up the tree of rcu_node structures,
 * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
 * the leaf rcu_node structure's ->qsmaskinit field has already been
 * updated.
 *
 * This function does check that the specified rcu_node structure has
 * all CPUs offline and no blocked tasks, so it is OK to invoke it
 * prematurely.  That said, invoking it after the fact will cost you
 * a needless lock acquisition.  So once it has done its work, don't
 * invoke it again.
 */
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	struct rcu_node *rnp = rnp_leaf;

	lockdep_assert_held(&rnp->lock);
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
	    rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
		return;
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			break;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		rnp->qsmaskinit &= ~mask;
		rnp->qsmask &= ~mask;
		if (rnp->qsmaskinit) {
			raw_spin_unlock_rcu_node(rnp);
			/* irqs remain disabled. */
			return;
		}
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	}
}

/*
 * The CPU has been completely removed, and some other CPU is reporting
 * this fact from process context.  Do the remainder of the cleanup,
 * including orphaning the outgoing CPU's RCU callbacks, and also
 * adopting them.  There can only be one CPU hotplug operation at a time,
 * so no other CPU can be attempting to update rcu_cpu_kthread_task.
 */
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return;

	/* Adjust any no-longer-needed kthreads. */
	rcu_boost_kthread_setaffinity(rnp, -1);

	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
	rcu_adopt_orphan_cbs(rsp, flags);
	raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);

	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
		  !rcu_segcblist_empty(&rdp->cblist),
		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
		  rcu_segcblist_first_cb(&rdp->cblist));
}

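/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */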
2741static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2742{
2743 unsigned long flags;
2744 struct rcu_head *rhp;
2745 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2746 long bl, count;
2747
2748
2749 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2750 trace_rcu_batch_start(rsp->name,
2751 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2752 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2753 trace_rcu_batch_end(rsp->name, 0,
2754 !rcu_segcblist_empty(&rdp->cblist),
2755 need_resched(), is_idle_task(current),
2756 rcu_is_callbacks_kthread());
2757 return;
2758 }
2759
2760
2761
2762
2763
2764
2765 local_irq_save(flags);
2766 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2767 bl = rdp->blimit;
2768 trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2769 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2770 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2771 local_irq_restore(flags);
2772
2773
2774 rhp = rcu_cblist_dequeue(&rcl);
2775 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2776 debug_rcu_head_unqueue(rhp);
2777 if (__rcu_reclaim(rsp->name, rhp))
2778 rcu_cblist_dequeued_lazy(&rcl);
2779
2780
2781
2782
2783 if (-rcl.len >= bl &&
2784 (need_resched() ||
2785 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2786 break;
2787 }
2788
2789 local_irq_save(flags);
2790 count = -rcl.len;
2791 trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
2792 is_idle_task(current), rcu_is_callbacks_kthread());
2793
2794
2795 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2796 smp_mb();
2797 rdp->n_cbs_invoked += count;
2798 rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2799
2800
2801 count = rcu_segcblist_n_cbs(&rdp->cblist);
2802 if (rdp->blimit == LONG_MAX && count <= qlowmark)
2803 rdp->blimit = blimit;
2804
2805
2806 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2807 rdp->qlen_last_fqs_check = 0;
2808 rdp->n_force_qs_snap = rsp->n_force_qs;
2809 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2810 rdp->qlen_last_fqs_check = count;
2811 WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));
2812
2813 local_irq_restore(flags);
2814
2815
2816 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2817 invoke_rcu_core();
2818}
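
/*
 * Batch-size tuning sketch (values illustrative, not recommendations),
 * assuming blimit, qhimark, and qlowmark are the module parameters
 * declared earlier in this file under the "rcutree." prefix.  Booting
 * with:
 *
 *	rcutree.blimit=20 rcutree.qhimark=20000 rcutree.qlowmark=200
 *
 * would invoke at most 20 callbacks per rcu_do_batch() pass under
 * normal load, switch to effectively unlimited batches (blimit set to
 * LONG_MAX, see __call_rcu_core() below) once a CPU queues more than
 * 20000 callbacks beyond its last force-quiescent-state check, and
 * restore the normal limit once the queue drains to 200 or fewer.
 */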

/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule RCU core processing.
 *
 * This function must be called from hardirq context.  It is normally
 * invoked from the scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	trace_rcu_utilization(TPS("Start scheduler-tick"));
	increment_cpu_stall_ticks();
	if (user || rcu_is_cpu_rrupt_from_idle()) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so note it.
		 *
		 * No memory barrier is required here because both
		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
		 * variables that other CPUs neither access nor modify,
		 * at least not while the corresponding CPU is online.
		 */

		rcu_sched_qs();
		rcu_bh_qs();

	} else if (!in_softirq()) {

		/*
		 * Get here if this CPU did not take its interrupt from
		 * softirq, in other words, if it is not interrupting
		 * an rcu_bh read-side critical section.  This is an _bh
		 * critical section, so note it.
		 */

		rcu_bh_qs();
	}
	rcu_preempt_check_callbacks();
	if (rcu_pending())
		invoke_rcu_core();
	if (user)
		rcu_note_voluntary_context_switch(current);
	trace_rcu_utilization(TPS("End scheduler-tick"));
}

/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
 * Also initiate boosting for any threads blocked on the root rcu_node.
 *
 * The caller must have suppressed start of new grace periods.
 */
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		cond_resched_rcu_qs();
		mask = 0;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask == 0) {
			if (rcu_state_p == &rcu_sched_state ||
			    rsp != rcu_state_p ||
			    rcu_preempt_blocked_readers_cgp(rnp)) {
				/*
				 * No point in scanning bits because they
				 * are all zero.  But we might need to
				 * priority-boost blocked readers.
				 */
				rcu_initiate_boost(rnp, flags);
				/* rcu_initiate_boost() releases rnp->lock */
				continue;
			}
			if (rnp->parent &&
			    (rnp->parent->qsmask & rnp->grpmask)) {
				/*
				 * Race between grace-period
				 * initialization and task exiting RCU
				 * read-side critical section: Report.
				 */
				rcu_report_unblock_qs_rnp(rsp, rnp, flags);
				/* rcu_report_unblock_qs_rnp() rlses rnp->lock */
				continue;
			}
		}
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
			if ((rnp->qsmask & bit) != 0) {
				if (f(per_cpu_ptr(rsp->rda, cpu)))
					mask |= bit;
			}
		}
		if (mask != 0) {
			/* Idle/offline CPUs, report (releases rnp->lock). */
			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
		} else {
			/* Nothing to do here, so just drop the lock. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
	}
}

/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */
static void force_quiescent_state(struct rcu_state *rsp)
{
	unsigned long flags;
	bool ret;
	struct rcu_node *rnp;
	struct rcu_node *rnp_old = NULL;

	/* Funnel through hierarchy to reduce memory contention. */
	rnp = __this_cpu_read(rsp->rda->mynode);
	for (; rnp != NULL; rnp = rnp->parent) {
		ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
		      !raw_spin_trylock(&rnp->fqslock);
		if (rnp_old != NULL)
			raw_spin_unlock(&rnp_old->fqslock);
		if (ret) {
			rsp->n_force_qs_lh++;
			return;
		}
		rnp_old = rnp;
	}
	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */

	/* Reached the root of the rcu_node tree, acquire lock. */
	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
	raw_spin_unlock(&rnp_old->fqslock);
	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
		rsp->n_force_qs_lh++;
		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
		return;  /* Someone beat us to it. */
	}
	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
	rcu_gp_kthread_wake(rsp);
}

/*
 * This does the RCU core processing work for the specified rcu_state
 * and rcu_data structures.  This may be called only from the CPU to
 * whom the rdp belongs.
 */
static void
__rcu_process_callbacks(struct rcu_state *rsp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);

	WARN_ON_ONCE(!rdp->beenonline);

	/* Update RCU state based on any recent quiescent states. */
	rcu_check_quiescent_state(rsp, rdp);

	/* Does this CPU require a not-yet-started grace period? */
	local_irq_save(flags);
	if (cpu_needs_another_gp(rsp, rdp)) {
		raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
		needwake = rcu_start_gp(rsp);
		raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
		if (needwake)
			rcu_gp_kthread_wake(rsp);
	} else {
		local_irq_restore(flags);
	}

	/* If there are callbacks ready, invoke them. */
	if (rcu_segcblist_ready_cbs(&rdp->cblist))
		invoke_rcu_callbacks(rsp, rdp);

	/* Do any needed deferred wakeups of rcuo kthreads. */
	do_nocb_deferred_wakeup(rdp);
}

/*
 * Do RCU core processing for the current CPU.
 */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_state *rsp;

	if (cpu_is_offline(smp_processor_id()))
		return;
	trace_rcu_utilization(TPS("Start RCU core"));
	for_each_rcu_flavor(rsp)
		__rcu_process_callbacks(rsp);
	trace_rcu_utilization(TPS("End RCU core"));
}

/*
 * Schedule RCU callback invocation.  If the specified type of RCU
 * does not support RCU priority boosting, just do a direct call,
 * otherwise wake up the per-CPU kernel kthread.  Note that because we
 * are running on the current CPU with softirqs disabled, the
 * rcu_cpu_kthread_task cannot disappear out from under us.
 */
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
		return;
	if (likely(!rsp->boost)) {
		rcu_do_batch(rsp, rdp);
		return;
	}
	invoke_rcu_callbacks_kthread();
}

static void invoke_rcu_core(void)
{
	if (cpu_online(smp_processor_id()))
		raise_softirq(RCU_SOFTIRQ);
}

/*
 * Handle any core-RCU processing required by a call_rcu() invocation.
 */
static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
			    struct rcu_head *head, unsigned long flags)
{
	bool needwake;

	/*
	 * If called from an extended quiescent state, invoke the RCU
	 * core in order to force a re-evaluation of RCU's idleness.
	 */
	if (!rcu_is_watching())
		invoke_rcu_core();

	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
		return;

	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke force_quiescent_state()
	 * if some other CPU has recently done so.  Also, don't bother
	 * invoking force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */
	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
		     rdp->qlen_last_fqs_check + qhimark)) {

		/* Are we ignoring a completed grace period? */
		note_gp_changes(rsp, rdp);

		/* Start a new grace period if one not already started. */
		if (!rcu_gp_in_progress(rsp)) {
			struct rcu_node *rnp_root = rcu_get_root(rsp);

			raw_spin_lock_rcu_node(rnp_root);
			needwake = rcu_start_gp(rsp);
			raw_spin_unlock_rcu_node(rnp_root);
			if (needwake)
				rcu_gp_kthread_wake(rsp);
		} else {
			/* Give the grace period a kick. */
			rdp->blimit = LONG_MAX;
			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
				force_quiescent_state(rsp);
			rdp->n_force_qs_snap = rsp->n_force_qs;
			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
		}
	}
}

/*
 * RCU callback function to leak a callback.
 */
static void rcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Helper function for call_rcu() and friends.  The cpu argument will
 * normally be -1, indicating "currently running CPU".  It may specify
 * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
 * is expected to specify a CPU.
 */
static void
__call_rcu(struct rcu_head *head, rcu_callback_t func,
	   struct rcu_state *rsp, int cpu, bool lazy)
{
	unsigned long flags;
	struct rcu_data *rdp;

	/* Misaligned rcu_head! */
	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));

	if (debug_rcu_head_queue(head)) {
		/*
		 * Probable double call_rcu(), so leak the callback.
		 * Use rcu:rcu_callback trace event to find the previous
		 * time callback was passed to __call_rcu().
		 */
		WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n",
			  head, head->func);
		WRITE_ONCE(head->func, rcu_leak_callback);
		return;
	}
	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = this_cpu_ptr(rsp->rda);

	/* Add the callback to our list. */
	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
		int offline;

		if (cpu != -1)
			rdp = per_cpu_ptr(rsp->rda, cpu);
		if (likely(rdp->mynode)) {
			/* Post-boot, so this should be for a no-CBs CPU. */
			offline = !__call_rcu_nocb(rdp, head, lazy, flags);
			WARN_ON_ONCE(offline);
			/* Offline CPU, _call_rcu() illegal, leak callback.  */
			local_irq_restore(flags);
			return;
		}
		/*
		 * Very early boot, before rcu_init().  Initialize if needed
		 * and then drop through to queue the callback.
		 */
		BUG_ON(cpu != -1);
		WARN_ON_ONCE(!rcu_is_watching());
		if (rcu_segcblist_empty(&rdp->cblist))
			rcu_segcblist_init(&rdp->cblist);
	}
	rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
	if (!lazy)
		rcu_idle_count_callbacks_posted();

	if (__is_kfree_rcu_offset((unsigned long)func))
		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
					 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
					 rcu_segcblist_n_cbs(&rdp->cblist));
	else
		trace_rcu_callback(rsp->name, head,
				   rcu_segcblist_n_lazy_cbs(&rdp->cblist),
				   rcu_segcblist_n_cbs(&rdp->cblist));

	/* Go handle any RCU core processing required. */
	__call_rcu_core(rsp, rdp, head, flags);
	local_irq_restore(flags);
}

/**
 * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 *
 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
 * - anything that disables preemption.
 *
 * These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_sched_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
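
/*
 * A minimal caller-side sketch of call_rcu_sched() use; "struct foo",
 * "foo_head", and "foo_reclaim" are hypothetical names, not part of
 * this file:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head foo_head;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, foo_head);
 *
 *		kfree(fp);
 *	}
 *
 * After unlinking fp from all reader-visible structures, the updater
 * hands the memory to RCU instead of freeing it immediately:
 *
 *	call_rcu_sched(&fp->foo_head, foo_reclaim);
 */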

/**
 * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *
 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *
 * These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    rcu_callback_t func)
{
	__call_rcu(head, func, rcu_state_p, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
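
/*
 * Callers normally reach kfree_call_rcu() through the kfree_rcu()
 * macro, which encodes the offset of the rcu_head within the enclosing
 * structure as the "callback function".  A sketch, with "struct foo"
 * and its "rcu" field being hypothetical caller-side names:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 * After unlinking fp from all reader-visible structures:
 *
 *	kfree_rcu(fp, rcu);
 *
 * This behaves like queueing a callback that simply does kfree(fp),
 * but requires no callback function to be written, and the callback
 * counts as lazy for the purposes of dyntick-idle processing.
 */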

/*
 * Because a context switch is a grace period for RCU-sched and RCU-bh,
 * any blocking grace-period wait automatically implies a grace period
 * if there is only one CPU online at any point in time during execution
 * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds
 * some overhead: RCU still operates correctly.
 */
static inline int rcu_blocking_is_gp(void)
{
	int ret;

	might_sleep();  /* Check for RCU read-side critical section. */
	preempt_disable();
	ret = num_online_cpus() <= 1;
	preempt_enable();
	return ret;
}

/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.   These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * non-threaded hardware-interrupt handlers, in progress on entry will
 * have completed before this primitive returns.  However, this does not
 * guarantee that softirq handlers will have completed, since in some
 * kernels, these handlers can run in process context, and can block.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_sched() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-sched read-side critical section whose beginning
 * preceded the call to synchronize_sched().  In addition, each CPU having
 * an RCU read-side critical section that extends beyond the return from
 * synchronize_sched() is guaranteed to have executed a full memory barrier
 * after the beginning of synchronize_sched() and before the beginning of
 * that RCU read-side critical section.  Note that these guarantees include
 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
 * that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_sched(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 */
void synchronize_sched(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched() in RCU-sched read-side critical section");
	if (rcu_blocking_is_gp())
		return;
	if (rcu_gp_is_expedited())
		synchronize_sched_expedited();
	else
		wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
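
/*
 * Updater-side sketch (names are hypothetical): publish a new version
 * of an RCU-sched-protected structure, wait for all pre-existing
 * readers, and only then free the old version:
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	synchronize_sched();
 *	kfree(old);
 *
 * Readers traverse gp under preempt_disable() or rcu_read_lock_sched(),
 * so none of them can still hold a reference to old once
 * synchronize_sched() returns.
 */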

/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_bh(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
	if (rcu_blocking_is_gp())
		return;
	if (rcu_gp_is_expedited())
		synchronize_rcu_bh_expedited();
	else
		wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);

/**
 * get_state_synchronize_rcu - Snapshot current RCU state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
unsigned long get_state_synchronize_rcu(void)
{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gpnum.
	 */
	smp_mb();  /* ^^^ */

	/*
	 * Make sure this load happens before the purportedly
	 * time-consuming work between get_state_synchronize_rcu()
	 * and cond_synchronize_rcu().
	 */
	return smp_load_acquire(&rcu_state_p->gpnum);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 *
 * If a full RCU grace period has elapsed since the earlier call to
 * get_state_synchronize_rcu(), just return.  Otherwise, invoke
 * synchronize_rcu() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.  But
 * counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
void cond_synchronize_rcu(unsigned long oldstate)
{
	unsigned long newstate;

	/*
	 * Ensure that this load happens before any RCU-destructive
	 * actions the caller might carry out after we return.
	 */
	newstate = smp_load_acquire(&rcu_state_p->completed);
	if (ULONG_CMP_GE(oldstate, newstate))
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
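
/*
 * Polling-style sketch of the pair above ("do_expensive_work()" is a
 * hypothetical placeholder): snapshot the grace-period state, do other
 * work, then block only if no grace period elapsed in the meantime:
 *
 *	unsigned long cookie = get_state_synchronize_rcu();
 *
 *	do_expensive_work();
 *	cond_synchronize_rcu(cookie);
 *
 * If a full grace period completed while do_expensive_work() ran,
 * cond_synchronize_rcu() returns immediately; otherwise it waits just
 * as synchronize_rcu() would.
 */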

/**
 * get_state_synchronize_sched - Snapshot current RCU-sched state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_sched()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
unsigned long get_state_synchronize_sched(void)
{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gpnum.
	 */
	smp_mb();  /* ^^^ */

	/*
	 * Make sure this load happens before the purportedly
	 * time-consuming work between get_state_synchronize_sched()
	 * and cond_synchronize_sched().
	 */
	return smp_load_acquire(&rcu_sched_state.gpnum);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_sched);

/**
 * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_sched()
 *
 * If a full RCU-sched grace period has elapsed since the earlier call to
 * get_state_synchronize_sched(), just return.  Otherwise, invoke
 * synchronize_sched() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.  But
 * counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
void cond_synchronize_sched(unsigned long oldstate)
{
	unsigned long newstate;

	/*
	 * Ensure that this load happens before any RCU-destructive
	 * actions the caller might carry out after we return.
	 */
	newstate = smp_load_acquire(&rcu_sched_state.completed);
	if (ULONG_CMP_GE(oldstate, newstate))
		synchronize_sched();
}
EXPORT_SYMBOL_GPL(cond_synchronize_sched);

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
	struct rcu_node *rnp = rdp->mynode;

	rdp->n_rcu_pending++;

	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rsp, rdp);

	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
	if (rcu_nohz_full_cpu(rsp))
		return 0;

	/* Is the RCU core waiting for a quiescent state from this CPU? */
	if (rcu_scheduler_fully_active &&
	    rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) {
		rdp->n_rp_core_needs_qs++;
	} else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
		rdp->n_rp_report_qs++;
		return 1;
	}

	/* Does this CPU have callbacks ready to invoke? */
	if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
		rdp->n_rp_cb_ready++;
		return 1;
	}

	/* Has RCU gone idle with this CPU needing another grace period? */
	if (cpu_needs_another_gp(rsp, rdp)) {
		rdp->n_rp_cpu_needs_gp++;
		return 1;
	}

	/* Has another RCU grace period completed?  */
	if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
		rdp->n_rp_gp_completed++;
		return 1;
	}

	/* Has a new RCU grace period started? */
	if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
	    unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
		rdp->n_rp_gp_started++;
		return 1;
	}

	/* Does this CPU need a deferred NOCB wakeup? */
	if (rcu_nocb_need_deferred_wakeup(rdp)) {
		rdp->n_rp_nocb_defer_wakeup++;
		return 1;
	}

	/* nothing to do */
	rdp->n_rp_need_nothing++;
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
static int rcu_pending(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
			return 1;
	return 0;
}

/*
 * Return true if the specified CPU has any callbacks.  If all_lazy is
 * non-NULL, store an indication of whether all callbacks are lazy.
 * (If there are no callbacks, all of them are deemed to be lazy.)
 */
static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
{
	bool al = true;
	bool hc = false;
	struct rcu_data *rdp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		if (rcu_segcblist_empty(&rdp->cblist))
			continue;
		hc = true;
		if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
			al = false;
			break;
		}
	}
	if (all_lazy)
		*all_lazy = al;
	return hc;
}

/*
 * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
 * the compiler is expected to optimize this away.
 */
static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
			       int cpu, unsigned long done)
{
	trace_rcu_barrier(rsp->name, s, cpu,
			  atomic_read(&rsp->barrier_cpu_count), done);
}

/*
 * RCU callback function for _rcu_barrier().  If we are last, wake
 * up the task executing _rcu_barrier().
 */
static void rcu_barrier_callback(struct rcu_head *rhp)
{
	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
	struct rcu_state *rsp = rdp->rsp;

	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence);
		complete(&rsp->barrier_completion);
	} else {
		_rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence);
	}
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
	struct rcu_state *rsp = type;
	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);

	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
	rdp->barrier_head.func = rcu_barrier_callback;
	debug_rcu_head_queue(&rdp->barrier_head);
	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
		atomic_inc(&rsp->barrier_cpu_count);
	} else {
		debug_rcu_head_unqueue(&rdp->barrier_head);
		_rcu_barrier_trace(rsp, "IRQNQ", -1, rsp->barrier_sequence);
	}
}

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
static void _rcu_barrier(struct rcu_state *rsp)
{
	int cpu;
	struct rcu_data *rdp;
	unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);

	_rcu_barrier_trace(rsp, "Begin", -1, s);

	/* Take mutex to serialize concurrent rcu_barrier() requests. */
	mutex_lock(&rsp->barrier_mutex);

	/* Did someone else do our work for us? */
	if (rcu_seq_done(&rsp->barrier_sequence, s)) {
		_rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence);
		smp_mb(); /* caller's subsequent code after above check. */
		mutex_unlock(&rsp->barrier_mutex);
		return;
	}

	/* Mark the start of the barrier operation. */
	rcu_seq_start(&rsp->barrier_sequence);
	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence);

	/*
	 * Initialize the count to one rather than to zero in order to
	 * avoid a too-soon return to zero in case of a short grace period
	 * (or preemption of this task).  Exclude CPU-hotplug operations
	 * to ensure that no offline CPU has callbacks queued.
	 */
	init_completion(&rsp->barrier_completion);
	atomic_set(&rsp->barrier_cpu_count, 1);
	get_online_cpus();

	/*
	 * Force each CPU with callbacks to register a new callback.
	 * When that callback is invoked, we will know that all of the
	 * corresponding CPU's preceding callbacks have been invoked.
	 */
	for_each_possible_cpu(cpu) {
		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
			continue;
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (rcu_is_nocb_cpu(cpu)) {
			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
				_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
						   rsp->barrier_sequence);
			} else {
				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
						   rsp->barrier_sequence);
				smp_mb__before_atomic();
				atomic_inc(&rsp->barrier_cpu_count);
				__call_rcu(&rdp->barrier_head,
					   rcu_barrier_callback, rsp, cpu, 0);
			}
		} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
					   rsp->barrier_sequence);
			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
		} else {
			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
					   rsp->barrier_sequence);
		}
	}
	put_online_cpus();

	/*
	 * Now that we have an rcu_barrier_callback() callback on each
	 * CPU, and thus each counted, remove the initial count.
	 */
	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
		complete(&rsp->barrier_completion);

	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
	wait_for_completion(&rsp->barrier_completion);

	/* Mark the end of the barrier operation. */
	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence);
	rcu_seq_end(&rsp->barrier_sequence);

	/* Other rcu_barrier() invocations can now safely proceed. */
	mutex_unlock(&rsp->barrier_mutex);
}

/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
	_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
	_rcu_barrier(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
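
/*
 * Sketch of the classic use case (the module and its functions are
 * hypothetical): a module that posted callbacks via call_rcu_sched()
 * must wait for all of them before allowing its code to be unloaded,
 * because the callback functions live in module text:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_posting_callbacks();
 *		rcu_barrier_sched();	(all prior callbacks now invoked)
 *		foo_free_everything();
 *	}
 *
 * Note that rcu_barrier_sched() waits only for callbacks posted before
 * it was invoked; it need not wait for a grace period if no callbacks
 * are pending.
 */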

/*
 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online.  The caller
 * must hold the corresponding leaf rcu_node ->lock with interrupts
 * disabled.
 */
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	struct rcu_node *rnp = rnp_leaf;

	lockdep_assert_held(&rnp->lock);
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (rnp == NULL)
			return;
		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
		rnp->qsmaskinit |= mask;
		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
	}
}

/*
 * Do boot-time initialization of a CPU's per-cpu RCU data.
 */
static void __init
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
	rdp->cpu = cpu;
	rdp->rsp = rsp;
	rcu_boot_init_nocb_percpu_data(rdp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Initialize a CPU's per-cpu RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
static void
rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rsp->n_force_qs;
	rdp->blimit = blimit;
	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
	    !init_nocb_callback_list(rdp))
		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_dynticks_eqs_online();
	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */

	/*
	 * Initialize the CPU's view of the grace-period state from its
	 * leaf rcu_node structure, and count this CPU as online.
	 */
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
	if (!rdp->beenonline)
		WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
	rdp->beenonline = true;	 /* We have now been online. */
	rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
	rdp->completed = rnp->completed;
	rdp->cpu_no_qs.b.norm = true;
	rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
	rdp->core_needs_qs = false;
	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Invoked early in the CPU-online process, when pretty much all
 * services are available.  The incoming CPU is not present.
 */
int rcutree_prepare_cpu(unsigned int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		rcu_init_percpu_data(cpu, rsp);

	rcu_prepare_kthreads(cpu);
	rcu_spawn_all_nocb_kthreads(cpu);

	return 0;
}

/*
 * Update RCU priority boost kthread affinity for CPU-hotplug changes.
 */
static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);

	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
}

/*
 * Near the end of the CPU-online process.  Pretty much all services
 * enabled, and the CPU is now very much alive.
 */
int rcutree_online_cpu(unsigned int cpu)
{
	sync_sched_exp_online_cleanup(cpu);
	rcutree_affinity_setting(cpu, -1);
	if (IS_ENABLED(CONFIG_TREE_SRCU))
		srcu_online_cpu(cpu);
	return 0;
}

/*
 * Near the beginning of the offlining process.  The CPU is still very
 * much alive with pretty much all services enabled.
 */
int rcutree_offline_cpu(unsigned int cpu)
{
	rcutree_affinity_setting(cpu, cpu);
	if (IS_ENABLED(CONFIG_TREE_SRCU))
		srcu_offline_cpu(cpu);
	return 0;
}

/*
 * Near the end of the offline process.  We do only tracing here.
 */
int rcutree_dying_cpu(unsigned int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		rcu_cleanup_dying_cpu(rsp);
	return 0;
}

/*
 * The outgoing CPU is gone and we are running elsewhere.
 */
int rcutree_dead_cpu(unsigned int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rcu_cleanup_dead_cpu(cpu, rsp);
		/* Do any needed no-CBs deferred wakeups from this CPU. */
		do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
	}
	return 0;
}

/*
 * Mark the specified CPU as being online so that subsequent grace periods
 * (both expedited and normal) will wait on it.  Note that this means that
 * incoming CPUs are not allowed to use RCU read-side critical sections
 * until this function is called.  Failing to observe this restriction
 * will result in lockdep splats.
 *
 * Note that this function is special in that it is invoked directly
 * from the incoming CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_cpu_starting(unsigned int cpu)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		rnp = rdp->mynode;
		mask = rdp->grpmask;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rnp->qsmaskinitnext |= mask;
		rnp->expmaskinitnext |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
 * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
 * bit masks.
 */
static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
	mask = rdp->grpmask;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->qsmaskinitnext &= ~mask;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * The outgoing CPU has no further need of RCU, so remove it from
 * the list of CPUs that RCU must track.
 *
 * Note that this function is special in that it is invoked directly
 * from the outgoing CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
void rcu_report_dead(unsigned int cpu)
{
	struct rcu_state *rsp;

	/* QS for any half-done expedited RCU-sched GP. */
	preempt_disable();
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(rcu_sched_state.rda), true);
	preempt_enable();
	for_each_rcu_flavor(rsp)
		rcu_cleanup_dying_idle_cpu(cpu, rsp);
}
#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * On non-huge systems, use expedited RCU grace periods to make suspend
 * and hibernation run faster.
 */
static int rcu_pm_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
			rcu_expedite_gp();
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
			rcu_unexpedite_gp();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

/*
 * Spawn the kthreads that handle each RCU flavor's grace periods.
 */
static int __init rcu_spawn_gp_kthread(void)
{
	unsigned long flags;
	int kthread_prio_in = kthread_prio;
	struct rcu_node *rnp;
	struct rcu_state *rsp;
	struct sched_param sp;
	struct task_struct *t;

	/* Force priority into range. */
	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;
	if (kthread_prio != kthread_prio_in)
		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
			 kthread_prio, kthread_prio_in);

	rcu_scheduler_fully_active = 1;
	for_each_rcu_flavor(rsp) {
		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
		BUG_ON(IS_ERR(t));
		rnp = rcu_get_root(rsp);
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rsp->gp_kthread = t;
		if (kthread_prio) {
			sp.sched_priority = kthread_prio;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		wake_up_process(t);
	}
	rcu_spawn_nocb_kthreads();
	rcu_spawn_boost_kthreads();
	return 0;
}
early_initcall(rcu_spawn_gp_kthread);

/*
 * This function is invoked towards the end of the scheduler's
 * initialization process.  Before this is called, the idle task might
 * contain synchronous grace-period primitives (during which time, this idle
 * task is booting the system, and such primitives are no-ops).  After this
 * function is called, any synchronous grace-period primitives are run as
 * expedited, with the requesting task driving the grace period forward.
 * A later core_initcall() rcu_set_runtime_mode() will switch to full
 * runtime RCU functionality.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_INIT;
	rcu_test_sync_prims();
}

/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
static void __init rcu_init_one(struct rcu_state *rsp)
{
	static const char * const buf[] = RCU_NODE_NAME_INIT;
	static const char * const fqs[] = RCU_FQS_NAME_INIT;
	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Silence gcc 4.8 false positive about array index out of range. */
	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls out of range");

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < rcu_num_lvls; i++)
		rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		cpustride *= levelspread[i];
		rnp = rsp->level[i];
		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
						   &rcu_node_class[i], buf[i]);
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
			rnp->gpnum = rsp->gpnum;
			rnp->completed = rsp->completed;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % levelspread[i - 1];
				rnp->grpmask = 1UL << rnp->grpnum;
				rnp->parent = rsp->level[i - 1] +
					      j / levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
			rcu_init_one_nocb(rnp);
			init_waitqueue_head(&rnp->exp_wq[0]);
			init_waitqueue_head(&rnp->exp_wq[1]);
			init_waitqueue_head(&rnp->exp_wq[2]);
			init_waitqueue_head(&rnp->exp_wq[3]);
			spin_lock_init(&rnp->exp_lock);
		}
	}

	init_swait_queue_head(&rsp->gp_wq);
	init_swait_queue_head(&rsp->expedited_wq);
	rnp = rsp->level[rcu_num_lvls - 1];
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
		rcu_boot_init_percpu_data(i, rsp);
	}
	list_add(&rsp->flavors, &rcu_struct_flavors);
}

/*
 * Compute the rcu_node tree geometry from run-time parameters.
 */
static void __init rcu_init_geometry(void)
{
	ulong d;
	int i;
	int rcu_capacity[RCU_NUM_LVLS];

	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, then adding one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
	if (rcu_fanout_leaf < 2 ||
	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/*
	 * Compute number of nodes that can be handled by an rcu_node tree
	 * with the given number of levels.
	 */
	rcu_capacity[0] = rcu_fanout_leaf;
	for (i = 1; i < RCU_NUM_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;

	/*
	 * The tree must be able to accommodate the configured number of CPUs.
	 * If this limit is exceeded, fall back to the compile-time values.
	 */
	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
		rcu_fanout_leaf = RCU_FANOUT_LEAF;
		WARN_ON(1);
		return;
	}

	/* Calculate the number of levels in the tree. */
	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
	}
	rcu_num_lvls = i + 1;

	/* Calculate the number of rcu_nodes at each level of the tree. */
	for (i = 0; i < rcu_num_lvls; i++) {
		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
	}

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
	for (i = 0; i < rcu_num_lvls; i++)
		rcu_num_nodes += num_rcu_lvl[i];
}
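
/*
 * Worked example of the computation above (values illustrative):
 * with rcu_fanout_leaf = 16, RCU_FANOUT = 64, and nr_cpu_ids = 100,
 * the capacities are rcu_capacity[0] = 16 and rcu_capacity[1] = 1024.
 * Because 100 > 16 but 100 <= 1024, rcu_num_lvls = 2; the tree then
 * has num_rcu_lvl[0] = DIV_ROUND_UP(100, 1024) = 1 root node and
 * num_rcu_lvl[1] = DIV_ROUND_UP(100, 16) = 7 leaf nodes, for a total
 * of rcu_num_nodes = 8.
 */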

/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure referenced by rsp.
 */
static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
{
	int level = 0;
	struct rcu_node *rnp;

	pr_info("rcu_node tree layout dump\n");
	pr_info(" ");
	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (rnp->level != level) {
			pr_cont("\n");
			pr_info(" ");
			level = rnp->level;
		}
		pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
	}
	pr_cont("\n");
}

void __init rcu_init(void)
{
	int cpu;

	rcu_early_boot_tests();

	rcu_bootup_announce();
	rcu_init_geometry();
	rcu_init_one(&rcu_bh_state);
	rcu_init_one(&rcu_sched_state);
	if (dump_tree)
		rcu_dump_rcu_node_tree(&rcu_sched_state);
	__rcu_init_preempt();
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	pm_notifier(rcu_pm_notify, 0);
	for_each_online_cpu(cpu) {
		rcutree_prepare_cpu(cpu);
		rcu_cpu_starting(cpu);
		if (IS_ENABLED(CONFIG_TREE_SRCU))
			srcu_online_cpu(cpu);
	}
}

#include "tree_exp.h"
#include "tree_plugin.h"