/*
 * Read-Copy Update mechanism for mutual exclusion, tree-based
 * (hierarchical) implementation.
 *
 * This file provides the core grace-period machinery for the
 * rcu_sched and rcu_bh flavors.
 */
30#include <linux/types.h>
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/smp.h>
35#include <linux/rcupdate_wait.h>
36#include <linux/interrupt.h>
37#include <linux/sched.h>
38#include <linux/sched/debug.h>
39#include <linux/nmi.h>
40#include <linux/atomic.h>
41#include <linux/bitops.h>
42#include <linux/export.h>
43#include <linux/completion.h>
44#include <linux/moduleparam.h>
45#include <linux/percpu.h>
46#include <linux/notifier.h>
47#include <linux/cpu.h>
48#include <linux/mutex.h>
49#include <linux/time.h>
50#include <linux/kernel_stat.h>
51#include <linux/wait.h>
52#include <linux/kthread.h>
53#include <uapi/linux/sched/types.h>
54#include <linux/prefetch.h>
55#include <linux/delay.h>
56#include <linux/stop_machine.h>
57#include <linux/random.h>
58#include <linux/trace_events.h>
59#include <linux/suspend.h>
60#include <linux/ftrace.h>
61
62#include "tree.h"
63#include "rcu.h"
64
65#ifdef MODULE_PARAM_PREFIX
66#undef MODULE_PARAM_PREFIX
67#endif
68#define MODULE_PARAM_PREFIX "rcutree."
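
/*
 * With the "rcutree." prefix above, the module parameters declared
 * throughout this file appear on the kernel command line as
 * "rcutree.<name>", for example "rcutree.gp_init_delay=3".
 */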
69
/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, which allows the tracing
 * userspace tools to map the string address back to the matching
 * string.
 */
80#ifdef CONFIG_TRACING
81# define DEFINE_RCU_TPS(sname) \
82static char sname##_varname[] = #sname; \
83static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
84# define RCU_STATE_NAME(sname) sname##_varname
85#else
86# define DEFINE_RCU_TPS(sname)
87# define RCU_STATE_NAME(sname) __stringify(sname)
88#endif
89
90#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
91DEFINE_RCU_TPS(sname) \
92static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
93struct rcu_state sname##_state = { \
94 .level = { &sname##_state.node[0] }, \
95 .rda = &sname##_data, \
96 .call = cr, \
97 .gp_state = RCU_GP_IDLE, \
98 .gpnum = 0UL - 300UL, \
99 .completed = 0UL - 300UL, \
100 .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
101 .name = RCU_STATE_NAME(sname), \
102 .abbr = sabbr, \
103 .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
104 .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
105}
106
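/*
 * The two invocations below instantiate the RCU-sched and RCU-bh
 * flavors: each defines the flavor's rcu_state structure and per-CPU
 * rcu_data, and records the corresponding call_rcu_*() entry point.
 */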
107RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
108RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
109
110static struct rcu_state *const rcu_state_p;
111LIST_HEAD(rcu_struct_flavors);

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
128
/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just
 * before the first task is spawned.  While RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for
 * example) optimize synchronize_rcu() to a simple barrier().  Once
 * RCU_SCHEDULER_INIT, RCU must do the real work of detecting grace
 * periods.  The variable finally transitions to RCU_SCHEDULER_RUNNING
 * after RCU is fully initialized, including all of its kthreads
 * having been spawned.
 */
141int rcu_scheduler_active __read_mostly;
142EXPORT_SYMBOL_GPL(rcu_scheduler_active);
143
/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 */
156static int rcu_scheduler_fully_active __read_mostly;
157
158static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
159static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
160static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
161static void invoke_rcu_core(void);
162static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
163static void rcu_report_exp_rdp(struct rcu_state *rsp,
164 struct rcu_data *rdp, bool wake);
165static void sync_sched_exp_online_cleanup(int cpu);
166
/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */

static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);
179
/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */
190
/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;
202
/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
209unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
210{
211 return READ_ONCE(rnp->qsmaskinitnext);
212}
213
/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
219static int rcu_gp_in_progress(struct rcu_state *rsp)
220{
221 return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
222}
223
/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled interrupts.
 */
230void rcu_sched_qs(void)
231{
232 RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
233 if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
234 return;
235 trace_rcu_grace_period(TPS("rcu_sched"),
236 __this_cpu_read(rcu_sched_data.gpnum),
237 TPS("cpuqs"));
238 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
239 if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
240 return;
241 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
242 rcu_report_exp_rdp(&rcu_sched_state,
243 this_cpu_ptr(&rcu_sched_data), true);
244}
245
246void rcu_bh_qs(void)
247{
248 RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
249 if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
250 trace_rcu_grace_period(TPS("rcu_bh"),
251 __this_cpu_read(rcu_bh_data.gpnum),
252 TPS("cpuqs"));
253 __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
254 }
255}
256
/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.  The counter advances
 * in units of RCU_DYNTICK_CTRL_CTR; the RCU_DYNTICK_CTRL_CTR bit being
 * set means that the CPU is not in an extended quiescent state.  The
 * low-order RCU_DYNTICK_CTRL_MASK bit requests that rcu_eqs_special_exit()
 * be invoked when the CPU next leaves an extended quiescent state.
 */
261#define RCU_DYNTICK_CTRL_MASK 0x1
262#define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1)
263#ifndef rcu_eqs_special_exit
264#define rcu_eqs_special_exit() do { } while (0)
265#endif
266
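/*
 * Per-CPU dyntick-idle state.  The initializer marks each CPU as
 * running in task context (DYNTICK_TASK_EXIT_IDLE nesting) and not in
 * an extended quiescent state (RCU_DYNTICK_CTRL_CTR set in ->dynticks).
 */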
267static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
268 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
269 .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
270};
271
/*
 * There are a few places, currently just in the tracing infrastructure,
 * that use rcu_irq_enter() to make sure RCU is watching.  But there is
 * a small window where that will not work.  In those cases
 * rcu_irq_enter_disabled() needs to be checked to make sure that
 * rcu_irq_enter() can be called.
 */
279static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
280
281bool rcu_irq_enter_disabled(void)
282{
283 return this_cpu_read(disable_rcu_irq_enter);
284}
285
/*
 * Record entry into an extended quiescent state by advancing the
 * ->dynticks counter so that its RCU_DYNTICK_CTRL_CTR bit is clear.
 */
290static void rcu_dynticks_eqs_enter(void)
291{
292 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
293 int seq;
294
295
296
297
298
299
300 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
301
302 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
303 (seq & RCU_DYNTICK_CTRL_CTR));
304
305 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
306 (seq & RCU_DYNTICK_CTRL_MASK));
307}
308
/*
 * Record exit from an extended quiescent state, and perform any special
 * handling (such as rcu_eqs_special_exit()) that was requested via
 * rcu_eqs_special_set() while the CPU was idle.
 */
313static void rcu_dynticks_eqs_exit(void)
314{
315 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
316 int seq;
317
318
319
320
321
322
323 seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
324 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
325 !(seq & RCU_DYNTICK_CTRL_CTR));
326 if (seq & RCU_DYNTICK_CTRL_MASK) {
327 atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
328 smp_mb__after_atomic();
329
330 rcu_eqs_special_exit();
331 }
332}
333
/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding
 * CPU, or when the corresponding CPU is offline.
 */
344static void rcu_dynticks_eqs_online(void)
345{
346 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
347
348 if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
349 return;
350 atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
351}
352
/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
358bool rcu_dynticks_curr_cpu_in_eqs(void)
359{
360 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
361
362 return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
363}
364
/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
369int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
370{
371 int snap = atomic_add_return(0, &rdtp->dynticks);
372
373 return snap & ~RCU_DYNTICK_CTRL_MASK;
374}
375
/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
380static bool rcu_dynticks_in_eqs(int snap)
381{
382 return !(snap & RCU_DYNTICK_CTRL_CTR);
383}
384
/*
 * Return true if the CPU corresponding to the specified rcu_dynticks
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
390static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
391{
392 return snap != rcu_dynticks_snap(rdtp);
393}
394
/*
 * Do a double-increment of the ->dynticks counter to emulate a
 * momentary idle sojourn, which is a quiescent state for all flavors
 * of RCU.
 */
399static void rcu_dynticks_momentary_idle(void)
400{
401 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
402 int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
403 &rdtp->dynticks);
404
405
406 WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
407}
408
/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
416bool rcu_eqs_special_set(int cpu)
417{
418 int old;
419 int new;
420 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
421
422 do {
423 old = atomic_read(&rdtp->dynticks);
424 if (old & RCU_DYNTICK_CTRL_CTR)
425 return false;
426 new = old | RCU_DYNTICK_CTRL_MASK;
427 } while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
428 return true;
429}
430
/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of
 * what this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts.
 */
442static void rcu_momentary_dyntick_idle(void)
443{
444 raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
445 rcu_dynticks_momentary_idle();
446}
447
/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled interrupts.
 */
453void rcu_note_context_switch(bool preempt)
454{
455 barrier();
456 trace_rcu_utilization(TPS("Start context switch"));
457 rcu_sched_qs();
458 rcu_preempt_note_context_switch(preempt);
459
460 if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
461 goto out;
462 this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
463 if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
464 rcu_momentary_dyntick_idle();
465 this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
466 if (!preempt)
467 rcu_note_voluntary_context_switch_lite(current);
468out:
469 trace_rcu_utilization(TPS("End context switch"));
470 barrier();
471}
472EXPORT_SYMBOL_GPL(rcu_note_context_switch);
473
/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs, but only for those
 * RCU flavors in desperate need of a quiescent state, which will normally
 * be none of them.  Either way, do a lightweight quiescent state for
 * all RCU flavors.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 */
487void rcu_all_qs(void)
488{
489 unsigned long flags;
490
491 if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
492 return;
493 preempt_disable();
494
495 if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
496 preempt_enable();
497 return;
498 }
499 this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
500 barrier();
501 if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
502 local_irq_save(flags);
503 rcu_momentary_dyntick_idle();
504 local_irq_restore(flags);
505 }
506 if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
507 rcu_sched_qs();
508 this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
509 barrier();
510 preempt_enable();
511}
512EXPORT_SYMBOL_GPL(rcu_all_qs);
513
#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);
524
525static ulong jiffies_till_first_fqs = ULONG_MAX;
526static ulong jiffies_till_next_fqs = ULONG_MAX;
527static bool rcu_kick_kthreads;
528
529module_param(jiffies_till_first_fqs, ulong, 0644);
530module_param(jiffies_till_next_fqs, ulong, 0644);
531module_param(rcu_kick_kthreads, bool, 0644);
532
/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = HZ / 20;
538module_param(jiffies_till_sched_qs, ulong, 0644);
539
540static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
541 struct rcu_data *rdp);
542static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
543static void force_quiescent_state(struct rcu_state *rsp);
544static int rcu_pending(void);
545
546
547
548
549unsigned long rcu_batches_started(void)
550{
551 return rcu_state_p->gpnum;
552}
553EXPORT_SYMBOL_GPL(rcu_batches_started);
554
555
556
557
558unsigned long rcu_batches_started_sched(void)
559{
560 return rcu_sched_state.gpnum;
561}
562EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
563
564
565
566
567unsigned long rcu_batches_started_bh(void)
568{
569 return rcu_bh_state.gpnum;
570}
571EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
572
573
574
575
576unsigned long rcu_batches_completed(void)
577{
578 return rcu_state_p->completed;
579}
580EXPORT_SYMBOL_GPL(rcu_batches_completed);
581
582
583
584
585unsigned long rcu_batches_completed_sched(void)
586{
587 return rcu_sched_state.completed;
588}
589EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
590
591
592
593
594unsigned long rcu_batches_completed_bh(void)
595{
596 return rcu_bh_state.completed;
597}
598EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
599
600
601
602
603
604
605
606unsigned long rcu_exp_batches_completed(void)
607{
608 return rcu_state_p->expedited_sequence;
609}
610EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
611
612
613
614
615
616unsigned long rcu_exp_batches_completed_sched(void)
617{
618 return rcu_sched_state.expedited_sequence;
619}
620EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
621
622
623
624
625void rcu_force_quiescent_state(void)
626{
627 force_quiescent_state(rcu_state_p);
628}
629EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
630
631
632
633
634void rcu_bh_force_quiescent_state(void)
635{
636 force_quiescent_state(&rcu_bh_state);
637}
638EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
639
640
641
642
643void rcu_sched_force_quiescent_state(void)
644{
645 force_quiescent_state(&rcu_sched_state);
646}
647EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
648
649
650
651
652void show_rcu_gp_kthreads(void)
653{
654 struct rcu_state *rsp;
655
656 for_each_rcu_flavor(rsp) {
657 pr_info("%s: wait state: %d ->state: %#lx\n",
658 rsp->name, rsp->gp_state, rsp->gp_kthread->state);
659
660 }
661}
662EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
663
664
665
666
667
668
669
670
671void rcutorture_record_test_transition(void)
672{
673 rcutorture_testseq++;
674 rcutorture_vernum = 0;
675}
676EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
677
678
679
680
681void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
682 unsigned long *gpnum, unsigned long *completed)
683{
684 struct rcu_state *rsp = NULL;
685
686 switch (test_type) {
687 case RCU_FLAVOR:
688 rsp = rcu_state_p;
689 break;
690 case RCU_BH_FLAVOR:
691 rsp = &rcu_bh_state;
692 break;
693 case RCU_SCHED_FLAVOR:
694 rsp = &rcu_sched_state;
695 break;
696 default:
697 break;
698 }
699 if (rsp == NULL)
700 return;
701 *flags = READ_ONCE(rsp->gp_flags);
702 *gpnum = READ_ONCE(rsp->gpnum);
703 *completed = READ_ONCE(rsp->completed);
704}
705EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
706
707
708
709
710
711
712void rcutorture_record_progress(unsigned long vernum)
713{
714 rcutorture_vernum++;
715}
716EXPORT_SYMBOL_GPL(rcutorture_record_progress);
717
718
719
720
721static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
722{
723 return &rsp->node[0];
724}
725
726
727
728
729
730
731static int rcu_future_needs_gp(struct rcu_state *rsp)
732{
733 struct rcu_node *rnp = rcu_get_root(rsp);
734 int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
735 int *fp = &rnp->need_future_gp[idx];
736
737 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!");
738 return READ_ONCE(*fp);
739}
740
/*
 * Does the current CPU require a not-yet-started grace period?
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
746static bool
747cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
748{
749 RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!");
750 if (rcu_gp_in_progress(rsp))
751 return false;
752 if (rcu_future_needs_gp(rsp))
753 return true;
754 if (!rcu_segcblist_is_enabled(&rdp->cblist))
755 return false;
756 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
757 return true;
758 if (rcu_segcblist_future_gp_needed(&rdp->cblist,
759 READ_ONCE(rsp->completed)))
760 return true;
761 return false;
762}
763
764
765
766
767
768
769
770static void rcu_eqs_enter_common(bool user)
771{
772 struct rcu_state *rsp;
773 struct rcu_data *rdp;
774 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
775
776 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!");
777 trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
778 if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
779 !user && !is_idle_task(current)) {
780 struct task_struct *idle __maybe_unused =
781 idle_task(smp_processor_id());
782
783 trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
784 rcu_ftrace_dump(DUMP_ORIG);
785 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
786 current->pid, current->comm,
787 idle->pid, idle->comm);
788 }
789 for_each_rcu_flavor(rsp) {
790 rdp = this_cpu_ptr(rsp->rda);
791 do_nocb_deferred_wakeup(rdp);
792 }
793 rcu_prepare_for_idle();
794 __this_cpu_inc(disable_rcu_irq_enter);
795 rdtp->dynticks_nesting = 0;
796 rcu_dynticks_eqs_enter();
797 __this_cpu_dec(disable_rcu_irq_enter);
798 rcu_dynticks_task_enter();
799
800
801
802
803
804 RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
805 "Illegal idle entry in RCU read-side critical section.");
806 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
807 "Illegal idle entry in RCU-bh read-side critical section.");
808 RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
809 "Illegal idle entry in RCU-sched read-side critical section.");
810}
811
812
813
814
815
816static void rcu_eqs_enter(bool user)
817{
818 struct rcu_dynticks *rdtp;
819
820 rdtp = this_cpu_ptr(&rcu_dynticks);
821 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
822 (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
823 if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
824 rcu_eqs_enter_common(user);
825 else
826 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
827}
828
829
830
831
832
833
834
835
836
837
838
839
840
841void rcu_idle_enter(void)
842{
843 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_idle_enter() invoked with irqs enabled!!!");
844 rcu_eqs_enter(false);
845}
846
847#ifdef CONFIG_NO_HZ_FULL
848
849
850
851
852
853
854
855
856void rcu_user_enter(void)
857{
858 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_user_enter() invoked with irqs enabled!!!");
859 rcu_eqs_enter(true);
860}
861#endif
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879void rcu_irq_exit(void)
880{
881 struct rcu_dynticks *rdtp;
882
883 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
884 rdtp = this_cpu_ptr(&rcu_dynticks);
885
886
887 if (rdtp->dynticks_nmi_nesting)
888 return;
889
890 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
891 rdtp->dynticks_nesting < 1);
892 if (rdtp->dynticks_nesting <= 1) {
893 rcu_eqs_enter_common(true);
894 } else {
895 trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
896 rdtp->dynticks_nesting--;
897 }
898}
899
900
901
902
903void rcu_irq_exit_irqson(void)
904{
905 unsigned long flags;
906
907 local_irq_save(flags);
908 rcu_irq_exit();
909 local_irq_restore(flags);
910}
911
912
913
914
915
916
917
918
919static void rcu_eqs_exit_common(long long oldval, int user)
920{
921 RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
922
923 rcu_dynticks_task_exit();
924 rcu_dynticks_eqs_exit();
925 rcu_cleanup_after_idle();
926 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
927 if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
928 !user && !is_idle_task(current)) {
929 struct task_struct *idle __maybe_unused =
930 idle_task(smp_processor_id());
931
932 trace_rcu_dyntick(TPS("Error on exit: not idle task"),
933 oldval, rdtp->dynticks_nesting);
934 rcu_ftrace_dump(DUMP_ORIG);
935 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
936 current->pid, current->comm,
937 idle->pid, idle->comm);
938 }
939}
940
941
942
943
944
945static void rcu_eqs_exit(bool user)
946{
947 struct rcu_dynticks *rdtp;
948 long long oldval;
949
950 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!");
951 rdtp = this_cpu_ptr(&rcu_dynticks);
952 oldval = rdtp->dynticks_nesting;
953 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
954 if (oldval & DYNTICK_TASK_NEST_MASK) {
955 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
956 } else {
957 __this_cpu_inc(disable_rcu_irq_enter);
958 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
959 rcu_eqs_exit_common(oldval, user);
960 __this_cpu_dec(disable_rcu_irq_enter);
961 }
962}
963
964
965
966
967
968
969
970
971
972
973
974
975void rcu_idle_exit(void)
976{
977 unsigned long flags;
978
979 local_irq_save(flags);
980 rcu_eqs_exit(false);
981 local_irq_restore(flags);
982}
983
984#ifdef CONFIG_NO_HZ_FULL
985
986
987
988
989
990
991void rcu_user_exit(void)
992{
993 rcu_eqs_exit(1);
994}
995#endif
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016void rcu_irq_enter(void)
1017{
1018 struct rcu_dynticks *rdtp;
1019 long long oldval;
1020
1021 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
1022 rdtp = this_cpu_ptr(&rcu_dynticks);
1023
1024
1025 if (rdtp->dynticks_nmi_nesting)
1026 return;
1027
1028 oldval = rdtp->dynticks_nesting;
1029 rdtp->dynticks_nesting++;
1030 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
1031 rdtp->dynticks_nesting == 0);
1032 if (oldval)
1033 trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
1034 else
1035 rcu_eqs_exit_common(oldval, true);
1036}
1037
1038
1039
1040
1041void rcu_irq_enter_irqson(void)
1042{
1043 unsigned long flags;
1044
1045 local_irq_save(flags);
1046 rcu_irq_enter();
1047 local_irq_restore(flags);
1048}
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059void rcu_nmi_enter(void)
1060{
1061 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1062 int incby = 2;
1063
 /* Complain about underflow. */
 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);

 /*
  * If the CPU is idle from an RCU viewpoint, atomically increment
  * ->dynticks to mark it non-idle and increment ->dynticks_nmi_nesting
  * by one.  Otherwise, increment ->dynticks_nmi_nesting by two.  This
  * means that if ->dynticks_nmi_nesting is equal to one, we are
  * guaranteed to be in the outermost NMI handler that interrupted an
  * RCU-idle period.
  */
1075 if (rcu_dynticks_curr_cpu_in_eqs()) {
1076 rcu_dynticks_eqs_exit();
1077 incby = 1;
1078 }
1079 rdtp->dynticks_nmi_nesting += incby;
1080 barrier();
1081}
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091void rcu_nmi_exit(void)
1092{
1093 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1094
1095
1096
1097
1098
1099
1100 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
1101 WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
1102
1103
1104
1105
1106
1107 if (rdtp->dynticks_nmi_nesting != 1) {
1108 rdtp->dynticks_nmi_nesting -= 2;
1109 return;
1110 }
1111
1112
1113 rdtp->dynticks_nmi_nesting = 0;
1114 rcu_dynticks_eqs_enter();
1115}
1116
1117
/**
 * rcu_is_watching - see if RCU thinks that the current CPU is idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is in its idle loop and is neither in an interrupt
 * nor in an NMI handler, return true.
 */
1125bool notrace rcu_is_watching(void)
1126{
1127 bool ret;
1128
1129 preempt_disable_notrace();
1130 ret = !rcu_dynticks_curr_cpu_in_eqs();
1131 preempt_enable_notrace();
1132 return ret;
1133}
1134EXPORT_SYMBOL_GPL(rcu_is_watching);
1135
1136
1137
1138
1139
1140
1141
1142
1143void rcu_request_urgent_qs_task(struct task_struct *t)
1144{
1145 int cpu;
1146
1147 barrier();
1148 cpu = task_cpu(t);
1149 if (!task_curr(t))
1150 return;
1151 smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
1152}
1153
1154#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177bool rcu_lockdep_current_cpu_online(void)
1178{
1179 struct rcu_data *rdp;
1180 struct rcu_node *rnp;
1181 bool ret;
1182
1183 if (in_nmi())
1184 return true;
1185 preempt_disable();
1186 rdp = this_cpu_ptr(&rcu_sched_data);
1187 rnp = rdp->mynode;
1188 ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
1189 !rcu_scheduler_fully_active;
1190 preempt_enable();
1191 return ret;
1192}
1193EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1194
1195#endif
1196
/*
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
1204static int rcu_is_cpu_rrupt_from_idle(void)
1205{
1206 return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
1207}
1208
/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
1214static int dyntick_save_progress_counter(struct rcu_data *rdp)
1215{
1216 rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
1217 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1218 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1219 if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
1220 rdp->mynode->gpnum))
1221 WRITE_ONCE(rdp->gpwrap, true);
1222 return 1;
1223 }
1224 return 0;
1225}
1226
/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
1233static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1234{
1235 unsigned long jtsq;
1236 bool *rnhqp;
1237 bool *ruqp;
1238 unsigned long rjtsc;
1239 struct rcu_node *rnp;
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249 if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
1250 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1251 rdp->dynticks_fqs++;
1252 return 1;
1253 }
1254
1255
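 /*
  * Limit the local copy of jiffies_till_sched_qs to at least one jiffy
  * and at most half of the stall-warning timeout, adjusting the module
  * parameter itself if it is out of range.
  */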
1256 jtsq = jiffies_till_sched_qs;
1257 rjtsc = rcu_jiffies_till_stall_check();
1258 if (jtsq > rjtsc / 2) {
1259 WRITE_ONCE(jiffies_till_sched_qs, rjtsc);
1260 jtsq = rjtsc / 2;
1261 } else if (jtsq < 1) {
1262 WRITE_ONCE(jiffies_till_sched_qs, 1);
1263 jtsq = 1;
1264 }
1265
1266
1267
1268
1269
1270
1271
1272 rnp = rdp->mynode;
1273 ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
1274 if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
1275 READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
1276 READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
1277 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
1278 return 1;
1279 } else {
1280
1281 smp_store_release(ruqp, true);
1282 }
1283
1284
1285 if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
1286 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
1287 rdp->offline_fqs++;
1288 return 1;
1289 }
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312 rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
1313 if (!READ_ONCE(*rnhqp) &&
1314 (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
1315 time_after(jiffies, rdp->rsp->jiffies_resched))) {
1316 WRITE_ONCE(*rnhqp, true);
1317
1318 smp_store_release(ruqp, true);
1319 rdp->rsp->jiffies_resched += 5;
1320 }
1321
1322
1323
1324
1325
1326 if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2)
1327 resched_cpu(rdp->cpu);
1328
1329 return 0;
1330}
1331
1332static void record_gp_stall_check_time(struct rcu_state *rsp)
1333{
1334 unsigned long j = jiffies;
1335 unsigned long j1;
1336
1337 rsp->gp_start = j;
1338 smp_wmb();
1339 j1 = rcu_jiffies_till_stall_check();
1340 WRITE_ONCE(rsp->jiffies_stall, j + j1);
1341 rsp->jiffies_resched = j + j1 / 2;
1342 rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
1343}
1344
1345
1346
1347
1348static const char *gp_state_getname(short gs)
1349{
1350 if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
1351 return "???";
1352 return gp_state_names[gs];
1353}
1354
1355
1356
1357
1358static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
1359{
1360 unsigned long gpa;
1361 unsigned long j;
1362
1363 j = jiffies;
1364 gpa = READ_ONCE(rsp->gp_activity);
1365 if (j - gpa > 2 * HZ) {
1366 pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
1367 rsp->name, j - gpa,
1368 rsp->gpnum, rsp->completed,
1369 rsp->gp_flags,
1370 gp_state_getname(rsp->gp_state), rsp->gp_state,
1371 rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
1372 rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
1373 if (rsp->gp_kthread) {
1374 sched_show_task(rsp->gp_kthread);
1375 wake_up_process(rsp->gp_kthread);
1376 }
1377 }
1378}
1379
1380
1381
1382
1383
1384
1385
1386static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
1387{
1388 int cpu;
1389 unsigned long flags;
1390 struct rcu_node *rnp;
1391
1392 rcu_for_each_leaf_node(rsp, rnp) {
1393 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1394 for_each_leaf_node_possible_cpu(rnp, cpu)
1395 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
1396 if (!trigger_single_cpu_backtrace(cpu))
1397 dump_cpu_task(cpu);
1398 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1399 }
1400}
1401
1402
1403
1404
1405
1406static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
1407{
1408 unsigned long j;
1409
1410 if (!rcu_kick_kthreads)
1411 return;
1412 j = READ_ONCE(rsp->jiffies_kick_kthreads);
1413 if (time_after(jiffies, j) && rsp->gp_kthread &&
1414 (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
1415 WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
1416 rcu_ftrace_dump(DUMP_ALL);
1417 wake_up_process(rsp->gp_kthread);
1418 WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
1419 }
1420}
1421
1422static inline void panic_on_rcu_stall(void)
1423{
1424 if (sysctl_panic_on_rcu_stall)
1425 panic("RCU Stall\n");
1426}
1427
1428static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
1429{
1430 int cpu;
1431 long delta;
1432 unsigned long flags;
1433 unsigned long gpa;
1434 unsigned long j;
1435 int ndetected = 0;
1436 struct rcu_node *rnp = rcu_get_root(rsp);
1437 long totqlen = 0;
1438
1439
1440 rcu_stall_kick_kthreads(rsp);
1441 if (rcu_cpu_stall_suppress)
1442 return;
1443
1444
1445
1446 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1447 delta = jiffies - READ_ONCE(rsp->jiffies_stall);
1448 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
1449 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1450 return;
1451 }
1452 WRITE_ONCE(rsp->jiffies_stall,
1453 jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
1454 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1455
1456
1457
1458
1459
1460
1461 pr_err("INFO: %s detected stalls on CPUs/tasks:",
1462 rsp->name);
1463 print_cpu_stall_info_begin();
1464 rcu_for_each_leaf_node(rsp, rnp) {
1465 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1466 ndetected += rcu_print_task_stall(rnp);
1467 if (rnp->qsmask != 0) {
1468 for_each_leaf_node_possible_cpu(rnp, cpu)
1469 if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
1470 print_cpu_stall_info(rsp, cpu);
1471 ndetected++;
1472 }
1473 }
1474 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1475 }
1476
1477 print_cpu_stall_info_end();
1478 for_each_possible_cpu(cpu)
1479 totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
1480 cpu)->cblist);
1481 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1482 smp_processor_id(), (long)(jiffies - rsp->gp_start),
1483 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1484 if (ndetected) {
1485 rcu_dump_cpu_stacks(rsp);
1486
1487
1488 rcu_print_detail_task_stall(rsp);
1489 } else {
1490 if (READ_ONCE(rsp->gpnum) != gpnum ||
1491 READ_ONCE(rsp->completed) == gpnum) {
1492 pr_err("INFO: Stall ended before state dump start\n");
1493 } else {
1494 j = jiffies;
1495 gpa = READ_ONCE(rsp->gp_activity);
1496 pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
1497 rsp->name, j - gpa, j, gpa,
1498 jiffies_till_next_fqs,
1499 rcu_get_root(rsp)->qsmask);
1500
1501 sched_show_task(current);
1502 }
1503 }
1504
1505 rcu_check_gp_kthread_starvation(rsp);
1506
1507 panic_on_rcu_stall();
1508
1509 force_quiescent_state(rsp);
1510}
1511
1512static void print_cpu_stall(struct rcu_state *rsp)
1513{
1514 int cpu;
1515 unsigned long flags;
1516 struct rcu_node *rnp = rcu_get_root(rsp);
1517 long totqlen = 0;
1518
1519
1520 rcu_stall_kick_kthreads(rsp);
1521 if (rcu_cpu_stall_suppress)
1522 return;
1523
1524
1525
1526
1527
1528
1529 pr_err("INFO: %s self-detected stall on CPU", rsp->name);
1530 print_cpu_stall_info_begin();
1531 print_cpu_stall_info(rsp, smp_processor_id());
1532 print_cpu_stall_info_end();
1533 for_each_possible_cpu(cpu)
1534 totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
1535 cpu)->cblist);
1536 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1537 jiffies - rsp->gp_start,
1538 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1539
1540 rcu_check_gp_kthread_starvation(rsp);
1541
1542 rcu_dump_cpu_stacks(rsp);
1543
1544 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1545 if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
1546 WRITE_ONCE(rsp->jiffies_stall,
1547 jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
1548 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1549
1550 panic_on_rcu_stall();
1551
1552
1553
1554
1555
1556
1557
1558
1559 resched_cpu(smp_processor_id());
1560}
1561
1562static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1563{
1564 unsigned long completed;
1565 unsigned long gpnum;
1566 unsigned long gps;
1567 unsigned long j;
1568 unsigned long js;
1569 struct rcu_node *rnp;
1570
1571 if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
1572 !rcu_gp_in_progress(rsp))
1573 return;
1574 rcu_stall_kick_kthreads(rsp);
1575 j = jiffies;
1576
 /*
  * Lots of memory barriers to reject false positives.
  *
  * Pick up rsp->gpnum, then rsp->jiffies_stall, then rsp->gp_start,
  * and finally rsp->completed.  These values are updated in the
  * opposite order, with memory barriers (or equivalent) in between,
  * during grace-period initialization and cleanup.  A false positive
  * could otherwise occur if we saw a new rsp->gp_start but a stale
  * rsp->jiffies_stall, which can happen only if one grace period ends
  * and another begins between these fetches; detect that case by
  * comparing rsp->completed with the earlier fetch of rsp->gpnum.
  */
1594 gpnum = READ_ONCE(rsp->gpnum);
1595 smp_rmb();
1596 js = READ_ONCE(rsp->jiffies_stall);
1597 smp_rmb();
1598 gps = READ_ONCE(rsp->gp_start);
1599 smp_rmb();
1600 completed = READ_ONCE(rsp->completed);
1601 if (ULONG_CMP_GE(completed, gpnum) ||
1602 ULONG_CMP_LT(j, js) ||
1603 ULONG_CMP_GE(gps, js))
1604 return;
1605 rnp = rdp->mynode;
1606 if (rcu_gp_in_progress(rsp) &&
1607 (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
1608
1609
1610 print_cpu_stall(rsp);
1611
1612 } else if (rcu_gp_in_progress(rsp) &&
1613 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1614
1615
1616 print_other_cpu_stall(rsp, gpnum);
1617 }
1618}
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629void rcu_cpu_stall_reset(void)
1630{
1631 struct rcu_state *rsp;
1632
1633 for_each_rcu_flavor(rsp)
1634 WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
1635}
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1647 struct rcu_node *rnp)
1648{
1649 lockdep_assert_held(&rnp->lock);
1650
1651
1652
1653
1654
1655
1656
1657
1658 if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1659 return rnp->completed + 1;
1660
1661
1662
1663
1664
1665 return rnp->completed + 2;
1666}
1667
1668
1669
1670
1671
1672static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1673 unsigned long c, const char *s)
1674{
1675 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1676 rnp->completed, c, rnp->level,
1677 rnp->grplo, rnp->grphi, s);
1678}
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688static bool __maybe_unused
1689rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1690 unsigned long *c_out)
1691{
1692 unsigned long c;
1693 bool ret = false;
1694 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1695
1696 lockdep_assert_held(&rnp->lock);
1697
1698
1699
1700
1701
1702 c = rcu_cbs_completed(rdp->rsp, rnp);
1703 trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
1704 if (rnp->need_future_gp[c & 0x1]) {
1705 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
1706 goto out;
1707 }
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722 if (rnp->gpnum != rnp->completed ||
1723 READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
1724 rnp->need_future_gp[c & 0x1]++;
1725 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
1726 goto out;
1727 }
1728
1729
1730
1731
1732
1733
1734 if (rnp != rnp_root)
1735 raw_spin_lock_rcu_node(rnp_root);
1736
1737
1738
1739
1740
1741
1742 c = rcu_cbs_completed(rdp->rsp, rnp_root);
1743 if (!rcu_is_nocb_cpu(rdp->cpu))
1744 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1745
1746
1747
1748
1749
1750 if (rnp_root->need_future_gp[c & 0x1]) {
1751 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
1752 goto unlock_out;
1753 }
1754
1755
1756 rnp_root->need_future_gp[c & 0x1]++;
1757
1758
1759 if (rnp_root->gpnum != rnp_root->completed) {
1760 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
1761 } else {
1762 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
1763 ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
1764 }
1765unlock_out:
1766 if (rnp != rnp_root)
1767 raw_spin_unlock_rcu_node(rnp_root);
1768out:
1769 if (c_out != NULL)
1770 *c_out = c;
1771 return ret;
1772}
1773
1774
1775
1776
1777
1778static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1779{
1780 int c = rnp->completed;
1781 int needmore;
1782 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1783
1784 rnp->need_future_gp[c & 0x1] = 0;
1785 needmore = rnp->need_future_gp[(c + 1) & 0x1];
1786 trace_rcu_future_gp(rnp, rdp, c,
1787 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1788 return needmore;
1789}
1790
1791
/*
 * Awaken the grace-period kthread for the specified flavor of RCU.
 * Don't do a self-awaken, don't bother awakening when there is nothing
 * for the grace-period kthread to do (as in several CPUs raced to
 * awaken, and we lost), and don't try to awaken a kthread that has
 * not yet been created.
 */
1798static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1799{
1800 if (current == rsp->gp_kthread ||
1801 !READ_ONCE(rsp->gp_flags) ||
1802 !rsp->gp_kthread)
1803 return;
1804 swake_up(&rsp->gp_wq);
1805}
1806
1807
/*
 * If there is room, assign a ->completed number to any callbacks on
 * this CPU that have not already been assigned.  Also accelerate any
 * callbacks that were previously assigned a ->completed number that has
 * since proven to be too conservative, which can happen if callbacks get
 * assigned a ->completed number while RCU is idle, but with reference to
 * a non-root rcu_node structure.  This function is idempotent, so it does
 * not hurt to call it repeatedly.  Returns a flag saying that we should
 * awaken the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
1819static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1820 struct rcu_data *rdp)
1821{
1822 bool ret = false;
1823
1824 lockdep_assert_held(&rnp->lock);
1825
1826
1827 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1828 return false;
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840 if (rcu_segcblist_accelerate(&rdp->cblist, rcu_cbs_completed(rsp, rnp)))
1841 ret = rcu_start_future_gp(rnp, rdp, NULL);
1842
1843
1844 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1845 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1846 else
1847 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1848 return ret;
1849}
1850
1851
/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist.  This function is idempotent, so it does not hurt to
 * invoke it repeatedly.  Returns true if the grace-period kthread
 * needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
1861static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1862 struct rcu_data *rdp)
1863{
1864 lockdep_assert_held(&rnp->lock);
1865
1866
1867 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1868 return false;
1869
1870
1871
1872
1873
1874 rcu_segcblist_advance(&rdp->cblist, rnp->completed);
1875
1876
1877 return rcu_accelerate_cbs(rsp, rnp, rdp);
1878}
1879
1880
/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
1886static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1887 struct rcu_data *rdp)
1888{
1889 bool ret;
1890 bool need_gp;
1891
1892 lockdep_assert_held(&rnp->lock);
1893
1894
1895 if (rdp->completed == rnp->completed &&
1896 !unlikely(READ_ONCE(rdp->gpwrap))) {
1897
1898
1899 ret = rcu_accelerate_cbs(rsp, rnp, rdp);
1900
1901 } else {
1902
1903
1904 ret = rcu_advance_cbs(rsp, rnp, rdp);
1905
1906
1907 rdp->completed = rnp->completed;
1908 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1909 }
1910
1911 if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
1912
1913
1914
1915
1916
1917 rdp->gpnum = rnp->gpnum;
1918 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1919 need_gp = !!(rnp->qsmask & rdp->grpmask);
1920 rdp->cpu_no_qs.b.norm = need_gp;
1921 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
1922 rdp->core_needs_qs = need_gp;
1923 zero_cpu_stall_ticks(rdp);
1924 WRITE_ONCE(rdp->gpwrap, false);
1925 }
1926 return ret;
1927}
1928
1929static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1930{
1931 unsigned long flags;
1932 bool needwake;
1933 struct rcu_node *rnp;
1934
1935 local_irq_save(flags);
1936 rnp = rdp->mynode;
1937 if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
1938 rdp->completed == READ_ONCE(rnp->completed) &&
1939 !unlikely(READ_ONCE(rdp->gpwrap))) ||
1940 !raw_spin_trylock_rcu_node(rnp)) {
1941 local_irq_restore(flags);
1942 return;
1943 }
1944 needwake = __note_gp_changes(rsp, rnp, rdp);
1945 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1946 if (needwake)
1947 rcu_gp_kthread_wake(rsp);
1948}
1949
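/*
 * Delay a grace-period phase by the specified number of jiffies when the
 * grace-period debug delays are enabled, but only once every
 * rcu_num_nodes * PER_RCU_NODE_PERIOD * delay grace periods so that the
 * overall slowdown is roughly independent of the delay chosen.
 */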
1950static void rcu_gp_slow(struct rcu_state *rsp, int delay)
1951{
1952 if (delay > 0 &&
1953 !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1954 schedule_timeout_uninterruptible(delay);
1955}
1956
/*
 * Initialize a new grace period.  Return false if no grace period required.
 */
1960static bool rcu_gp_init(struct rcu_state *rsp)
1961{
1962 unsigned long oldmask;
1963 struct rcu_data *rdp;
1964 struct rcu_node *rnp = rcu_get_root(rsp);
1965
1966 WRITE_ONCE(rsp->gp_activity, jiffies);
1967 raw_spin_lock_irq_rcu_node(rnp);
1968 if (!READ_ONCE(rsp->gp_flags)) {
1969
1970 raw_spin_unlock_irq_rcu_node(rnp);
1971 return false;
1972 }
1973 WRITE_ONCE(rsp->gp_flags, 0);
1974
1975 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1976
1977
1978
1979
1980 raw_spin_unlock_irq_rcu_node(rnp);
1981 return false;
1982 }
1983
1984
1985 record_gp_stall_check_time(rsp);
1986
1987 smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
1988 trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
1989 raw_spin_unlock_irq_rcu_node(rnp);
1990
1991
1992
1993
1994
1995
1996
1997 rcu_for_each_leaf_node(rsp, rnp) {
1998 rcu_gp_slow(rsp, gp_preinit_delay);
1999 raw_spin_lock_irq_rcu_node(rnp);
2000 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
2001 !rnp->wait_blkd_tasks) {
2002
2003 raw_spin_unlock_irq_rcu_node(rnp);
2004 continue;
2005 }
2006
2007
2008 oldmask = rnp->qsmaskinit;
2009 rnp->qsmaskinit = rnp->qsmaskinitnext;
2010
2011
2012 if (!oldmask != !rnp->qsmaskinit) {
2013 if (!oldmask)
2014 rcu_init_new_rnp(rnp);
2015 else if (rcu_preempt_has_tasks(rnp))
2016 rnp->wait_blkd_tasks = true;
2017 else
2018 rcu_cleanup_dead_rnp(rnp);
2019 }
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030 if (rnp->wait_blkd_tasks &&
2031 (!rcu_preempt_has_tasks(rnp) ||
2032 rnp->qsmaskinit)) {
2033 rnp->wait_blkd_tasks = false;
2034 rcu_cleanup_dead_rnp(rnp);
2035 }
2036
2037 raw_spin_unlock_irq_rcu_node(rnp);
2038 }
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052 rcu_for_each_node_breadth_first(rsp, rnp) {
2053 rcu_gp_slow(rsp, gp_init_delay);
2054 raw_spin_lock_irq_rcu_node(rnp);
2055 rdp = this_cpu_ptr(rsp->rda);
2056 rcu_preempt_check_blocked_tasks(rnp);
2057 rnp->qsmask = rnp->qsmaskinit;
2058 WRITE_ONCE(rnp->gpnum, rsp->gpnum);
2059 if (WARN_ON_ONCE(rnp->completed != rsp->completed))
2060 WRITE_ONCE(rnp->completed, rsp->completed);
2061 if (rnp == rdp->mynode)
2062 (void)__note_gp_changes(rsp, rnp, rdp);
2063 rcu_preempt_boost_start_gp(rnp);
2064 trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
2065 rnp->level, rnp->grplo,
2066 rnp->grphi, rnp->qsmask);
2067 raw_spin_unlock_irq_rcu_node(rnp);
2068 cond_resched_rcu_qs();
2069 WRITE_ONCE(rsp->gp_activity, jiffies);
2070 }
2071
2072 return true;
2073}
2074
2075
2076
2077
2078
2079static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
2080{
2081 struct rcu_node *rnp = rcu_get_root(rsp);
2082
2083
2084 *gfp = READ_ONCE(rsp->gp_flags);
2085 if (*gfp & RCU_GP_FLAG_FQS)
2086 return true;
2087
2088
2089 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
2090 return true;
2091
2092 return false;
2093}
2094
2095
2096
2097
2098static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
2099{
2100 struct rcu_node *rnp = rcu_get_root(rsp);
2101
2102 WRITE_ONCE(rsp->gp_activity, jiffies);
2103 rsp->n_force_qs++;
2104 if (first_time) {
2105
2106 force_qs_rnp(rsp, dyntick_save_progress_counter);
2107 } else {
2108
2109 force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
2110 }
2111
2112 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2113 raw_spin_lock_irq_rcu_node(rnp);
2114 WRITE_ONCE(rsp->gp_flags,
2115 READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
2116 raw_spin_unlock_irq_rcu_node(rnp);
2117 }
2118}
2119
/*
 * Clean up after the old grace period.
 */
2123static void rcu_gp_cleanup(struct rcu_state *rsp)
2124{
2125 unsigned long gp_duration;
2126 bool needgp = false;
2127 int nocb = 0;
2128 struct rcu_data *rdp;
2129 struct rcu_node *rnp = rcu_get_root(rsp);
2130 struct swait_queue_head *sq;
2131
2132 WRITE_ONCE(rsp->gp_activity, jiffies);
2133 raw_spin_lock_irq_rcu_node(rnp);
2134 gp_duration = jiffies - rsp->gp_start;
2135 if (gp_duration > rsp->gp_max)
2136 rsp->gp_max = gp_duration;
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146 raw_spin_unlock_irq_rcu_node(rnp);
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157 rcu_for_each_node_breadth_first(rsp, rnp) {
2158 raw_spin_lock_irq_rcu_node(rnp);
2159 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
2160 WARN_ON_ONCE(rnp->qsmask);
2161 WRITE_ONCE(rnp->completed, rsp->gpnum);
2162 rdp = this_cpu_ptr(rsp->rda);
2163 if (rnp == rdp->mynode)
2164 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
2165
2166 nocb += rcu_future_gp_cleanup(rsp, rnp);
2167 sq = rcu_nocb_gp_get(rnp);
2168 raw_spin_unlock_irq_rcu_node(rnp);
2169 rcu_nocb_gp_cleanup(sq);
2170 cond_resched_rcu_qs();
2171 WRITE_ONCE(rsp->gp_activity, jiffies);
2172 rcu_gp_slow(rsp, gp_cleanup_delay);
2173 }
2174 rnp = rcu_get_root(rsp);
2175 raw_spin_lock_irq_rcu_node(rnp);
2176 rcu_nocb_gp_set(rnp, nocb);
2177
2178
2179 WRITE_ONCE(rsp->completed, rsp->gpnum);
2180 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
2181 rsp->gp_state = RCU_GP_IDLE;
2182 rdp = this_cpu_ptr(rsp->rda);
2183
2184 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
2185 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
2186 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
2187 trace_rcu_grace_period(rsp->name,
2188 READ_ONCE(rsp->gpnum),
2189 TPS("newreq"));
2190 }
2191 raw_spin_unlock_irq_rcu_node(rnp);
2192}
2193
/*
 * Body of kthread that handles grace periods.
 */
2197static int __noreturn rcu_gp_kthread(void *arg)
2198{
2199 bool first_gp_fqs;
2200 int gf;
2201 unsigned long j;
2202 int ret;
2203 struct rcu_state *rsp = arg;
2204 struct rcu_node *rnp = rcu_get_root(rsp);
2205
2206 rcu_bind_gp_kthread();
2207 for (;;) {
2208
2209
2210 for (;;) {
2211 trace_rcu_grace_period(rsp->name,
2212 READ_ONCE(rsp->gpnum),
2213 TPS("reqwait"));
2214 rsp->gp_state = RCU_GP_WAIT_GPS;
2215 swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
2216 RCU_GP_FLAG_INIT);
2217 rsp->gp_state = RCU_GP_DONE_GPS;
2218
2219 if (rcu_gp_init(rsp))
2220 break;
2221 cond_resched_rcu_qs();
2222 WRITE_ONCE(rsp->gp_activity, jiffies);
2223 WARN_ON(signal_pending(current));
2224 trace_rcu_grace_period(rsp->name,
2225 READ_ONCE(rsp->gpnum),
2226 TPS("reqwaitsig"));
2227 }
2228
2229
2230 first_gp_fqs = true;
2231 j = jiffies_till_first_fqs;
2232 if (j > HZ) {
2233 j = HZ;
2234 jiffies_till_first_fqs = HZ;
2235 }
2236 ret = 0;
2237 for (;;) {
2238 if (!ret) {
2239 rsp->jiffies_force_qs = jiffies + j;
2240 WRITE_ONCE(rsp->jiffies_kick_kthreads,
2241 jiffies + 3 * j);
2242 }
2243 trace_rcu_grace_period(rsp->name,
2244 READ_ONCE(rsp->gpnum),
2245 TPS("fqswait"));
2246 rsp->gp_state = RCU_GP_WAIT_FQS;
2247 ret = swait_event_idle_timeout(rsp->gp_wq,
2248 rcu_gp_fqs_check_wake(rsp, &gf), j);
2249 rsp->gp_state = RCU_GP_DOING_FQS;
2250
2251
2252 if (!READ_ONCE(rnp->qsmask) &&
2253 !rcu_preempt_blocked_readers_cgp(rnp))
2254 break;
2255
2256 if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
2257 (gf & RCU_GP_FLAG_FQS)) {
2258 trace_rcu_grace_period(rsp->name,
2259 READ_ONCE(rsp->gpnum),
2260 TPS("fqsstart"));
2261 rcu_gp_fqs(rsp, first_gp_fqs);
2262 first_gp_fqs = false;
2263 trace_rcu_grace_period(rsp->name,
2264 READ_ONCE(rsp->gpnum),
2265 TPS("fqsend"));
2266 cond_resched_rcu_qs();
2267 WRITE_ONCE(rsp->gp_activity, jiffies);
2268 ret = 0;
2269 j = jiffies_till_next_fqs;
2270 if (j > HZ) {
2271 j = HZ;
2272 jiffies_till_next_fqs = HZ;
2273 } else if (j < 1) {
2274 j = 1;
2275 jiffies_till_next_fqs = 1;
2276 }
2277 } else {
2278
2279 cond_resched_rcu_qs();
2280 WRITE_ONCE(rsp->gp_activity, jiffies);
2281 WARN_ON(signal_pending(current));
2282 trace_rcu_grace_period(rsp->name,
2283 READ_ONCE(rsp->gpnum),
2284 TPS("fqswaitsig"));
2285 ret = 1;
2286 j = jiffies;
2287 if (time_after(jiffies, rsp->jiffies_force_qs))
2288 j = 1;
2289 else
2290 j = rsp->jiffies_force_qs - j;
2291 }
2292 }
2293
2294
2295 rsp->gp_state = RCU_GP_CLEANUP;
2296 rcu_gp_cleanup(rsp);
2297 rsp->gp_state = RCU_GP_CLEANED;
2298 }
2299}
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312static bool
2313rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
2314 struct rcu_data *rdp)
2315{
2316 lockdep_assert_held(&rnp->lock);
2317 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
2318
2319
2320
2321
2322
2323
2324 return false;
2325 }
2326 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
2327 trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
2328 TPS("newreq"));
2329
2330
2331
2332
2333
2334
2335 return true;
2336}
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347static bool rcu_start_gp(struct rcu_state *rsp)
2348{
2349 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
2350 struct rcu_node *rnp = rcu_get_root(rsp);
2351 bool ret = false;
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
2362 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
2363 return ret;
2364}
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
2376 __releases(rcu_get_root(rsp)->lock)
2377{
2378 lockdep_assert_held(&rcu_get_root(rsp)->lock);
2379 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
2380 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2381 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
2382 rcu_gp_kthread_wake(rsp);
2383}
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395static void
2396rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
2397 struct rcu_node *rnp, unsigned long gps, unsigned long flags)
2398 __releases(rnp->lock)
2399{
2400 unsigned long oldmask = 0;
2401 struct rcu_node *rnp_c;
2402
2403 lockdep_assert_held(&rnp->lock);
2404
2405
2406 for (;;) {
2407 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
2408
2409
2410
2411
2412
2413 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2414 return;
2415 }
2416 WARN_ON_ONCE(oldmask);
2417 WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1 &&
2418 rcu_preempt_blocked_readers_cgp(rnp));
2419 rnp->qsmask &= ~mask;
2420 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
2421 mask, rnp->qsmask, rnp->level,
2422 rnp->grplo, rnp->grphi,
2423 !!rnp->gp_tasks);
2424 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2425
2426
2427 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2428 return;
2429 }
2430 mask = rnp->grpmask;
2431 if (rnp->parent == NULL) {
2432
2433
2434
2435 break;
2436 }
2437 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2438 rnp_c = rnp;
2439 rnp = rnp->parent;
2440 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2441 oldmask = rnp_c->qsmask;
2442 }
2443
2444
2445
2446
2447
2448
2449 rcu_report_qs_rsp(rsp, flags);
2450}
2451
2452
2453
2454
2455
2456
2457
2458
2459static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
2460 struct rcu_node *rnp, unsigned long flags)
2461 __releases(rnp->lock)
2462{
2463 unsigned long gps;
2464 unsigned long mask;
2465 struct rcu_node *rnp_p;
2466
2467 lockdep_assert_held(&rnp->lock);
2468 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
2469 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2470 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2471 return;
2472 }
2473
2474 rnp_p = rnp->parent;
2475 if (rnp_p == NULL) {
2476
2477
2478
2479
2480 rcu_report_qs_rsp(rsp, flags);
2481 return;
2482 }
2483
2484
2485 gps = rnp->gpnum;
2486 mask = rnp->grpmask;
2487 raw_spin_unlock_rcu_node(rnp);
2488 raw_spin_lock_rcu_node(rnp_p);
2489 rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
2490}
2491
2492
2493
2494
2495
2496static void
2497rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2498{
2499 unsigned long flags;
2500 unsigned long mask;
2501 bool needwake;
2502 struct rcu_node *rnp;
2503
2504 rnp = rdp->mynode;
2505 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2506 if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
2507 rnp->completed == rnp->gpnum || rdp->gpwrap) {
2508
2509
2510
2511
2512
2513
2514
2515 rdp->cpu_no_qs.b.norm = true;
2516 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
2517 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2518 return;
2519 }
2520 mask = rdp->grpmask;
2521 if ((rnp->qsmask & mask) == 0) {
2522 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2523 } else {
2524 rdp->core_needs_qs = false;
2525
2526
2527
2528
2529
2530 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2531
2532 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2533
2534 if (needwake)
2535 rcu_gp_kthread_wake(rsp);
2536 }
2537}
2538
2539
2540
2541
2542
2543
2544
2545static void
2546rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2547{
2548
2549 note_gp_changes(rsp, rdp);
2550
2551
2552
2553
2554
2555 if (!rdp->core_needs_qs)
2556 return;
2557
2558
2559
2560
2561
2562 if (rdp->cpu_no_qs.b.norm)
2563 return;
2564
2565
2566
2567
2568
2569 rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
2570}
2571
2572
2573
2574
2575static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2576{
2577 RCU_TRACE(unsigned long mask;)
2578 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
2579 RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
2580
2581 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2582 return;
2583
2584 RCU_TRACE(mask = rdp->grpmask;)
2585 trace_rcu_grace_period(rsp->name,
2586 rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2587 TPS("cpuofl"));
2588}
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2608{
2609 long mask;
2610 struct rcu_node *rnp = rnp_leaf;
2611
2612 lockdep_assert_held(&rnp->lock);
2613 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2614 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2615 return;
2616 for (;;) {
2617 mask = rnp->grpmask;
2618 rnp = rnp->parent;
2619 if (!rnp)
2620 break;
2621 raw_spin_lock_rcu_node(rnp);
2622 rnp->qsmaskinit &= ~mask;
2623 rnp->qsmask &= ~mask;
2624 if (rnp->qsmaskinit) {
2625 raw_spin_unlock_rcu_node(rnp);
2626
2627 return;
2628 }
2629 raw_spin_unlock_rcu_node(rnp);
2630 }
2631}
2632
2633
2634
2635
2636
2637
2638
2639static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2640{
2641 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2642 struct rcu_node *rnp = rdp->mynode;
2643
2644 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2645 return;
2646
2647
2648 rcu_boost_kthread_setaffinity(rnp, -1);
2649}
2650
/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */
2655static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2656{
2657 unsigned long flags;
2658 struct rcu_head *rhp;
2659 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2660 long bl, count;
2661
2662
2663 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2664 trace_rcu_batch_start(rsp->name,
2665 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2666 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2667 trace_rcu_batch_end(rsp->name, 0,
2668 !rcu_segcblist_empty(&rdp->cblist),
2669 need_resched(), is_idle_task(current),
2670 rcu_is_callbacks_kthread());
2671 return;
2672 }
2673
2674
2675
2676
2677
2678
2679 local_irq_save(flags);
2680 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2681 bl = rdp->blimit;
2682 trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2683 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2684 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2685 local_irq_restore(flags);
2686
2687
2688 rhp = rcu_cblist_dequeue(&rcl);
2689 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2690 debug_rcu_head_unqueue(rhp);
2691 if (__rcu_reclaim(rsp->name, rhp))
2692 rcu_cblist_dequeued_lazy(&rcl);
2693
2694
2695
2696
2697 if (-rcl.len >= bl &&
2698 (need_resched() ||
2699 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2700 break;
2701 }
2702
2703 local_irq_save(flags);
2704 count = -rcl.len;
2705 trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
2706 is_idle_task(current), rcu_is_callbacks_kthread());
2707
2708
2709 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2710 smp_mb();
2711 rdp->n_cbs_invoked += count;
2712 rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2713
2714
2715 count = rcu_segcblist_n_cbs(&rdp->cblist);
2716 if (rdp->blimit == LONG_MAX && count <= qlowmark)
2717 rdp->blimit = blimit;
2718
2719
2720 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2721 rdp->qlen_last_fqs_check = 0;
2722 rdp->n_force_qs_snap = rsp->n_force_qs;
2723 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2724 rdp->qlen_last_fqs_check = count;
2725 WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));
2726
2727 local_irq_restore(flags);
2728
2729
2730 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2731 invoke_rcu_core();
2732}
2733
/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule RCU core processing.
 *
 * This function must be called from hardirq context.  It is normally
 * invoked from the scheduling-clock interrupt.
 */
2742void rcu_check_callbacks(int user)
2743{
2744 trace_rcu_utilization(TPS("Start scheduler-tick"));
2745 increment_cpu_stall_ticks();
2746 if (user || rcu_is_cpu_rrupt_from_idle()) {
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760 rcu_sched_qs();
2761 rcu_bh_qs();
2762
2763 } else if (!in_softirq()) {
2764
2765
2766
2767
2768
2769
2770
2771
2772 rcu_bh_qs();
2773 }
2774 rcu_preempt_check_callbacks();
2775 if (rcu_pending())
2776 invoke_rcu_core();
2777 if (user)
2778 rcu_note_voluntary_context_switch(current);
2779 trace_rcu_utilization(TPS("End scheduler-tick"));
2780}
2781
2782
/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
 * Also initiate boosting for any threads blocked on the root rcu_node.
 *
 * The caller must have suppressed start of new grace periods.
 */
2789static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
2790{
2791 int cpu;
2792 unsigned long flags;
2793 unsigned long mask;
2794 struct rcu_node *rnp;
2795
2796 rcu_for_each_leaf_node(rsp, rnp) {
2797 cond_resched_rcu_qs();
2798 mask = 0;
2799 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2800 if (rnp->qsmask == 0) {
2801 if (rcu_state_p == &rcu_sched_state ||
2802 rsp != rcu_state_p ||
2803 rcu_preempt_blocked_readers_cgp(rnp)) {
2804
2805
2806
2807
2808
2809 rcu_initiate_boost(rnp, flags);
2810
2811 continue;
2812 }
2813 if (rnp->parent &&
2814 (rnp->parent->qsmask & rnp->grpmask)) {
2815
2816
2817
2818
2819
2820 rcu_report_unblock_qs_rnp(rsp, rnp, flags);
2821
2822 continue;
2823 }
2824 }
2825 for_each_leaf_node_possible_cpu(rnp, cpu) {
2826 unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
2827 if ((rnp->qsmask & bit) != 0) {
2828 if (f(per_cpu_ptr(rsp->rda, cpu)))
2829 mask |= bit;
2830 }
2831 }
2832 if (mask != 0) {
2833
2834 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2835 } else {
2836
2837 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2838 }
2839 }
2840}
2841
2842
2843
2844
2845
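/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */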
2846static void force_quiescent_state(struct rcu_state *rsp)
2847{
2848 unsigned long flags;
2849 bool ret;
2850 struct rcu_node *rnp;
2851 struct rcu_node *rnp_old = NULL;
2852
2853
2854 rnp = __this_cpu_read(rsp->rda->mynode);
2855 for (; rnp != NULL; rnp = rnp->parent) {
2856 ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2857 !raw_spin_trylock(&rnp->fqslock);
2858 if (rnp_old != NULL)
2859 raw_spin_unlock(&rnp_old->fqslock);
2860 if (ret) {
2861 rsp->n_force_qs_lh++;
2862 return;
2863 }
2864 rnp_old = rnp;
2865 }
2866
2867
2868
2869 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2870 raw_spin_unlock(&rnp_old->fqslock);
2871 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2872 rsp->n_force_qs_lh++;
2873 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2874 return;
2875 }
2876 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2877 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2878 rcu_gp_kthread_wake(rsp);
2879}
2880
2881
2882
2883
2884
2885
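/*
 * This does the RCU core processing work for the specified rcu_state
 * structure.  This may be called only from the CPU to whom the rdp
 * belongs.
 */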
2886static void
2887__rcu_process_callbacks(struct rcu_state *rsp)
2888{
2889 unsigned long flags;
2890 bool needwake;
2891 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2892
2893 WARN_ON_ONCE(!rdp->beenonline);
2894
2895
2896 rcu_check_quiescent_state(rsp, rdp);
2897
2898
2899 local_irq_save(flags);
2900 if (cpu_needs_another_gp(rsp, rdp)) {
2901 raw_spin_lock_rcu_node(rcu_get_root(rsp));
2902 needwake = rcu_start_gp(rsp);
2903 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
2904 if (needwake)
2905 rcu_gp_kthread_wake(rsp);
2906 } else {
2907 local_irq_restore(flags);
2908 }
2909
2910
2911 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2912 invoke_rcu_callbacks(rsp, rdp);
2913
2914
2915 do_nocb_deferred_wakeup(rdp);
2916}
2917
2918
2919
2920
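/*
 * Do RCU core processing for the current CPU.
 */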
2921static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
2922{
2923 struct rcu_state *rsp;
2924
2925 if (cpu_is_offline(smp_processor_id()))
2926 return;
2927 trace_rcu_utilization(TPS("Start RCU core"));
2928 for_each_rcu_flavor(rsp)
2929 __rcu_process_callbacks(rsp);
2930 trace_rcu_utilization(TPS("End RCU core"));
2931}
2932
2933
2934
2935
2936
2937
2938
2939
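/*
 * Schedule RCU callback invocation.  If the specified type of RCU
 * does not support RCU priority boosting, just do a direct call,
 * otherwise wake up the per-CPU kernel kthread.  Note that because we
 * are running on the current CPU with softirqs disabled, the
 * rcu_cpu_kthread_task cannot disappear out from under us.
 */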
2940static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2941{
2942 if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
2943 return;
2944 if (likely(!rsp->boost)) {
2945 rcu_do_batch(rsp, rdp);
2946 return;
2947 }
2948 invoke_rcu_callbacks_kthread();
2949}
2950
2951static void invoke_rcu_core(void)
2952{
2953 if (cpu_online(smp_processor_id()))
2954 raise_softirq(RCU_SOFTIRQ);
2955}
2956
2957
2958
2959
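/*
 * Handle any core-RCU processing required by a call_rcu() invocation.
 */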
2960static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2961 struct rcu_head *head, unsigned long flags)
2962{
2963 bool needwake;
2964
2965
2966
2967
2968
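	/*
	 * If called from an extended quiescent state, invoke the RCU
	 * core in order to force a re-evaluation of RCU's idleness.
	 */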
2969 if (!rcu_is_watching())
2970 invoke_rcu_core();
2971
2972
2973 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2974 return;
2975
2976
2977
2978
2979
2980
2981
2982
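	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke force_quiescent_state()
	 * if some other CPU has recently done so.  Also, don't bother
	 * invoking force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */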
2983 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2984 rdp->qlen_last_fqs_check + qhimark)) {
2985
2986
2987 note_gp_changes(rsp, rdp);
2988
2989
2990 if (!rcu_gp_in_progress(rsp)) {
2991 struct rcu_node *rnp_root = rcu_get_root(rsp);
2992
2993 raw_spin_lock_rcu_node(rnp_root);
2994 needwake = rcu_start_gp(rsp);
2995 raw_spin_unlock_rcu_node(rnp_root);
2996 if (needwake)
2997 rcu_gp_kthread_wake(rsp);
2998 } else {
2999
3000 rdp->blimit = LONG_MAX;
3001 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
3002 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
3003 force_quiescent_state(rsp);
3004 rdp->n_force_qs_snap = rsp->n_force_qs;
3005 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
3006 }
3007 }
3008}
3009
3010
3011
3012
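/*
 * RCU callback function to leak a callback.
 */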
3013static void rcu_leak_callback(struct rcu_head *rhp)
3014{
3015}
3016
3017
3018
3019
3020
3021
3022
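/*
 * Helper function for call_rcu() and friends.  The cpu argument will
 * normally be -1, indicating "currently running CPU".  It may specify
 * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
 * is expected to specify a CPU.
 */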
3023static void
3024__call_rcu(struct rcu_head *head, rcu_callback_t func,
3025 struct rcu_state *rsp, int cpu, bool lazy)
3026{
3027 unsigned long flags;
3028 struct rcu_data *rdp;
3029
3030
3031 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3032
3033 if (debug_rcu_head_queue(head)) {
3034
3035
3036
3037
3038
3039 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n",
3040 head, head->func);
3041 WRITE_ONCE(head->func, rcu_leak_callback);
3042 return;
3043 }
3044 head->func = func;
3045 head->next = NULL;
3046 local_irq_save(flags);
3047 rdp = this_cpu_ptr(rsp->rda);
3048
3049
3050 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
3051 int offline;
3052
3053 if (cpu != -1)
3054 rdp = per_cpu_ptr(rsp->rda, cpu);
3055 if (likely(rdp->mynode)) {
3056
3057 offline = !__call_rcu_nocb(rdp, head, lazy, flags);
3058 WARN_ON_ONCE(offline);
3059
3060 local_irq_restore(flags);
3061 return;
3062 }
3063
3064
3065
3066
3067 BUG_ON(cpu != -1);
3068 WARN_ON_ONCE(!rcu_is_watching());
3069 if (rcu_segcblist_empty(&rdp->cblist))
3070 rcu_segcblist_init(&rdp->cblist);
3071 }
3072 rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
3073 if (!lazy)
3074 rcu_idle_count_callbacks_posted();
3075
3076 if (__is_kfree_rcu_offset((unsigned long)func))
3077 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
3078 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
3079 rcu_segcblist_n_cbs(&rdp->cblist));
3080 else
3081 trace_rcu_callback(rsp->name, head,
3082 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
3083 rcu_segcblist_n_cbs(&rdp->cblist));
3084
3085
3086 __call_rcu_core(rsp, rdp, head, flags);
3087 local_irq_restore(flags);
3088}
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
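/**
 * call_rcu_sched() - Queue an RCU callback for invocation after an
 * rcu-sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full
 * rcu-sched grace period elapses, in other words after all currently
 * executing rcu-sched read-side critical sections have completed.
 * These read-side critical sections are delimited by
 * rcu_read_lock_sched() and rcu_read_unlock_sched(), and may be nested;
 * anything that disables preemption also marks an rcu-sched read-side
 * critical section.
 */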
3110void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
3111{
3112 __call_rcu(head, func, &rcu_sched_state, -1, 0);
3113}
3114EXPORT_SYMBOL_GPL(call_rcu_sched);
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
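/**
 * call_rcu_bh() - Queue an RCU callback for invocation after an rcu_bh
 * grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full rcu_bh
 * grace period elapses, in other words after all currently executing
 * rcu_bh read-side critical sections have completed.  These read-side
 * critical sections are delimited by rcu_read_lock_bh() and
 * rcu_read_unlock_bh(), and may be nested; completion of a softirq
 * handler also ends an rcu_bh read-side critical section.
 */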
3138void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
3139{
3140 __call_rcu(head, func, &rcu_bh_state, -1, 0);
3141}
3142EXPORT_SYMBOL_GPL(call_rcu_bh);
3143
3144
3145
3146
3147
3148
3149
3150
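/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */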
3151void kfree_call_rcu(struct rcu_head *head,
3152 rcu_callback_t func)
3153{
3154 __call_rcu(head, func, rcu_state_p, -1, 1);
3155}
3156EXPORT_SYMBOL_GPL(kfree_call_rcu);
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
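/*
 * Because a context switch is a grace period for RCU-sched and RCU-bh,
 * any blocking grace-period wait automatically implies a grace period
 * if there is only one CPU online at any point during execution of
 * either synchronize_sched() or synchronize_rcu_bh().  It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds
 * some overhead: RCU still operates correctly.
 */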
3167static inline int rcu_blocking_is_gp(void)
3168{
3169 int ret;
3170
3171 might_sleep();
3172 preempt_disable();
3173 ret = num_online_cpus() <= 1;
3174 preempt_enable();
3175 return ret;
3176}
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
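/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.  These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preemption
 * disabling, local_irq_disable(), and so on also serve as rcu-sched
 * read-side critical sections.
 */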
3213void synchronize_sched(void)
3214{
3215 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3216 lock_is_held(&rcu_lock_map) ||
3217 lock_is_held(&rcu_sched_lock_map),
3218 "Illegal synchronize_sched() in RCU-sched read-side critical section");
3219 if (rcu_blocking_is_gp())
3220 return;
3221 if (rcu_gp_is_expedited())
3222 synchronize_sched_expedited();
3223 else
3224 wait_rcu_gp(call_rcu_sched);
3225}
3226EXPORT_SYMBOL_GPL(synchronize_sched);
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
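/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 */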
3240void synchronize_rcu_bh(void)
3241{
3242 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3243 lock_is_held(&rcu_lock_map) ||
3244 lock_is_held(&rcu_sched_lock_map),
3245 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
3246 if (rcu_blocking_is_gp())
3247 return;
3248 if (rcu_gp_is_expedited())
3249 synchronize_rcu_bh_expedited();
3250 else
3251 wait_rcu_gp(call_rcu_bh);
3252}
3253EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
3254
3255
3256
3257
3258
3259
3260
3261
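/**
 * get_state_synchronize_rcu - Snapshot current RCU state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */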
3262unsigned long get_state_synchronize_rcu(void)
3263{
3264
3265
3266
3267
3268 smp_mb();
3269
3270
3271
3272
3273
3274
3275 return smp_load_acquire(&rcu_state_p->gpnum);
3276}
3277EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
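/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 *
 * If a full RCU grace period has elapsed since the earlier call to
 * get_state_synchronize_rcu(), just return.  Otherwise, invoke
 * synchronize_rcu() to wait for a full grace period.
 */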
3293void cond_synchronize_rcu(unsigned long oldstate)
3294{
3295 unsigned long newstate;
3296
3297
3298
3299
3300
3301 newstate = smp_load_acquire(&rcu_state_p->completed);
3302 if (ULONG_CMP_GE(oldstate, newstate))
3303 synchronize_rcu();
3304}
3305EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3306
3307
3308
3309
3310
3311
3312
3313
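/**
 * get_state_synchronize_sched - Snapshot current RCU-sched state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_sched()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */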
3314unsigned long get_state_synchronize_sched(void)
3315{
3316
3317
3318
3319
3320 smp_mb();
3321
3322
3323
3324
3325
3326
3327 return smp_load_acquire(&rcu_sched_state.gpnum);
3328}
3329EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
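/**
 * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_sched()
 *
 * If a full RCU-sched grace period has elapsed since the earlier call to
 * get_state_synchronize_sched(), just return.  Otherwise, invoke
 * synchronize_sched() to wait for a full grace period.
 */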
3345void cond_synchronize_sched(unsigned long oldstate)
3346{
3347 unsigned long newstate;
3348
3349
3350
3351
3352
3353 newstate = smp_load_acquire(&rcu_sched_state.completed);
3354 if (ULONG_CMP_GE(oldstate, newstate))
3355 synchronize_sched();
3356}
3357EXPORT_SYMBOL_GPL(cond_synchronize_sched);
3358
3359
3360
3361
3362
3363
3364
3365
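/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */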
3366static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3367{
3368 struct rcu_node *rnp = rdp->mynode;
3369
3370 rdp->n_rcu_pending++;
3371
3372
3373 check_cpu_stall(rsp, rdp);
3374
3375
3376 if (rcu_nohz_full_cpu(rsp))
3377 return 0;
3378
3379
3380 if (rcu_scheduler_fully_active &&
3381 rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
3382 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) {
3383 rdp->n_rp_core_needs_qs++;
3384 } else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
3385 rdp->n_rp_report_qs++;
3386 return 1;
3387 }
3388
3389
3390 if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
3391 rdp->n_rp_cb_ready++;
3392 return 1;
3393 }
3394
3395
3396 if (cpu_needs_another_gp(rsp, rdp)) {
3397 rdp->n_rp_cpu_needs_gp++;
3398 return 1;
3399 }
3400
3401
3402 if (READ_ONCE(rnp->completed) != rdp->completed) {
3403 rdp->n_rp_gp_completed++;
3404 return 1;
3405 }
3406
3407
3408 if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
3409 unlikely(READ_ONCE(rdp->gpwrap))) {
3410 rdp->n_rp_gp_started++;
3411 return 1;
3412 }
3413
3414
3415 if (rcu_nocb_need_deferred_wakeup(rdp)) {
3416 rdp->n_rp_nocb_defer_wakeup++;
3417 return 1;
3418 }
3419
3420
3421 rdp->n_rp_need_nothing++;
3422 return 0;
3423}
3424
3425
3426
3427
3428
3429
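/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */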
3430static int rcu_pending(void)
3431{
3432 struct rcu_state *rsp;
3433
3434 for_each_rcu_flavor(rsp)
3435 if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
3436 return 1;
3437 return 0;
3438}
3439
3440
3441
3442
3443
3444
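/*
 * Return true if the specified CPU has any callback.  If all_lazy is
 * non-NULL, store an indication of whether all callbacks are lazy.
 * (If there are no callbacks, all of them are deemed to be lazy.)
 */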
3445static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
3446{
3447 bool al = true;
3448 bool hc = false;
3449 struct rcu_data *rdp;
3450 struct rcu_state *rsp;
3451
3452 for_each_rcu_flavor(rsp) {
3453 rdp = this_cpu_ptr(rsp->rda);
3454 if (rcu_segcblist_empty(&rdp->cblist))
3455 continue;
3456 hc = true;
3457 if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
3458 al = false;
3459 break;
3460 }
3461 }
3462 if (all_lazy)
3463 *all_lazy = al;
3464 return hc;
3465}
3466
3467
3468
3469
3470
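/*
 * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
 * the compiler is expected to optimize this away.
 */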
3471static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
3472 int cpu, unsigned long done)
3473{
3474 trace_rcu_barrier(rsp->name, s, cpu,
3475 atomic_read(&rsp->barrier_cpu_count), done);
3476}
3477
3478
3479
3480
3481
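/*
 * RCU callback function for _rcu_barrier().  If we are last, wake
 * up the task executing _rcu_barrier().
 */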
3482static void rcu_barrier_callback(struct rcu_head *rhp)
3483{
3484 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
3485 struct rcu_state *rsp = rdp->rsp;
3486
3487 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
3488 _rcu_barrier_trace(rsp, TPS("LastCB"), -1,
3489 rsp->barrier_sequence);
3490 complete(&rsp->barrier_completion);
3491 } else {
3492 _rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
3493 }
3494}
3495
3496
3497
3498
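/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */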
3499static void rcu_barrier_func(void *type)
3500{
3501 struct rcu_state *rsp = type;
3502 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
3503
3504 _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
3505 rdp->barrier_head.func = rcu_barrier_callback;
3506 debug_rcu_head_queue(&rdp->barrier_head);
3507 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
3508 atomic_inc(&rsp->barrier_cpu_count);
3509 } else {
3510 debug_rcu_head_unqueue(&rdp->barrier_head);
3511 _rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
3512 rsp->barrier_sequence);
3513 }
3514}
3515
3516
3517
3518
3519
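/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */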
3520static void _rcu_barrier(struct rcu_state *rsp)
3521{
3522 int cpu;
3523 struct rcu_data *rdp;
3524 unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
3525
3526 _rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
3527
3528
3529 mutex_lock(&rsp->barrier_mutex);
3530
3531
3532 if (rcu_seq_done(&rsp->barrier_sequence, s)) {
3533 _rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
3534 rsp->barrier_sequence);
3535 smp_mb();
3536 mutex_unlock(&rsp->barrier_mutex);
3537 return;
3538 }
3539
3540
3541 rcu_seq_start(&rsp->barrier_sequence);
3542 _rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
3543
3544
3545
3546
3547
3548
3549
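	/*
	 * Initialize the count to one rather than to zero in order to
	 * avoid a too-soon return to zero in case of a short grace period
	 * (or preemption of this task).  Exclude CPU-hotplug operations
	 * to ensure that no offline CPU has callbacks queued.
	 */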
3550 init_completion(&rsp->barrier_completion);
3551 atomic_set(&rsp->barrier_cpu_count, 1);
3552 get_online_cpus();
3553
3554
3555
3556
3557
3558
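	/*
	 * Force each CPU with callbacks to register a new callback.
	 * When that callback is invoked, we will know that all of the
	 * corresponding CPU's preceding callbacks have been invoked.
	 */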
3559 for_each_possible_cpu(cpu) {
3560 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3561 continue;
3562 rdp = per_cpu_ptr(rsp->rda, cpu);
3563 if (rcu_is_nocb_cpu(cpu)) {
3564 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
3565 _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
3566 rsp->barrier_sequence);
3567 } else {
3568 _rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
3569 rsp->barrier_sequence);
3570 smp_mb__before_atomic();
3571 atomic_inc(&rsp->barrier_cpu_count);
3572 __call_rcu(&rdp->barrier_head,
3573 rcu_barrier_callback, rsp, cpu, 0);
3574 }
3575 } else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
3576 _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
3577 rsp->barrier_sequence);
3578 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3579 } else {
3580 _rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
3581 rsp->barrier_sequence);
3582 }
3583 }
3584 put_online_cpus();
3585
3586
3587
3588
3589
3590 if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3591 complete(&rsp->barrier_completion);
3592
3593
3594 wait_for_completion(&rsp->barrier_completion);
3595
3596
3597 _rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
3598 rcu_seq_end(&rsp->barrier_sequence);
3599
3600
3601 mutex_unlock(&rsp->barrier_mutex);
3602}
3603
3604
3605
3606
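/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */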
3607void rcu_barrier_bh(void)
3608{
3609 _rcu_barrier(&rcu_bh_state);
3610}
3611EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3612
3613
3614
3615
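/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */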
3616void rcu_barrier_sched(void)
3617{
3618 _rcu_barrier(&rcu_sched_state);
3619}
3620EXPORT_SYMBOL_GPL(rcu_barrier_sched);
3621
3622
3623
3624
3625
3626
3627
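/*
 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online.  The caller
 * must hold the corresponding leaf rcu_node ->lock with interrupts
 * disabled.
 */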
3628static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3629{
3630 long mask;
3631 struct rcu_node *rnp = rnp_leaf;
3632
3633 lockdep_assert_held(&rnp->lock);
3634 for (;;) {
3635 mask = rnp->grpmask;
3636 rnp = rnp->parent;
3637 if (rnp == NULL)
3638 return;
3639 raw_spin_lock_rcu_node(rnp);
3640 rnp->qsmaskinit |= mask;
3641 raw_spin_unlock_rcu_node(rnp);
3642 }
3643}
3644
3645
3646
3647
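/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */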
3648static void __init
3649rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3650{
3651 unsigned long flags;
3652 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3653 struct rcu_node *rnp = rcu_get_root(rsp);
3654
3655
3656 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3657 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
3658 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3659 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3660 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
3661 rdp->cpu = cpu;
3662 rdp->rsp = rsp;
3663 rcu_boot_init_nocb_percpu_data(rdp);
3664 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3665}
3666
3667
3668
3669
3670
3671
3672
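/*
 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we can
 * accept some slop in the rsp->completed access due to the fact that this
 * CPU cannot possibly have any RCU callbacks in flight yet.
 */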
3673static void
3674rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3675{
3676 unsigned long flags;
3677 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3678 struct rcu_node *rnp = rcu_get_root(rsp);
3679
3680
3681 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3682 rdp->qlen_last_fqs_check = 0;
3683 rdp->n_force_qs_snap = rsp->n_force_qs;
3684 rdp->blimit = blimit;
3685 if (rcu_segcblist_empty(&rdp->cblist) &&
3686 !init_nocb_callback_list(rdp))
3687 rcu_segcblist_init(&rdp->cblist);
3688 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
3689 rcu_dynticks_eqs_online();
3690 raw_spin_unlock_rcu_node(rnp);
3691
3692
3693
3694
3695
3696
3697 rnp = rdp->mynode;
3698 raw_spin_lock_rcu_node(rnp);
3699 rdp->beenonline = true;
3700 rdp->gpnum = rnp->completed;
3701 rdp->completed = rnp->completed;
3702 rdp->cpu_no_qs.b.norm = true;
3703 rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
3704 rdp->core_needs_qs = false;
3705 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3706 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3707}
3708
3709
3710
3711
3712
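/*
 * Invoked early in the CPU-online process, when pretty much all services
 * are available.  The incoming CPU is not present.
 */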
3713int rcutree_prepare_cpu(unsigned int cpu)
3714{
3715 struct rcu_state *rsp;
3716
3717 for_each_rcu_flavor(rsp)
3718 rcu_init_percpu_data(cpu, rsp);
3719
3720 rcu_prepare_kthreads(cpu);
3721 rcu_spawn_all_nocb_kthreads(cpu);
3722
3723 return 0;
3724}
3725
3726
3727
3728
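/*
 * Update RCU priority boost kthread affinity for CPU-hotplug changes.
 */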
3729static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3730{
3731 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3732
3733 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3734}
3735
3736
3737
3738
3739
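/*
 * Near the end of the CPU-online process.  Pretty much all services
 * enabled, and the CPU is now very much alive.
 */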
3740int rcutree_online_cpu(unsigned int cpu)
3741{
3742 sync_sched_exp_online_cleanup(cpu);
3743 rcutree_affinity_setting(cpu, -1);
3744 if (IS_ENABLED(CONFIG_TREE_SRCU))
3745 srcu_online_cpu(cpu);
3746 return 0;
3747}
3748
3749
3750
3751
3752
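/*
 * Near the beginning of the CPU-offline process.  The CPU is still very
 * much alive with pretty much all services enabled.
 */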
3753int rcutree_offline_cpu(unsigned int cpu)
3754{
3755 rcutree_affinity_setting(cpu, cpu);
3756 if (IS_ENABLED(CONFIG_TREE_SRCU))
3757 srcu_offline_cpu(cpu);
3758 return 0;
3759}
3760
3761
3762
3763
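/*
 * Near the end of the offline process.  Trace the fact that this CPU
 * is going offline.
 */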
3764int rcutree_dying_cpu(unsigned int cpu)
3765{
3766 struct rcu_state *rsp;
3767
3768 for_each_rcu_flavor(rsp)
3769 rcu_cleanup_dying_cpu(rsp);
3770 return 0;
3771}
3772
3773
3774
3775
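/*
 * The outgoing CPU is fully offline.  Clean up each flavor's state for
 * the dead CPU and do any deferred no-CBs wakeups on its behalf.
 */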
3776int rcutree_dead_cpu(unsigned int cpu)
3777{
3778 struct rcu_state *rsp;
3779
3780 for_each_rcu_flavor(rsp) {
3781 rcu_cleanup_dead_cpu(cpu, rsp);
3782 do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
3783 }
3784 return 0;
3785}
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
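/*
 * Mark the specified CPU as being online so that subsequent grace periods
 * (both expedited and normal) will wait on it.  Note that this means that
 * incoming CPUs are not allowed to use RCU read-side critical sections
 * until this function is called.
 *
 * Note that this function is special in that it is invoked directly
 * from the incoming CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */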
3798void rcu_cpu_starting(unsigned int cpu)
3799{
3800 unsigned long flags;
3801 unsigned long mask;
3802 int nbits;
3803 unsigned long oldmask;
3804 struct rcu_data *rdp;
3805 struct rcu_node *rnp;
3806 struct rcu_state *rsp;
3807
3808 for_each_rcu_flavor(rsp) {
3809 rdp = per_cpu_ptr(rsp->rda, cpu);
3810 rnp = rdp->mynode;
3811 mask = rdp->grpmask;
3812 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3813 rnp->qsmaskinitnext |= mask;
3814 oldmask = rnp->expmaskinitnext;
3815 rnp->expmaskinitnext |= mask;
3816 oldmask ^= rnp->expmaskinitnext;
3817 nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
3818
3819 smp_store_release(&rsp->ncpus, rsp->ncpus + nbits);
3820 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3821 }
3822 smp_mb();
3823}
3824
3825#ifdef CONFIG_HOTPLUG_CPU
3826
3827
3828
3829
3830
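/*
 * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
 * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
 * bit masks.
 */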
3831static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
3832{
3833 unsigned long flags;
3834 unsigned long mask;
3835 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3836 struct rcu_node *rnp = rdp->mynode;
3837
3838
3839 mask = rdp->grpmask;
3840 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3841 rnp->qsmaskinitnext &= ~mask;
3842 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3843}
3844
3845
3846
3847
3848
3849
3850
3851
3852
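/*
 * The outgoing CPU has no further need of RCU, so remove it from
 * the list of CPUs that RCU must track.
 *
 * Note that this function is special in that it is invoked directly
 * from the outgoing CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */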
3853void rcu_report_dead(unsigned int cpu)
3854{
3855 struct rcu_state *rsp;
3856
3857
3858 preempt_disable();
3859 rcu_report_exp_rdp(&rcu_sched_state,
3860 this_cpu_ptr(rcu_sched_state.rda), true);
3861 preempt_enable();
3862 for_each_rcu_flavor(rsp)
3863 rcu_cleanup_dying_idle_cpu(cpu, rsp);
3864}
3865
3866
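/* Migrate the dead CPU's callbacks to the current CPU. */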
3867static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
3868{
3869 unsigned long flags;
3870 struct rcu_data *my_rdp;
3871 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3872 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
3873
3874 if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
3875 return;
3876
3877 local_irq_save(flags);
3878 my_rdp = this_cpu_ptr(rsp->rda);
3879 if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
3880 local_irq_restore(flags);
3881 return;
3882 }
3883 raw_spin_lock_rcu_node(rnp_root);
3884 rcu_advance_cbs(rsp, rnp_root, rdp);
3885 rcu_advance_cbs(rsp, rnp_root, my_rdp);
3886 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
3887 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
3888 !rcu_segcblist_n_cbs(&my_rdp->cblist));
3889 raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
3890 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
3891 !rcu_segcblist_empty(&rdp->cblist),
3892 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
3893 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
3894 rcu_segcblist_first_cb(&rdp->cblist));
3895}
3896
3897
3898
3899
3900
3901
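/*
 * The outgoing CPU has just passed through the dying-idle state,
 * and we are being invoked from the CPU that was IPIed to continue the
 * offline operation.  We need to migrate the outgoing CPU's callbacks.
 */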
3902void rcutree_migrate_callbacks(int cpu)
3903{
3904 struct rcu_state *rsp;
3905
3906 for_each_rcu_flavor(rsp)
3907 rcu_migrate_callbacks(cpu, rsp);
3908}
3909#endif
3910
3911
3912
3913
3914
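/*
 * On non-huge systems, use expedited RCU grace periods to make suspend
 * and hibernation run faster.
 */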
3915static int rcu_pm_notify(struct notifier_block *self,
3916 unsigned long action, void *hcpu)
3917{
3918 switch (action) {
3919 case PM_HIBERNATION_PREPARE:
3920 case PM_SUSPEND_PREPARE:
3921 if (nr_cpu_ids <= 256)
3922 rcu_expedite_gp();
3923 break;
3924 case PM_POST_HIBERNATION:
3925 case PM_POST_SUSPEND:
3926 if (nr_cpu_ids <= 256)
3927 rcu_unexpedite_gp();
3928 break;
3929 default:
3930 break;
3931 }
3932 return NOTIFY_OK;
3933}
3934
3935
3936
3937
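/*
 * Spawn the kthreads that handle each RCU flavor's grace periods.
 */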
3938static int __init rcu_spawn_gp_kthread(void)
3939{
3940 unsigned long flags;
3941 int kthread_prio_in = kthread_prio;
3942 struct rcu_node *rnp;
3943 struct rcu_state *rsp;
3944 struct sched_param sp;
3945 struct task_struct *t;
3946
3947
3948 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3949 kthread_prio = 1;
3950 else if (kthread_prio < 0)
3951 kthread_prio = 0;
3952 else if (kthread_prio > 99)
3953 kthread_prio = 99;
3954 if (kthread_prio != kthread_prio_in)
3955 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3956 kthread_prio, kthread_prio_in);
3957
3958 rcu_scheduler_fully_active = 1;
3959 for_each_rcu_flavor(rsp) {
3960 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
3961 BUG_ON(IS_ERR(t));
3962 rnp = rcu_get_root(rsp);
3963 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3964 rsp->gp_kthread = t;
3965 if (kthread_prio) {
3966 sp.sched_priority = kthread_prio;
3967 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3968 }
3969 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3970 wake_up_process(t);
3971 }
3972 rcu_spawn_nocb_kthreads();
3973 rcu_spawn_boost_kthreads();
3974 return 0;
3975}
3976early_initcall(rcu_spawn_gp_kthread);
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
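/*
 * This function is invoked towards the end of the scheduler's
 * initialization process.  Before this is called, the idle task might
 * contain synchronous grace-period primitives (during which time, this
 * idle task is booting the system, and such primitives are no-ops).
 * After this function is called, any synchronous grace-period primitives
 * are run as expedited, with the requesting task driving the grace period
 * forward.  A later core_initcall() switches to full runtime RCU
 * functionality.
 */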
3988void rcu_scheduler_starting(void)
3989{
3990 WARN_ON(num_online_cpus() != 1);
3991 WARN_ON(nr_context_switches() > 0);
3992 rcu_test_sync_prims();
3993 rcu_scheduler_active = RCU_SCHEDULER_INIT;
3994 rcu_test_sync_prims();
3995}
3996
3997
3998
3999
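/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */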
4000static void __init rcu_init_one(struct rcu_state *rsp)
4001{
4002 static const char * const buf[] = RCU_NODE_NAME_INIT;
4003 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4004 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4005 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4006
4007 int levelspread[RCU_NUM_LVLS];
4008 int cpustride = 1;
4009 int i;
4010 int j;
4011 struct rcu_node *rnp;
4012
4013 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));
4014
4015
4016 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4017 panic("rcu_init_one: rcu_num_lvls out of range");
4018
4019
4020
4021 for (i = 1; i < rcu_num_lvls; i++)
4022 rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
4023 rcu_init_levelspread(levelspread, num_rcu_lvl);
4024
4025
4026
4027 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4028 cpustride *= levelspread[i];
4029 rnp = rsp->level[i];
4030 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4031 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4032 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4033 &rcu_node_class[i], buf[i]);
4034 raw_spin_lock_init(&rnp->fqslock);
4035 lockdep_set_class_and_name(&rnp->fqslock,
4036 &rcu_fqs_class[i], fqs[i]);
4037 rnp->gpnum = rsp->gpnum;
4038 rnp->completed = rsp->completed;
4039 rnp->qsmask = 0;
4040 rnp->qsmaskinit = 0;
4041 rnp->grplo = j * cpustride;
4042 rnp->grphi = (j + 1) * cpustride - 1;
4043 if (rnp->grphi >= nr_cpu_ids)
4044 rnp->grphi = nr_cpu_ids - 1;
4045 if (i == 0) {
4046 rnp->grpnum = 0;
4047 rnp->grpmask = 0;
4048 rnp->parent = NULL;
4049 } else {
4050 rnp->grpnum = j % levelspread[i - 1];
4051 rnp->grpmask = 1UL << rnp->grpnum;
4052 rnp->parent = rsp->level[i - 1] +
4053 j / levelspread[i - 1];
4054 }
4055 rnp->level = i;
4056 INIT_LIST_HEAD(&rnp->blkd_tasks);
4057 rcu_init_one_nocb(rnp);
4058 init_waitqueue_head(&rnp->exp_wq[0]);
4059 init_waitqueue_head(&rnp->exp_wq[1]);
4060 init_waitqueue_head(&rnp->exp_wq[2]);
4061 init_waitqueue_head(&rnp->exp_wq[3]);
4062 spin_lock_init(&rnp->exp_lock);
4063 }
4064 }
4065
4066 init_swait_queue_head(&rsp->gp_wq);
4067 init_swait_queue_head(&rsp->expedited_wq);
4068 rnp = rsp->level[rcu_num_lvls - 1];
4069 for_each_possible_cpu(i) {
4070 while (i > rnp->grphi)
4071 rnp++;
4072 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
4073 rcu_boot_init_percpu_data(i, rsp);
4074 }
4075 list_add(&rsp->flavors, &rcu_struct_flavors);
4076}
4077
4078
4079
4080
4081
4082
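/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size the
 * ->node array in the rcu_state structure.
 */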
4083static void __init rcu_init_geometry(void)
4084{
4085 ulong d;
4086 int i;
4087 int rcu_capacity[RCU_NUM_LVLS];
4088
4089
4090
4091
4092
4093
4094
4095
4096 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4097 if (jiffies_till_first_fqs == ULONG_MAX)
4098 jiffies_till_first_fqs = d;
4099 if (jiffies_till_next_fqs == ULONG_MAX)
4100 jiffies_till_next_fqs = d;
4101
4102
4103 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4104 nr_cpu_ids == NR_CPUS)
4105 return;
4106 pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4107 rcu_fanout_leaf, nr_cpu_ids);
4108
4109
4110
4111
4112
4113
4114
4115 if (rcu_fanout_leaf < 2 ||
4116 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4117 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4118 WARN_ON(1);
4119 return;
4120 }
4121
4122
4123
4124
4125
4126 rcu_capacity[0] = rcu_fanout_leaf;
4127 for (i = 1; i < RCU_NUM_LVLS; i++)
4128 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4129
4130
4131
4132
4133
4134 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4135 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4136 WARN_ON(1);
4137 return;
4138 }
4139
4140
4141 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4142 }
4143 rcu_num_lvls = i + 1;
4144
4145
4146 for (i = 0; i < rcu_num_lvls; i++) {
4147 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4148 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4149 }
4150
4151
4152 rcu_num_nodes = 0;
4153 for (i = 0; i < rcu_num_lvls; i++)
4154 rcu_num_nodes += num_rcu_lvl[i];
4155}
4156
4157
4158
4159
4160
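/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure referenced by rsp.
 */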
4161static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
4162{
4163 int level = 0;
4164 struct rcu_node *rnp;
4165
4166 pr_info("rcu_node tree layout dump\n");
4167 pr_info(" ");
4168 rcu_for_each_node_breadth_first(rsp, rnp) {
4169 if (rnp->level != level) {
4170 pr_cont("\n");
4171 pr_info(" ");
4172 level = rnp->level;
4173 }
4174 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4175 }
4176 pr_cont("\n");
4177}
4178
4179void __init rcu_init(void)
4180{
4181 int cpu;
4182
4183 rcu_early_boot_tests();
4184
4185 rcu_bootup_announce();
4186 rcu_init_geometry();
4187 rcu_init_one(&rcu_bh_state);
4188 rcu_init_one(&rcu_sched_state);
4189 if (dump_tree)
4190 rcu_dump_rcu_node_tree(&rcu_sched_state);
4191 __rcu_init_preempt();
4192 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
4193
4194
4195
4196
4197
4198
4199 pm_notifier(rcu_pm_notify, 0);
4200 for_each_online_cpu(cpu) {
4201 rcutree_prepare_cpu(cpu);
4202 rcu_cpu_starting(cpu);
4203 if (IS_ENABLED(CONFIG_TREE_SRCU))
4204 srcu_online_cpu(cpu);
4205 }
4206}
4207
4208#include "tree_exp.h"
4209#include "tree_plugin.h"
4210