/*
 * Read-Copy Update mechanism for mutual exclusion, tree-based
 * (hierarchical) variant.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, and this will allow
 * the tracing userspace tools to be able to decipher the string
 * address to the matching string.
 */
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
# define RCU_STATE_NAME(sname) sname##_varname
#else
# define DEFINE_RCU_TPS(sname)
# define RCU_STATE_NAME(sname) __stringify(sname)
#endif

#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
        .level = { &sname##_state.node[0] }, \
        .rda = &sname##_data, \
        .call = cr, \
        .gp_state = RCU_GP_IDLE, \
        .gpnum = 0UL - 300UL, \
        .completed = 0UL - 300UL, \
        .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
        .name = RCU_STATE_NAME(sname), \
        .abbr = sabbr, \
        .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
        .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
}

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);

static struct rcu_state *const rcu_state_p;
LIST_HEAD(rcu_struct_flavors);

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

/*
 * The rcu_scheduler_active variable starts out as RCU_SCHEDULER_INACTIVE
 * during early boot, when RCU may treat the system as a single task and
 * report quiescent states immediately.  It then transitions to
 * RCU_SCHEDULER_INIT once the scheduler can run multiple tasks, and
 * finally to RCU_SCHEDULER_RUNNING once RCU's kthreads have been spawned,
 * after which time normal grace-period processing is in effect.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * once the scheduler is fully operational, at which point it is safe
 * for RCU to spawn its kthreads.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_state *rsp,
                               struct rcu_data *rdp, bool wake);
static void sync_sched_exp_online_cleanup(int cpu);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */
static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3   /* Number of grace periods between delays. */

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
        return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course the results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
        return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
}
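
/*
 * Note a quiescent state for RCU-sched.  Because we do not need to know
 * how many quiescent states passed, just if there was at least one since
 * the start of the grace period, this just sets a per-CPU flag.
 * The caller must have disabled preemption.
 */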
void rcu_sched_qs(void)
{
        RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
                return;
        trace_rcu_grace_period(TPS("rcu_sched"),
                               __this_cpu_read(rcu_sched_data.gpnum),
                               TPS("cpuqs"));
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
                return;
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
        rcu_report_exp_rdp(&rcu_sched_state,
                           this_cpu_ptr(&rcu_sched_data), true);
}

void rcu_bh_qs(void)
{
        RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
        if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
                trace_rcu_grace_period(TPS("rcu_bh"),
                                       __this_cpu_read(rcu_bh_data.gpnum),
                                       TPS("cpuqs"));
                __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
        }
}

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
#ifndef rcu_eqs_special_exit
#define rcu_eqs_special_exit() do { } while (0)
#endif

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
        .dynticks_nesting = 1,
        .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
        .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};

/*
 * Record entry into an extended quiescent state.  This is only to be
 * called when not already in an extended quiescent state.
 */
static void rcu_dynticks_eqs_enter(void)
{
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
        int seq;

        /*
         * CPUs seeing atomic_add_return() must see prior RCU read-side
         * critical sections, and we also must force ordering with the
         * next idle sojourn.
         */
        seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
        /* Better be in an extended quiescent state! */
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     (seq & RCU_DYNTICK_CTRL_CTR));
        /* Better not have special action (TLB flush) pending! */
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state.  This is only to be
 * called from an extended quiescent state.
 */
static void rcu_dynticks_eqs_exit(void)
{
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
        int seq;

        /*
         * CPUs seeing atomic_add_return() must see prior idle sojourns,
         * and we also must force ordering with the next RCU read-side
         * critical section.
         */
        seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     !(seq & RCU_DYNTICK_CTRL_CTR));
        if (seq & RCU_DYNTICK_CTRL_MASK) {
                atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
                smp_mb__after_atomic(); /* _exit after clearing mask. */
                /* Prefer duplicate flushes to losing a flush. */
                rcu_eqs_special_exit();
        }
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

        if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
                return;
        atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
bool rcu_dynticks_curr_cpu_in_eqs(void)
{
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

        return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
        int snap = atomic_add_return(0, &rdtp->dynticks);

        return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
        return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Return true if the CPU corresponding to the specified rcu_dynticks
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
{
        return snap != rcu_dynticks_snap(rdtp);
}

/*
 * Do a double-increment of the ->dynticks counter to emulate a
 * momentary idle sojourn.
 */
static void rcu_dynticks_momentary_idle(void)
{
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
        int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
                                        &rdtp->dynticks);

        /* It is illegal to call this from idle state. */
        WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
}

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state.  Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
        int old;
        int new;
        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

        do {
                old = atomic_read(&rdtp->dynticks);
                if (old & RCU_DYNTICK_CTRL_CTR)
                        return false;
                new = old | RCU_DYNTICK_CTRL_MASK;
        } while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
        return true;
}

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_momentary_dyntick_idle(void)
{
        raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
        rcu_dynticks_momentary_idle();
}

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
        barrier(); /* Avoid RCU read-side critical sections leaking down. */
        trace_rcu_utilization(TPS("Start context switch"));
        rcu_sched_qs();
        rcu_preempt_note_context_switch(preempt);
        /* Load rcu_urgent_qs before other flags. */
        if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
                goto out;
        this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
        if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
                rcu_momentary_dyntick_idle();
        this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
        if (!preempt)
                rcu_note_voluntary_context_switch_lite(current);
out:
        trace_rcu_utilization(TPS("End context switch"));
        barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs (but only for those
 * RCU flavors in desperate need of a quiescent state, which will normally
 * be none of them).  Either way, do a lightweight quiescent state for
 * all RCU flavors.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 */
void rcu_all_qs(void)
{
        unsigned long flags;

        if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
                return;
        preempt_disable();
        /* Load rcu_urgent_qs before other flags. */
        if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
                preempt_enable();
                return;
        }
        this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
        barrier(); /* Avoid RCU read-side critical sections leaking down. */
        if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
                local_irq_save(flags);
                rcu_momentary_dyntick_idle();
                local_irq_restore(flags);
        }
        if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
                rcu_sched_qs();
        this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
        barrier(); /* Avoid RCU read-side critical sections leaking up. */
        preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = HZ / 10;
module_param(jiffies_till_sched_qs, ulong, 0444);

static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
                                  struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);

/*
 * Return the number of RCU batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started(void)
{
        return rcu_state_p->gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started);

/*
 * Return the number of RCU-sched batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_sched(void)
{
        return rcu_sched_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_sched);

/*
 * Return the number of RCU BH batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_bh(void)
{
        return rcu_bh_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_bh);

/*
 * Return the number of RCU batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed(void)
{
        return rcu_state_p->completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-sched batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_sched(void)
{
        return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_bh(void)
{
        return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
        return rcu_state_p->expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the number of RCU-sched expedited batches completed thus far
 * for debug & stats.  Similar to rcu_exp_batches_completed().
 */
unsigned long rcu_exp_batches_completed_sched(void)
{
        return rcu_sched_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);

/*
 * Force a quiescent state.
 */
void rcu_force_quiescent_state(void)
{
        force_quiescent_state(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
        force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
        force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
        struct rcu_state *rsp;

        for_each_rcu_flavor(rsp) {
                pr_info("%s: wait state: %d ->state: %#lx\n",
                        rsp->name, rsp->gp_state, rsp->gp_kthread->state);
        }
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
        rcutorture_testseq++;
        rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                            unsigned long *gpnum, unsigned long *completed)
{
        struct rcu_state *rsp = NULL;

        switch (test_type) {
        case RCU_FLAVOR:
                rsp = rcu_state_p;
                break;
        case RCU_BH_FLAVOR:
                rsp = &rcu_bh_state;
                break;
        case RCU_SCHED_FLAVOR:
                rsp = &rcu_sched_state;
                break;
        default:
                break;
        }
        if (rsp == NULL)
                return;
        *flags = READ_ONCE(rsp->gp_flags);
        *gpnum = READ_ONCE(rsp->gpnum);
        *completed = READ_ONCE(rsp->completed);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
        rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
        return &rsp->node[0];
}

/*
 * Is there any need for future grace periods?
 * Interrupts must be disabled.  If the caller does not hold the root
 * rnp_node structure's ->lock, the results are advisory only.
 */
static int rcu_future_needs_gp(struct rcu_state *rsp)
{
        struct rcu_node *rnp = rcu_get_root(rsp);
        int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
        int *fp = &rnp->need_future_gp[idx];

        lockdep_assert_irqs_disabled();
        return READ_ONCE(*fp);
}

/*
 * Does the current CPU require a not-yet-started grace period?
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
static bool
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
        lockdep_assert_irqs_disabled();
        if (rcu_gp_in_progress(rsp))
                return false;  /* No, a grace period is already in progress. */
        if (rcu_future_needs_gp(rsp))
                return true;  /* Yes, a no-CBs CPU needs one. */
        if (!rcu_segcblist_is_enabled(&rdp->cblist))
                return false;  /* No, this is a no-CBs (or offline) CPU. */
        if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
                return true;  /* Yes, CPU has newly registered callbacks. */
        if (rcu_segcblist_future_gp_needed(&rdp->cblist,
                                           READ_ONCE(rsp->completed)))
                return true;  /* Yes, CBs for future grace period. */
        return false; /* No grace period needed. */
}
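
/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */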
static void rcu_eqs_enter(bool user)
{
        struct rcu_state *rsp;
        struct rcu_data *rdp;
        struct rcu_dynticks *rdtp;

        rdtp = this_cpu_ptr(&rcu_dynticks);
        WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     rdtp->dynticks_nesting == 0);
        if (rdtp->dynticks_nesting != 1) {
                rdtp->dynticks_nesting--;
                return;
        }

        lockdep_assert_irqs_disabled();
        trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
        for_each_rcu_flavor(rsp) {
                rdp = this_cpu_ptr(rsp->rda);
                do_nocb_deferred_wakeup(rdp);
        }
        rcu_prepare_for_idle();
        WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
        rcu_dynticks_eqs_enter();
        rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
        lockdep_assert_irqs_disabled();
        rcu_eqs_enter(false);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit().  This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_user_enter(void)
{
        lockdep_assert_irqs_disabled();
        rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */
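
/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */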
void rcu_nmi_exit(void)
{
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

        /*
         * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
         * (We are exiting an NMI handler, so RCU better be paying attention
         * to us!)
         */
        WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
        WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

        /*
         * If the nesting level is not 1, the CPU wasn't RCU-idle, so
         * leave it in non-RCU-idle state.
         */
        if (rdtp->dynticks_nmi_nesting != 1) {
                trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
                WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
                           rdtp->dynticks_nmi_nesting - 2);
                return;
        }

        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
        trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
        WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
        rcu_dynticks_eqs_enter();
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit(void)
{
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

        lockdep_assert_irqs_disabled();
        if (rdtp->dynticks_nmi_nesting == 1)
                rcu_prepare_for_idle();
        rcu_nmi_exit();
        if (rdtp->dynticks_nmi_nesting == 0)
                rcu_dynticks_task_enter();
}

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
        unsigned long flags;

        local_irq_save(flags);
        rcu_irq_exit();
        local_irq_restore(flags);
}
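
/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */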
static void rcu_eqs_exit(bool user)
{
        struct rcu_dynticks *rdtp;
        long oldval;

        lockdep_assert_irqs_disabled();
        rdtp = this_cpu_ptr(&rcu_dynticks);
        oldval = rdtp->dynticks_nesting;
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
        if (oldval) {
                rdtp->dynticks_nesting++;
                return;
        }
        rcu_dynticks_task_exit();
        rcu_dynticks_eqs_exit();
        rcu_cleanup_after_idle();
        trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks);
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
        WRITE_ONCE(rdtp->dynticks_nesting, 1);
        WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        rcu_eqs_exit(false);
        local_irq_restore(flags);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run a RCU read side critical section anytime.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_user_exit(void)
{
        rcu_eqs_exit(1);
}
#endif /* CONFIG_NO_HZ_FULL */
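
/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */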
void rcu_nmi_enter(void)
{
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
        long incby = 2;

        /* Complain about underflow. */
        WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);

        /*
         * If idle from RCU viewpoint, atomically increment ->dynticks
         * to mark non-idle and increment ->dynticks_nmi_nesting by one.
         * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
         * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
         * to be in the outermost NMI handler that interrupted an RCU-idle
         * period (observation due to Andy Lutomirski).
         */
        if (rcu_dynticks_curr_cpu_in_eqs()) {
                rcu_dynticks_eqs_exit();
                incby = 1;
        }
        trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
                          rdtp->dynticks_nmi_nesting,
                          rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
        WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
                   rdtp->dynticks_nmi_nesting + incby);
        barrier();
}

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter(void)
{
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

        lockdep_assert_irqs_disabled();
        if (rdtp->dynticks_nmi_nesting == 0)
                rcu_dynticks_task_exit();
        rcu_nmi_enter();
        if (rdtp->dynticks_nmi_nesting == 1)
                rcu_cleanup_after_idle();
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
        unsigned long flags;

        local_irq_save(flags);
        rcu_irq_enter();
        local_irq_restore(flags);
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections.  In other words,
 * if the current CPU is in its idle loop and is neither in an interrupt
 * nor an NMI handler, return true.
 */
bool notrace rcu_is_watching(void)
{
        bool ret;

        preempt_disable_notrace();
        ret = !rcu_dynticks_curr_cpu_in_eqs();
        preempt_enable_notrace();
        return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU, but the wrong CPU is harmless:
 * the next scheduling-clock interrupt on the intended CPU will see the
 * flag soon enough.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
        int cpu;

        barrier();
        cpu = task_cpu(t);
        if (!task_curr(t))
                return; /* This task is not running on that CPU. */
        smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
}
1098
1099#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122bool rcu_lockdep_current_cpu_online(void)
1123{
1124 struct rcu_data *rdp;
1125 struct rcu_node *rnp;
1126 bool ret;
1127
1128 if (in_nmi())
1129 return true;
1130 preempt_disable();
1131 rdp = this_cpu_ptr(&rcu_sched_data);
1132 rnp = rdp->mynode;
1133 ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
1134 !rcu_scheduler_fully_active;
1135 preempt_enable();
1136 return ret;
1137}
1138EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1139
1140#endif

/**
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 *
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
        return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
               __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
}

/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
 * But that responsibility only arises if we are tracking this CPU.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
        raw_lockdep_assert_held_rcu_node(rnp);
        if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
                WRITE_ONCE(rdp->gpwrap, true);
        if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
                rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit it with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
        rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
        if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
                rcu_gpnum_ovf(rdp->mynode, rdp);
                return 1;
        }
        return 0;
}

/*
 * Handler for the irq_work request posted when a grace period has
 * gone on for too long, but not yet long enough for an RCU CPU
 * stall warning.  Set state appropriately, but just complain if
 * there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        rdp = container_of(iwp, struct rcu_data, rcu_iw);
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);
        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
                rdp->rcu_iw_gpnum = rnp->gpnum;
                rdp->rcu_iw_pending = false;
        }
        raw_spin_unlock_rcu_node(rnp);
}
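
/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */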
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
        unsigned long jtsq;
        bool *rnhqp;
        bool *ruqp;
        struct rcu_node *rnp = rdp->mynode;

        /*
         * If the CPU passed through or entered a dynticks idle phase with
         * no active irq/NMI handlers, then we can safely pretend that the
         * CPU already acknowledged the request to pass through a quiescent
         * state.  Either way, that CPU cannot possibly be in an RCU
         * read-side critical section that started before the beginning
         * of the current RCU grace period.
         */
        if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
                rdp->dynticks_fqs++;
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
        }

        /*
         * Has this CPU encountered a cond_resched_rcu_qs() since the
         * beginning of the grace period?  For this to be the case,
         * the CPU has to have noticed the current grace period.  This
         * might not be the case for nohz_full CPUs looping in the kernel.
         */
        jtsq = jiffies_till_sched_qs;
        ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
        if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
            READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
            READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
        } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
                /* Load rcu_qs_ctr before store to rcu_urgent_qs. */
                smp_store_release(ruqp, true);
        }

        /* Check for the CPU being offline. */
        if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
                rdp->offline_fqs++;
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
        }

        /*
         * A CPU running for an extended time within the kernel can
         * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
         * even context-switching back and forth between a pair of
         * in-kernel CPU-bound tasks cannot advance grace periods.
         * So if the grace period is old enough, make the CPU pay attention.
         * Note that the unsynchronized assignments to the per-CPU
         * rcu_need_heavy_qs variable are safe.  Yes, setting of bits can
         * be lost, but they will be set again on the next
         * force-quiescent-state pass.  So lost bit sets do not result
         * in incorrect behavior, merely in a grace period lasting
         * a few jiffies longer than it might otherwise.
         */
        rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
        if (!READ_ONCE(*rnhqp) &&
            (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
             time_after(jiffies, rdp->rsp->jiffies_resched))) {
                WRITE_ONCE(*rnhqp, true);
                /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
                smp_store_release(ruqp, true);
                rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
        }

        /*
         * If more than halfway to RCU CPU stall-warning time, do a
         * resched_cpu() to try to loosen things up a bit.  Also check to
         * see if the CPU is getting hammered with interrupts, but only
         * once per grace period, just to keep the IPIs down to a dull roar.
         */
        if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
                resched_cpu(rdp->cpu);
                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
                    !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
                    (rnp->ffmask & rdp->grpmask)) {
                        init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
                        rdp->rcu_iw_pending = true;
                        rdp->rcu_iw_gpnum = rnp->gpnum;
                        irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
                }
        }

        return 0;
}

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
        unsigned long j = jiffies;
        unsigned long j1;

        rsp->gp_start = j;
        smp_wmb(); /* Record start time before stall time. */
        j1 = rcu_jiffies_till_stall_check();
        WRITE_ONCE(rsp->jiffies_stall, j + j1);
        rsp->jiffies_resched = j + j1 / 2;
        rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
}

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
        if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
                return "???";
        return gp_state_names[gs];
}

/*
 * Complain about starvation of the grace-period kthread.
 */
static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
{
        unsigned long gpa;
        unsigned long j;

        j = jiffies;
        gpa = READ_ONCE(rsp->gp_activity);
        if (j - gpa > 2 * HZ) {
                pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
                       rsp->name, j - gpa,
                       rsp->gpnum, rsp->completed,
                       rsp->gp_flags,
                       gp_state_getname(rsp->gp_state), rsp->gp_state,
                       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
                       rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
                if (rsp->gp_kthread) {
                        pr_err("RCU grace-period kthread stack dump:\n");
                        sched_show_task(rsp->gp_kthread);
                        wake_up_process(rsp->gp_kthread);
                }
        }
}

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that lack NMI-triggered stack dumps.
 */
static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
{
        int cpu;
        unsigned long flags;
        struct rcu_node *rnp;

        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                for_each_leaf_node_possible_cpu(rnp, cpu)
                        if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
                                if (!trigger_single_cpu_backtrace(cpu))
                                        dump_cpu_task(cpu);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
{
        unsigned long j;

        if (!rcu_kick_kthreads)
                return;
        j = READ_ONCE(rsp->jiffies_kick_kthreads);
        if (time_after(jiffies, j) && rsp->gp_kthread &&
            (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
                WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
                rcu_ftrace_dump(DUMP_ALL);
                wake_up_process(rsp->gp_kthread);
                WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
        }
}

static inline void panic_on_rcu_stall(void)
{
        if (sysctl_panic_on_rcu_stall)
                panic("RCU Stall\n");
}

static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
{
        int cpu;
        long delta;
        unsigned long flags;
        unsigned long gpa;
        unsigned long j;
        int ndetected = 0;
        struct rcu_node *rnp = rcu_get_root(rsp);
        long totqlen = 0;

        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads(rsp);
        if (rcu_cpu_stall_suppress)
                return;

        /* Only let one CPU complain about others per time interval. */
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        delta = jiffies - READ_ONCE(rsp->jiffies_stall);
        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WRITE_ONCE(rsp->jiffies_stall,
                   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        /*
         * OK, time to rat on our buddy...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
        pr_err("INFO: %s detected stalls on CPUs/tasks:",
               rsp->name);
        print_cpu_stall_info_begin();
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                ndetected += rcu_print_task_stall(rnp);
                if (rnp->qsmask != 0) {
                        for_each_leaf_node_possible_cpu(rnp, cpu)
                                if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
                                        print_cpu_stall_info(rsp, cpu);
                                        ndetected++;
                                }
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }

        print_cpu_stall_info_end();
        for_each_possible_cpu(cpu)
                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
                                                            cpu)->cblist);
        pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start),
               (long)rsp->gpnum, (long)rsp->completed, totqlen);
        if (ndetected) {
                rcu_dump_cpu_stacks(rsp);

                /* Complain about tasks blocking the grace period. */
                rcu_print_detail_task_stall(rsp);
        } else {
                if (READ_ONCE(rsp->gpnum) != gpnum ||
                    READ_ONCE(rsp->completed) == gpnum) {
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
                        gpa = READ_ONCE(rsp->gp_activity);
                        pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                               rsp->name, j - gpa, j, gpa,
                               jiffies_till_next_fqs,
                               rcu_get_root(rsp)->qsmask);

                        sched_show_task(current);
                }
        }

        rcu_check_gp_kthread_starvation(rsp);

        panic_on_rcu_stall();

        force_quiescent_state(rsp);  /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
        struct rcu_node *rnp = rcu_get_root(rsp);
        long totqlen = 0;

        /* Kick and suppress, if so configured. */
        rcu_stall_kick_kthreads(rsp);
        if (rcu_cpu_stall_suppress)
                return;

        /*
         * OK, time to rat on ourselves...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
        pr_err("INFO: %s self-detected stall on CPU", rsp->name);
        print_cpu_stall_info_begin();
        raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
        print_cpu_stall_info(rsp, smp_processor_id());
        raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
        print_cpu_stall_info_end();
        for_each_possible_cpu(cpu)
                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
                                                            cpu)->cblist);
        pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
                jiffies - rsp->gp_start,
                (long)rsp->gpnum, (long)rsp->completed, totqlen);

        rcu_check_gp_kthread_starvation(rsp);

        rcu_dump_cpu_stacks(rsp);

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
                WRITE_ONCE(rsp->jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        panic_on_rcu_stall();

        /*
         * Attempt to revive the RCU machinery by forcing a context switch.
         *
         * A context switch would normally allow the RCU state machine to make
         * progress and it could be we're stuck in kernel space without context
         * switches for an entirely unreasonable amount of time.
         */
        resched_cpu(smp_processor_id());
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long completed;
        unsigned long gpnum;
        unsigned long gps;
        unsigned long j;
        unsigned long js;
        struct rcu_node *rnp;

        if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
            !rcu_gp_in_progress(rsp))
                return;
        rcu_stall_kick_kthreads(rsp);
        j = jiffies;

        /*
         * Lots of memory barriers to reject false positives.
         *
         * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
         * then rsp->gp_start, and finally rsp->completed.  These values
         * are updated in the opposite order with memory barriers (or
         * equivalent) during grace-period initialization and cleanup.
         * Now, a false positive can occur if we get a new value of
         * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
         * the memory barriers, the only way that this can happen is if one
         * grace period ends and another starts between these two fetches.
         * Detect this by comparing rsp->completed with the previous fetch
         * from rsp->gpnum.
         *
         * Given this check, comparisons of jiffies, rsp->jiffies_stall,
         * and rsp->gp_start suffice to forestall false positives.
         */
        gpnum = READ_ONCE(rsp->gpnum);
        smp_rmb(); /* Pick up ->gpnum first... */
        js = READ_ONCE(rsp->jiffies_stall);
        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
        gps = READ_ONCE(rsp->gp_start);
        smp_rmb(); /* ...and finally ->gp_start before ->completed. */
        completed = READ_ONCE(rsp->completed);
        if (ULONG_CMP_GE(completed, gpnum) ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
        if (rcu_gp_in_progress(rsp) &&
            (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {

                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rsp);

        } else if (rcu_gp_in_progress(rsp) &&
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {

                /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(rsp, gpnum);
        }
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
        struct rcu_state *rsp;

        for_each_rcu_flavor(rsp)
                WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
}

/*
 * Determine the value that ->completed will have at the end of the
 * next subsequent grace period.  This is used to tag callbacks so that
 * a CPU can invoke callbacks in a timely fashion even if that CPU has
 * been delayed from noticing that its grace period has completed for
 * some time.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
                                       struct rcu_node *rnp)
{
        raw_lockdep_assert_held_rcu_node(rnp);

        /*
         * If RCU is idle, we just wait for the next grace period.
         * But we can only be sure that RCU is idle if we are looking
         * at the root rcu_node structure -- otherwise, a new grace
         * period might have started, but just not yet gotten around
         * to initializing the current non-root rcu_node structure.
         */
        if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
                return rnp->completed + 1;

        /*
         * Otherwise, wait for a possible partial grace period and
         * then the subsequent full grace period.
         */
        return rnp->completed + 2;
}

/*
 * Trace-event wrapper function for trace_rcu_future_grace_period.
 */
static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
                                unsigned long c, const char *s)
{
        trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
                                      rnp->completed, c, rnp->level,
                                      rnp->grplo, rnp->grphi, s);
}
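
/*
 * Start some future grace period, as needed to handle newly arrived
 * callbacks.  The required future grace periods are recorded in each
 * rcu_node structure's ->need_future_gp field.  Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock.
 */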
static bool __maybe_unused
rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
                    unsigned long *c_out)
{
        unsigned long c;
        bool ret = false;
        struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);

        raw_lockdep_assert_held_rcu_node(rnp);

        /*
         * Pick up the grace-period number for new callbacks.  If this
         * grace period is already marked as needed, return to the caller.
         */
        c = rcu_cbs_completed(rdp->rsp, rnp);
        trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
        if (rnp->need_future_gp[c & 0x1]) {
                trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
                goto out;
        }

        /*
         * If either this rcu_node structure or the root rcu_node structure
         * believe that a grace period is in progress, then we must wait
         * for the one following, which is in "c".  Because our request
         * will be noticed at the end of the current grace period, we don't
         * need to explicitly start one.  We only do the lockless check
         * of rnp_root's fields if the current rcu_node structure thinks
         * there is no grace period in flight, and because we hold rnp->lock,
         * the only possible change is when rnp_root's two fields are
         * equal, in which case rnp_root->gpnum might be concurrently
         * incremented.  But that is OK, as it will just result in our
         * doing some extra useless work.
         */
        if (rnp->gpnum != rnp->completed ||
            READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
                rnp->need_future_gp[c & 0x1]++;
                trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
                goto out;
        }

        /*
         * There might be no grace period in progress.  If we don't already
         * hold it, acquire the root rcu_node structure's lock in order to
         * start one (if needed).
         */
        if (rnp != rnp_root)
                raw_spin_lock_rcu_node(rnp_root);

        /*
         * Get a new grace-period number.  If there really is no grace
         * period in progress, it will be smaller than the one we obtained
         * earlier.  Adjust callbacks as needed.
         */
        c = rcu_cbs_completed(rdp->rsp, rnp_root);
        if (!rcu_is_nocb_cpu(rdp->cpu))
                (void)rcu_segcblist_accelerate(&rdp->cblist, c);

        /*
         * If the needed grace period is already recorded, trace
         * and leave.
         */
        if (rnp_root->need_future_gp[c & 0x1]) {
                trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
                goto unlock_out;
        }

        /* Record the need for the future grace period. */
        rnp_root->need_future_gp[c & 0x1]++;

        /* If a grace period is not already in progress, start one. */
        if (rnp_root->gpnum != rnp_root->completed) {
                trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
        } else {
                trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
                ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
        }
unlock_out:
        if (rnp != rnp_root)
                raw_spin_unlock_rcu_node(rnp_root);
out:
        if (c_out != NULL)
                *c_out = c;
        return ret;
}

/*
 * Clean up any old requests for the just-ended grace period.  Also return
 * whether any additional grace periods have been requested.
 */
static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
        int c = rnp->completed;
        int needmore;
        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);

        rnp->need_future_gp[c & 0x1] = 0;
        needmore = rnp->need_future_gp[(c + 1) & 0x1];
        trace_rcu_future_gp(rnp, rdp, c,
                            needmore ? TPS("CleanupMore") : TPS("Cleanup"));
        return needmore;
}

/*
 * Awaken the grace-period kthread for the specified flavor of RCU.
 * Don't do a self-awaken, and don't bother awakening when there is
 * nothing for the grace-period kthread to do (as in several CPUs
 * raced to awaken, and we lost), and finally don't try to awaken
 * a kthread that has not yet been created.
 */
static void rcu_gp_kthread_wake(struct rcu_state *rsp)
{
        if (current == rsp->gp_kthread ||
            !READ_ONCE(rsp->gp_flags) ||
            !rsp->gp_kthread)
                return;
        swake_up(&rsp->gp_wq);
}
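
/*
 * If there is room, assign a ->completed number to any callbacks on
 * this CPU that have not already been assigned.  Also accelerate any
 * callbacks that were previously assigned a ->completed number that has
 * since proven to be too conservative, which can happen if callbacks get
 * assigned a ->completed number while RCU is idle, but with reference to
 * a non-root rcu_node structure.  This function is idempotent, so it does
 * not hurt to call it repeatedly.  Returns a flag saying that we should
 * awaken the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */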
static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
                               struct rcu_data *rdp)
{
        bool ret = false;

        raw_lockdep_assert_held_rcu_node(rnp);

        /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
        if (!rcu_segcblist_pend_cbs(&rdp->cblist))
                return false;

        /*
         * Callbacks are often registered with incomplete grace-period
         * information.  Something about the fact that getting exact
         * information requires acquiring a global lock...  RCU therefore
         * makes a conservative estimate of the grace period number at which
         * a given callback will become ready to invoke.  The following
         * code checks this estimate and improves it when possible, thus
         * accelerating callback invocation to an earlier grace-period
         * number.
         */
        if (rcu_segcblist_accelerate(&rdp->cblist, rcu_cbs_completed(rsp, rnp)))
                ret = rcu_start_future_gp(rnp, rdp, NULL);

        /* Trace depending on how much we were able to accelerate. */
        if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
        else
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
        return ret;
}

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist.  This function is idempotent, so it does not hurt to
 * invoke it repeatedly.  As long as it is not invoked -too- often...
 * Returns true if the RCU grace-period kthread needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
                            struct rcu_data *rdp)
{
        raw_lockdep_assert_held_rcu_node(rnp);

        /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
        if (!rcu_segcblist_pend_cbs(&rdp->cblist))
                return false;

        /*
         * Find all callbacks whose ->completed numbers indicate that they
         * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
         */
        rcu_segcblist_advance(&rdp->cblist, rnp->completed);

        /* Classify any remaining callbacks. */
        return rcu_accelerate_cbs(rsp, rnp, rdp);
}

/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                              struct rcu_data *rdp)
{
        bool ret;
        bool need_gp;

        raw_lockdep_assert_held_rcu_node(rnp);

        /* Handle the ends of any preceding grace periods first. */
        if (rdp->completed == rnp->completed &&
            !unlikely(READ_ONCE(rdp->gpwrap))) {

                /* No grace period end, so just accelerate recent callbacks. */
                ret = rcu_accelerate_cbs(rsp, rnp, rdp);

        } else {

                /* Advance callbacks. */
                ret = rcu_advance_cbs(rsp, rnp, rdp);

                /* Remember that we saw this grace-period completion. */
                rdp->completed = rnp->completed;
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
        }

        if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
                /*
                 * If the current grace period is waiting for this CPU,
                 * set up to detect a quiescent state, otherwise don't
                 * go looking for one.
                 */
                rdp->gpnum = rnp->gpnum;
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
                need_gp = !!(rnp->qsmask & rdp->grpmask);
                rdp->cpu_no_qs.b.norm = need_gp;
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
                rdp->core_needs_qs = need_gp;
                zero_cpu_stall_ticks(rdp);
                WRITE_ONCE(rdp->gpwrap, false);
                rcu_gpnum_ovf(rnp, rdp);
        }
        return ret;
}

static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long flags;
        bool needwake;
        struct rcu_node *rnp;

        local_irq_save(flags);
        rnp = rdp->mynode;
        if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
             rdp->completed == READ_ONCE(rnp->completed) &&
             !unlikely(READ_ONCE(rdp->gpwrap))) ||
            !raw_spin_trylock_rcu_node(rnp)) {
                local_irq_restore(flags);
                return;
        }
        needwake = __note_gp_changes(rsp, rnp, rdp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        if (needwake)
                rcu_gp_kthread_wake(rsp);
}

static void rcu_gp_slow(struct rcu_state *rsp, int delay)
{
        if (delay > 0 &&
            !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
                schedule_timeout_uninterruptible(delay);
}
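
/*
 * Initialize a new grace period.  Return false if no grace period required.
 */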
static bool rcu_gp_init(struct rcu_state *rsp)
{
        unsigned long oldmask;
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);

        WRITE_ONCE(rsp->gp_activity, jiffies);
        raw_spin_lock_irq_rcu_node(rnp);
        if (!READ_ONCE(rsp->gp_flags)) {
                /* Spurious wakeup, tell caller to go back to sleep.  */
                raw_spin_unlock_irq_rcu_node(rnp);
                return false;
        }
        WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */

        if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
                /*
                 * Grace period already in progress, don't start another.
                 * Not supposed to be able to happen.
                 */
                raw_spin_unlock_irq_rcu_node(rnp);
                return false;
        }

        /* Advance to a new grace period and initialize state. */
        record_gp_stall_check_time(rsp);
        /* Record GP times before starting GP, hence smp_store_release(). */
        smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
        trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
        raw_spin_unlock_irq_rcu_node(rnp);

        /*
         * Apply per-leaf buffered online and offline operations to the
         * rcu_node tree.  Note that this new grace period need not wait
         * for subsequent online CPUs, and that quiescent-state forcing
         * will handle subsequent offline CPUs.
         */
        rcu_for_each_leaf_node(rsp, rnp) {
                rcu_gp_slow(rsp, gp_preinit_delay);
                raw_spin_lock_irq_rcu_node(rnp);
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                    !rnp->wait_blkd_tasks) {
                        /* Nothing to do on this leaf rcu_node structure. */
                        raw_spin_unlock_irq_rcu_node(rnp);
                        continue;
                }

                /* Record old state, apply changes to ->qsmaskinit field. */
                oldmask = rnp->qsmaskinit;
                rnp->qsmaskinit = rnp->qsmaskinitnext;

                /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
                if (!oldmask != !rnp->qsmaskinit) {
                        if (!oldmask) /* First online CPU for this rcu_node. */
                                rcu_init_new_rnp(rnp);
                        else if (rcu_preempt_has_tasks(rnp)) /* Blocked tasks. */
                                rnp->wait_blkd_tasks = true;
                        else /* Last offline CPU and can propagate. */
                                rcu_cleanup_dead_rnp(rnp);
                }

                /*
                 * If all waited-on tasks from prior grace period are
                 * done, and if all this rcu_node structure's CPUs are
                 * still offline, propagate up the rcu_node tree and
                 * clear ->wait_blkd_tasks.  Otherwise, if one of this
                 * rcu_node structure's CPUs has since come back online,
                 * simply clear ->wait_blkd_tasks.
                 */
                if (rnp->wait_blkd_tasks &&
                    (!rcu_preempt_has_tasks(rnp) ||
                     rnp->qsmaskinit)) {
                        rnp->wait_blkd_tasks = false;
                        rcu_cleanup_dead_rnp(rnp);
                }

                raw_spin_unlock_irq_rcu_node(rnp);
        }

        /*
         * Set the quiescent-state-needed bits in all the rcu_node
         * structures for all currently online CPUs in breadth-first order,
         * starting from the root rcu_node structure, relying on the layout
         * of the tree within the rsp->node[] array.  Note that other CPUs
         * will access only the leaves of the hierarchy, thus seeing that no
         * grace period is in progress, at least until the corresponding
         * leaf node has been initialized.
         *
         * The grace period cannot complete until the initialization
         * process finishes, because this kthread handles both.
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
                rcu_gp_slow(rsp, gp_init_delay);
                raw_spin_lock_irq_rcu_node(rnp);
                rdp = this_cpu_ptr(rsp->rda);
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
                WRITE_ONCE(rnp->gpnum, rsp->gpnum);
                if (WARN_ON_ONCE(rnp->completed != rsp->completed))
                        WRITE_ONCE(rnp->completed, rsp->completed);
                if (rnp == rdp->mynode)
                        (void)__note_gp_changes(rsp, rnp, rdp);
                rcu_preempt_boost_start_gp(rnp);
                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
                raw_spin_unlock_irq_rcu_node(rnp);
                cond_resched_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
        }

        return true;
}

/*
 * Helper function for swait_event_idle() wakeup at force-quiescent-state
 * time.
 */
static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
{
        struct rcu_node *rnp = rcu_get_root(rsp);

        /* Someone like call_rcu() requested a force-quiescent-state scan. */
        *gfp = READ_ONCE(rsp->gp_flags);
        if (*gfp & RCU_GP_FLAG_FQS)
                return true;

        /* The current grace period has completed. */
        if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
                return true;

        return false;
}

/*
 * Do one round of quiescent-state forcing.
 */
static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
{
        struct rcu_node *rnp = rcu_get_root(rsp);

        WRITE_ONCE(rsp->gp_activity, jiffies);
        rsp->n_force_qs++;
        if (first_time) {
                /* Collect dyntick-idle snapshots. */
                force_qs_rnp(rsp, dyntick_save_progress_counter);
        } else {
                /* Handle dyntick-idle and offline CPUs. */
                force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
        }
        /* Clear flag to prevent immediate re-entry. */
        if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                raw_spin_lock_irq_rcu_node(rnp);
                WRITE_ONCE(rsp->gp_flags,
                           READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
                raw_spin_unlock_irq_rcu_node(rnp);
        }
}
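
/*
 * Clean up after the old grace period.
 */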
static void rcu_gp_cleanup(struct rcu_state *rsp)
{
        unsigned long gp_duration;
        bool needgp = false;
        int nocb = 0;
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
        struct swait_queue_head *sq;

        WRITE_ONCE(rsp->gp_activity, jiffies);
        raw_spin_lock_irq_rcu_node(rnp);
        gp_duration = jiffies - rsp->gp_start;
        if (gp_duration > rsp->gp_max)
                rsp->gp_max = gp_duration;

        /*
         * We know the grace period is complete, but to everyone else
         * it appears to still be ongoing.  But it is also the case
         * that to everyone else it looks like there is nothing that
         * they can do to advance the grace period.  It is therefore
         * safe for us to drop the lock in order to mark the grace
         * period as completed in all of the rcu_node structures.
         */
        raw_spin_unlock_irq_rcu_node(rnp);

        /*
         * Propagate new ->completed value to rcu_node structures so
         * that other CPUs don't have to wait until the start of the next
         * grace period to process their callbacks.  This also avoids
         * some nasty RCU grace-period initialization races by forcing
         * the end of the current grace period to be completely recorded in
         * all of the rcu_node structures before the beginning of the next
         * grace period is recorded in any of the rcu_node structures.
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irq_rcu_node(rnp);
                WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
                WARN_ON_ONCE(rnp->qsmask);
                WRITE_ONCE(rnp->completed, rsp->gpnum);
                rdp = this_cpu_ptr(rsp->rda);
                if (rnp == rdp->mynode)
                        needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
                /* smp_mb() provided by prior unlock-lock pair. */
                nocb += rcu_future_gp_cleanup(rsp, rnp);
                sq = rcu_nocb_gp_get(rnp);
                raw_spin_unlock_irq_rcu_node(rnp);
                rcu_nocb_gp_cleanup(sq);
                cond_resched_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
                rcu_gp_slow(rsp, gp_cleanup_delay);
        }
        rnp = rcu_get_root(rsp);
        raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->completed bump. */
        rcu_nocb_gp_set(rnp, nocb);

        /* Declare grace period done. */
        WRITE_ONCE(rsp->completed, rsp->gpnum);
        trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
        rsp->gp_state = RCU_GP_IDLE;
        rdp = this_cpu_ptr(rsp->rda);
        /* Advance CBs to reduce false positives below. */
        needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
        if (needgp || cpu_needs_another_gp(rsp, rdp)) {
                WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
                trace_rcu_grace_period(rsp->name,
                                       READ_ONCE(rsp->gpnum),
                                       TPS("newreq"));
        }
        raw_spin_unlock_irq_rcu_node(rnp);
}

/*
 * Body of kthread that handles grace periods.
 */
static int __noreturn rcu_gp_kthread(void *arg)
{
        bool first_gp_fqs;
        int gf;
        unsigned long j;
        int ret;
        struct rcu_state *rsp = arg;
        struct rcu_node *rnp = rcu_get_root(rsp);

        rcu_bind_gp_kthread();
        for (;;) {

                /* Handle grace-period start. */
                for (;;) {
                        trace_rcu_grace_period(rsp->name,
                                               READ_ONCE(rsp->gpnum),
                                               TPS("reqwait"));
                        rsp->gp_state = RCU_GP_WAIT_GPS;
                        swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
                                                     RCU_GP_FLAG_INIT);
                        rsp->gp_state = RCU_GP_DONE_GPS;
                        /* Locking provides needed memory barrier. */
                        if (rcu_gp_init(rsp))
                                break;
                        cond_resched_rcu_qs();
                        WRITE_ONCE(rsp->gp_activity, jiffies);
                        WARN_ON(signal_pending(current));
                        trace_rcu_grace_period(rsp->name,
                                               READ_ONCE(rsp->gpnum),
                                               TPS("reqwaitsig"));
                }

                /* Handle quiescent-state forcing. */
                first_gp_fqs = true;
                j = jiffies_till_first_fqs;
                if (j > HZ) {
                        j = HZ;
                        jiffies_till_first_fqs = HZ;
                }
                ret = 0;
                for (;;) {
                        if (!ret) {
                                rsp->jiffies_force_qs = jiffies + j;
                                WRITE_ONCE(rsp->jiffies_kick_kthreads,
                                           jiffies + 3 * j);
                        }
                        trace_rcu_grace_period(rsp->name,
                                               READ_ONCE(rsp->gpnum),
                                               TPS("fqswait"));
                        rsp->gp_state = RCU_GP_WAIT_FQS;
                        ret = swait_event_idle_timeout(rsp->gp_wq,
                                        rcu_gp_fqs_check_wake(rsp, &gf), j);
                        rsp->gp_state = RCU_GP_DOING_FQS;
                        /* Locking provides needed memory barriers. */
                        /* If grace period done, leave loop. */
                        if (!READ_ONCE(rnp->qsmask) &&
                            !rcu_preempt_blocked_readers_cgp(rnp))
                                break;
                        /* If time for quiescent-state forcing, do it. */
                        if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
                            (gf & RCU_GP_FLAG_FQS)) {
                                trace_rcu_grace_period(rsp->name,
                                                       READ_ONCE(rsp->gpnum),
                                                       TPS("fqsstart"));
                                rcu_gp_fqs(rsp, first_gp_fqs);
                                first_gp_fqs = false;
                                trace_rcu_grace_period(rsp->name,
                                                       READ_ONCE(rsp->gpnum),
                                                       TPS("fqsend"));
                                cond_resched_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                ret = 0; /* Force full wait till next FQS. */
                                j = jiffies_till_next_fqs;
                                if (j > HZ) {
                                        j = HZ;
                                        jiffies_till_next_fqs = HZ;
                                } else if (j < 1) {
                                        j = 1;
                                        jiffies_till_next_fqs = 1;
                                }
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                WARN_ON(signal_pending(current));
                                trace_rcu_grace_period(rsp->name,
                                                       READ_ONCE(rsp->gpnum),
                                                       TPS("fqswaitsig"));
                                ret = 1; /* Keep old FQS timing. */
                                j = jiffies;
                                if (time_after(jiffies, rsp->jiffies_force_qs))
                                        j = 1;
                                else
                                        j = rsp->jiffies_force_qs - j;
                        }
                }

                /* Handle grace-period end. */
                rsp->gp_state = RCU_GP_CLEANUP;
                rcu_gp_cleanup(rsp);
                rsp->gp_state = RCU_GP_CLEANED;
        }
}

/*
 * Start a new RCU grace period if warranted, re-initializing the hierarchy
 * in preparation for detecting the next grace period.  The caller must hold
 * the root node's ->lock and hard irqs must be disabled.
 *
 * Note that it is legal for a dying CPU (which is marked as offline) to
 * invoke this function.  This can happen when the dying CPU reports its
 * quiescent state.
 *
 * Returns true if the grace-period kthread must be awakened.
 */
static bool
rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
                      struct rcu_data *rdp)
{
        raw_lockdep_assert_held_rcu_node(rnp);
        if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
                /*
                 * Either we have not yet spawned the grace-period
                 * task, this CPU does not need another grace period,
                 * or a grace period is already in progress.
                 * Either way, don't start a new grace period.
                 */
                return false;
        }
        WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
        trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
                               TPS("newreq"));

        /*
         * We can't do wakeups while holding the rnp->lock, as that
         * could cause possible deadlocks with the rq->lock.  Defer
         * the wakeup to our caller.
         */
        return true;
}

/*
 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
 * callbacks.  Note that rcu_start_gp_advanced() cannot do this because it
 * is invoked indirectly from rcu_advance_cbs(), which would result in
 * endless recursion -- or would do so if it wasn't for the self-deadlock
 * that is encountered beforehand.
 *
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool rcu_start_gp(struct rcu_state *rsp)
{
        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
        struct rcu_node *rnp = rcu_get_root(rsp);
        bool ret = false;

        /*
         * If there is no grace period in progress right now, any
         * callbacks we have up to this point will be satisfied by the
         * next grace period.  Also, advancing the callbacks reduces the
         * probability of false positives from cpu_needs_another_gp()
         * resulting in pointless grace periods.  So, advance callbacks
         * then start the grace period!
         */
        ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
        ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
        return ret;
}

/*
 * Report a full set of quiescent states to the specified rcu_state data
 * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
 * kthread if another grace period is required.  Whether we wake
 * the grace-period kthread or it awakens itself for the next round
 * of quiescent-state forcing, that kthread will clean up after the
 * just-completed grace period.  Note that the caller must hold the
 * root rcu_node structure's ->lock, and is expected to release it
 * before returning.
 */
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
{
        raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
        raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
        rcu_gp_kthread_wake(rsp);
}
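
/*
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
 * must be represented by the same rcu_node structure (which need not be a
 * leaf rcu_node structure, though it often will be).  The gps parameter
 * is the grace-period snapshot, which means that the quiescent states
 * are valid only if rnp->gpnum is equal to gps.  That structure's lock
 * must be held upon entry, and it is released before return.
 */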
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long oldmask = 0;
        struct rcu_node *rnp_c;

        raw_lockdep_assert_held_rcu_node(rnp);

        /* Walk up the rcu_node hierarchy. */
        for (;;) {
                if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {

                        /*
                         * Our bit has already been cleared, or the
                         * relevant grace period is already over, so done.
                         */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
                WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
                WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1 &&
                             rcu_preempt_blocked_readers_cgp(rnp));
                rnp->qsmask &= ~mask;
                trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
                                                 mask, rnp->qsmask, rnp->level,
                                                 rnp->grplo, rnp->grphi,
                                                 !!rnp->gp_tasks);
                if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {

                        /* Other bits still set at this level, so done. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
                mask = rnp->grpmask;
                if (rnp->parent == NULL) {

                        /* No more levels.  Exit loop holding root lock. */
                        break;
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                rnp_c = rnp;
                rnp = rnp->parent;
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                oldmask = rnp_c->qsmask;
        }

        /*
         * Get here if we are the last CPU to pass through a quiescent
         * state for this grace period.  Invoke rcu_report_qs_rsp()
         * to clean up and start the next grace period if one is needed.
         */
        rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
                                      struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long gps;
        unsigned long mask;
        struct rcu_node *rnp_p;

        raw_lockdep_assert_held_rcu_node(rnp);
        if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
            rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;  /* Still need more quiescent states! */
        }

        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
                 * Only one rcu_node structure in the tree, so don't
                 * try to report up to its nonexistent parent!
                 */
                rcu_report_qs_rsp(rsp, flags);
                return;
        }

        /* Report up the rest of the hierarchy, tracking current ->gpnum. */
        gps = rnp->gpnum;
        mask = rnp->grpmask;
        raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
        raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
        rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
}

/*
 * Record a quiescent state for the specified CPU to that CPU's rcu_data
 * structure.  This must be called from the specified CPU.
 */
static void
rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long flags;
        unsigned long mask;
        bool needwake;
        struct rcu_node *rnp;

        rnp = rdp->mynode;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
            rnp->completed == rnp->gpnum || rdp->gpwrap) {

                /*
                 * The grace period in which this quiescent state was
                 * recorded has ended, so don't report it upwards.
                 * We will instead need a new quiescent state that lies
                 * within the current grace period.
                 */
                rdp->cpu_no_qs.b.norm = true;   /* need qs for new gp. */
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        mask = rdp->grpmask;
        if ((rnp->qsmask & mask) == 0) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        } else {
                rdp->core_needs_qs = false;

                /*
                 * This GP can't end until cpu checks in, so all of our
                 * callbacks can be processed during the next GP.
                 */
                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);

                rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
                /* ^^^ Released rnp->lock */
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
        }
}

/*
 * Check to see if there is a new grace period of which this CPU
 * is not yet aware, and if so, set up local rcu_data state for it.
 * Otherwise, see if this CPU has just passed through its first
 * quiescent state for this grace period, and record that fact if so.
 */
static void
rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
{
        /* Check for grace-period ends and beginnings. */
        note_gp_changes(rsp, rdp);

        /*
         * Does this CPU still need to do its part for current grace period?
         * If no, return and let the other CPUs do their part as well.
         */
        if (!rdp->core_needs_qs)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
        if (rdp->cpu_no_qs.b.norm)
                return;

        /*
         * Tell RCU we are done (but rcu_report_qs_rdp() will be the
         * judge of that).
         */
        rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
}

/*
 * Trace the fact that this CPU is going offline.
 */
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
{
        RCU_TRACE(unsigned long mask;)
        RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
        RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)

        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
                return;

        RCU_TRACE(mask = rdp->grpmask;)
        trace_rcu_grace_period(rsp->name,
                               rnp->gpnum + 1 - !!(rnp->qsmask & mask),
                               TPS("cpuofl"));
}

/*
 * All CPUs for the specified rcu_node structure have gone offline,
 * and all tasks that were preempted within an RCU read-side critical
 * section while running on one of those CPUs have since exited their RCU
 * read-side critical section.  Some other CPU is reporting this fact with
 * the specified rcu_node structure's ->lock held and interrupts disabled.
 * This function therefore goes up the tree of rcu_node structures,
 * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
 * the leaf rcu_node structure's ->qsmaskinit field has already been
 * updated.
 *
 * This function does check that the specified rcu_node structure has
 * all CPUs offline and no blocked tasks, so it is OK to invoke it
 * prematurely.  That said, invoking it after the fact will cost you
 * a needless lock acquisition.  So once it has done its work, don't
 * invoke it again.
 */
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
{
        long mask;
        struct rcu_node *rnp = rnp_leaf;

        raw_lockdep_assert_held_rcu_node(rnp);
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
            rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
                return;
        for (;;) {
                mask = rnp->grpmask;
                rnp = rnp->parent;
                if (!rnp)
                        break;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                rnp->qsmaskinit &= ~mask;
                rnp->qsmask &= ~mask;
                if (rnp->qsmaskinit) {
                        raw_spin_unlock_rcu_node(rnp);
                        /* irqs remain disabled. */
                        return;
                }
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
        }
}

/*
 * The CPU has been completely removed, and some other CPU is reporting
 * this fact from process context.  Do the remainder of the cleanup.
 * There can only be one CPU hotplug operation at a time, so no need for
 * explicit locking.
 */
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
{
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
                return;

        /* Adjust any no-longer-needed kthreads. */
        rcu_boost_kthread_setaffinity(rnp, -1);
}
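
/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */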
static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long flags;
        struct rcu_head *rhp;
        struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
        long bl, count;

        /* If no callbacks are ready, just return. */
        if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
                trace_rcu_batch_start(rsp->name,
                                      rcu_segcblist_n_lazy_cbs(&rdp->cblist),
                                      rcu_segcblist_n_cbs(&rdp->cblist), 0);
                trace_rcu_batch_end(rsp->name, 0,
                                    !rcu_segcblist_empty(&rdp->cblist),
                                    need_resched(), is_idle_task(current),
                                    rcu_is_callbacks_kthread());
                return;
        }

        /*
         * Extract the list of ready callbacks, disabling interrupts to
         * prevent races with call_rcu() from interrupt handlers.  Leave the
         * callback counts, as rcu_barrier() needs to be conservative.
         */
        local_irq_save(flags);
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
        bl = rdp->blimit;
        trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
                              rcu_segcblist_n_cbs(&rdp->cblist), bl);
        rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
        local_irq_restore(flags);

        /* Invoke callbacks. */
        rhp = rcu_cblist_dequeue(&rcl);
        for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
                debug_rcu_head_unqueue(rhp);
                if (__rcu_reclaim(rsp->name, rhp))
                        rcu_cblist_dequeued_lazy(&rcl);
                /*
                 * Stop only if limit reached and CPU has something to do.
                 * Note: The rcl structure counts down from zero.
                 */
                if (-rcl.len >= bl &&
                    (need_resched() ||
                     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
                        break;
        }

        local_irq_save(flags);
        count = -rcl.len;
        trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
                            is_idle_task(current), rcu_is_callbacks_kthread());

        /* Update counts and requeue any remaining callbacks. */
        rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
        smp_mb(); /* List handling before counting for rcu_barrier(). */
        rcu_segcblist_insert_count(&rdp->cblist, &rcl);

        /* Reinstate batch limit if we have worked down the excess. */
        count = rcu_segcblist_n_cbs(&rdp->cblist);
        if (rdp->blimit == LONG_MAX && count <= qlowmark)
                rdp->blimit = blimit;

        /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
        if (count == 0 && rdp->qlen_last_fqs_check != 0) {
                rdp->qlen_last_fqs_check = 0;
                rdp->n_force_qs_snap = rsp->n_force_qs;
        } else if (count < rdp->qlen_last_fqs_check - qhimark)
                rdp->qlen_last_fqs_check = count;

        /*
         * The following usually indicates a double call_rcu().  To track
         * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
         */
        WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));

        local_irq_restore(flags);

        /* Re-invoke RCU core processing if there are callbacks remaining. */
        if (rcu_segcblist_ready_cbs(&rdp->cblist))
                invoke_rcu_core();
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule RCU core processing.
 *
 * This function must be called from hardirq context.  It is normally
 * invoked from the scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
        trace_rcu_utilization(TPS("Start scheduler-tick"));
        increment_cpu_stall_ticks();
        if (user || rcu_is_cpu_rrupt_from_idle()) {

                /*
                 * Get here if this CPU took its interrupt from user
                 * mode or from the idle loop, and if this is not a
                 * nested interrupt.  In this case, the CPU is in
                 * a quiescent state, so note it.
                 *
                 * No memory barrier is required here because both
                 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
                 * variables that other CPUs neither access nor modify,
                 * at least not while the corresponding CPU is online.
                 */

                rcu_sched_qs();
                rcu_bh_qs();

        } else if (!in_softirq()) {

                /*
                 * Get here if this CPU did not take its interrupt from
                 * softirq, in other words, if it is not interrupting
                 * a rcu_bh read-side critical section.  This is an _bh
                 * critical section, so note it.
                 */

                rcu_bh_qs();
        }
        rcu_preempt_check_callbacks();
        if (rcu_pending())
                invoke_rcu_core();
        if (user)
                rcu_note_voluntary_context_switch(current);
        trace_rcu_utilization(TPS("End scheduler-tick"));
}

/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
 * Also initiate boosting for any threads blocked on the root rcu_node.
 *
 * The caller must have suppressed start of new grace periods.
 */
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
{
        int cpu;
        unsigned long flags;
        unsigned long mask;
        struct rcu_node *rnp;

        rcu_for_each_leaf_node(rsp, rnp) {
                cond_resched_rcu_qs();
                mask = 0;
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->qsmask == 0) {
                        if (rcu_state_p == &rcu_sched_state ||
                            rsp != rcu_state_p ||
                            rcu_preempt_blocked_readers_cgp(rnp)) {
                                /*
                                 * No point in scanning bits because they
                                 * are all zero.  But we might need to
                                 * priority-boost blocked readers.
                                 */
                                rcu_initiate_boost(rnp, flags);
                                /* rcu_initiate_boost() releases rnp->lock */
                                continue;
                        }
                        if (rnp->parent &&
                            (rnp->parent->qsmask & rnp->grpmask)) {
                                /*
                                 * Race between grace-period
                                 * initialization and task exiting RCU
                                 * read-side critical section: Report.
                                 */
                                rcu_report_unblock_qs_rnp(rsp, rnp, flags);
                                /* rcu_report_unblock_qs_rnp() rlses ->lock */
                                continue;
                        }
                }
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
                        if ((rnp->qsmask & bit) != 0) {
                                if (f(per_cpu_ptr(rsp->rda, cpu)))
                                        mask |= bit;
                        }
                }
                if (mask != 0) {
                        /* Idle/offline CPUs, report (releases rnp->lock). */
                        rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
                } else {
                        /* Nothing to do here, so just drop the lock. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
        }
}
2829
2830
2831
2832
2833
2834static void force_quiescent_state(struct rcu_state *rsp)
2835{
2836 unsigned long flags;
2837 bool ret;
2838 struct rcu_node *rnp;
2839 struct rcu_node *rnp_old = NULL;
2840
2841
2842 rnp = __this_cpu_read(rsp->rda->mynode);
2843 for (; rnp != NULL; rnp = rnp->parent) {
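		/* Give up if an FQS is already pending or this level's lock is contended. */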
2844 ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2845 !raw_spin_trylock(&rnp->fqslock);
2846 if (rnp_old != NULL)
2847 raw_spin_unlock(&rnp_old->fqslock);
2848 if (ret)
2849 return;
2850 rnp_old = rnp;
2851 }
	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */

	/* Reached the root of the rcu_node tree, acquire lock. */
2855 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2856 raw_spin_unlock(&rnp_old->fqslock);
2857 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2858 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
		return;  /* Someone beat us to it. */
2860 }
2861 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2862 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2863 rcu_gp_kthread_wake(rsp);
2864}
2865
/*
 * This does the RCU core processing work for the specified rcu_state
 * and rcu_data structures.  This may be called only from the CPU to
 * whom the rdp belongs.
 */
2871static void
2872__rcu_process_callbacks(struct rcu_state *rsp)
2873{
2874 unsigned long flags;
2875 bool needwake;
2876 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2877
2878 WARN_ON_ONCE(!rdp->beenonline);
2879
	/* Update RCU state based on any recent quiescent states. */
2881 rcu_check_quiescent_state(rsp, rdp);
2882
	/* Does this CPU require a not-yet-started grace period? */
2884 local_irq_save(flags);
2885 if (cpu_needs_another_gp(rsp, rdp)) {
2886 raw_spin_lock_rcu_node(rcu_get_root(rsp));
2887 needwake = rcu_start_gp(rsp);
2888 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
2889 if (needwake)
2890 rcu_gp_kthread_wake(rsp);
2891 } else {
2892 local_irq_restore(flags);
2893 }
2894
	/* If there are callbacks ready, invoke them. */
2896 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2897 invoke_rcu_callbacks(rsp, rdp);
2898
	/* Do any needed deferred wakeups of rcuo kthreads. */
2900 do_nocb_deferred_wakeup(rdp);
2901}
2902
/*
 * Do RCU core processing for the current CPU.
 */
2906static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
2907{
2908 struct rcu_state *rsp;
2909
2910 if (cpu_is_offline(smp_processor_id()))
2911 return;
2912 trace_rcu_utilization(TPS("Start RCU core"));
2913 for_each_rcu_flavor(rsp)
2914 __rcu_process_callbacks(rsp);
2915 trace_rcu_utilization(TPS("End RCU core"));
2916}
2917
/*
 * Schedule RCU callback invocation.  If the specified type of RCU
 * does not support RCU priority boosting, just do a direct call,
 * otherwise wake up the per-CPU kernel kthread.  Note that because we
 * are running on the current CPU with softirqs disabled, the
 * rcu_cpu_kthread_task cannot disappear unexpectedly.
 */
2925static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2926{
2927 if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
2928 return;
2929 if (likely(!rsp->boost)) {
2930 rcu_do_batch(rsp, rdp);
2931 return;
2932 }
2933 invoke_rcu_callbacks_kthread();
2934}
2935
2936static void invoke_rcu_core(void)
2937{
2938 if (cpu_online(smp_processor_id()))
2939 raise_softirq(RCU_SOFTIRQ);
2940}
2941
/*
 * Handle any core-RCU processing required by a call_rcu() invocation.
 */
2945static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2946 struct rcu_head *head, unsigned long flags)
2947{
2948 bool needwake;
2949
	/*
	 * If called from an extended quiescent state, invoke the RCU
	 * core in order to force a re-evaluation of RCU's idleness.
	 */
2954 if (!rcu_is_watching())
2955 invoke_rcu_core();
2956
	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2958 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2959 return;
2960
	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke force_quiescent_state()
	 * if some other CPU has recently done so.  Also, don't bother
	 * invoking force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */
2968 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2969 rdp->qlen_last_fqs_check + qhimark)) {
2970
		/* Are we ignoring a completed grace period? */
2972 note_gp_changes(rsp, rdp);
2973
		/* Start a new grace period if one not already started. */
2975 if (!rcu_gp_in_progress(rsp)) {
2976 struct rcu_node *rnp_root = rcu_get_root(rsp);
2977
2978 raw_spin_lock_rcu_node(rnp_root);
2979 needwake = rcu_start_gp(rsp);
2980 raw_spin_unlock_rcu_node(rnp_root);
2981 if (needwake)
2982 rcu_gp_kthread_wake(rsp);
2983 } else {
			/* Give the grace period a kick. */
2985 rdp->blimit = LONG_MAX;
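			/* Force only if no recent FQS and ours isn't the sole pending CB. */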
2986 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2987 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2988 force_quiescent_state(rsp);
2989 rdp->n_force_qs_snap = rsp->n_force_qs;
2990 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2991 }
2992 }
2993}
2994
/*
 * RCU callback function to leak a callback.
 */
2998static void rcu_leak_callback(struct rcu_head *rhp)
2999{
3000}
3001
/*
 * Helper function for call_rcu() and friends.  The cpu argument will
 * normally be -1, indicating "currently running CPU".  It may specify
 * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
 * is expected to specify a CPU.
 */
3008static void
3009__call_rcu(struct rcu_head *head, rcu_callback_t func,
3010 struct rcu_state *rsp, int cpu, bool lazy)
3011{
3012 unsigned long flags;
3013 struct rcu_data *rdp;
3014
	/* Misaligned rcu_head! */
3016 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3017
3018 if (debug_rcu_head_queue(head)) {
		/*
		 * Probable double call_rcu(), so leak the callback.
		 * Use rcu:rcu_callback trace event to find the previous
		 * time callback was passed to __call_rcu().
		 */
3024 WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n",
3025 head, head->func);
3026 WRITE_ONCE(head->func, rcu_leak_callback);
3027 return;
3028 }
3029 head->func = func;
3030 head->next = NULL;
3031 local_irq_save(flags);
3032 rdp = this_cpu_ptr(rsp->rda);
3033
	/* Add the callback to our list. */
3035 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
3036 int offline;
3037
3038 if (cpu != -1)
3039 rdp = per_cpu_ptr(rsp->rda, cpu);
3040 if (likely(rdp->mynode)) {
			/* Post-boot, so this should be for a no-CBs CPU. */
3042 offline = !__call_rcu_nocb(rdp, head, lazy, flags);
3043 WARN_ON_ONCE(offline);
			/* Offline CPU, _call_rcu() illegal, leak callback. */
3045 local_irq_restore(flags);
3046 return;
3047 }
		/*
		 * Very early boot, before rcu_init().  Initialize if needed
		 * and then drop through to queue the callback.
		 */
3052 BUG_ON(cpu != -1);
3053 WARN_ON_ONCE(!rcu_is_watching());
3054 if (rcu_segcblist_empty(&rdp->cblist))
3055 rcu_segcblist_init(&rdp->cblist);
3056 }
3057 rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
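	/* Let the RCU_FAST_NO_HZ idle-entry code know about non-lazy callbacks. */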
3058 if (!lazy)
3059 rcu_idle_count_callbacks_posted();
3060
3061 if (__is_kfree_rcu_offset((unsigned long)func))
3062 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
3063 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
3064 rcu_segcblist_n_cbs(&rdp->cblist));
3065 else
3066 trace_rcu_callback(rsp->name, head,
3067 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
3068 rcu_segcblist_n_cbs(&rdp->cblist));
3069
	/* Go handle any RCU core processing required. */
3071 __call_rcu_core(rsp, rdp, head, flags);
3072 local_irq_restore(flags);
3073}
3074
/**
 * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.  RCU read-side critical sections are
 * delimited by:
 *
 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
 * - anything that disables preemption.
 *
 * These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
3095void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
3096{
3097 __call_rcu(head, func, &rcu_sched_state, -1, 0);
3098}
3099EXPORT_SYMBOL_GPL(call_rcu_sched);
3100
/**
 * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *
 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *
 * These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
3123void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
3124{
3125 __call_rcu(head, func, &rcu_bh_state, -1, 0);
3126}
3127EXPORT_SYMBOL_GPL(call_rcu_bh);
3128
/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
3136void kfree_call_rcu(struct rcu_head *head,
3137 rcu_callback_t func)
3138{
3139 __call_rcu(head, func, rcu_state_p, -1, 1);
3140}
3141EXPORT_SYMBOL_GPL(kfree_call_rcu);
3142
/*
 * Because a context switch is a grace period for RCU-sched and RCU-bh,
 * any blocking grace-period wait automatically implies a grace period
 * if there is only one CPU online at any point during execution of
 * either synchronize_sched() or synchronize_rcu_bh().  It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds
 * some overhead: RCU still operates correctly.
 */
3152static inline int rcu_blocking_is_gp(void)
3153{
3154 int ret;
3155
3156 might_sleep();
3157 preempt_disable();
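	/* Snapshot the online-CPU count; occasional staleness is harmless (see above). */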
3158 ret = num_online_cpus() <= 1;
3159 preempt_enable();
3160 return ret;
3161}
3162
/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.  These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * non-threaded hardware-interrupt handlers, in progress on entry will
 * have completed before this primitive returns.  However, this does not
 * guarantee that softirq handlers will have completed, since in some
 * kernels, these handlers can run in process context, and can block.
 *
 * This primitive provides ordering guarantees: any code executed after
 * synchronize_sched() returns is ordered after any RCU-sched read-side
 * critical section that began before the grace period started.
 */
3198void synchronize_sched(void)
3199{
3200 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3201 lock_is_held(&rcu_lock_map) ||
3202 lock_is_held(&rcu_sched_lock_map),
3203 "Illegal synchronize_sched() in RCU-sched read-side critical section");
3204 if (rcu_blocking_is_gp())
3205 return;
3206 if (rcu_gp_is_expedited())
3207 synchronize_sched_expedited();
3208 else
3209 wait_rcu_gp(call_rcu_sched);
3210}
3211EXPORT_SYMBOL_GPL(synchronize_sched);
3212
/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 *
 * See the description of synchronize_sched() for more detailed
 * information on memory-ordering guarantees.
 */
3225void synchronize_rcu_bh(void)
3226{
3227 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3228 lock_is_held(&rcu_lock_map) ||
3229 lock_is_held(&rcu_sched_lock_map),
3230 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
3231 if (rcu_blocking_is_gp())
3232 return;
3233 if (rcu_gp_is_expedited())
3234 synchronize_rcu_bh_expedited();
3235 else
3236 wait_rcu_gp(call_rcu_bh);
3237}
3238EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
3239
/**
 * get_state_synchronize_rcu - Snapshot current RCU state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
3247unsigned long get_state_synchronize_rcu(void)
3248{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gpnum.
	 */
3253 smp_mb();
3254
	/*
	 * Make sure this load happens before the purportedly
	 * time-consuming work between get_state_synchronize_rcu()
	 * and cond_synchronize_rcu().
	 */
3260 return smp_load_acquire(&rcu_state_p->gpnum);
3261}
3262EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3263
/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 *
 * If a full RCU grace period has elapsed since the earlier call to
 * get_state_synchronize_rcu(), just return.  Otherwise, invoke
 * synchronize_rcu() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.  But
 * counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
3278void cond_synchronize_rcu(unsigned long oldstate)
3279{
3280 unsigned long newstate;
3281
	/*
	 * Ensure that this load happens before any RCU-destructive
	 * actions the caller might carry out after we return.
	 */
3286 newstate = smp_load_acquire(&rcu_state_p->completed);
3287 if (ULONG_CMP_GE(oldstate, newstate))
3288 synchronize_rcu();
3289}
3290EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3291
/**
 * get_state_synchronize_sched - Snapshot current RCU-sched state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_sched()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
3299unsigned long get_state_synchronize_sched(void)
3300{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gpnum.
	 */
3305 smp_mb();
3306
	/*
	 * Make sure this load happens before the purportedly
	 * time-consuming work between get_state_synchronize_sched()
	 * and cond_synchronize_sched().
	 */
3312 return smp_load_acquire(&rcu_sched_state.gpnum);
3313}
3314EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
3315
/**
 * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_sched()
 *
 * If a full RCU-sched grace period has elapsed since the earlier call to
 * get_state_synchronize_sched(), just return.  Otherwise, invoke
 * synchronize_sched() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.  But
 * counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
3330void cond_synchronize_sched(unsigned long oldstate)
3331{
3332 unsigned long newstate;
3333
	/*
	 * Ensure that this load happens before any RCU-destructive
	 * actions the caller might carry out after we return.
	 */
3338 newstate = smp_load_acquire(&rcu_sched_state.completed);
3339 if (ULONG_CMP_GE(oldstate, newstate))
3340 synchronize_sched();
3341}
3342EXPORT_SYMBOL_GPL(cond_synchronize_sched);
3343
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
3351static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3352{
3353 struct rcu_node *rnp = rdp->mynode;
3354
	/* Check for CPU stalls, if enabled. */
3356 check_cpu_stall(rsp, rdp);
3357
	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
3359 if (rcu_nohz_full_cpu(rsp))
3360 return 0;
3361
	/* Is the RCU core waiting for a quiescent state from this CPU? */
3363 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm)
3364 return 1;
3365
	/* Does this CPU have callbacks ready to invoke? */
3367 if (rcu_segcblist_ready_cbs(&rdp->cblist))
3368 return 1;
3369
	/* Has RCU gone idle with this CPU needing another grace period? */
3371 if (cpu_needs_another_gp(rsp, rdp))
3372 return 1;
3373
	/* Has another RCU grace period completed? */
3375 if (READ_ONCE(rnp->completed) != rdp->completed)
3376 return 1;
3377
	/* Has a new RCU grace period started? */
3379 if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
3380 unlikely(READ_ONCE(rdp->gpwrap)))
3381 return 1;
3382
	/* Does this CPU need a deferred NOCB wakeup? */
3384 if (rcu_nocb_need_deferred_wakeup(rdp))
3385 return 1;
3386
	/* nothing to do */
3388 return 0;
3389}
3390
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for any type of RCU, returning 1 if so.
 */
3396static int rcu_pending(void)
3397{
3398 struct rcu_state *rsp;
3399
3400 for_each_rcu_flavor(rsp)
3401 if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
3402 return 1;
3403 return 0;
3404}
3405
/*
 * Return true if the specified CPU has any callback.  If all_lazy is
 * non-NULL, store an indication of whether all callbacks are lazy.
 * (If there are no callbacks, all of them are deemed to be lazy.)
 */
3411static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
3412{
3413 bool al = true;
3414 bool hc = false;
3415 struct rcu_data *rdp;
3416 struct rcu_state *rsp;
3417
3418 for_each_rcu_flavor(rsp) {
3419 rdp = this_cpu_ptr(rsp->rda);
3420 if (rcu_segcblist_empty(&rdp->cblist))
3421 continue;
3422 hc = true;
3423 if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
3424 al = false;
3425 break;
3426 }
3427 }
3428 if (all_lazy)
3429 *all_lazy = al;
3430 return hc;
3431}
3432
/*
 * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
 * the compiler is expected to optimize this away.
 */
3437static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
3438 int cpu, unsigned long done)
3439{
3440 trace_rcu_barrier(rsp->name, s, cpu,
3441 atomic_read(&rsp->barrier_cpu_count), done);
3442}
3443
/*
 * RCU callback function for _rcu_barrier().  If we are last, wake
 * up the task executing _rcu_barrier().
 */
3448static void rcu_barrier_callback(struct rcu_head *rhp)
3449{
3450 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
3451 struct rcu_state *rsp = rdp->rsp;
3452
3453 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
3454 _rcu_barrier_trace(rsp, TPS("LastCB"), -1,
3455 rsp->barrier_sequence);
3456 complete(&rsp->barrier_completion);
3457 } else {
3458 _rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
3459 }
3460}
3461
/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
3465static void rcu_barrier_func(void *type)
3466{
3467 struct rcu_state *rsp = type;
3468 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
3469
3470 _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
3471 rdp->barrier_head.func = rcu_barrier_callback;
3472 debug_rcu_head_queue(&rdp->barrier_head);
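	/* Entrain the barrier callback behind all callbacks already queued. */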
3473 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
3474 atomic_inc(&rsp->barrier_cpu_count);
3475 } else {
3476 debug_rcu_head_unqueue(&rdp->barrier_head);
3477 _rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
3478 rsp->barrier_sequence);
3479 }
3480}
3481
/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
3486static void _rcu_barrier(struct rcu_state *rsp)
3487{
3488 int cpu;
3489 struct rcu_data *rdp;
3490 unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
3491
3492 _rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
3493
	/* Take mutex to serialize concurrent rcu_barrier() requests. */
3495 mutex_lock(&rsp->barrier_mutex);
3496
	/* Did someone else do our work for us? */
3498 if (rcu_seq_done(&rsp->barrier_sequence, s)) {
3499 _rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
3500 rsp->barrier_sequence);
		smp_mb(); /* caller's subsequent code after above check. */
3502 mutex_unlock(&rsp->barrier_mutex);
3503 return;
3504 }
3505
	/* Mark the start of the barrier operation. */
3507 rcu_seq_start(&rsp->barrier_sequence);
3508 _rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
3509
	/*
	 * Initialize the count to one rather than to zero in order to
	 * avoid a too-soon return to zero in case of a short grace period
	 * (or preemption of this task).  Exclude CPU-hotplug operations
	 * to ensure that no offline CPU has callbacks queued.
	 */
3516 init_completion(&rsp->barrier_completion);
3517 atomic_set(&rsp->barrier_cpu_count, 1);
3518 get_online_cpus();
3519
	/*
	 * Force each CPU with callbacks to register a new callback.
	 * When that callback is invoked, we will know that all of the
	 * corresponding CPU's preceding callbacks have been invoked.
	 */
3525 for_each_possible_cpu(cpu) {
3526 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3527 continue;
3528 rdp = per_cpu_ptr(rsp->rda, cpu);
3529 if (rcu_is_nocb_cpu(cpu)) {
3530 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
3531 _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
3532 rsp->barrier_sequence);
3533 } else {
3534 _rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
3535 rsp->barrier_sequence);
3536 smp_mb__before_atomic();
3537 atomic_inc(&rsp->barrier_cpu_count);
3538 __call_rcu(&rdp->barrier_head,
3539 rcu_barrier_callback, rsp, cpu, 0);
3540 }
3541 } else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
3542 _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
3543 rsp->barrier_sequence);
3544 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3545 } else {
3546 _rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
3547 rsp->barrier_sequence);
3548 }
3549 }
3550 put_online_cpus();
3551
	/*
	 * Now that we have an rcu_barrier_callback() callback on each
	 * CPU, and thus each counted, remove the initial count.
	 */
3556 if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3557 complete(&rsp->barrier_completion);
3558
	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3560 wait_for_completion(&rsp->barrier_completion);
3561
	/* Mark the end of the barrier operation. */
3563 _rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
3564 rcu_seq_end(&rsp->barrier_sequence);
3565
	/* Other rcu_barrier() invocations can now safely proceed. */
3567 mutex_unlock(&rsp->barrier_mutex);
3568}
3569
/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
3573void rcu_barrier_bh(void)
3574{
3575 _rcu_barrier(&rcu_bh_state);
3576}
3577EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3578
/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
3582void rcu_barrier_sched(void)
3583{
3584 _rcu_barrier(&rcu_sched_state);
3585}
3586EXPORT_SYMBOL_GPL(rcu_barrier_sched);
3587
3588
/*
 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online.  The caller
 * must hold the corresponding leaf rcu_node ->lock with interrupts
 * enabled.
 */
3594static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3595{
3596 long mask;
3597 struct rcu_node *rnp = rnp_leaf;
3598
3599 raw_lockdep_assert_held_rcu_node(rnp);
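	/* Walk up the tree, ORing each level's grpmask into its parent's ->qsmaskinit. */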
3600 for (;;) {
3601 mask = rnp->grpmask;
3602 rnp = rnp->parent;
3603 if (rnp == NULL)
3604 return;
3605 raw_spin_lock_rcu_node(rnp);
3606 rnp->qsmaskinit |= mask;
3607 raw_spin_unlock_rcu_node(rnp);
3608 }
3609}
3610
/*
 * Do boot-time initialization of a CPU's per-cpu RCU data.
 */
3614static void __init
3615rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3616{
3617 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3618
	/* Set up local state, ensuring consistent view of global state. */
3620 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
3621 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3622 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
3623 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
3624 rdp->cpu = cpu;
3625 rdp->rsp = rsp;
3626 rcu_boot_init_nocb_percpu_data(rdp);
3627}
3628
/*
 * Initialize a CPU's per-cpu RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we can
 * accept some slop in the rsp->gpnum access due to the fact that this
 * CPU cannot possibly have any RCU callbacks in flight yet.
 */
3635static void
3636rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3637{
3638 unsigned long flags;
3639 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3640 struct rcu_node *rnp = rcu_get_root(rsp);
3641
	/* Set up local state, ensuring consistent view of global state. */
3643 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3644 rdp->qlen_last_fqs_check = 0;
3645 rdp->n_force_qs_snap = rsp->n_force_qs;
3646 rdp->blimit = blimit;
3647 if (rcu_segcblist_empty(&rdp->cblist) &&
3648 !init_nocb_callback_list(rdp))
3649 rcu_segcblist_init(&rdp->cblist);
	rdp->dynticks->dynticks_nesting = 1;	/* CPU not up, no tearing. */
3651 rcu_dynticks_eqs_online();
	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
3653
	/*
	 * Initialize this CPU's view of the grace-period state from its
	 * leaf rcu_node structure, and note that this CPU has been online.
	 */
3659 rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
3661 rdp->beenonline = true;
	rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
3663 rdp->completed = rnp->completed;
3664 rdp->cpu_no_qs.b.norm = true;
3665 rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
3666 rdp->core_needs_qs = false;
3667 rdp->rcu_iw_pending = false;
3668 rdp->rcu_iw_gpnum = rnp->gpnum - 1;
3669 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3670 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3671}
3672
/*
 * Invoked early in the CPU-online process, when pretty much all
 * services are available.  The incoming CPU is not present.
 */
3677int rcutree_prepare_cpu(unsigned int cpu)
3678{
3679 struct rcu_state *rsp;
3680
3681 for_each_rcu_flavor(rsp)
3682 rcu_init_percpu_data(cpu, rsp);
3683
3684 rcu_prepare_kthreads(cpu);
3685 rcu_spawn_all_nocb_kthreads(cpu);
3686
3687 return 0;
3688}
3689
/*
 * Update RCU priority boost kthread affinity for CPU-hotplug changes.
 */
3693static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3694{
3695 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3696
3697 rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3698}
3699
/*
 * Near the end of the CPU-online process.  Pretty much all services
 * enabled, and the CPU is now very much alive.
 */
3704int rcutree_online_cpu(unsigned int cpu)
3705{
3706 unsigned long flags;
3707 struct rcu_data *rdp;
3708 struct rcu_node *rnp;
3709 struct rcu_state *rsp;
3710
3711 for_each_rcu_flavor(rsp) {
3712 rdp = per_cpu_ptr(rsp->rda, cpu);
3713 rnp = rdp->mynode;
3714 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3715 rnp->ffmask |= rdp->grpmask;
3716 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3717 }
3718 if (IS_ENABLED(CONFIG_TREE_SRCU))
3719 srcu_online_cpu(cpu);
3720 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
3721 return 0;
3722 sync_sched_exp_online_cleanup(cpu);
3723 rcutree_affinity_setting(cpu, -1);
3724 return 0;
3725}
3726
/*
 * Near the beginning of the process.  The CPU is still very much alive
 * with pretty much all services enabled.
 */
3731int rcutree_offline_cpu(unsigned int cpu)
3732{
3733 unsigned long flags;
3734 struct rcu_data *rdp;
3735 struct rcu_node *rnp;
3736 struct rcu_state *rsp;
3737
3738 for_each_rcu_flavor(rsp) {
3739 rdp = per_cpu_ptr(rsp->rda, cpu);
3740 rnp = rdp->mynode;
3741 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3742 rnp->ffmask &= ~rdp->grpmask;
3743 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3744 }
3745
3746 rcutree_affinity_setting(cpu, cpu);
3747 if (IS_ENABLED(CONFIG_TREE_SRCU))
3748 srcu_offline_cpu(cpu);
3749 return 0;
3750}
3751
/*
 * Near the end of the offline process.  We do only tracing here.
 */
3755int rcutree_dying_cpu(unsigned int cpu)
3756{
3757 struct rcu_state *rsp;
3758
3759 for_each_rcu_flavor(rsp)
3760 rcu_cleanup_dying_cpu(rsp);
3761 return 0;
3762}
3763
/*
 * The CPU is fully dead by this point; do the remaining cleanup and any
 * deferred NOCB wakeups on its behalf.
 */
3767int rcutree_dead_cpu(unsigned int cpu)
3768{
3769 struct rcu_state *rsp;
3770
3771 for_each_rcu_flavor(rsp) {
3772 rcu_cleanup_dead_cpu(cpu, rsp);
3773 do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
3774 }
3775 return 0;
3776}
3777
/*
 * Mark the specified CPU as being online so that subsequent grace periods
 * (both expedited and normal) will wait on it.  Note that this means that
 * incoming CPUs are not allowed to use RCU read-side critical sections
 * until this function is called.  Failing to observe this restriction
 * will result in lockdep splats.
 *
 * Note that this function is special in that it is invoked directly
 * from the incoming CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
3789void rcu_cpu_starting(unsigned int cpu)
3790{
3791 unsigned long flags;
3792 unsigned long mask;
3793 int nbits;
3794 unsigned long oldmask;
3795 struct rcu_data *rdp;
3796 struct rcu_node *rnp;
3797 struct rcu_state *rsp;
3798
3799 for_each_rcu_flavor(rsp) {
3800 rdp = per_cpu_ptr(rsp->rda, cpu);
3801 rnp = rdp->mynode;
3802 mask = rdp->grpmask;
3803 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3804 rnp->qsmaskinitnext |= mask;
3805 oldmask = rnp->expmaskinitnext;
3806 rnp->expmaskinitnext |= mask;
3807 oldmask ^= rnp->expmaskinitnext;
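		/* oldmask now holds only the bits newly set above. */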
3808 nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
		/* Allow lockless access for expedited grace periods. */
3810 smp_store_release(&rsp->ncpus, rsp->ncpus + nbits);
3811 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3812 }
	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
3814}
3815
3816#ifdef CONFIG_HOTPLUG_CPU
3817
/*
 * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
 * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
 * bit masks.
 */
3822static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
3823{
3824 unsigned long flags;
3825 unsigned long mask;
3826 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3827 struct rcu_node *rnp = rdp->mynode;
3828
	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
3830 mask = rdp->grpmask;
3831 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3832 rnp->qsmaskinitnext &= ~mask;
3833 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3834}
3835
/*
 * The outgoing CPU has no further need of RCU, so remove it from
 * the list of CPUs that RCU must track.
 *
 * Note that this function is special in that it is invoked directly
 * from the outgoing CPU rather than from the cpuhp_step mechanism.
 * This is because this function must be invoked at a precise location.
 */
3844void rcu_report_dead(unsigned int cpu)
3845{
3846 struct rcu_state *rsp;
3847
	/* QS for any half-done expedited RCU-sched GP. */
3849 preempt_disable();
3850 rcu_report_exp_rdp(&rcu_sched_state,
3851 this_cpu_ptr(rcu_sched_state.rda), true);
3852 preempt_enable();
3853 for_each_rcu_flavor(rsp)
3854 rcu_cleanup_dying_idle_cpu(cpu, rsp);
3855}
3856
/* Migrate the dead CPU's callbacks to the current CPU. */
3858static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
3859{
3860 unsigned long flags;
3861 struct rcu_data *my_rdp;
3862 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3863 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
3864
3865 if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
		return;  /* No callbacks to migrate. */
3867
3868 local_irq_save(flags);
3869 my_rdp = this_cpu_ptr(rsp->rda);
3870 if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
3871 local_irq_restore(flags);
3872 return;
3873 }
	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	rcu_advance_cbs(rsp, rnp_root, rdp); /* Leverage recent GPs. */
	rcu_advance_cbs(rsp, rnp_root, my_rdp); /* Assign GP to pending CBs. */
3877 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
3878 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
3879 !rcu_segcblist_n_cbs(&my_rdp->cblist));
3880 raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
3881 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
3882 !rcu_segcblist_empty(&rdp->cblist),
3883 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
3884 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
3885 rcu_segcblist_first_cb(&rdp->cblist));
3886}
3887
/*
 * The outgoing CPU has just passed through the dying-idle state, and we
 * are being invoked from the CPU that was IPIed to continue the offline
 * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
 */
3893void rcutree_migrate_callbacks(int cpu)
3894{
3895 struct rcu_state *rsp;
3896
3897 for_each_rcu_flavor(rsp)
3898 rcu_migrate_callbacks(cpu, rsp);
3899}
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
3901
/*
 * On non-huge systems, use expedited RCU grace periods to make suspend
 * and hibernation run faster.
 */
3906static int rcu_pm_notify(struct notifier_block *self,
3907 unsigned long action, void *hcpu)
3908{
3909 switch (action) {
3910 case PM_HIBERNATION_PREPARE:
3911 case PM_SUSPEND_PREPARE:
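		/* Expedite grace periods during suspend only on smaller systems. */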
3912 if (nr_cpu_ids <= 256)
3913 rcu_expedite_gp();
3914 break;
3915 case PM_POST_HIBERNATION:
3916 case PM_POST_SUSPEND:
3917 if (nr_cpu_ids <= 256)
3918 rcu_unexpedite_gp();
3919 break;
3920 default:
3921 break;
3922 }
3923 return NOTIFY_OK;
3924}
3925
/*
 * Spawn the kthreads that handle each RCU flavor's grace periods.
 */
3929static int __init rcu_spawn_gp_kthread(void)
3930{
3931 unsigned long flags;
3932 int kthread_prio_in = kthread_prio;
3933 struct rcu_node *rnp;
3934 struct rcu_state *rsp;
3935 struct sched_param sp;
3936 struct task_struct *t;
3937
	/* Force priority into range. */
3939 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3940 kthread_prio = 1;
3941 else if (kthread_prio < 0)
3942 kthread_prio = 0;
3943 else if (kthread_prio > 99)
3944 kthread_prio = 99;
3945 if (kthread_prio != kthread_prio_in)
3946 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3947 kthread_prio, kthread_prio_in);
3948
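	/* Allow invoke_rcu_callbacks() to start invoking callbacks. */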
3949 rcu_scheduler_fully_active = 1;
3950 for_each_rcu_flavor(rsp) {
3951 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
3952 BUG_ON(IS_ERR(t));
3953 rnp = rcu_get_root(rsp);
3954 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3955 rsp->gp_kthread = t;
3956 if (kthread_prio) {
3957 sp.sched_priority = kthread_prio;
3958 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3959 }
3960 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3961 wake_up_process(t);
3962 }
3963 rcu_spawn_nocb_kthreads();
3964 rcu_spawn_boost_kthreads();
3965 return 0;
3966}
3967early_initcall(rcu_spawn_gp_kthread);
3968
/*
 * This function is invoked towards the end of the scheduler's
 * initialization process.  Before this is called, the idle task might
 * contain synchronous grace-period primitives (during which time, this idle
 * task is booting the system, and such primitives are no-ops).  After this
 * function is called, any synchronous grace-period primitives are run as
 * expedited, with the requesting task driving the grace period forward.
 * A later core_initcall() rcu_set_runtime_mode() will switch to full
 * runtime RCU functionality.
 */
3979void rcu_scheduler_starting(void)
3980{
3981 WARN_ON(num_online_cpus() != 1);
3982 WARN_ON(nr_context_switches() > 0);
3983 rcu_test_sync_prims();
3984 rcu_scheduler_active = RCU_SCHEDULER_INIT;
3985 rcu_test_sync_prims();
3986}
3987
/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
3991static void __init rcu_init_one(struct rcu_state *rsp)
3992{
3993 static const char * const buf[] = RCU_NODE_NAME_INIT;
3994 static const char * const fqs[] = RCU_FQS_NAME_INIT;
3995 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
3996 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
3997
3998 int levelspread[RCU_NUM_LVLS];
3999 int cpustride = 1;
4000 int i;
4001 int j;
4002 struct rcu_node *rnp;
4003
	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4005
	/* Silence gcc 4.8 false positive about array index out of range. */
4007 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4008 panic("rcu_init_one: rcu_num_lvls out of range");
4009
4010
	/* Initialize the level-tracking arrays. */
4012 for (i = 1; i < rcu_num_lvls; i++)
4013 rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
4014 rcu_init_levelspread(levelspread, num_rcu_lvl);
4015
4016
	/* Initialize the elements themselves, starting from the leaves. */
4018 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4019 cpustride *= levelspread[i];
4020 rnp = rsp->level[i];
4021 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4022 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4023 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4024 &rcu_node_class[i], buf[i]);
4025 raw_spin_lock_init(&rnp->fqslock);
4026 lockdep_set_class_and_name(&rnp->fqslock,
4027 &rcu_fqs_class[i], fqs[i]);
4028 rnp->gpnum = rsp->gpnum;
4029 rnp->completed = rsp->completed;
4030 rnp->qsmask = 0;
4031 rnp->qsmaskinit = 0;
4032 rnp->grplo = j * cpustride;
4033 rnp->grphi = (j + 1) * cpustride - 1;
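			/* Clamp ranges that run past the largest valid CPU number. */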
4034 if (rnp->grphi >= nr_cpu_ids)
4035 rnp->grphi = nr_cpu_ids - 1;
4036 if (i == 0) {
4037 rnp->grpnum = 0;
4038 rnp->grpmask = 0;
4039 rnp->parent = NULL;
4040 } else {
4041 rnp->grpnum = j % levelspread[i - 1];
4042 rnp->grpmask = 1UL << rnp->grpnum;
4043 rnp->parent = rsp->level[i - 1] +
4044 j / levelspread[i - 1];
4045 }
4046 rnp->level = i;
4047 INIT_LIST_HEAD(&rnp->blkd_tasks);
4048 rcu_init_one_nocb(rnp);
4049 init_waitqueue_head(&rnp->exp_wq[0]);
4050 init_waitqueue_head(&rnp->exp_wq[1]);
4051 init_waitqueue_head(&rnp->exp_wq[2]);
4052 init_waitqueue_head(&rnp->exp_wq[3]);
4053 spin_lock_init(&rnp->exp_lock);
4054 }
4055 }
4056
4057 init_swait_queue_head(&rsp->gp_wq);
4058 init_swait_queue_head(&rsp->expedited_wq);
4059 rnp = rsp->level[rcu_num_lvls - 1];
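	/* Point each CPU's rcu_data at its leaf rcu_node and boot-init it. */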
4060 for_each_possible_cpu(i) {
4061 while (i > rnp->grphi)
4062 rnp++;
4063 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
4064 rcu_boot_init_percpu_data(i, rsp);
4065 }
4066 list_add(&rsp->flavors, &rcu_struct_flavors);
4067}
4068
/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
4074static void __init rcu_init_geometry(void)
4075{
4076 ulong d;
4077 int i;
4078 int rcu_capacity[RCU_NUM_LVLS];
4079
	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, then adding one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
4087 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4088 if (jiffies_till_first_fqs == ULONG_MAX)
4089 jiffies_till_first_fqs = d;
4090 if (jiffies_till_next_fqs == ULONG_MAX)
4091 jiffies_till_next_fqs = d;
4092
	/* If the compile-time values are accurate, just leave. */
4094 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4095 nr_cpu_ids == NR_CPUS)
4096 return;
4097 pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4098 rcu_fanout_leaf, nr_cpu_ids);
4099
	/*
	 * The boot-time rcu_fanout_leaf parameter must be at least two
	 * and cannot exceed the number of bits in the rcu_node masks.
	 * Complain and fall back to the compile-time values if this
	 * limit is exceeded.
	 */
4106 if (rcu_fanout_leaf < 2 ||
4107 rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4108 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4109 WARN_ON(1);
4110 return;
4111 }
4112
	/*
	 * Compute the number of CPUs that can be handled by an rcu_node
	 * tree with the given number of levels.
	 */
4117 rcu_capacity[0] = rcu_fanout_leaf;
4118 for (i = 1; i < RCU_NUM_LVLS; i++)
4119 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4120
	/*
	 * The tree must be able to accommodate the configured number of CPUs.
	 * If this limit is exceeded, fall back to the compile-time values.
	 */
4125 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4126 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4127 WARN_ON(1);
4128 return;
4129 }
4130
	/* Calculate the number of levels in the tree. */
4132 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4133 }
4134 rcu_num_lvls = i + 1;
4135
	/* Calculate the number of rcu_node structures at each level of the tree. */
4137 for (i = 0; i < rcu_num_lvls; i++) {
4138 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4139 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4140 }
4141
	/* Calculate the total number of rcu_node structures. */
4143 rcu_num_nodes = 0;
4144 for (i = 0; i < rcu_num_lvls; i++)
4145 rcu_num_nodes += num_rcu_lvl[i];
4146}
4147
/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure referenced by rsp.
 */
4152static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
4153{
4154 int level = 0;
4155 struct rcu_node *rnp;
4156
4157 pr_info("rcu_node tree layout dump\n");
4158 pr_info(" ");
4159 rcu_for_each_node_breadth_first(rsp, rnp) {
4160 if (rnp->level != level) {
4161 pr_cont("\n");
4162 pr_info(" ");
4163 level = rnp->level;
4164 }
4165 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4166 }
4167 pr_cont("\n");
4168}
4169
4170struct workqueue_struct *rcu_gp_wq;
4171
4172void __init rcu_init(void)
4173{
4174 int cpu;
4175
4176 rcu_early_boot_tests();
4177
4178 rcu_bootup_announce();
4179 rcu_init_geometry();
4180 rcu_init_one(&rcu_bh_state);
4181 rcu_init_one(&rcu_sched_state);
4182 if (dump_tree)
4183 rcu_dump_rcu_node_tree(&rcu_sched_state);
4184 __rcu_init_preempt();
4185 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
4186
	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
4192 pm_notifier(rcu_pm_notify, 0);
4193 for_each_online_cpu(cpu) {
4194 rcutree_prepare_cpu(cpu);
4195 rcu_cpu_starting(cpu);
4196 rcutree_online_cpu(cpu);
4197 }
4198
	/* Create workqueue for expedited GPs and for Tree SRCU. */
4200 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4201 WARN_ON(!rcu_gp_wq);
4202}
4203
4204#include "tree_exp.h"
4205#include "tree_plugin.h"
4206