/*
 * Read-Copy Update mechanism for mutual exclusion, tree-based
 * (hierarchical) implementation.
 *
 * This file implements the core grace-period machinery: quiescent-state
 * detection and reporting, the grace-period kthread, callback handling,
 * and CPU-hotplug bookkeeping for the rcu_sched and rcu_bh flavors.
 */
30#include <linux/types.h>
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/smp.h>
35#include <linux/rcupdate.h>
36#include <linux/interrupt.h>
37#include <linux/sched.h>
38#include <linux/nmi.h>
39#include <linux/atomic.h>
40#include <linux/bitops.h>
41#include <linux/export.h>
42#include <linux/completion.h>
43#include <linux/moduleparam.h>
44#include <linux/module.h>
45#include <linux/percpu.h>
46#include <linux/notifier.h>
47#include <linux/cpu.h>
48#include <linux/mutex.h>
49#include <linux/time.h>
50#include <linux/kernel_stat.h>
51#include <linux/wait.h>
52#include <linux/kthread.h>
53#include <linux/prefetch.h>
54#include <linux/delay.h>
55#include <linux/stop_machine.h>
56#include <linux/random.h>
57#include <linux/trace_events.h>
58#include <linux/suspend.h>
59
60#include "tree.h"
61#include "rcu.h"
62
63MODULE_ALIAS("rcutree");
64#ifdef MODULE_PARAM_PREFIX
65#undef MODULE_PARAM_PREFIX
66#endif
67#define MODULE_PARAM_PREFIX "rcutree."
68
69
70
71static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
72static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
73
/*
 * Strings used in tracepoints need to be exported via the tracing
 * system's __tracepoint_string section.  The macros below arrange for
 * each rcu_state's name to be usable as a tracepoint string when
 * CONFIG_TRACING is enabled, and fall back to __stringify() otherwise.
 */
82#ifdef CONFIG_TRACING
83# define DEFINE_RCU_TPS(sname) \
84static char sname##_varname[] = #sname; \
85static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
86# define RCU_STATE_NAME(sname) sname##_varname
87#else
88# define DEFINE_RCU_TPS(sname)
89# define RCU_STATE_NAME(sname) __stringify(sname)
90#endif
91
92#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
93DEFINE_RCU_TPS(sname) \
94static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
95struct rcu_state sname##_state = { \
96 .level = { &sname##_state.node[0] }, \
97 .rda = &sname##_data, \
98 .call = cr, \
99 .fqs_state = RCU_GP_IDLE, \
100 .gpnum = 0UL - 300UL, \
101 .completed = 0UL - 300UL, \
102 .orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
103 .orphan_nxttail = &sname##_state.orphan_nxtlist, \
104 .orphan_donetail = &sname##_state.orphan_donelist, \
105 .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
106 .name = RCU_STATE_NAME(sname), \
107 .abbr = sabbr, \
108}
109
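/* Instantiate the rcu_sched and rcu_bh flavors of RCU. */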
110RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
111RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
112
113static struct rcu_state *const rcu_state_p;
114static struct rcu_data __percpu *const rcu_data_p;
115LIST_HEAD(rcu_struct_flavors);
116
117
118static bool dump_tree;
119module_param(dump_tree, bool, 0444);
120
121static bool rcu_fanout_exact;
122module_param(rcu_fanout_exact, bool, 0444);
123
124static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
125module_param(rcu_fanout_leaf, int, 0444);
126int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
127static int num_rcu_lvl[] = {
128 NUM_RCU_LVL_0,
129 NUM_RCU_LVL_1,
130 NUM_RCU_LVL_2,
131 NUM_RCU_LVL_3,
132 NUM_RCU_LVL_4,
133};
134int rcu_num_nodes __read_mostly = NUM_RCU_NODES;
135
/*
 * rcu_scheduler_active transitions from zero to one early in boot, just
 * before the first task can be spawned.  While it is zero, RCU may
 * assume that there is but one task, which allows (for example)
 * synchronize_sched() to degenerate to a simple barrier().
 */
145int rcu_scheduler_active __read_mostly;
146EXPORT_SYMBOL_GPL(rcu_scheduler_active);
147
/*
 * rcu_scheduler_fully_active transitions from zero to one once the
 * scheduler is fully operational, at which point it is safe for RCU to
 * spawn and rely on its per-CPU and per-rcu_node kthreads.
 */
160static int rcu_scheduler_fully_active __read_mostly;
161
162static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
163static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
164static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
165static void invoke_rcu_core(void);
166static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
167
168
169#ifdef CONFIG_RCU_KTHREAD_PRIO
170static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
171#else
172static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
173#endif
174module_param(kthread_prio, int, 0644);
175
176
177
178#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT
179static int gp_preinit_delay = CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY;
180module_param(gp_preinit_delay, int, 0644);
181#else
182static const int gp_preinit_delay;
183#endif
184
185#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
186static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
187module_param(gp_init_delay, int, 0644);
188#else
189static const int gp_init_delay;
190#endif
191
192#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP
193static int gp_cleanup_delay = CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY;
194module_param(gp_cleanup_delay, int, 0644);
195#else
196static const int gp_cleanup_delay;
197#endif
198
/*
 * Number of grace periods between the rcu_gp_slow() delays configured
 * above, normalized by the duration of the delay: a delay is applied
 * only once every rcu_num_nodes * PER_RCU_NODE_PERIOD * delay grace
 * periods, so the overall slowdown is roughly constant.
 */
208#define PER_RCU_NODE_PERIOD 3
209
210
211
212
213
214
215
216
217
218
219unsigned long rcutorture_testseq;
220unsigned long rcutorture_vernum;
221
/*
 * Return the mask of CPUs that the specified rcu_node structure
 * currently considers online (->qsmaskinitnext), for use by RCU's
 * CPU-hotplug handling.
 */
228unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
229{
230 return READ_ONCE(rnp->qsmaskinitnext);
231}
232
/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this to be invoked without holding the root rcu_node ->lock.
 */
238static int rcu_gp_in_progress(struct rcu_state *rsp)
239{
240 return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
241}
242
/*
 * Record an rcu_sched quiescent state for the current CPU.  Invoked
 * with preemption disabled, for example from the scheduler and from
 * the scheduling-clock interrupt.
 */
249void rcu_sched_qs(void)
250{
251 if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
252 trace_rcu_grace_period(TPS("rcu_sched"),
253 __this_cpu_read(rcu_sched_data.gpnum),
254 TPS("cpuqs"));
255 __this_cpu_write(rcu_sched_data.passed_quiesce, 1);
256 }
257}
258
259void rcu_bh_qs(void)
260{
261 if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
262 trace_rcu_grace_period(TPS("rcu_bh"),
263 __this_cpu_read(rcu_bh_data.gpnum),
264 TPS("cpuqs"));
265 __this_cpu_write(rcu_bh_data.passed_quiesce, 1);
266 }
267}
268
269static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
270
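/*
 * Per-CPU dyntick-idle state.  By convention, ->dynticks is odd when
 * RCU is watching the CPU (not in an extended quiescent state) and even
 * when the CPU is idle, which is why it is initialized to 1 and why the
 * code below tests its low-order bit.
 */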
271static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
272 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
273 .dynticks = ATOMIC_INIT(1),
274#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
275 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
276 .dynticks_idle = ATOMIC_INIT(1),
277#endif
278};
279
280DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
281EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
282
/*
 * Let the RCU core know that this CPU has gone through a momentary
 * dyntick-idle period.  Advancing ->dynticks by two makes the
 * force-quiescent-state scan treat this CPU as having passed through a
 * full extended quiescent state.  Used when a grace period has been
 * extending for too long and help has been requested via
 * rcu_sched_qs_mask.
 */
294static void rcu_momentary_dyntick_idle(void)
295{
296 unsigned long flags;
297 struct rcu_data *rdp;
298 struct rcu_dynticks *rdtp;
299 int resched_mask;
300 struct rcu_state *rsp;
301
302 local_irq_save(flags);
303
	/*
	 * Snapshot and clear the per-CPU request mask.  A racing update
	 * can be lost here, but that merely delays the quiescent-state
	 * report: the flag will simply be set again later.
	 */
308 resched_mask = raw_cpu_read(rcu_sched_qs_mask);
309 raw_cpu_write(rcu_sched_qs_mask, 0);
310
311
312 for_each_rcu_flavor(rsp) {
313 rdp = raw_cpu_ptr(rsp->rda);
314 if (!(resched_mask & rsp->flavor_mask))
315 continue;
316 smp_mb();
317 if (READ_ONCE(rdp->mynode->completed) !=
318 READ_ONCE(rdp->cond_resched_completed))
319 continue;
320
		/*
		 * Pretend to be momentarily idle for the benefit of the
		 * grace-period kthread: advancing ->dynticks by two makes
		 * the counter comparison in rcu_implicit_dynticks_qs()
		 * see this CPU as having passed through a quiescent state.
		 */
327 rdtp = this_cpu_ptr(&rcu_dynticks);
328 smp_mb__before_atomic();
329 atomic_add(2, &rdtp->dynticks);
330 smp_mb__after_atomic();
331 break;
332 }
333 local_irq_restore(flags);
334}
335
/*
 * Note a context switch.  This is a quiescent state for RCU-sched and
 * requires special handling for preemptible RCU; if a grace period has
 * been waiting too long, also do a momentary dyntick-idle quiescent state.
 */
341void rcu_note_context_switch(void)
342{
343 trace_rcu_utilization(TPS("Start context switch"));
344 rcu_sched_qs();
345 rcu_preempt_note_context_switch();
346 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
347 rcu_momentary_dyntick_idle();
348 trace_rcu_utilization(TPS("End context switch"));
349}
350EXPORT_SYMBOL_GPL(rcu_note_context_switch);
351
/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency (some flavor's grace period has extended too long), also
 * do a momentary dyntick-idle quiescent state.
 */
360void rcu_all_qs(void)
361{
362 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
363 rcu_momentary_dyntick_idle();
364 this_cpu_inc(rcu_qs_ctr);
365}
366EXPORT_SYMBOL_GPL(rcu_all_qs);
367
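/*
 * Callback-processing knobs: blimit bounds the number of callbacks
 * invoked per batch in rcu_do_batch(), qhimark is the queue length at
 * which __call_rcu_core() removes batch limiting and starts forcing
 * quiescent states, and qlowmark is the queue length at which batch
 * limiting is switched back on.
 */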
368static long blimit = 10;
369static long qhimark = 10000;
370static long qlowmark = 100;
371
372module_param(blimit, long, 0444);
373module_param(qhimark, long, 0444);
374module_param(qlowmark, long, 0444);
375
376static ulong jiffies_till_first_fqs = ULONG_MAX;
377static ulong jiffies_till_next_fqs = ULONG_MAX;
378
379module_param(jiffies_till_first_fqs, ulong, 0644);
380module_param(jiffies_till_next_fqs, ulong, 0644);
381
/*
 * How long a grace period must extend before RCU starts soliciting
 * quiescent-state help via rcu_sched_qs_mask (see
 * rcu_implicit_dynticks_qs()).
 */
386static ulong jiffies_till_sched_qs = HZ / 20;
387module_param(jiffies_till_sched_qs, ulong, 0644);
388
389static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
390 struct rcu_data *rdp);
391static void force_qs_rnp(struct rcu_state *rsp,
392 int (*f)(struct rcu_data *rsp, bool *isidle,
393 unsigned long *maxj),
394 bool *isidle, unsigned long *maxj);
395static void force_quiescent_state(struct rcu_state *rsp);
396static int rcu_pending(void);
397
398
399
400
401unsigned long rcu_batches_started(void)
402{
403 return rcu_state_p->gpnum;
404}
405EXPORT_SYMBOL_GPL(rcu_batches_started);
406
407
408
409
410unsigned long rcu_batches_started_sched(void)
411{
412 return rcu_sched_state.gpnum;
413}
414EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
415
416
417
418
419unsigned long rcu_batches_started_bh(void)
420{
421 return rcu_bh_state.gpnum;
422}
423EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
424
425
426
427
428unsigned long rcu_batches_completed(void)
429{
430 return rcu_state_p->completed;
431}
432EXPORT_SYMBOL_GPL(rcu_batches_completed);
433
434
435
436
437unsigned long rcu_batches_completed_sched(void)
438{
439 return rcu_sched_state.completed;
440}
441EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
442
443
444
445
446unsigned long rcu_batches_completed_bh(void)
447{
448 return rcu_bh_state.completed;
449}
450EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
451
452
453
454
455void rcu_force_quiescent_state(void)
456{
457 force_quiescent_state(rcu_state_p);
458}
459EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
460
461
462
463
464void rcu_bh_force_quiescent_state(void)
465{
466 force_quiescent_state(&rcu_bh_state);
467}
468EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
469
470
471
472
473void rcu_sched_force_quiescent_state(void)
474{
475 force_quiescent_state(&rcu_sched_state);
476}
477EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
478
479
480
481
482void show_rcu_gp_kthreads(void)
483{
484 struct rcu_state *rsp;
485
486 for_each_rcu_flavor(rsp) {
487 pr_info("%s: wait state: %d ->state: %#lx\n",
488 rsp->name, rsp->gp_state, rsp->gp_kthread->state);
489
490 }
491}
492EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
493
494
495
496
497
498
499
500
501void rcutorture_record_test_transition(void)
502{
503 rcutorture_testseq++;
504 rcutorture_vernum = 0;
505}
506EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
507
508
509
510
511void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
512 unsigned long *gpnum, unsigned long *completed)
513{
514 struct rcu_state *rsp = NULL;
515
516 switch (test_type) {
517 case RCU_FLAVOR:
518 rsp = rcu_state_p;
519 break;
520 case RCU_BH_FLAVOR:
521 rsp = &rcu_bh_state;
522 break;
523 case RCU_SCHED_FLAVOR:
524 rsp = &rcu_sched_state;
525 break;
526 default:
527 break;
528 }
529 if (rsp != NULL) {
530 *flags = READ_ONCE(rsp->gp_flags);
531 *gpnum = READ_ONCE(rsp->gpnum);
532 *completed = READ_ONCE(rsp->completed);
533 return;
534 }
535 *flags = 0;
536 *gpnum = 0;
537 *completed = 0;
538}
539EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
540
541
542
543
544
545
546void rcutorture_record_progress(unsigned long vernum)
547{
548 rcutorture_vernum++;
549}
550EXPORT_SYMBOL_GPL(rcutorture_record_progress);
551
/*
 * Does this CPU have callbacks in the RCU_DONE_TAIL segment that are
 * ready to be invoked?
 */
555static int
556cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
557{
558 return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
559 rdp->nxttail[RCU_DONE_TAIL] != NULL;
560}
561
/*
 * Return the root node of the specified rcu_state structure.
 */
565static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
566{
567 return &rsp->node[0];
568}
569
/*
 * Has a new grace period already been requested for the period after
 * the current one?  The answer can change at any time, so callers must
 * be prepared to tolerate either outcome.
 */
575static int rcu_future_needs_gp(struct rcu_state *rsp)
576{
577 struct rcu_node *rnp = rcu_get_root(rsp);
578 int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
579 int *fp = &rnp->need_future_gp[idx];
580
581 return READ_ONCE(*fp);
582}
583
/*
 * Does the current CPU require a not-yet-started grace period, based on
 * this CPU's queued callbacks and the global grace-period state?
 */
589static int
590cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
591{
592 int i;
593
594 if (rcu_gp_in_progress(rsp))
595 return 0;
596 if (rcu_future_needs_gp(rsp))
597 return 1;
598 if (!rdp->nxttail[RCU_NEXT_TAIL])
599 return 0;
600 if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
601 return 1;
602 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
603 if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
604 ULONG_CMP_LT(READ_ONCE(rsp->completed),
605 rdp->nxtcompleted[i]))
606 return 1;
607 return 0;
608}
609
/*
 * rcu_eqs_enter_common - current CPU is moving towards an extended
 * quiescent state (idle or, for nohz_full, userspace execution).
 * Update the dyntick-idle counters and complain if this is being done
 * from anything other than the idle task, unless @user indicates a
 * transition to userspace.
 */
617static void rcu_eqs_enter_common(long long oldval, bool user)
618{
619 struct rcu_state *rsp;
620 struct rcu_data *rdp;
621 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
622
623 trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
624 if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
625 !user && !is_idle_task(current)) {
626 struct task_struct *idle __maybe_unused =
627 idle_task(smp_processor_id());
628
629 trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
630 ftrace_dump(DUMP_ORIG);
631 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
632 current->pid, current->comm,
633 idle->pid, idle->comm);
634 }
635 for_each_rcu_flavor(rsp) {
636 rdp = this_cpu_ptr(rsp->rda);
637 do_nocb_deferred_wakeup(rdp);
638 }
639 rcu_prepare_for_idle();
640
641 smp_mb__before_atomic();
642 atomic_inc(&rdtp->dynticks);
643 smp_mb__after_atomic();
644 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
645 atomic_read(&rdtp->dynticks) & 0x1);
646 rcu_dynticks_task_enter();
647
	/*
	 * It is illegal to enter an extended quiescent state while
	 * in an RCU read-side critical section.
	 */
652 rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
653 "Illegal idle entry in RCU read-side critical section.");
654 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
655 "Illegal idle entry in RCU-bh read-side critical section.");
656 rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
657 "Illegal idle entry in RCU-sched read-side critical section.");
658}
659
/*
 * Enter an RCU extended quiescent state, updating ->dynticks_nesting to
 * account for possible nesting of idle/userspace entry.
 */
664static void rcu_eqs_enter(bool user)
665{
666 long long oldval;
667 struct rcu_dynticks *rdtp;
668
669 rdtp = this_cpu_ptr(&rcu_dynticks);
670 oldval = rdtp->dynticks_nesting;
671 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
672 (oldval & DYNTICK_TASK_NEST_MASK) == 0);
673 if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
674 rdtp->dynticks_nesting = 0;
675 rcu_eqs_enter_common(oldval, user);
676 } else {
677 rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
678 }
679}
680
/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 */
693void rcu_idle_enter(void)
694{
695 unsigned long flags;
696
697 local_irq_save(flags);
698 rcu_eqs_enter(false);
699 rcu_sysidle_enter(0);
700 local_irq_restore(flags);
701}
702EXPORT_SYMBOL_GPL(rcu_idle_enter);
703
704#ifdef CONFIG_RCU_USER_QS
705
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace, so that no use
 * of RCU is permitted between this call and rcu_user_exit().
 */
713void rcu_user_enter(void)
714{
715 rcu_eqs_enter(1);
716}
717#endif
718
/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in
 * entering idle mode, in other words, leaving the mode in which
 * read-side critical sections can occur.  This sister function to
 * rcu_irq_enter() decrements ->dynticks_nesting and enters an extended
 * quiescent state if the nesting count drops to zero.
 */
735void rcu_irq_exit(void)
736{
737 unsigned long flags;
738 long long oldval;
739 struct rcu_dynticks *rdtp;
740
741 local_irq_save(flags);
742 rdtp = this_cpu_ptr(&rcu_dynticks);
743 oldval = rdtp->dynticks_nesting;
744 rdtp->dynticks_nesting--;
745 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
746 rdtp->dynticks_nesting < 0);
747 if (rdtp->dynticks_nesting)
748 trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
749 else
750 rcu_eqs_enter_common(oldval, true);
751 rcu_sysidle_enter(1);
752 local_irq_restore(flags);
753}
754
/*
 * rcu_eqs_exit_common - current CPU is leaving an extended quiescent
 * state (idle or nohz_full userspace).  Update the dyntick-idle
 * counters and complain if this is done from a non-idle task, unless
 * @user indicates a transition from userspace.
 */
762static void rcu_eqs_exit_common(long long oldval, int user)
763{
764 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
765
766 rcu_dynticks_task_exit();
767 smp_mb__before_atomic();
768 atomic_inc(&rdtp->dynticks);
769
770 smp_mb__after_atomic();
771 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
772 !(atomic_read(&rdtp->dynticks) & 0x1));
773 rcu_cleanup_after_idle();
774 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
775 if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
776 !user && !is_idle_task(current)) {
777 struct task_struct *idle __maybe_unused =
778 idle_task(smp_processor_id());
779
780 trace_rcu_dyntick(TPS("Error on exit: not idle task"),
781 oldval, rdtp->dynticks_nesting);
782 ftrace_dump(DUMP_ORIG);
783 WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
784 current->pid, current->comm,
785 idle->pid, idle->comm);
786 }
787}
788
/*
 * Exit an RCU extended quiescent state, updating ->dynticks_nesting to
 * account for possible nesting of idle/userspace exit.
 */
793static void rcu_eqs_exit(bool user)
794{
795 struct rcu_dynticks *rdtp;
796 long long oldval;
797
798 rdtp = this_cpu_ptr(&rcu_dynticks);
799 oldval = rdtp->dynticks_nesting;
800 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
801 if (oldval & DYNTICK_TASK_NEST_MASK) {
802 rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
803 } else {
804 rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
805 rcu_eqs_exit_common(oldval, user);
806 }
807}
808
/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 */
820void rcu_idle_exit(void)
821{
822 unsigned long flags;
823
824 local_irq_save(flags);
825 rcu_eqs_exit(false);
826 rcu_sysidle_exit(0);
827 local_irq_restore(flags);
828}
829EXPORT_SYMBOL_GPL(rcu_idle_exit);
830
831#ifdef CONFIG_RCU_USER_QS
832
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel, after which RCU
 * read-side critical sections may again be used.
 */
838void rcu_user_exit(void)
839{
840 rcu_eqs_exit(1);
841}
842#endif
843
/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side
 * critical sections can occur.  This sister function to rcu_irq_exit()
 * increments ->dynticks_nesting and leaves the extended quiescent state
 * if the prior nesting count was zero.
 */
863void rcu_irq_enter(void)
864{
865 unsigned long flags;
866 struct rcu_dynticks *rdtp;
867 long long oldval;
868
869 local_irq_save(flags);
870 rdtp = this_cpu_ptr(&rcu_dynticks);
871 oldval = rdtp->dynticks_nesting;
872 rdtp->dynticks_nesting++;
873 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
874 rdtp->dynticks_nesting == 0);
875 if (oldval)
876 trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
877 else
878 rcu_eqs_exit_common(oldval, true);
879 rcu_sysidle_exit(1);
880 local_irq_restore(flags);
881}
882
/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update ->dynticks and
 * ->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.
 */
892void rcu_nmi_enter(void)
893{
894 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
895 int incby = 2;
896
897
898 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
899
	/*
	 * If RCU is not watching this CPU (->dynticks is even), mark it
	 * as watching and count this NMI with an increment of one;
	 * otherwise count it with an increment of two.  rcu_nmi_exit()
	 * reverses this bookkeeping.
	 */
908 if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
909 smp_mb__before_atomic();
910 atomic_inc(&rdtp->dynticks);
911
912 smp_mb__after_atomic();
913 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
914 incby = 1;
915 }
916 rdtp->dynticks_nmi_nesting += incby;
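	/* Compiler barrier: complete the NMI-entry bookkeeping before the handler body. */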
917 barrier();
918}
919
/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted
 * an RCU-idle period, update ->dynticks and ->dynticks_nmi_nesting to
 * let the RCU grace-period handling know that the CPU is back to being
 * RCU-idle.
 */
928void rcu_nmi_exit(void)
929{
930 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
931
	/*
	 * Check for errors: we should currently be watching (->dynticks
	 * odd) and the NMI nesting count should be positive.
	 */
937 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
938 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
939
940
941
942
943
944 if (rdtp->dynticks_nmi_nesting != 1) {
945 rdtp->dynticks_nmi_nesting -= 2;
946 return;
947 }
948
949
950 rdtp->dynticks_nmi_nesting = 0;
951
952 smp_mb__before_atomic();
953 atomic_inc(&rdtp->dynticks);
954 smp_mb__after_atomic();
955 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
956}
957
/*
 * __rcu_is_watching - are RCU read-side critical sections safe?
 *
 * Return true if RCU is watching the running CPU, which means that
 * this CPU can safely enter RCU read-side critical sections.  The
 * caller must have at least disabled preemption.
 */
966bool notrace __rcu_is_watching(void)
967{
968 return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
969}
970
/*
 * rcu_is_watching - return true if it is safe to use RCU read-side
 * critical sections on the current CPU, that is, if the CPU is not in
 * an extended quiescent state (dyntick-idle or nohz_full userspace).
 */
977bool notrace rcu_is_watching(void)
978{
979 bool ret;
980
981 preempt_disable();
982 ret = __rcu_is_watching();
983 preempt_enable();
984 return ret;
985}
986EXPORT_SYMBOL_GPL(rcu_is_watching);
987
988#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
989
/*
 * Is the current CPU online from RCU's perspective?  Disable preemption
 * to avoid false positives that could otherwise arise while the CPU is
 * in the process of coming online or going offline.  An NMI is always
 * considered to be running on an online CPU, and during early boot
 * (before rcu_scheduler_fully_active) every CPU is considered online.
 * Used by lockdep-RCU to complain about RCU usage from offline CPUs.
 */
1011bool rcu_lockdep_current_cpu_online(void)
1012{
1013 struct rcu_data *rdp;
1014 struct rcu_node *rnp;
1015 bool ret;
1016
1017 if (in_nmi())
1018 return true;
1019 preempt_disable();
1020 rdp = this_cpu_ptr(&rcu_sched_data);
1021 rnp = rdp->mynode;
1022 ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
1023 !rcu_scheduler_fully_active;
1024 preempt_enable();
1025 return ret;
1026}
1027EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1028
1029#endif
1030
/*
 * Return true if the current CPU was interrupted from the idle loop,
 * that is, if this is a first-level (non-nested) interrupt taken while
 * ->dynticks_nesting was at its idle value.
 */
1038static int rcu_is_cpu_rrupt_from_idle(void)
1039{
1040 return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
1041}
1042
/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit it with an implicit quiescent state.  Return 1 if the CPU is
 * already in a dynticks-idle state, in which case no later re-check is
 * needed.
 */
1048static int dyntick_save_progress_counter(struct rcu_data *rdp,
1049 bool *isidle, unsigned long *maxj)
1050{
1051 rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
1052 rcu_sysidle_check_cpu(rdp, isidle, maxj);
1053 if ((rdp->dynticks_snap & 0x1) == 0) {
1054 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1055 return 1;
1056 } else {
1057 if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
1058 rdp->mynode->gpnum))
1059 WRITE_ONCE(rdp->gpwrap, true);
1060 return 0;
1061 }
1062}
1063
/*
 * Return true if the specified CPU has passed through a quiescent state
 * by virtue of being in or having passed through an extended quiescent
 * state since the last call to dyntick_save_progress_counter() for this
 * same CPU, or by being offline the entire time.
 */
1070static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
1071 bool *isidle, unsigned long *maxj)
1072{
1073 unsigned int curr;
1074 int *rcrmp;
1075 unsigned int snap;
1076
1077 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
1078 snap = (unsigned int)rdp->dynticks_snap;
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088 if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
1089 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1090 rdp->dynticks_fqs++;
1091 return 1;
1092 }
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105 if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
1106 return 0;
1107 barrier();
1108 if (cpu_is_offline(rdp->cpu)) {
1109 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
1110 rdp->offline_fqs++;
1111 return 1;
1112 }
1113
	/*
	 * The grace period has extended too long, so start asking for
	 * help: set this CPU's bit in rcu_sched_qs_mask so that
	 * cond_resched()-like paths report a quiescent state via
	 * rcu_momentary_dyntick_idle(), and send resched IPIs to push
	 * things along if that does not suffice.  The rate of IPIs is
	 * limited by advancing ->jiffies_resched each time one is sent.
	 */
1135 rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
1136 if (ULONG_CMP_GE(jiffies,
1137 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
1138 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
1139 if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
1140 WRITE_ONCE(rdp->cond_resched_completed,
1141 READ_ONCE(rdp->mynode->completed));
1142 smp_mb();
1143 WRITE_ONCE(*rcrmp,
1144 READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
1145 resched_cpu(rdp->cpu);
1146 rdp->rsp->jiffies_resched += 5;
1147 } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
1148
1149 resched_cpu(rdp->cpu);
1150 rdp->rsp->jiffies_resched += 5;
1151 }
1152 }
1153
1154 return 0;
1155}
1156
1157static void record_gp_stall_check_time(struct rcu_state *rsp)
1158{
1159 unsigned long j = jiffies;
1160 unsigned long j1;
1161
1162 rsp->gp_start = j;
1163 smp_wmb();
1164 j1 = rcu_jiffies_till_stall_check();
1165 WRITE_ONCE(rsp->jiffies_stall, j + j1);
1166 rsp->jiffies_resched = j + j1 / 2;
1167 rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
1168}
1169
/*
 * Complain about starvation of the grace-period kthread.
 */
1173static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
1174{
1175 unsigned long gpa;
1176 unsigned long j;
1177
1178 j = jiffies;
1179 gpa = READ_ONCE(rsp->gp_activity);
1180 if (j - gpa > 2 * HZ)
1181 pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x\n",
1182 rsp->name, j - gpa,
1183 rsp->gpnum, rsp->completed, rsp->gp_flags);
1184}
1185
/*
 * Dump stacks of all tasks running on stalled CPUs.
 */
1189static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
1190{
1191 int cpu;
1192 unsigned long flags;
1193 struct rcu_node *rnp;
1194
1195 rcu_for_each_leaf_node(rsp, rnp) {
1196 raw_spin_lock_irqsave(&rnp->lock, flags);
1197 if (rnp->qsmask != 0) {
1198 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
1199 if (rnp->qsmask & (1UL << cpu))
1200 dump_cpu_task(rnp->grplo + cpu);
1201 }
1202 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1203 }
1204}
1205
1206static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
1207{
1208 int cpu;
1209 long delta;
1210 unsigned long flags;
1211 unsigned long gpa;
1212 unsigned long j;
1213 int ndetected = 0;
1214 struct rcu_node *rnp = rcu_get_root(rsp);
1215 long totqlen = 0;
1216
1217
1218
1219 raw_spin_lock_irqsave(&rnp->lock, flags);
1220 delta = jiffies - READ_ONCE(rsp->jiffies_stall);
1221 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
1222 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1223 return;
1224 }
1225 WRITE_ONCE(rsp->jiffies_stall,
1226 jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
1227 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1228
1229
1230
1231
1232
1233
1234 pr_err("INFO: %s detected stalls on CPUs/tasks:",
1235 rsp->name);
1236 print_cpu_stall_info_begin();
1237 rcu_for_each_leaf_node(rsp, rnp) {
1238 raw_spin_lock_irqsave(&rnp->lock, flags);
1239 ndetected += rcu_print_task_stall(rnp);
1240 if (rnp->qsmask != 0) {
1241 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
1242 if (rnp->qsmask & (1UL << cpu)) {
1243 print_cpu_stall_info(rsp,
1244 rnp->grplo + cpu);
1245 ndetected++;
1246 }
1247 }
1248 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1249 }
1250
1251 print_cpu_stall_info_end();
1252 for_each_possible_cpu(cpu)
1253 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1254 pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1255 smp_processor_id(), (long)(jiffies - rsp->gp_start),
1256 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1257 if (ndetected) {
1258 rcu_dump_cpu_stacks(rsp);
1259 } else {
1260 if (READ_ONCE(rsp->gpnum) != gpnum ||
1261 READ_ONCE(rsp->completed) == gpnum) {
1262 pr_err("INFO: Stall ended before state dump start\n");
1263 } else {
1264 j = jiffies;
1265 gpa = READ_ONCE(rsp->gp_activity);
1266 pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
1267 rsp->name, j - gpa, j, gpa,
1268 jiffies_till_next_fqs,
1269 rcu_get_root(rsp)->qsmask);
1270
1271 sched_show_task(current);
1272 }
1273 }
1274
1275
1276 rcu_print_detail_task_stall(rsp);
1277
1278 rcu_check_gp_kthread_starvation(rsp);
1279
1280 force_quiescent_state(rsp);
1281}
1282
1283static void print_cpu_stall(struct rcu_state *rsp)
1284{
1285 int cpu;
1286 unsigned long flags;
1287 struct rcu_node *rnp = rcu_get_root(rsp);
1288 long totqlen = 0;
1289
1290
1291
1292
1293
1294
1295 pr_err("INFO: %s self-detected stall on CPU", rsp->name);
1296 print_cpu_stall_info_begin();
1297 print_cpu_stall_info(rsp, smp_processor_id());
1298 print_cpu_stall_info_end();
1299 for_each_possible_cpu(cpu)
1300 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
1301 pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1302 jiffies - rsp->gp_start,
1303 (long)rsp->gpnum, (long)rsp->completed, totqlen);
1304
1305 rcu_check_gp_kthread_starvation(rsp);
1306
1307 rcu_dump_cpu_stacks(rsp);
1308
1309 raw_spin_lock_irqsave(&rnp->lock, flags);
1310 if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
1311 WRITE_ONCE(rsp->jiffies_stall,
1312 jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
1313 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1314
1315
1316
1317
1318
1319
1320
1321
1322 resched_cpu(smp_processor_id());
1323}
1324
1325static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1326{
1327 unsigned long completed;
1328 unsigned long gpnum;
1329 unsigned long gps;
1330 unsigned long j;
1331 unsigned long js;
1332 struct rcu_node *rnp;
1333
1334 if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
1335 return;
1336 j = jiffies;
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355 gpnum = READ_ONCE(rsp->gpnum);
1356 smp_rmb();
1357 js = READ_ONCE(rsp->jiffies_stall);
1358 smp_rmb();
1359 gps = READ_ONCE(rsp->gp_start);
1360 smp_rmb();
1361 completed = READ_ONCE(rsp->completed);
1362 if (ULONG_CMP_GE(completed, gpnum) ||
1363 ULONG_CMP_LT(j, js) ||
1364 ULONG_CMP_GE(gps, js))
1365 return;
1366 rnp = rdp->mynode;
1367 if (rcu_gp_in_progress(rsp) &&
1368 (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
1369
1370
1371 print_cpu_stall(rsp);
1372
1373 } else if (rcu_gp_in_progress(rsp) &&
1374 ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1375
1376
1377 print_other_cpu_stall(rsp, gpnum);
1378 }
1379}
1380
/*
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall warnings from appearing in the current set of RCU
 * grace periods.
 */
1390void rcu_cpu_stall_reset(void)
1391{
1392 struct rcu_state *rsp;
1393
1394 for_each_rcu_flavor(rsp)
1395 WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
1396}
1397
/*
 * Initialize the specified rcu_data structure's default callback list
 * to empty.
 */
1403static void init_default_callback_list(struct rcu_data *rdp)
1404{
1405 int i;
1406
1407 rdp->nxtlist = NULL;
1408 for (i = 0; i < RCU_NEXT_SIZE; i++)
1409 rdp->nxttail[i] = &rdp->nxtlist;
1410}
1411
1412
1413
1414
1415static void init_callback_list(struct rcu_data *rdp)
1416{
1417 if (init_nocb_callback_list(rdp))
1418 return;
1419 init_default_callback_list(rdp);
1420}
1421
/*
 * Determine the value that ->completed will have at the end of the
 * next subsequent grace period.  This is used to tag callbacks so that
 * a CPU can later determine that its callbacks are safe to invoke.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
1431static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1432 struct rcu_node *rnp)
1433{
1434
1435
1436
1437
1438
1439
1440
1441 if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1442 return rnp->completed + 1;
1443
1444
1445
1446
1447
1448 return rnp->completed + 2;
1449}
1450
1451
1452
1453
1454
1455static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1456 unsigned long c, const char *s)
1457{
1458 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1459 rnp->completed, c, rnp->level,
1460 rnp->grplo, rnp->grphi, s);
1461}
1462
/*
 * Request a future grace period as needed to handle newly arrived
 * callbacks, recording the request in the rcu_node structures'
 * ->need_future_gp[] arrays.  Returns true if the grace-period kthread
 * must be awakened, and records the future grace period's number in
 * *c_out if c_out is non-NULL.
 *
 * The caller must hold the specified rcu_node structure's ->lock.
 */
1471static bool __maybe_unused
1472rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1473 unsigned long *c_out)
1474{
1475 unsigned long c;
1476 int i;
1477 bool ret = false;
1478 struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1479
1480
1481
1482
1483
1484 c = rcu_cbs_completed(rdp->rsp, rnp);
1485 trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
1486 if (rnp->need_future_gp[c & 0x1]) {
1487 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
1488 goto out;
1489 }
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504 if (rnp->gpnum != rnp->completed ||
1505 READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
1506 rnp->need_future_gp[c & 0x1]++;
1507 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
1508 goto out;
1509 }
1510
1511
1512
1513
1514
1515
1516 if (rnp != rnp_root) {
1517 raw_spin_lock(&rnp_root->lock);
1518 smp_mb__after_unlock_lock();
1519 }
1520
1521
1522
1523
1524
1525
1526
1527 c = rcu_cbs_completed(rdp->rsp, rnp_root);
1528 for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
1529 if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
1530 rdp->nxtcompleted[i] = c;
1531
1532
1533
1534
1535
1536 if (rnp_root->need_future_gp[c & 0x1]) {
1537 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
1538 goto unlock_out;
1539 }
1540
1541
1542 rnp_root->need_future_gp[c & 0x1]++;
1543
1544
1545 if (rnp_root->gpnum != rnp_root->completed) {
1546 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
1547 } else {
1548 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
1549 ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
1550 }
1551unlock_out:
1552 if (rnp != rnp_root)
1553 raw_spin_unlock(&rnp_root->lock);
1554out:
1555 if (c_out != NULL)
1556 *c_out = c;
1557 return ret;
1558}
1559
/*
 * Clean up any old requests for the just-ended grace period, and return
 * whether any additional grace periods have been requested.  The caller
 * must hold the specified rcu_node structure's ->lock.
 */
1566static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1567{
1568 int c = rnp->completed;
1569 int needmore;
1570 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1571
1572 rcu_nocb_gp_cleanup(rsp, rnp);
1573 rnp->need_future_gp[c & 0x1] = 0;
1574 needmore = rnp->need_future_gp[(c + 1) & 0x1];
1575 trace_rcu_future_gp(rnp, rdp, c,
1576 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1577 return needmore;
1578}
1579
/*
 * Awaken the grace-period kthread for the specified flavor of RCU.
 * Don't bother awakening when there is nothing for the grace-period
 * kthread to do, or when the grace-period kthread itself is doing the
 * wakeup.
 */
1587static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1588{
1589 if (current == rsp->gp_kthread ||
1590 !READ_ONCE(rsp->gp_flags) ||
1591 !rsp->gp_kthread)
1592 return;
1593 wake_up(&rsp->gp_wq);
1594}
1595
/*
 * If there is room, assign a ->completed number to any callbacks on
 * this CPU that have not already been assigned.  Also accelerate any
 * callbacks that were previously assigned a ->completed number that has
 * since proven to be too conservative.  This function is idempotent, so
 * it does not hurt to call it repeatedly.  Returns true if the
 * grace-period kthread must be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
1608static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1609 struct rcu_data *rdp)
1610{
1611 unsigned long c;
1612 int i;
1613 bool ret;
1614
1615
1616 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1617 return false;
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633 c = rcu_cbs_completed(rsp, rnp);
1634 for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
1635 if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
1636 !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
1637 break;
1638
1639
1640
1641
1642
1643
1644
1645 if (++i >= RCU_NEXT_TAIL)
1646 return false;
1647
1648
1649
1650
1651
1652
1653 for (; i <= RCU_NEXT_TAIL; i++) {
1654 rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
1655 rdp->nxtcompleted[i] = c;
1656 }
1657
1658 ret = rcu_start_future_gp(rnp, rdp, NULL);
1659
1660
1661 if (!*rdp->nxttail[RCU_WAIT_TAIL])
1662 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1663 else
1664 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1665 return ret;
1666}
1667
/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and assign
 * ->completed numbers to any callbacks in the RCU_NEXT_TAIL sublist.
 * Returns true if the grace-period kthread needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
1678static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1679 struct rcu_data *rdp)
1680{
1681 int i, j;
1682
1683
1684 if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1685 return false;
1686
1687
1688
1689
1690
1691 for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
1692 if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
1693 break;
1694 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
1695 }
1696
1697 for (j = RCU_WAIT_TAIL; j < i; j++)
1698 rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
1699
1700
1701 for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
1702 if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
1703 break;
1704 rdp->nxttail[j] = rdp->nxttail[i];
1705 rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
1706 }
1707
1708
1709 return rcu_accelerate_cbs(rsp, rnp, rdp);
1710}
1711
/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, with interrupts disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
1718static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1719 struct rcu_data *rdp)
1720{
1721 bool ret;
1722
1723
1724 if (rdp->completed == rnp->completed &&
1725 !unlikely(READ_ONCE(rdp->gpwrap))) {
1726
1727
1728 ret = rcu_accelerate_cbs(rsp, rnp, rdp);
1729
1730 } else {
1731
1732
1733 ret = rcu_advance_cbs(rsp, rnp, rdp);
1734
1735
1736 rdp->completed = rnp->completed;
1737 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1738 }
1739
1740 if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
1741
1742
1743
1744
1745
1746 rdp->gpnum = rnp->gpnum;
1747 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1748 rdp->passed_quiesce = 0;
1749 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
1750 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1751 zero_cpu_stall_ticks(rdp);
1752 WRITE_ONCE(rdp->gpwrap, false);
1753 }
1754 return ret;
1755}
1756
1757static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1758{
1759 unsigned long flags;
1760 bool needwake;
1761 struct rcu_node *rnp;
1762
1763 local_irq_save(flags);
1764 rnp = rdp->mynode;
1765 if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
1766 rdp->completed == READ_ONCE(rnp->completed) &&
1767 !unlikely(READ_ONCE(rdp->gpwrap))) ||
1768 !raw_spin_trylock(&rnp->lock)) {
1769 local_irq_restore(flags);
1770 return;
1771 }
1772 smp_mb__after_unlock_lock();
1773 needwake = __note_gp_changes(rsp, rnp, rdp);
1774 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1775 if (needwake)
1776 rcu_gp_kthread_wake(rsp);
1777}
1778
1779static void rcu_gp_slow(struct rcu_state *rsp, int delay)
1780{
1781 if (delay > 0 &&
1782 !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1783 schedule_timeout_uninterruptible(delay);
1784}
1785
/*
 * Initialize a new grace period.  Return 0 if no grace period required.
 */
1789static int rcu_gp_init(struct rcu_state *rsp)
1790{
1791 unsigned long oldmask;
1792 struct rcu_data *rdp;
1793 struct rcu_node *rnp = rcu_get_root(rsp);
1794
1795 WRITE_ONCE(rsp->gp_activity, jiffies);
1796 raw_spin_lock_irq(&rnp->lock);
1797 smp_mb__after_unlock_lock();
1798 if (!READ_ONCE(rsp->gp_flags)) {
1799
1800 raw_spin_unlock_irq(&rnp->lock);
1801 return 0;
1802 }
1803 WRITE_ONCE(rsp->gp_flags, 0);
1804
1805 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1806
1807
1808
1809
1810 raw_spin_unlock_irq(&rnp->lock);
1811 return 0;
1812 }
1813
1814
1815 record_gp_stall_check_time(rsp);
1816
1817 smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
1818 trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
1819 raw_spin_unlock_irq(&rnp->lock);
1820
	/*
	 * Apply per-leaf buffered online and offline operations to the
	 * rcu_node tree.  Note that this new grace period need not wait
	 * for subsequent online CPUs, and that quiescent-state forcing
	 * will handle subsequent offline CPUs.
	 */
1827 rcu_for_each_leaf_node(rsp, rnp) {
1828 rcu_gp_slow(rsp, gp_preinit_delay);
1829 raw_spin_lock_irq(&rnp->lock);
1830 smp_mb__after_unlock_lock();
1831 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1832 !rnp->wait_blkd_tasks) {
1833
1834 raw_spin_unlock_irq(&rnp->lock);
1835 continue;
1836 }
1837
1838
1839 oldmask = rnp->qsmaskinit;
1840 rnp->qsmaskinit = rnp->qsmaskinitnext;
1841
1842
1843 if (!oldmask != !rnp->qsmaskinit) {
1844 if (!oldmask)
1845 rcu_init_new_rnp(rnp);
1846 else if (rcu_preempt_has_tasks(rnp))
1847 rnp->wait_blkd_tasks = true;
1848 else
1849 rcu_cleanup_dead_rnp(rnp);
1850 }
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861 if (rnp->wait_blkd_tasks &&
1862 (!rcu_preempt_has_tasks(rnp) ||
1863 rnp->qsmaskinit)) {
1864 rnp->wait_blkd_tasks = false;
1865 rcu_cleanup_dead_rnp(rnp);
1866 }
1867
1868 raw_spin_unlock_irq(&rnp->lock);
1869 }
1870
	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first
	 * order, starting from the root rcu_node structure.  Other CPUs
	 * access only the leaves of the hierarchy, so they will see no
	 * grace period in progress until the corresponding leaf has been
	 * initialized.
	 */
1884 rcu_for_each_node_breadth_first(rsp, rnp) {
1885 rcu_gp_slow(rsp, gp_init_delay);
1886 raw_spin_lock_irq(&rnp->lock);
1887 smp_mb__after_unlock_lock();
1888 rdp = this_cpu_ptr(rsp->rda);
1889 rcu_preempt_check_blocked_tasks(rnp);
1890 rnp->qsmask = rnp->qsmaskinit;
1891 WRITE_ONCE(rnp->gpnum, rsp->gpnum);
1892 if (WARN_ON_ONCE(rnp->completed != rsp->completed))
1893 WRITE_ONCE(rnp->completed, rsp->completed);
1894 if (rnp == rdp->mynode)
1895 (void)__note_gp_changes(rsp, rnp, rdp);
1896 rcu_preempt_boost_start_gp(rnp);
1897 trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1898 rnp->level, rnp->grplo,
1899 rnp->grphi, rnp->qsmask);
1900 raw_spin_unlock_irq(&rnp->lock);
1901 cond_resched_rcu_qs();
1902 WRITE_ONCE(rsp->gp_activity, jiffies);
1903 }
1904
1905 return 1;
1906}
1907
/*
 * Do one round of quiescent-state forcing.
 */
1911static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1912{
1913 int fqs_state = fqs_state_in;
1914 bool isidle = false;
1915 unsigned long maxj;
1916 struct rcu_node *rnp = rcu_get_root(rsp);
1917
1918 WRITE_ONCE(rsp->gp_activity, jiffies);
1919 rsp->n_force_qs++;
1920 if (fqs_state == RCU_SAVE_DYNTICK) {
1921
1922 if (is_sysidle_rcu_state(rsp)) {
1923 isidle = true;
1924 maxj = jiffies - ULONG_MAX / 4;
1925 }
1926 force_qs_rnp(rsp, dyntick_save_progress_counter,
1927 &isidle, &maxj);
1928 rcu_sysidle_report_gp(rsp, isidle, maxj);
1929 fqs_state = RCU_FORCE_QS;
1930 } else {
1931
1932 isidle = true;
1933 force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
1934 }
1935
1936 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
1937 raw_spin_lock_irq(&rnp->lock);
1938 smp_mb__after_unlock_lock();
1939 WRITE_ONCE(rsp->gp_flags,
1940 READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
1941 raw_spin_unlock_irq(&rnp->lock);
1942 }
1943 return fqs_state;
1944}
1945
/*
 * Clean up after the old grace period.
 */
1949static void rcu_gp_cleanup(struct rcu_state *rsp)
1950{
1951 unsigned long gp_duration;
1952 bool needgp = false;
1953 int nocb = 0;
1954 struct rcu_data *rdp;
1955 struct rcu_node *rnp = rcu_get_root(rsp);
1956
1957 WRITE_ONCE(rsp->gp_activity, jiffies);
1958 raw_spin_lock_irq(&rnp->lock);
1959 smp_mb__after_unlock_lock();
1960 gp_duration = jiffies - rsp->gp_start;
1961 if (gp_duration > rsp->gp_max)
1962 rsp->gp_max = gp_duration;
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972 raw_spin_unlock_irq(&rnp->lock);
1973
	/*
	 * Propagate the new ->completed value to the rcu_node structures
	 * so that other CPUs don't have to wait until the start of the
	 * next grace period to process their callbacks.  This also
	 * ensures that the end of the current grace period is completely
	 * recorded in all of the rcu_node structures before the beginning
	 * of the next grace period is recorded in any of them.
	 */
1983 rcu_for_each_node_breadth_first(rsp, rnp) {
1984 raw_spin_lock_irq(&rnp->lock);
1985 smp_mb__after_unlock_lock();
1986 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
1987 WARN_ON_ONCE(rnp->qsmask);
1988 WRITE_ONCE(rnp->completed, rsp->gpnum);
1989 rdp = this_cpu_ptr(rsp->rda);
1990 if (rnp == rdp->mynode)
1991 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
1992
1993 nocb += rcu_future_gp_cleanup(rsp, rnp);
1994 raw_spin_unlock_irq(&rnp->lock);
1995 cond_resched_rcu_qs();
1996 WRITE_ONCE(rsp->gp_activity, jiffies);
1997 rcu_gp_slow(rsp, gp_cleanup_delay);
1998 }
1999 rnp = rcu_get_root(rsp);
2000 raw_spin_lock_irq(&rnp->lock);
2001 smp_mb__after_unlock_lock();
2002 rcu_nocb_gp_set(rnp, nocb);
2003
2004
2005 WRITE_ONCE(rsp->completed, rsp->gpnum);
2006 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
2007 rsp->fqs_state = RCU_GP_IDLE;
2008 rdp = this_cpu_ptr(rsp->rda);
2009
2010 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
2011 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
2012 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
2013 trace_rcu_grace_period(rsp->name,
2014 READ_ONCE(rsp->gpnum),
2015 TPS("newreq"));
2016 }
2017 raw_spin_unlock_irq(&rnp->lock);
2018}
2019
/*
 * Body of kthread that handles grace periods for the specified flavor.
 */
2023static int __noreturn rcu_gp_kthread(void *arg)
2024{
2025 int fqs_state;
2026 int gf;
2027 unsigned long j;
2028 int ret;
2029 struct rcu_state *rsp = arg;
2030 struct rcu_node *rnp = rcu_get_root(rsp);
2031
2032 rcu_bind_gp_kthread();
2033 for (;;) {
2034
2035
2036 for (;;) {
2037 trace_rcu_grace_period(rsp->name,
2038 READ_ONCE(rsp->gpnum),
2039 TPS("reqwait"));
2040 rsp->gp_state = RCU_GP_WAIT_GPS;
2041 wait_event_interruptible(rsp->gp_wq,
2042 READ_ONCE(rsp->gp_flags) &
2043 RCU_GP_FLAG_INIT);
2044
2045 if (rcu_gp_init(rsp))
2046 break;
2047 cond_resched_rcu_qs();
2048 WRITE_ONCE(rsp->gp_activity, jiffies);
2049 WARN_ON(signal_pending(current));
2050 trace_rcu_grace_period(rsp->name,
2051 READ_ONCE(rsp->gpnum),
2052 TPS("reqwaitsig"));
2053 }
2054
2055
2056 fqs_state = RCU_SAVE_DYNTICK;
2057 j = jiffies_till_first_fqs;
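		/* Clamp the user-supplied FQS delay to at most HZ jiffies (one second). */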
2058 if (j > HZ) {
2059 j = HZ;
2060 jiffies_till_first_fqs = HZ;
2061 }
2062 ret = 0;
2063 for (;;) {
2064 if (!ret)
2065 rsp->jiffies_force_qs = jiffies + j;
2066 trace_rcu_grace_period(rsp->name,
2067 READ_ONCE(rsp->gpnum),
2068 TPS("fqswait"));
2069 rsp->gp_state = RCU_GP_WAIT_FQS;
2070 ret = wait_event_interruptible_timeout(rsp->gp_wq,
2071 ((gf = READ_ONCE(rsp->gp_flags)) &
2072 RCU_GP_FLAG_FQS) ||
2073 (!READ_ONCE(rnp->qsmask) &&
2074 !rcu_preempt_blocked_readers_cgp(rnp)),
2075 j);
2076
2077
2078 if (!READ_ONCE(rnp->qsmask) &&
2079 !rcu_preempt_blocked_readers_cgp(rnp))
2080 break;
2081
2082 if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
2083 (gf & RCU_GP_FLAG_FQS)) {
2084 trace_rcu_grace_period(rsp->name,
2085 READ_ONCE(rsp->gpnum),
2086 TPS("fqsstart"));
2087 fqs_state = rcu_gp_fqs(rsp, fqs_state);
2088 trace_rcu_grace_period(rsp->name,
2089 READ_ONCE(rsp->gpnum),
2090 TPS("fqsend"));
2091 cond_resched_rcu_qs();
2092 WRITE_ONCE(rsp->gp_activity, jiffies);
2093 } else {
2094
2095 cond_resched_rcu_qs();
2096 WRITE_ONCE(rsp->gp_activity, jiffies);
2097 WARN_ON(signal_pending(current));
2098 trace_rcu_grace_period(rsp->name,
2099 READ_ONCE(rsp->gpnum),
2100 TPS("fqswaitsig"));
2101 }
2102 j = jiffies_till_next_fqs;
2103 if (j > HZ) {
2104 j = HZ;
2105 jiffies_till_next_fqs = HZ;
2106 } else if (j < 1) {
2107 j = 1;
2108 jiffies_till_next_fqs = 1;
2109 }
2110 }
2111
2112
2113 rcu_gp_cleanup(rsp);
2114 }
2115}
2116
/*
 * Start a new RCU grace period if warranted, re-initializing the
 * hierarchy.  The caller must hold the root rcu_node structure's ->lock
 * with interrupts disabled.  Returns true if the grace-period kthread
 * must be awakened; the wakeup itself is deferred until after the lock
 * is released, to avoid deadlocking with the kthread.
 */
2128static bool
2129rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
2130 struct rcu_data *rdp)
2131{
2132 if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
2133
2134
2135
2136
2137
2138
2139 return false;
2140 }
2141 WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
2142 trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
2143 TPS("newreq"));
2144
2145
2146
2147
2148
2149
2150 return true;
2151}
2152
/*
 * Similar to rcu_start_gp_advanced(), but also advance the calling
 * CPU's callbacks against the root rcu_node structure, since the caller
 * holds only the root rcu_node structure's ->lock.  Returns true if the
 * grace-period kthread must be awakened.
 */
2162static bool rcu_start_gp(struct rcu_state *rsp)
2163{
2164 struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
2165 struct rcu_node *rnp = rcu_get_root(rsp);
2166 bool ret = false;
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
2177 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
2178 return ret;
2179}
2180
/*
 * Report a full set of quiescent states to the specified rcu_state data
 * structure.  The grace-period kthread does the actual cleanup and the
 * start of any subsequent grace period.  The caller must hold the root
 * rcu_node structure's ->lock, which this function releases.
 */
2188static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
2189 __releases(rcu_get_root(rsp)->lock)
2190{
2191 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
2192 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2193 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2194 rcu_gp_kthread_wake(rsp);
2195}
2196
/*
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
 * must be represented by the same rcu_node structure (which need not be
 * a leaf rcu_node structure, though it often will be).  The gps
 * parameter is the grace-period snapshot, so the quiescent states are
 * valid only if rnp->gpnum is still equal to gps.  The caller must hold
 * the specified rcu_node structure's ->lock, which is released before
 * return.
 */
2207static void
2208rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
2209 struct rcu_node *rnp, unsigned long gps, unsigned long flags)
2210 __releases(rnp->lock)
2211{
2212 unsigned long oldmask = 0;
2213 struct rcu_node *rnp_c;
2214
2215
2216 for (;;) {
2217 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
2218
2219
2220
2221
2222
2223 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2224 return;
2225 }
2226 WARN_ON_ONCE(oldmask);
2227 rnp->qsmask &= ~mask;
2228 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
2229 mask, rnp->qsmask, rnp->level,
2230 rnp->grplo, rnp->grphi,
2231 !!rnp->gp_tasks);
2232 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2233
2234
2235 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2236 return;
2237 }
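		/* This rcu_node is done; propagate the report up to its parent. */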
2238 mask = rnp->grpmask;
2239 if (rnp->parent == NULL) {
2240
2241
2242
2243 break;
2244 }
2245 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2246 rnp_c = rnp;
2247 rnp = rnp->parent;
2248 raw_spin_lock_irqsave(&rnp->lock, flags);
2249 smp_mb__after_unlock_lock();
2250 oldmask = rnp_c->qsmask;
2251 }
2252
2253
2254
2255
2256
2257
2258 rcu_report_qs_rsp(rsp, flags);
2259}
2260
/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the
 * current RCU grace period.  The caller must hold the specified
 * rnp->lock with interrupts disabled; the lock is released before
 * return.
 */
2268static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
2269 struct rcu_node *rnp, unsigned long flags)
2270 __releases(rnp->lock)
2271{
2272 unsigned long gps;
2273 unsigned long mask;
2274 struct rcu_node *rnp_p;
2275
2276 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
2277 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2278 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2279 return;
2280 }
2281
2282 rnp_p = rnp->parent;
2283 if (rnp_p == NULL) {
2284
2285
2286
2287
2288 rcu_report_qs_rsp(rsp, flags);
2289 return;
2290 }
2291
2292
2293 gps = rnp->gpnum;
2294 mask = rnp->grpmask;
2295 raw_spin_unlock(&rnp->lock);
2296 raw_spin_lock(&rnp_p->lock);
2297 smp_mb__after_unlock_lock();
2298 rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
2299}
2300
/*
 * Record a quiescent state for the specified CPU in that CPU's
 * rcu_data structure.  This must be called from the specified CPU.
 */
2310static void
2311rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2312{
2313 unsigned long flags;
2314 unsigned long mask;
2315 bool needwake;
2316 struct rcu_node *rnp;
2317
2318 rnp = rdp->mynode;
2319 raw_spin_lock_irqsave(&rnp->lock, flags);
2320 smp_mb__after_unlock_lock();
2321 if ((rdp->passed_quiesce == 0 &&
2322 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
2323 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
2324 rdp->gpwrap) {
2325
2326
2327
2328
2329
2330
2331
2332 rdp->passed_quiesce = 0;
2333 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
2334 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2335 return;
2336 }
2337 mask = rdp->grpmask;
2338 if ((rnp->qsmask & mask) == 0) {
2339 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2340 } else {
2341 rdp->qs_pending = 0;
2342
2343
2344
2345
2346
2347 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2348
2349 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2350
2351 if (needwake)
2352 rcu_gp_kthread_wake(rsp);
2353 }
2354}
2355
/*
 * Check to see if there is a new grace period of which this CPU is not
 * yet aware, and if so, set up local rcu_data state for it.  Otherwise,
 * see if this CPU has already passed through its first quiescent state
 * for this grace period, and record that fact if so.
 */
2362static void
2363rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2364{
2365
2366 note_gp_changes(rsp, rdp);
2367
2368
2369
2370
2371
2372 if (!rdp->qs_pending)
2373 return;
2374
2375
2376
2377
2378
2379 if (!rdp->passed_quiesce &&
2380 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
2381 return;
2382
2383
2384
2385
2386
2387 rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
2388}
2389
/*
 * Send the specified CPU's RCU callbacks to the orphanage.  The
 * specified CPU must be offline, and the caller must hold the
 * ->orphan_lock.
 */
2395static void
2396rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
2397 struct rcu_node *rnp, struct rcu_data *rdp)
2398{
2399
2400 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
2401 return;
2402
2403
2404
2405
2406
2407
2408 if (rdp->nxtlist != NULL) {
2409 rsp->qlen_lazy += rdp->qlen_lazy;
2410 rsp->qlen += rdp->qlen;
2411 rdp->n_cbs_orphaned += rdp->qlen;
2412 rdp->qlen_lazy = 0;
2413 WRITE_ONCE(rdp->qlen, 0);
2414 }
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425 if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
2426 *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
2427 rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
2428 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
2429 }
2430
2431
2432
2433
2434
2435
2436 if (rdp->nxtlist != NULL) {
2437 *rsp->orphan_donetail = rdp->nxtlist;
2438 rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
2439 }
2440
2441
2442
2443
2444
2445 init_callback_list(rdp);
2446 rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2447}
2448
/*
 * Adopt the RCU callbacks from the specified rcu_state structure's
 * orphanage.  The caller must hold the ->orphan_lock.
 */
2453static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
2454{
2455 int i;
2456 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2457
2458
2459 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2460 rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
2461 return;
2462
2463
2464 rdp->qlen_lazy += rsp->qlen_lazy;
2465 rdp->qlen += rsp->qlen;
2466 rdp->n_cbs_adopted += rsp->qlen;
2467 if (rsp->qlen_lazy != rsp->qlen)
2468 rcu_idle_count_callbacks_posted();
2469 rsp->qlen_lazy = 0;
2470 rsp->qlen = 0;
2471
2472
2473
2474
2475
2476
2477
2478
2479 if (rsp->orphan_donelist != NULL) {
2480 *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
2481 *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
2482 for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
2483 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2484 rdp->nxttail[i] = rsp->orphan_donetail;
2485 rsp->orphan_donelist = NULL;
2486 rsp->orphan_donetail = &rsp->orphan_donelist;
2487 }
2488
2489
2490 if (rsp->orphan_nxtlist != NULL) {
2491 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
2492 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
2493 rsp->orphan_nxtlist = NULL;
2494 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2495 }
2496}
2497
/*
 * Trace the fact that this CPU is going offline.
 */
2501static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2502{
2503 RCU_TRACE(unsigned long mask);
2504 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
2505 RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
2506
2507 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2508 return;
2509
2510 RCU_TRACE(mask = rdp->grpmask);
2511 trace_rcu_grace_period(rsp->name,
2512 rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2513 TPS("cpuofl"));
2514}
2515
/*
 * All CPUs for the specified rcu_node structure have gone offline, and
 * all tasks that were preempted within an RCU read-side critical
 * section while running on one of those CPUs have since exited their
 * RCU read-side critical section.  Some other CPU is reporting this
 * fact with the specified rcu_node structure's ->lock held and irqs
 * disabled.  This function therefore goes up the tree of rcu_node
 * structures, clearing the corresponding bits in the ->qsmaskinit
 * fields.  Note that the leaf rcu_node structure's ->qsmaskinit field
 * has already been updated.
 *
 * This function does check that the specified rcu_node structure has
 * all CPUs offline and no blocked tasks, so it is OK to invoke it
 * prematurely.
 */
2533static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2534{
2535 long mask;
2536 struct rcu_node *rnp = rnp_leaf;
2537
2538 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2539 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2540 return;
2541 for (;;) {
2542 mask = rnp->grpmask;
2543 rnp = rnp->parent;
2544 if (!rnp)
2545 break;
2546 raw_spin_lock(&rnp->lock);
2547 smp_mb__after_unlock_lock();
2548 rnp->qsmaskinit &= ~mask;
2549 rnp->qsmask &= ~mask;
2550 if (rnp->qsmaskinit) {
2551 raw_spin_unlock(&rnp->lock);
2552 return;
2553 }
2554 raw_spin_unlock(&rnp->lock);
2555 }
2556}
2557
/*
 * Remove the outgoing CPU from the rcu_node hierarchy's
 * ->qsmaskinitnext bitmasks so that future grace periods do not wait
 * on it.
 */
2563static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
2564{
2565 unsigned long flags;
2566 unsigned long mask;
2567 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2568 struct rcu_node *rnp = rdp->mynode;
2569
2570 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2571 return;
2572
2573
2574 mask = rdp->grpmask;
2575 raw_spin_lock_irqsave(&rnp->lock, flags);
2576 smp_mb__after_unlock_lock();
2577 rnp->qsmaskinitnext &= ~mask;
2578 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2579}
2580
/*
 * The CPU has been completely removed, and some other CPU is reporting
 * this fact from process context.  Do the remainder of the cleanup,
 * including orphaning the outgoing CPU's RCU callbacks.  There can only
 * be one CPU hotplug operation at a time, so no additional locking is
 * required.
 */
2588static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2589{
2590 unsigned long flags;
2591 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2592 struct rcu_node *rnp = rdp->mynode;
2593
2594 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2595 return;
2596
2597
2598 rcu_boost_kthread_setaffinity(rnp, -1);
2599
2600
2601 raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
2602 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
2603 rcu_adopt_orphan_cbs(rsp, flags);
2604 raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
2605
2606 WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
2607 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
2608 cpu, rdp->qlen, rdp->nxtlist);
2609}
2610
/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */
2615static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2616{
2617 unsigned long flags;
2618 struct rcu_head *next, *list, **tail;
2619 long bl, count, count_lazy;
2620 int i;
2621
2622
2623 if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
2624 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
2625 trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist),
2626 need_resched(), is_idle_task(current),
2627 rcu_is_callbacks_kthread());
2628 return;
2629 }
2630
2631
2632
2633
2634
2635 local_irq_save(flags);
2636 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2637 bl = rdp->blimit;
2638 trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
2639 list = rdp->nxtlist;
2640 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
2641 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
2642 tail = rdp->nxttail[RCU_DONE_TAIL];
2643 for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
2644 if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
2645 rdp->nxttail[i] = &rdp->nxtlist;
2646 local_irq_restore(flags);
2647
2648
2649 count = count_lazy = 0;
2650 while (list) {
2651 next = list->next;
2652 prefetch(next);
2653 debug_rcu_head_unqueue(list);
2654 if (__rcu_reclaim(rsp->name, list))
2655 count_lazy++;
2656 list = next;
2657
2658 if (++count >= bl &&
2659 (need_resched() ||
2660 (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2661 break;
2662 }
2663
2664 local_irq_save(flags);
2665 trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
2666 is_idle_task(current),
2667 rcu_is_callbacks_kthread());
2668
2669
2670 if (list != NULL) {
2671 *tail = rdp->nxtlist;
2672 rdp->nxtlist = list;
2673 for (i = 0; i < RCU_NEXT_SIZE; i++)
2674 if (&rdp->nxtlist == rdp->nxttail[i])
2675 rdp->nxttail[i] = tail;
2676 else
2677 break;
2678 }
2679 smp_mb();
2680 rdp->qlen_lazy -= count_lazy;
2681 WRITE_ONCE(rdp->qlen, rdp->qlen - count);
2682 rdp->n_cbs_invoked += count;
2683
2684
2685 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
2686 rdp->blimit = blimit;
2687
2688
2689 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
2690 rdp->qlen_last_fqs_check = 0;
2691 rdp->n_force_qs_snap = rsp->n_force_qs;
2692 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
2693 rdp->qlen_last_fqs_check = rdp->qlen;
2694 WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
2695
2696 local_irq_restore(flags);
2697
2698
2699 if (cpu_has_callbacks_ready_to_invoke(rdp))
2700 invoke_rcu_core();
2701}
2702
2703
/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule RCU core processing.
 *
 * This function must be called from hardirq context.  It is normally
 * invoked from the scheduling-clock interrupt.
 */
2712void rcu_check_callbacks(int user)
2713{
2714 trace_rcu_utilization(TPS("Start scheduler-tick"));
2715 increment_cpu_stall_ticks();
2716 if (user || rcu_is_cpu_rrupt_from_idle()) {
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730 rcu_sched_qs();
2731 rcu_bh_qs();
2732
2733 } else if (!in_softirq()) {
2734
2735
2736
2737
2738
2739
2740
2741
2742 rcu_bh_qs();
2743 }
2744 rcu_preempt_check_callbacks();
2745 if (rcu_pending())
2746 invoke_rcu_core();
2747 if (user)
2748 rcu_note_voluntary_context_switch(current);
2749 trace_rcu_utilization(TPS("End scheduler-tick"));
2750}
2751
/*
 * Scan the leaf rcu_node structures, processing dyntick state for any
 * that are in dyntick-idle mode or have not yet responded to the
 * current grace period.  The caller must have suppressed the start of
 * new grace periods.
 */
2759static void force_qs_rnp(struct rcu_state *rsp,
2760 int (*f)(struct rcu_data *rsp, bool *isidle,
2761 unsigned long *maxj),
2762 bool *isidle, unsigned long *maxj)
2763{
2764 unsigned long bit;
2765 int cpu;
2766 unsigned long flags;
2767 unsigned long mask;
2768 struct rcu_node *rnp;
2769
2770 rcu_for_each_leaf_node(rsp, rnp) {
2771 cond_resched_rcu_qs();
2772 mask = 0;
2773 raw_spin_lock_irqsave(&rnp->lock, flags);
2774 smp_mb__after_unlock_lock();
2775 if (rnp->qsmask == 0) {
2776 if (rcu_state_p == &rcu_sched_state ||
2777 rsp != rcu_state_p ||
2778 rcu_preempt_blocked_readers_cgp(rnp)) {
2779
2780
2781
2782
2783
2784 rcu_initiate_boost(rnp, flags);
2785
2786 continue;
2787 }
2788 if (rnp->parent &&
2789 (rnp->parent->qsmask & rnp->grpmask)) {
2790
2791
2792
2793
2794
2795 rcu_report_unblock_qs_rnp(rsp, rnp, flags);
2796
2797 continue;
2798 }
2799 }
2800 cpu = rnp->grplo;
2801 bit = 1;
2802 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
2803 if ((rnp->qsmask & bit) != 0) {
2804 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2805 mask |= bit;
2806 }
2807 }
2808 if (mask != 0) {
2809
2810 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2811 } else {
2812
2813 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2814 }
2815 }
2816}
2817
/*
 * Force quiescent states on reluctant CPUs, and also detect which CPUs
 * are in dyntick-idle mode.  The funnel locking on ->fqslock limits
 * contention when many CPUs attempt this at once.
 */
2822static void force_quiescent_state(struct rcu_state *rsp)
2823{
2824 unsigned long flags;
2825 bool ret;
2826 struct rcu_node *rnp;
2827 struct rcu_node *rnp_old = NULL;
2828
2829
2830 rnp = __this_cpu_read(rsp->rda->mynode);
2831 for (; rnp != NULL; rnp = rnp->parent) {
2832 ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2833 !raw_spin_trylock(&rnp->fqslock);
2834 if (rnp_old != NULL)
2835 raw_spin_unlock(&rnp_old->fqslock);
2836 if (ret) {
2837 rsp->n_force_qs_lh++;
2838 return;
2839 }
2840 rnp_old = rnp;
2841 }
2842
2843
2844
2845 raw_spin_lock_irqsave(&rnp_old->lock, flags);
2846 smp_mb__after_unlock_lock();
2847 raw_spin_unlock(&rnp_old->fqslock);
2848 if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2849 rsp->n_force_qs_lh++;
2850 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2851 return;
2852 }
2853 WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2854 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2855 rcu_gp_kthread_wake(rsp);
2856}
2857
/*
 * This does the RCU core processing work for the specified rcu_state
 * structure.  This may be called only from the CPU to whom the rdp
 * belongs.
 */
2863static void
2864__rcu_process_callbacks(struct rcu_state *rsp)
2865{
2866 unsigned long flags;
2867 bool needwake;
2868 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2869
2870 WARN_ON_ONCE(rdp->beenonline == 0);
2871
2872
2873 rcu_check_quiescent_state(rsp, rdp);
2874
2875
2876 local_irq_save(flags);
2877 if (cpu_needs_another_gp(rsp, rdp)) {
2878 raw_spin_lock(&rcu_get_root(rsp)->lock);
2879 needwake = rcu_start_gp(rsp);
2880 raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2881 if (needwake)
2882 rcu_gp_kthread_wake(rsp);
2883 } else {
2884 local_irq_restore(flags);
2885 }
2886
2887
2888 if (cpu_has_callbacks_ready_to_invoke(rdp))
2889 invoke_rcu_callbacks(rsp, rdp);
2890
2891
2892 do_nocb_deferred_wakeup(rdp);
2893}
2894
/*
 * Do RCU core processing for the current CPU.
 */
2898static void rcu_process_callbacks(struct softirq_action *unused)
2899{
2900 struct rcu_state *rsp;
2901
2902 if (cpu_is_offline(smp_processor_id()))
2903 return;
2904 trace_rcu_utilization(TPS("Start RCU core"));
2905 for_each_rcu_flavor(rsp)
2906 __rcu_process_callbacks(rsp);
2907 trace_rcu_utilization(TPS("End RCU core"));
2908}
2909
/*
 * Schedule RCU callback invocation.  If the specified type of RCU
 * does not support RCU priority boosting, just do a direct call,
 * otherwise wake up the per-CPU kernel kthread.  Note that because we
 * are running on the current CPU with softirqs disabled, the
 * rcu_cpu_kthread_task cannot disappear out from under us.
 */
2917static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2918{
2919 if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
2920 return;
2921 if (likely(!rsp->boost)) {
2922 rcu_do_batch(rsp, rdp);
2923 return;
2924 }
2925 invoke_rcu_callbacks_kthread();
2926}
2927
2928static void invoke_rcu_core(void)
2929{
2930 if (cpu_online(smp_processor_id()))
2931 raise_softirq(RCU_SOFTIRQ);
2932}
2933
/*
 * Handle any core-RCU processing required by a call_rcu() invocation.
 */
2937static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2938 struct rcu_head *head, unsigned long flags)
2939{
2940 bool needwake;
2941
	/*
	 * If called from an extended quiescent state, invoke the RCU
	 * core in order to force a re-evaluation of RCU's idleness.
	 */
2946 if (!rcu_is_watching())
2947 invoke_rcu_core();
2948
2949
2950 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2951 return;
2952
	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke force_quiescent_state()
	 * if some other CPU has recently done so.  Also, don't bother
	 * invoking force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */
2960 if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
2961
2962
2963 note_gp_changes(rsp, rdp);
2964
2965
2966 if (!rcu_gp_in_progress(rsp)) {
2967 struct rcu_node *rnp_root = rcu_get_root(rsp);
2968
2969 raw_spin_lock(&rnp_root->lock);
2970 smp_mb__after_unlock_lock();
2971 needwake = rcu_start_gp(rsp);
2972 raw_spin_unlock(&rnp_root->lock);
2973 if (needwake)
2974 rcu_gp_kthread_wake(rsp);
2975 } else {
2976
2977 rdp->blimit = LONG_MAX;
2978 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2979 *rdp->nxttail[RCU_DONE_TAIL] != head)
2980 force_quiescent_state(rsp);
2981 rdp->n_force_qs_snap = rsp->n_force_qs;
2982 rdp->qlen_last_fqs_check = rdp->qlen;
2983 }
2984 }
2985}
2986
/*
 * RCU callback function to leak a callback.
 */
2990static void rcu_leak_callback(struct rcu_head *rhp)
2991{
2992}
2993
/*
 * Helper function for call_rcu() and friends.  The cpu argument will
 * normally be -1, indicating "currently running CPU".  It may specify
 * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
 * is expected to specify a CPU.
 */
3000static void
3001__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
3002 struct rcu_state *rsp, int cpu, bool lazy)
3003{
3004 unsigned long flags;
3005 struct rcu_data *rdp;
3006
3007 WARN_ON_ONCE((unsigned long)head & 0x1);
3008 if (debug_rcu_head_queue(head)) {
3009
3010 WRITE_ONCE(head->func, rcu_leak_callback);
3011 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
3012 return;
3013 }
3014 head->func = func;
3015 head->next = NULL;
	/*
	 * Opportunistically note grace-period endings and beginnings.
	 * Note that we might see a beginning right after we see an
	 * end, but never vice versa, since this CPU has to pass through
	 * a quiescent state betweentimes.
	 */
3023 local_irq_save(flags);
3024 rdp = this_cpu_ptr(rsp->rda);
3025
3026
3027 if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
3028 int offline;
3029
3030 if (cpu != -1)
3031 rdp = per_cpu_ptr(rsp->rda, cpu);
3032 if (likely(rdp->mynode)) {
3033
3034 offline = !__call_rcu_nocb(rdp, head, lazy, flags);
3035 WARN_ON_ONCE(offline);
3036
3037 local_irq_restore(flags);
3038 return;
3039 }
		/*
		 * Very early boot, before rcu_init().  Initialize if needed
		 * and then drop through to queue the callback.
		 */
3044 BUG_ON(cpu != -1);
3045 WARN_ON_ONCE(!rcu_is_watching());
3046 if (!likely(rdp->nxtlist))
3047 init_default_callback_list(rdp);
3048 }
3049 WRITE_ONCE(rdp->qlen, rdp->qlen + 1);
3050 if (lazy)
3051 rdp->qlen_lazy++;
3052 else
3053 rcu_idle_count_callbacks_posted();
3054 smp_mb();
3055 *rdp->nxttail[RCU_NEXT_TAIL] = head;
3056 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
3057
3058 if (__is_kfree_rcu_offset((unsigned long)func))
3059 trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
3060 rdp->qlen_lazy, rdp->qlen);
3061 else
3062 trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
3063
3064
3065 __call_rcu_core(rsp, rdp, head, flags);
3066 local_irq_restore(flags);
3067}
3068
/*
 * Queue an RCU-sched callback for invocation after a grace period.
 */
3072void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
3073{
3074 __call_rcu(head, func, &rcu_sched_state, -1, 0);
3075}
3076EXPORT_SYMBOL_GPL(call_rcu_sched);
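/*
 * Usage sketch (not part of this file's logic): a caller typically embeds
 * a struct rcu_head in its own structure and passes a callback that does
 * the deferred free.  The names below (struct foo, foo_reclaim(),
 * foo_retire()) are hypothetical, and kfree() assumes <linux/slab.h>:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	static void foo_retire(struct foo *fp)
 *	{
 *		// fp must already be unreachable by new RCU-sched readers.
 *		call_rcu_sched(&fp->rh, foo_reclaim);
 *	}
 */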
3077
/*
 * Queue an RCU callback for invocation after a quicker grace period.
 */
3081void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
3082{
3083 __call_rcu(head, func, &rcu_bh_state, -1, 0);
3084}
3085EXPORT_SYMBOL_GPL(call_rcu_bh);
3086
/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
3094void kfree_call_rcu(struct rcu_head *head,
3095 void (*func)(struct rcu_head *rcu))
3096{
3097 __call_rcu(head, func, rcu_state_p, -1, 1);
3098}
3099EXPORT_SYMBOL_GPL(kfree_call_rcu);
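/*
 * Usage sketch: callers normally reach kfree_call_rcu() through the
 * kfree_rcu() macro, which encodes the offset of the rcu_head within the
 * enclosing structure rather than passing a function pointer.  The struct
 * bar and bar_retire() names below are hypothetical:
 *
 *	struct bar {
 *		long payload;
 *		struct rcu_head rh;
 *	};
 *
 *	static void bar_retire(struct bar *bp)
 *	{
 *		kfree_rcu(bp, rh);	// lazily freed after a grace period
 *	}
 */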
3100
/*
 * Because a context switch is a grace period for RCU-sched and RCU-bh,
 * any blocking grace-period wait automatically implies a grace period
 * if there is only one CPU online at any point during execution of
 * either synchronize_sched() or synchronize_rcu_bh().  It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds
 * some overhead: RCU still operates correctly.
 */
3110static inline int rcu_blocking_is_gp(void)
3111{
3112 int ret;
3113
3114 might_sleep();
3115 preempt_disable();
3116 ret = num_online_cpus() <= 1;
3117 preempt_enable();
3118 return ret;
3119}
3120
/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.  These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * non-threaded hardware-interrupt handlers, in progress on entry will
 * have completed before this primitive returns.  However, this does not
 * guarantee that softirq handlers will have completed, since in some
 * kernels, these handlers can run in process context, and can block.
 *
 * This primitive also provides memory-ordering guarantees: any code
 * executed by any CPU before the start of the grace period is ordered
 * before any code executed by any CPU after the corresponding
 * synchronize_sched() returns.
 */
3162void synchronize_sched(void)
3163{
3164 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
3165 !lock_is_held(&rcu_lock_map) &&
3166 !lock_is_held(&rcu_sched_lock_map),
3167 "Illegal synchronize_sched() in RCU-sched read-side critical section");
3168 if (rcu_blocking_is_gp())
3169 return;
3170 if (rcu_gp_is_expedited())
3171 synchronize_sched_expedited();
3172 else
3173 wait_rcu_gp(call_rcu_sched);
3174}
3175EXPORT_SYMBOL_GPL(synchronize_sched);
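/*
 * Usage sketch: the usual update-side pattern unpublishes the old data,
 * waits for an RCU-sched grace period, and only then frees it.  The names
 * gp, gp_lock, and struct foo below are hypothetical:
 *
 *	static struct foo __rcu *gp;	// updates serialized by gp_lock
 *
 *	static void foo_replace(struct foo *newp)
 *	{
 *		struct foo *oldp;
 *
 *		oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *		rcu_assign_pointer(gp, newp);
 *		synchronize_sched();	// wait out preempt-disabled readers
 *		kfree(oldp);
 *	}
 *
 * Readers would bracket their accesses with rcu_read_lock_sched() and
 * rcu_read_unlock_sched() (or any preemption-disabled region) and fetch
 * the pointer with rcu_dereference_sched().
 */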
3176
/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 *
 * See the description of synchronize_sched() for more detailed
 * information on memory-ordering guarantees.
 */
3189void synchronize_rcu_bh(void)
3190{
3191 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
3192 !lock_is_held(&rcu_lock_map) &&
3193 !lock_is_held(&rcu_sched_lock_map),
3194 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
3195 if (rcu_blocking_is_gp())
3196 return;
3197 if (rcu_gp_is_expedited())
3198 synchronize_rcu_bh_expedited();
3199 else
3200 wait_rcu_gp(call_rcu_bh);
3201}
3202EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
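/*
 * Usage sketch: synchronize_rcu_bh() is the update-side partner of
 * rcu_read_lock_bh()/rcu_read_unlock_bh(), typically where readers run in
 * softirq context.  The bh_gp, new_cfg, and old_cfg names are hypothetical:
 *
 *	rcu_assign_pointer(bh_gp, new_cfg);
 *	synchronize_rcu_bh();	// all rcu_read_lock_bh() readers have finished
 *	kfree(old_cfg);
 */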
3203
/**
 * get_state_synchronize_rcu - Snapshot current RCU state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
3211unsigned long get_state_synchronize_rcu(void)
3212{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gpnum.
	 */
3217 smp_mb();
3218
	/*
	 * Make sure this load happens before the purportedly
	 * time-consuming work between get_state_synchronize_rcu()
	 * and cond_synchronize_rcu().
	 */
3224 return smp_load_acquire(&rcu_state_p->gpnum);
3225}
3226EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3227
/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 *
 * If a full RCU grace period has elapsed since the earlier call to
 * get_state_synchronize_rcu(), just return.  Otherwise, invoke
 * synchronize_rcu() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.  But
 * counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
3242void cond_synchronize_rcu(unsigned long oldstate)
3243{
3244 unsigned long newstate;
3245
	/*
	 * Ensure that this load happens before any RCU-destructive
	 * actions the caller might carry out after we return.
	 */
3250 newstate = smp_load_acquire(&rcu_state_p->completed);
3251 if (ULONG_CMP_GE(oldstate, newstate))
3252 synchronize_rcu();
3253}
3254EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
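/*
 * Usage sketch: get_state_synchronize_rcu() and cond_synchronize_rcu()
 * allow a caller to overlap a grace period with unrelated work.  The
 * expensive_teardown() helper and old_item pointer are hypothetical:
 *
 *	unsigned long gp_state;
 *
 *	gp_state = get_state_synchronize_rcu();
 *	expensive_teardown();		// runs concurrently with the GP
 *	cond_synchronize_rcu(gp_state);	// blocks only if no GP has elapsed
 *	kfree(old_item);
 */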
3255
3256static int synchronize_sched_expedited_cpu_stop(void *data)
3257{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when the control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary.  Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
3269 smp_mb();
3270 return 0;
3271}
3272
/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with ->expedited_start and ->expedited_done taking on
 * the roles of the halves of the ticket-lock word.  Each caller takes a
 * ticket by incrementing ->expedited_start, attempts to stop all CPUs via
 * try_stop_cpus() (skipping CPUs that the dynticks counters show to be
 * idle), and then advances ->expedited_done.  A caller that observes
 * ->expedited_done catching up with its ticket knows that some other
 * caller has done the work for it.  If CPU hotplug is in flight, if
 * try_stop_cpus() keeps failing, or if the counters are in danger of
 * wrapping, fall back to a normal synchronize_sched() grace period.
 */
3305void synchronize_sched_expedited(void)
3306{
3307 cpumask_var_t cm;
3308 bool cma = false;
3309 int cpu;
3310 long firstsnap, s, snap;
3311 int trycount = 0;
3312 struct rcu_state *rsp = &rcu_sched_state;
3313
	/*
	 * If we are in danger of counter wrap, just do synchronize_sched().
	 * By allowing ->expedited_start to advance no more than ULONG_MAX/8
	 * ahead of ->expedited_done, we are ensuring that the ULONG_CMP_GE()
	 * comparisons against these counters cannot be confused by counter
	 * wrap, even if this caller is delayed for a very long time.
	 */
3322 if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
3323 (ulong)atomic_long_read(&rsp->expedited_done) +
3324 ULONG_MAX / 8)) {
3325 wait_rcu_gp(call_rcu_sched);
3326 atomic_long_inc(&rsp->expedited_wrap);
3327 return;
3328 }
3329
	/*
	 * Take a ticket.  Note that atomic_long_inc_return() implies a
	 * full memory barrier.
	 */
3334 snap = atomic_long_inc_return(&rsp->expedited_start);
3335 firstsnap = snap;
3336 if (!try_get_online_cpus()) {
3337
3338 wait_rcu_gp(call_rcu_sched);
3339 atomic_long_inc(&rsp->expedited_normal);
3340 return;
3341 }
3342 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
3343
3344
3345 cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
3346 if (cma) {
3347 cpumask_copy(cm, cpu_online_mask);
3348 cpumask_clear_cpu(raw_smp_processor_id(), cm);
3349 for_each_cpu(cpu, cm) {
3350 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
3351
3352 if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
3353 cpumask_clear_cpu(cpu, cm);
3354 }
3355 if (cpumask_weight(cm) == 0)
3356 goto all_cpus_idle;
3357 }
3358
	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
3363 while (try_stop_cpus(cma ? cm : cpu_online_mask,
3364 synchronize_sched_expedited_cpu_stop,
3365 NULL) == -EAGAIN) {
3366 put_online_cpus();
3367 atomic_long_inc(&rsp->expedited_tryfail);
3368
3369
3370 s = atomic_long_read(&rsp->expedited_done);
3371 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
3372
3373 smp_mb__before_atomic();
3374 atomic_long_inc(&rsp->expedited_workdone1);
3375 free_cpumask_var(cm);
3376 return;
3377 }
3378
3379
3380 if (trycount++ < 10) {
3381 udelay(trycount * num_online_cpus());
3382 } else {
3383 wait_rcu_gp(call_rcu_sched);
3384 atomic_long_inc(&rsp->expedited_normal);
3385 free_cpumask_var(cm);
3386 return;
3387 }
3388
3389
3390 s = atomic_long_read(&rsp->expedited_done);
3391 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
3392
3393 smp_mb__before_atomic();
3394 atomic_long_inc(&rsp->expedited_workdone2);
3395 free_cpumask_var(cm);
3396 return;
3397 }
3398
		/*
		 * Refetching ->expedited_start allows later callers to
		 * piggyback on our grace period.  We retry after they
		 * started, so our grace period works for them, and they
		 * started after our first try, so their grace period
		 * works for us.
		 */
3406 if (!try_get_online_cpus()) {
3407
3408 wait_rcu_gp(call_rcu_sched);
3409 atomic_long_inc(&rsp->expedited_normal);
3410 free_cpumask_var(cm);
3411 return;
3412 }
3413 snap = atomic_long_read(&rsp->expedited_start);
3414 smp_mb();
3415 }
3416 atomic_long_inc(&rsp->expedited_stoppedcpus);
3417
3418all_cpus_idle:
3419 free_cpumask_var(cm);
3420
	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period.  Update ->expedited_done, but only if our ticket is still
	 * ahead of it, so that later callers can piggyback on the grace
	 * period we just forced.
	 */
3427 do {
3428 atomic_long_inc(&rsp->expedited_done_tries);
3429 s = atomic_long_read(&rsp->expedited_done);
3430 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
3431
3432 smp_mb__before_atomic();
3433 atomic_long_inc(&rsp->expedited_done_lost);
3434 break;
3435 }
3436 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
3437 atomic_long_inc(&rsp->expedited_done_exit);
3438
3439 put_online_cpus();
3440}
3441EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
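/*
 * Usage sketch: synchronize_sched_expedited() is called exactly like
 * synchronize_sched(), so a rarely executed, latency-critical teardown
 * path might look like the hypothetical fragment below.  Most code should
 * prefer plain synchronize_sched(), optionally expedited system-wide via
 * the rcu_expedited knob:
 *
 *	RCU_INIT_POINTER(gp, NULL);	// gp is a hypothetical RCU pointer
 *	synchronize_sched_expedited();	// force the grace period quickly
 *	kfree(oldp);
 */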
3442
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
3450static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3451{
3452 struct rcu_node *rnp = rdp->mynode;
3453
3454 rdp->n_rcu_pending++;
3455
3456
3457 check_cpu_stall(rsp, rdp);
3458
3459
3460 if (rcu_nohz_full_cpu(rsp))
3461 return 0;
3462
3463
3464 if (rcu_scheduler_fully_active &&
3465 rdp->qs_pending && !rdp->passed_quiesce &&
3466 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
3467 rdp->n_rp_qs_pending++;
3468 } else if (rdp->qs_pending &&
3469 (rdp->passed_quiesce ||
3470 rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
3471 rdp->n_rp_report_qs++;
3472 return 1;
3473 }
3474
3475
3476 if (cpu_has_callbacks_ready_to_invoke(rdp)) {
3477 rdp->n_rp_cb_ready++;
3478 return 1;
3479 }
3480
3481
3482 if (cpu_needs_another_gp(rsp, rdp)) {
3483 rdp->n_rp_cpu_needs_gp++;
3484 return 1;
3485 }
3486
3487
3488 if (READ_ONCE(rnp->completed) != rdp->completed) {
3489 rdp->n_rp_gp_completed++;
3490 return 1;
3491 }
3492
3493
3494 if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
3495 unlikely(READ_ONCE(rdp->gpwrap))) {
3496 rdp->n_rp_gp_started++;
3497 return 1;
3498 }
3499
3500
3501 if (rcu_nocb_need_deferred_wakeup(rdp)) {
3502 rdp->n_rp_nocb_defer_wakeup++;
3503 return 1;
3504 }
3505
3506
3507 rdp->n_rp_need_nothing++;
3508 return 0;
3509}
3510
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for any type of RCU, returning 1 if so.
 */
3516static int rcu_pending(void)
3517{
3518 struct rcu_state *rsp;
3519
3520 for_each_rcu_flavor(rsp)
3521 if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
3522 return 1;
3523 return 0;
3524}
3525
/*
 * Return true if the specified CPU has any callback.  If all_lazy is
 * non-NULL, store an indication of whether all callbacks are lazy.
 * (If there are no callbacks, all of them are deemed to be lazy.)
 */
3531static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
3532{
3533 bool al = true;
3534 bool hc = false;
3535 struct rcu_data *rdp;
3536 struct rcu_state *rsp;
3537
3538 for_each_rcu_flavor(rsp) {
3539 rdp = this_cpu_ptr(rsp->rda);
3540 if (!rdp->nxtlist)
3541 continue;
3542 hc = true;
3543 if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
3544 al = false;
3545 break;
3546 }
3547 }
3548 if (all_lazy)
3549 *all_lazy = al;
3550 return hc;
3551}
3552
/*
 * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
 * the compiler is expected to optimize this away.
 */
3557static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
3558 int cpu, unsigned long done)
3559{
3560 trace_rcu_barrier(rsp->name, s, cpu,
3561 atomic_read(&rsp->barrier_cpu_count), done);
3562}
3563
/*
 * RCU callback function for _rcu_barrier().  If we are last, wake
 * up the task executing _rcu_barrier().
 */
3568static void rcu_barrier_callback(struct rcu_head *rhp)
3569{
3570 struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
3571 struct rcu_state *rsp = rdp->rsp;
3572
3573 if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
3574 _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
3575 complete(&rsp->barrier_completion);
3576 } else {
3577 _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
3578 }
3579}
3580
/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
3584static void rcu_barrier_func(void *type)
3585{
3586 struct rcu_state *rsp = type;
3587 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
3588
3589 _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
3590 atomic_inc(&rsp->barrier_cpu_count);
3591 rsp->call(&rdp->barrier_head, rcu_barrier_callback);
3592}
3593
/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
3598static void _rcu_barrier(struct rcu_state *rsp)
3599{
3600 int cpu;
3601 struct rcu_data *rdp;
3602 unsigned long snap = READ_ONCE(rsp->n_barrier_done);
3603 unsigned long snap_done;
3604
3605 _rcu_barrier_trace(rsp, "Begin", -1, snap);
3606
3607
3608 mutex_lock(&rsp->barrier_mutex);
3609
	/*
	 * Ensure that all prior references, including to ->n_barrier_done,
	 * are ordered before the _rcu_barrier() machinery.
	 */
3614 smp_mb();
3615
	/*
	 * Recheck ->n_barrier_done to see if others did our work for us.
	 * This means checking ->n_barrier_done for an even-to-odd-to-even
	 * transition.  The "if" expression below therefore rounds the old
	 * value up to the next even number and adds two before comparing.
	 */
3622 snap_done = rsp->n_barrier_done;
3623 _rcu_barrier_trace(rsp, "Check", -1, snap_done);
3624
	/*
	 * If the value in snap is odd, we needed to wait for the current
	 * rcu_barrier() to complete, then wait for the next one, in other
	 * words, we need the value of snap_done to be three larger than
	 * the value of snap.  On the other hand, if the value in snap is
	 * even, we only had to wait for the next rcu_barrier() to complete,
	 * in other words, we need the value of snap_done to be only two
	 * greater than the value of snap.  This "if" statement handles
	 * this for us, thanks to the ULONG_CMP_GE macro.
	 */
3635 if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
3636 _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
3637 smp_mb();
3638 mutex_unlock(&rsp->barrier_mutex);
3639 return;
3640 }
3641
	/*
	 * Increment ->n_barrier_done to avoid duplicate work.  Use
	 * WRITE_ONCE() to prevent the compiler from speculating
	 * the increment to precede the above early-exit check.
	 */
3647 WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
3648 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
3649 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
3650 smp_mb();
3651
	/*
	 * Initialize the count to one rather than to zero in order to
	 * avoid a too-soon return to zero in case of a short grace period
	 * (or preemption of this task).  Exclude CPU-hotplug operations
	 * to ensure that no offline CPU has callbacks queued.
	 */
3658 init_completion(&rsp->barrier_completion);
3659 atomic_set(&rsp->barrier_cpu_count, 1);
3660 get_online_cpus();
3661
	/*
	 * Force each CPU with callbacks to register a new callback.
	 * When that callback is invoked, we will know that all of the
	 * corresponding CPU's preceding callbacks have been invoked.
	 */
3667 for_each_possible_cpu(cpu) {
3668 if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3669 continue;
3670 rdp = per_cpu_ptr(rsp->rda, cpu);
3671 if (rcu_is_nocb_cpu(cpu)) {
3672 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
3673 _rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
3674 rsp->n_barrier_done);
3675 } else {
3676 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3677 rsp->n_barrier_done);
3678 smp_mb__before_atomic();
3679 atomic_inc(&rsp->barrier_cpu_count);
3680 __call_rcu(&rdp->barrier_head,
3681 rcu_barrier_callback, rsp, cpu, 0);
3682 }
3683 } else if (READ_ONCE(rdp->qlen)) {
3684 _rcu_barrier_trace(rsp, "OnlineQ", cpu,
3685 rsp->n_barrier_done);
3686 smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3687 } else {
3688 _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
3689 rsp->n_barrier_done);
3690 }
3691 }
3692 put_online_cpus();
3693
	/*
	 * Now that we have an rcu_barrier_callback() callback on each
	 * CPU, and thus each counted, remove the initial count.
	 */
3698 if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3699 complete(&rsp->barrier_completion);
3700
3701
3702 smp_mb();
3703 WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
3704 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
3705 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
3706 smp_mb();
3707
3708
3709 wait_for_completion(&rsp->barrier_completion);
3710
3711
3712 mutex_unlock(&rsp->barrier_mutex);
3713}
3714
/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
3718void rcu_barrier_bh(void)
3719{
3720 _rcu_barrier(&rcu_bh_state);
3721}
3722EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3723
/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
3727void rcu_barrier_sched(void)
3728{
3729 _rcu_barrier(&rcu_sched_state);
3730}
3731EXPORT_SYMBOL_GPL(rcu_barrier_sched);
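/*
 * Usage sketch: a module that posts call_rcu_sched() callbacks must wait
 * for all of them before the module text containing the callback functions
 * can be unloaded.  The foo_exit() name is hypothetical:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// Stop posting new callbacks first, then...
 *		rcu_barrier_sched();	// ...wait for in-flight callbacks.
 *	}
 */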
3732
/*
 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online.  The caller
 * must hold the corresponding leaf rcu_node ->lock with interrupts
 * disabled.
 */
3739static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3740{
3741 long mask;
3742 struct rcu_node *rnp = rnp_leaf;
3743
3744 for (;;) {
3745 mask = rnp->grpmask;
3746 rnp = rnp->parent;
3747 if (rnp == NULL)
3748 return;
3749 raw_spin_lock(&rnp->lock);
3750 rnp->qsmaskinit |= mask;
3751 raw_spin_unlock(&rnp->lock);
3752 }
3753}
3754
/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */
3758static void __init
3759rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3760{
3761 unsigned long flags;
3762 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3763 struct rcu_node *rnp = rcu_get_root(rsp);
3764
3765
3766 raw_spin_lock_irqsave(&rnp->lock, flags);
3767 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
3768 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3769 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
3770 WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
3771 rdp->cpu = cpu;
3772 rdp->rsp = rsp;
3773 rcu_boot_init_nocb_percpu_data(rdp);
3774 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3775}
3776
/*
 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
3783static void
3784rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3785{
3786 unsigned long flags;
3787 unsigned long mask;
3788 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3789 struct rcu_node *rnp = rcu_get_root(rsp);
3790
3791
3792 raw_spin_lock_irqsave(&rnp->lock, flags);
3793 rdp->beenonline = 1;
3794 rdp->qlen_last_fqs_check = 0;
3795 rdp->n_force_qs_snap = rsp->n_force_qs;
3796 rdp->blimit = blimit;
3797 if (!rdp->nxtlist)
3798 init_callback_list(rdp);
3799 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
3800 rcu_sysidle_init_percpu_data(rdp->dynticks);
3801 atomic_set(&rdp->dynticks->dynticks,
3802 (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
3803 raw_spin_unlock(&rnp->lock);
3804
	/*
	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
	 * propagation up the rcu_node tree will happen at the beginning
	 * of the next grace period.
	 */
3810 rnp = rdp->mynode;
3811 mask = rdp->grpmask;
3812 raw_spin_lock(&rnp->lock);
3813 smp_mb__after_unlock_lock();
3814 rnp->qsmaskinitnext |= mask;
3815 rdp->gpnum = rnp->completed;
3816 rdp->completed = rnp->completed;
3817 rdp->passed_quiesce = false;
3818 rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
3819 rdp->qs_pending = false;
3820 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3821 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3822}
3823
3824static void rcu_prepare_cpu(int cpu)
3825{
3826 struct rcu_state *rsp;
3827
3828 for_each_rcu_flavor(rsp)
3829 rcu_init_percpu_data(cpu, rsp);
3830}
3831
/*
 * Handle CPU online/offline notification events.
 */
3835int rcu_cpu_notify(struct notifier_block *self,
3836 unsigned long action, void *hcpu)
3837{
3838 long cpu = (long)hcpu;
3839 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3840 struct rcu_node *rnp = rdp->mynode;
3841 struct rcu_state *rsp;
3842
3843 switch (action) {
3844 case CPU_UP_PREPARE:
3845 case CPU_UP_PREPARE_FROZEN:
3846 rcu_prepare_cpu(cpu);
3847 rcu_prepare_kthreads(cpu);
3848 rcu_spawn_all_nocb_kthreads(cpu);
3849 break;
3850 case CPU_ONLINE:
3851 case CPU_DOWN_FAILED:
3852 rcu_boost_kthread_setaffinity(rnp, -1);
3853 break;
3854 case CPU_DOWN_PREPARE:
3855 rcu_boost_kthread_setaffinity(rnp, cpu);
3856 break;
3857 case CPU_DYING:
3858 case CPU_DYING_FROZEN:
3859 for_each_rcu_flavor(rsp)
3860 rcu_cleanup_dying_cpu(rsp);
3861 break;
3862 case CPU_DYING_IDLE:
3863 for_each_rcu_flavor(rsp) {
3864 rcu_cleanup_dying_idle_cpu(cpu, rsp);
3865 }
3866 break;
3867 case CPU_DEAD:
3868 case CPU_DEAD_FROZEN:
3869 case CPU_UP_CANCELED:
3870 case CPU_UP_CANCELED_FROZEN:
3871 for_each_rcu_flavor(rsp) {
3872 rcu_cleanup_dead_cpu(cpu, rsp);
3873 do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
3874 }
3875 break;
3876 default:
3877 break;
3878 }
3879 return NOTIFY_OK;
3880}

/*
 * On smaller systems, use expedited RCU grace periods to make suspend
 * and hibernation run faster.
 */
3882static int rcu_pm_notify(struct notifier_block *self,
3883 unsigned long action, void *hcpu)
3884{
3885 switch (action) {
3886 case PM_HIBERNATION_PREPARE:
3887 case PM_SUSPEND_PREPARE:
3888 if (nr_cpu_ids <= 256)
3889 rcu_expedite_gp();
3890 break;
3891 case PM_POST_HIBERNATION:
3892 case PM_POST_SUSPEND:
3893 if (nr_cpu_ids <= 256)
3894 rcu_unexpedite_gp();
3895 break;
3896 default:
3897 break;
3898 }
3899 return NOTIFY_OK;
3900}
3901
/*
 * Spawn the kthreads that handle each RCU flavor's grace periods.
 */
3905static int __init rcu_spawn_gp_kthread(void)
3906{
3907 unsigned long flags;
3908 int kthread_prio_in = kthread_prio;
3909 struct rcu_node *rnp;
3910 struct rcu_state *rsp;
3911 struct sched_param sp;
3912 struct task_struct *t;
3913
3914
3915 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3916 kthread_prio = 1;
3917 else if (kthread_prio < 0)
3918 kthread_prio = 0;
3919 else if (kthread_prio > 99)
3920 kthread_prio = 99;
3921 if (kthread_prio != kthread_prio_in)
3922 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3923 kthread_prio, kthread_prio_in);
3924
3925 rcu_scheduler_fully_active = 1;
3926 for_each_rcu_flavor(rsp) {
3927 t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
3928 BUG_ON(IS_ERR(t));
3929 rnp = rcu_get_root(rsp);
3930 raw_spin_lock_irqsave(&rnp->lock, flags);
3931 rsp->gp_kthread = t;
3932 if (kthread_prio) {
3933 sp.sched_priority = kthread_prio;
3934 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3935 }
3936 wake_up_process(t);
3937 raw_spin_unlock_irqrestore(&rnp->lock, flags);
3938 }
3939 rcu_spawn_nocb_kthreads();
3940 rcu_spawn_boost_kthreads();
3941 return 0;
3942}
3943early_initcall(rcu_spawn_gp_kthread);
3944
/*
 * This function is invoked towards the end of the scheduler's
 * initialization process.  Before this is called, the idle task might
 * contain RCU read-side critical sections (during which time, this idle
 * task is booting the system).  After this function is called, the idle
 * tasks are prohibited from containing RCU read-side critical sections.
 * This function also enables RCU lockdep checking.
 */
3953void rcu_scheduler_starting(void)
3954{
3955 WARN_ON(num_online_cpus() != 1);
3956 WARN_ON(nr_context_switches() > 0);
3957 rcu_scheduler_active = 1;
3958}
3959
/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
3964static void __init rcu_init_levelspread(struct rcu_state *rsp)
3965{
3966 int i;
3967
3968 if (rcu_fanout_exact) {
3969 rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
3970 for (i = rcu_num_lvls - 2; i >= 0; i--)
3971 rsp->levelspread[i] = RCU_FANOUT;
3972 } else {
3973 int ccur;
3974 int cprv;
3975
3976 cprv = nr_cpu_ids;
3977 for (i = rcu_num_lvls - 1; i >= 0; i--) {
3978 ccur = rsp->levelcnt[i];
3979 rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
3980 cprv = ccur;
3981 }
3982 }
3983}
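/*
 * Worked example of the balanced (!rcu_fanout_exact) case, using assumed
 * values of nr_cpu_ids = 96 and ->levelcnt[] = { 1, 6 } for a two-level
 * tree.  The loop runs leaf-to-root and computes
 *
 *	levelspread[1] = (96 + 6 - 1) / 6 = 16	(CPUs per leaf rcu_node)
 *	levelspread[0] = (6 + 1 - 1) / 1 = 6	(leaves under the root)
 *
 * so each leaf covers at most 16 CPUs and the root fans out to 6 leaves.
 */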
3984
/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
3988static void __init rcu_init_one(struct rcu_state *rsp,
3989 struct rcu_data __percpu *rda)
3990{
3991 static const char * const buf[] = {
3992 "rcu_node_0",
3993 "rcu_node_1",
3994 "rcu_node_2",
3995 "rcu_node_3" };
3996 static const char * const fqs[] = {
3997 "rcu_node_fqs_0",
3998 "rcu_node_fqs_1",
3999 "rcu_node_fqs_2",
4000 "rcu_node_fqs_3" };
4001 static u8 fl_mask = 0x1;
4002 int cpustride = 1;
4003 int i;
4004 int j;
4005 struct rcu_node *rnp;
4006
4007 BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));
4008
4009
4010 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4011 panic("rcu_init_one: rcu_num_lvls out of range");
4012
4013
4014
4015 for (i = 0; i < rcu_num_lvls; i++)
4016 rsp->levelcnt[i] = num_rcu_lvl[i];
4017 for (i = 1; i < rcu_num_lvls; i++)
4018 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
4019 rcu_init_levelspread(rsp);
4020 rsp->flavor_mask = fl_mask;
4021 fl_mask <<= 1;
4022
4023
4024
4025 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4026 cpustride *= rsp->levelspread[i];
4027 rnp = rsp->level[i];
4028 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
4029 raw_spin_lock_init(&rnp->lock);
4030 lockdep_set_class_and_name(&rnp->lock,
4031 &rcu_node_class[i], buf[i]);
4032 raw_spin_lock_init(&rnp->fqslock);
4033 lockdep_set_class_and_name(&rnp->fqslock,
4034 &rcu_fqs_class[i], fqs[i]);
4035 rnp->gpnum = rsp->gpnum;
4036 rnp->completed = rsp->completed;
4037 rnp->qsmask = 0;
4038 rnp->qsmaskinit = 0;
4039 rnp->grplo = j * cpustride;
4040 rnp->grphi = (j + 1) * cpustride - 1;
4041 if (rnp->grphi >= nr_cpu_ids)
4042 rnp->grphi = nr_cpu_ids - 1;
4043 if (i == 0) {
4044 rnp->grpnum = 0;
4045 rnp->grpmask = 0;
4046 rnp->parent = NULL;
4047 } else {
4048 rnp->grpnum = j % rsp->levelspread[i - 1];
4049 rnp->grpmask = 1UL << rnp->grpnum;
4050 rnp->parent = rsp->level[i - 1] +
4051 j / rsp->levelspread[i - 1];
4052 }
4053 rnp->level = i;
4054 INIT_LIST_HEAD(&rnp->blkd_tasks);
4055 rcu_init_one_nocb(rnp);
4056 }
4057 }
4058
4059 init_waitqueue_head(&rsp->gp_wq);
4060 rnp = rsp->level[rcu_num_lvls - 1];
4061 for_each_possible_cpu(i) {
4062 while (i > rnp->grphi)
4063 rnp++;
4064 per_cpu_ptr(rsp->rda, i)->mynode = rnp;
4065 rcu_boot_init_percpu_data(i, rsp);
4066 }
4067 list_add(&rsp->flavors, &rcu_struct_flavors);
4068}
4069
/*
 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
 * replace the definitions in tree.h because those are needed to size the
 * ->node array in the rcu_state structure.
 */
4075static void __init rcu_init_geometry(void)
4076{
4077 ulong d;
4078 int i;
4079 int j;
4080 int n = nr_cpu_ids;
4081 int rcu_capacity[MAX_RCU_LVLS + 1];
4082
	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, then adding one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
4090 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4091 if (jiffies_till_first_fqs == ULONG_MAX)
4092 jiffies_till_first_fqs = d;
4093 if (jiffies_till_next_fqs == ULONG_MAX)
4094 jiffies_till_next_fqs = d;
4095
4096
4097 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4098 nr_cpu_ids == NR_CPUS)
4099 return;
4100 pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
4101 rcu_fanout_leaf, nr_cpu_ids);
4102
	/*
	 * Compute the number of CPUs that can be handled by an rcu_node
	 * tree with the given number of levels.  Setting rcu_capacity[0]
	 * makes some of the arithmetic easier.
	 */
4108 rcu_capacity[0] = 1;
4109 rcu_capacity[1] = rcu_fanout_leaf;
4110 for (i = 2; i <= MAX_RCU_LVLS; i++)
4111 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4112
	/*
	 * The boot-time rcu_fanout_leaf parameter is only permitted
	 * to increase the leaf-level fanout, not decrease it.  Of course,
	 * the leaf-level fanout cannot exceed the number of bits in
	 * the rcu_node masks.  Complain and fall back to the compile-time
	 * values if these limits are exceeded.
	 */
4121 if (rcu_fanout_leaf < RCU_FANOUT_LEAF ||
4122 rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
4123 n > rcu_capacity[MAX_RCU_LVLS]) {
4124 WARN_ON(1);
4125 return;
4126 }
4127
4128
4129 for (i = 1; i <= MAX_RCU_LVLS; i++)
4130 if (n <= rcu_capacity[i]) {
4131 for (j = 0; j <= i; j++)
4132 num_rcu_lvl[j] =
4133 DIV_ROUND_UP(n, rcu_capacity[i - j]);
4134 rcu_num_lvls = i;
4135 for (j = i + 1; j <= MAX_RCU_LVLS; j++)
4136 num_rcu_lvl[j] = 0;
4137 break;
4138 }
4139
4140
4141 rcu_num_nodes = 0;
4142 for (i = 0; i <= MAX_RCU_LVLS; i++)
4143 rcu_num_nodes += num_rcu_lvl[i];
4144 rcu_num_nodes -= n;
4145}
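/*
 * Worked example with assumed values RCU_FANOUT = 64, rcu_fanout_leaf = 16,
 * and nr_cpu_ids = 96: rcu_capacity[1] = 16 and rcu_capacity[2] = 1024, so
 * a two-level tree suffices and the loop above produces
 *
 *	num_rcu_lvl[] = { 1, DIV_ROUND_UP(96, 16), 96 } = { 1, 6, 96 }
 *	rcu_num_lvls  = 2
 *	rcu_num_nodes = (1 + 6 + 96) - 96 = 7
 *
 * that is, one root rcu_node with six leaf children, each leaf covering
 * at most 16 CPUs.
 */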
4146
/*
 * Dump out the structure of the rcu_node combining tree associated
 * with the rcu_state structure referenced by rsp.
 */
4151static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
4152{
4153 int level = 0;
4154 struct rcu_node *rnp;
4155
4156 pr_info("rcu_node tree layout dump\n");
4157 pr_info(" ");
4158 rcu_for_each_node_breadth_first(rsp, rnp) {
4159 if (rnp->level != level) {
4160 pr_cont("\n");
4161 pr_info(" ");
4162 level = rnp->level;
4163 }
4164 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4165 }
4166 pr_cont("\n");
4167}
4168
4169void __init rcu_init(void)
4170{
4171 int cpu;
4172
4173 rcu_early_boot_tests();
4174
4175 rcu_bootup_announce();
4176 rcu_init_geometry();
4177 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
4178 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
4179 if (dump_tree)
4180 rcu_dump_rcu_node_tree(&rcu_sched_state);
4181 __rcu_init_preempt();
4182 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
4183
	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
4189 cpu_notifier(rcu_cpu_notify, 0);
4190 pm_notifier(rcu_pm_notify, 0);
4191 for_each_online_cpu(cpu)
4192 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
4193}
4194
4195#include "tree_plugin.h"
4196