/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 */
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include "time/tick-internal.h"

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads poll for CBs. */
static char __initdata nocb_buf[NR_CPUS * 5];
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
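/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */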
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	pr_info("\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
		CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	pr_info("\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	pr_info("\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	pr_info("\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	pr_info("\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	pr_info("\tFour-level hierarchy is enabled.\n");
#endif
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_NOCB_CPU
#ifndef CONFIG_RCU_NOCB_CPU_NONE
	if (!have_rcu_nocb_mask) {
		zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
		have_rcu_nocb_mask = true;
	}
#ifdef CONFIG_RCU_NOCB_CPU_ZERO
	pr_info("\tOffload RCU callbacks from CPU 0\n");
	cpumask_set_cpu(0, rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
#ifdef CONFIG_RCU_NOCB_CPU_ALL
	pr_info("\tOffload RCU callbacks from all CPUs\n");
	cpumask_setall(rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
	if (have_rcu_nocb_mask) {
		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
		pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
		if (rcu_nocb_poll)
			pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
	}
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
}

#ifdef CONFIG_TREE_PREEMPT_RCU

RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

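/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */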
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

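/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */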
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 *
		 * But first, note that the current CPU must still be
		 * online!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

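/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */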
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.  Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	pr_cont("\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

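/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */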
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

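/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's ->lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */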
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if necessary.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled. */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled. */
	}

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
#ifdef CONFIG_RCU_BOOST
	rnp->boost_tasks = NULL;
	/*
	 * In case root is being boosted and leaf was not.  Make sure
	 * that we boost the tasks blocking the current grace period
	 * in this case.
	 */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled. */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
	    rnp_root->boost_tasks != rnp_root->exp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled. */
#endif /* #ifdef CONFIG_RCU_BOOST */

	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

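/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */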
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	if (rcu_expedited)
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
 * CPU hotplug operations.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

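/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 */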
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	unsigned long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
	 * operation that finds an rcu_node structure with tasks in the
	 * process of being boosted will know that all tasks blocking
	 * this expedited grace period will already be in the process of
	 * being boosted.  This simplifies the process of moving tasks
	 * from leaf to root rcu_node structures.
	 */
	get_online_cpus();

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (ULONG_CMP_LT(snap,
		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
			put_online_cpus();
			goto mb_ret; /* Others did our work for us. */
		}
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			put_online_cpus();
			wait_rcu_gp(call_rcu);
			return;
		}
	}
	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
		put_online_cpus();
		goto unlock_mb_ret; /* Others did our work for us. */
	}

	/* force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	put_online_cpus();

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * Because there is no preemptible RCU, use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

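/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */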
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this handoff
	 * scheme.)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_interruptible(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __get_cpu_var(rcu_cpu_has_work);
}

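/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */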
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
	schedule_timeout_interruptible(2);
	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
	*statusp = RCU_KTHREAD_WAITING;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rnp->qsmaskinit;
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
 * any flavor of RCU.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return rcu_cpu_has_callbacks(cpu, NULL);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.
 *
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU with
 * callbacks still pending is permitted to sleep in dyntick-idle mode,
 * while RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU
 * with only lazy callbacks (e.g., from kfree_rcu()) is permitted to
 * sleep.  Both values may be adjusted via the module parameters below.
 */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */

static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
module_param(rcu_idle_gp_delay, int, 0644);
static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);

extern int tick_nohz_enabled;

/*
 * Try to advance callbacks for all flavors of RCU on the current CPU.
 * Afterwards, if there are any callbacks ready for immediate invocation,
 * return true.
 */
static bool rcu_try_advance_all_cbs(void)
{
	bool cbs_ready = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		rnp = rdp->mynode;

		/*
		 * Don't bother checking unless a grace period has
		 * completed since we last checked and there are
		 * callbacks not yet ready to invoke.
		 */
		if (rdp->completed != rnp->completed &&
		    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
			note_gp_changes(rsp, rdp);

		if (cpu_has_callbacks_ready_to_invoke(rdp))
			cbs_ready = true;
	}
	return cbs_ready;
}

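/*
 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
 * caller to set the timeout based on whether or not there are non-lazy
 * callbacks.
 *
 * The caller must have disabled interrupts.
 */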
int rcu_needs_cpu(int cpu, unsigned long *dj)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	/* Snapshot to detect later posting of non-lazy callback. */
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;

	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
		*dj = ULONG_MAX;
		return 0;
	}

	/* Attempt to advance callbacks. */
	if (rcu_try_advance_all_cbs()) {
		/* Some ready to invoke, so initiate later invocation. */
		invoke_rcu_core();
		return 1;
	}
	rdtp->last_accelerate = jiffies;

	/* Request timer delay depending on laziness, and round. */
	if (!rdtp->all_lazy) {
		*dj = round_up(rcu_idle_gp_delay + jiffies,
			       rcu_idle_gp_delay) - jiffies;
	} else {
		*dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
	}
	return 0;
}

/*
 * Prepare a CPU for idle from an RCU perspective.  The first major task
 * is to sense whether nohz mode has been enabled or disabled via sysfs.
 * The second major task is to check to see if a non-lazy callback has
 * arrived at a CPU that previously had only lazy callbacks.  The third
 * major task is to accelerate (that is, assign grace-period numbers to)
 * any recently arrived callbacks.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	struct rcu_node *rnp;
	struct rcu_state *rsp;
	int tne;

	/* Handle nohz enablement switches conservatively. */
	tne = ACCESS_ONCE(tick_nohz_enabled);
	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(cpu, NULL))
			invoke_rcu_core(); /* force nohz to see update. */
		rdtp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/* If this is a no-CBs CPU, no callbacks, just return. */
	if (rcu_is_nocb_cpu(cpu))
		return;

	/*
	 * If a non-lazy callback arrived at a CPU having only lazy
	 * callbacks, invoke RCU core for the side-effect of recalculating
	 * idle duration on re-entry to idle.
	 */
	if (rdtp->all_lazy &&
	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
		invoke_rcu_core();
		return;
	}

	/*
	 * If we have not yet accelerated this jiffy, accelerate all
	 * callbacks on this CPU.
	 */
	if (rdtp->last_accelerate == jiffies)
		return;
	rdtp->last_accelerate = jiffies;
	for_each_rcu_flavor(rsp) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (!*rdp->nxttail[RCU_DONE_TAIL])
			continue;
		rnp = rdp->mynode;
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rcu_accelerate_cbs(rsp, rnp, rdp);
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}
}

/*
 * Clean up for exit from idle.  Attempt to advance callbacks based on
 * any grace periods that elapsed while the CPU was idle, and if any
 * callbacks are now ready to invoke, initiate invocation.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	struct rcu_data *rdp;
	struct rcu_state *rsp;

	if (rcu_is_nocb_cpu(cpu))
		return;
	rcu_try_advance_all_cbs();
	for_each_rcu_flavor(rsp) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (cpu_has_callbacks_ready_to_invoke(rdp))
			invoke_rcu_core();
	}
}

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}

/*
 * Data for flushing lazy RCU callbacks at OOM time.
 */
static atomic_t oom_callback_count;
static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);

/*
 * RCU OOM callback -- decrement the outstanding count and deliver the
 * wake-up if we are the last one.
 */
static void rcu_oom_callback(struct rcu_head *rhp)
{
	if (atomic_dec_and_test(&oom_callback_count))
		wake_up(&oom_callback_wq);
}

/*
 * Post an rcu_oom_notify callback on the current CPU if it has at
 * least one lazy callback.  This will unnecessarily post callbacks
 * to CPUs that already have a non-lazy callback at the end of their
 * callback list, but this is an infrequent operation, so accept some
 * extra overhead to keep things simple.
 */
static void rcu_oom_notify_cpu(void *unused)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;

	for_each_rcu_flavor(rsp) {
		rdp = __this_cpu_ptr(rsp->rda);
		if (rdp->qlen_lazy != 0) {
			atomic_inc(&oom_callback_count);
			rsp->call(&rdp->oom_head, rcu_oom_callback);
		}
	}
}

/*
 * If low on memory, ensure that each CPU has a non-lazy callback.
 * This will wake up CPUs that have only lazy callbacks, in turn ensuring
 * that they free up the corresponding memory in a timely manner.
 * Because an uncertain amount of memory will be freed in some uncertain
 * timeframe, we do not claim to have freed anything.
 */
static int rcu_oom_notify(struct notifier_block *self,
			  unsigned long notused, void *nfreed)
{
	int cpu;

	/* Wait for callbacks from earlier instance to complete. */
	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);

	/*
	 * Prevent premature wakeup: ensure that all increments happen
	 * before there is a chance of the counter reaching zero.
	 */
	atomic_set(&oom_callback_count, 1);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
		cond_resched();
	}
	put_online_cpus();

	/* Unconditionally decrement: no need to wake ourselves up. */
	atomic_dec(&oom_callback_count);

	return NOTIFY_OK;
}

static struct notifier_block rcu_oom_nb = {
	.notifier_call = rcu_oom_notify
};

static int __init rcu_register_oom_notifier(void)
{
	register_oom_notifier(&rcu_oom_nb);
	return 0;
}
early_initcall(rcu_register_oom_notifier);

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_CPU_STALL_INFO

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;

	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
		rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
		ulong2long(nlpd),
		rdtp->all_lazy ? 'L' : '.',
		rdtp->tick_nohz_enabled_snap ? '.' : 'D');
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	pr_cont("\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       fast_no_hz);
}

/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	pr_err("\t");
}

/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
}

/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		__this_cpu_ptr(rsp->rda)->ticks_this_gp++;
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void print_cpu_stall_info_begin(void)
{
	pr_cont(" {");
}

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	pr_cont(" %d", cpu);
}

static void print_cpu_stall_info_end(void)
{
	pr_cont("} ");
}

static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
}

static void increment_cpu_stall_ticks(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

#ifdef CONFIG_RCU_NOCB_CPU

/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * specified by rcu_nocb_mask.  For each CPU in the set, there is a
 * kthread created that pulls the callbacks from the corresponding CPU,
 * waits for a grace period to elapse, and invokes the callbacks.
 * The no-CBs CPUs do a wake_up() on their kthread when they insert
 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
 * has been specified, in which case each kthread actively polls its
 * CPU.  (Which isn't so great for energy efficiency, but which does
 * reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callback processing could also in theory be used as
 * an energy-efficiency measure because CPUs with no RCU callbacks
 * queued are more aggressive about entering dyntick-idle mode.
 */

/* Parse the boot-time rcu_nocbs= CPU list from the kernel parameters. */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	have_rcu_nocb_mask = true;
	cpulist_parse(str, rcu_nocb_mask);
	return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);

static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = 1;
	return 0;
}
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);

/*
 * Do any no-CBs CPUs need another grace period?
 *
 * Interrupts must be disabled.  If the caller does not hold the root
 * rnp_node structure's ->lock, the results are advisory only.
 */
static int rcu_nocb_needs_gp(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
}

/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
	wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
}

/*
 * Set the root rcu_node structure's ->need_future_gp field
 * based on the sum of those of all rcu_node structures.  This does
 * double-count the root rcu_node structure's requests, but this
 * is necessary to handle the possibility of a rcu_nocb_kthread()
 * calling rcu_start_future_gp() just before the beginning of the
 * grace period.
 */
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{
	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_waitqueue_head(&rnp->nocb_gp_wq[0]);
	init_waitqueue_head(&rnp->nocb_gp_wq[1]);
}

/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
	if (have_rcu_nocb_mask)
		return cpumask_test_cpu(cpu, rcu_nocb_mask);
	return false;
}

/*
 * Enqueue the specified string of rcu_head structures onto the specified
 * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
 * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
 * counts are supplied by rhcount and rhcount_lazy.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
				    struct rcu_head *rhp,
				    struct rcu_head **rhtp,
				    int rhcount, int rhcount_lazy)
{
	int len;
	struct rcu_head **old_rhpp;
	struct task_struct *t;

	/* Enqueue the callback on the nocb list and update counts. */
	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
	ACCESS_ONCE(*old_rhpp) = rhp;
	atomic_long_add(rhcount, &rdp->nocb_q_count);
	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);

	/* If we are not being polled and there is a kthread, awaken it ... */
	t = ACCESS_ONCE(rdp->nocb_kthread);
	if (rcu_nocb_poll || !t)
		return;
	len = atomic_long_read(&rdp->nocb_q_count);
	if (old_rhpp == &rdp->nocb_head) {
		wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
		rdp->qlen_last_fqs_check = 0;
	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
		wake_up_process(t); /* ... or if many callbacks queued. */
		rdp->qlen_last_fqs_check = LONG_MAX / 2;
	}
	return;
}

/*
 * This is a helper for __call_rcu(), which invokes this when the normal
 * callback queue is inoperable.  If this is not a no-CBs CPU, this
 * function returns failure back to __call_rcu(), which can complain
 * appropriately.
 *
 * Otherwise, this function queues the callback where the corresponding
 * "rcuo" kthread can find it.
 */
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy)
{

	if (!rcu_is_nocb_cpu(rdp->cpu))
		return 0;
	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
					 (unsigned long)rhp->func,
					 rdp->qlen_lazy, rdp->qlen);
	else
		trace_rcu_callback(rdp->rsp->name, rhp,
				   rdp->qlen_lazy, rdp->qlen);
	return 1;
}

/*
 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
 * not a no-CBs CPU.
 */
static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
						     struct rcu_data *rdp)
{
	long ql = rsp->qlen;
	long qll = rsp->qlen_lazy;

	/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
	if (!rcu_is_nocb_cpu(smp_processor_id()))
		return 0;
	rsp->qlen = 0;
	rsp->qlen_lazy = 0;

	/* First, enqueue the donelist, if any.  This preserves CB ordering. */
	if (rsp->orphan_donelist != NULL) {
		__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
					rsp->orphan_donetail, ql, qll);
		ql = qll = 0;
		rsp->orphan_donelist = NULL;
		rsp->orphan_donetail = &rsp->orphan_donelist;
	}
	if (rsp->orphan_nxtlist != NULL) {
		__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
					rsp->orphan_nxttail, ql, qll);
		ql = qll = 0;
		rsp->orphan_nxtlist = NULL;
		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
	}
	return 1;
}

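/*
 * If necessary, kick off a new grace period, and either way wait
 * for a subsequent grace period to complete.
 */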
static void rcu_nocb_wait_gp(struct rcu_data *rdp)
{
	unsigned long c;
	bool d;
	unsigned long flags;
	struct rcu_node *rnp = rdp->mynode;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	c = rcu_start_future_gp(rnp, rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/*
	 * Wait for the grace period.  Do so interruptibly to avoid messing
	 * up the load average.
	 */
	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
	for (;;) {
		wait_event_interruptible(
			rnp->nocb_gp_wq[c & 0x1],
			(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
		if (likely(d))
			break;
		flush_signals(current);
		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
	}
	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
	smp_mb(); /* Ensure that CB invocation happens after GP end. */
}

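/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
 * callbacks queued by the corresponding no-CBs CPU.
 */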
static int rcu_nocb_kthread(void *arg)
{
	int c, cl;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_head **tail;
	struct rcu_data *rdp = arg;

	/* Each pass through this loop invokes one batch of callbacks */
	for (;;) {
		/* If not polling, wait for next batch of callbacks. */
		if (!rcu_nocb_poll)
			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
		list = ACCESS_ONCE(rdp->nocb_head);
		if (!list) {
			schedule_timeout_interruptible(1);
			flush_signals(current);
			continue;
		}

		/*
		 * Extract queued callbacks, update counts, and wait
		 * for a grace period to elapse.
		 */
		ACCESS_ONCE(rdp->nocb_head) = NULL;
		tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
		c = atomic_long_xchg(&rdp->nocb_q_count, 0);
		cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
		ACCESS_ONCE(rdp->nocb_p_count) += c;
		ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
		rcu_nocb_wait_gp(rdp);

		/* Each pass through the following loop invokes a callback. */
		trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
		c = cl = 0;
		while (list) {
			next = list->next;
			/* Wait for enqueuing to complete, if needed. */
			while (next == NULL && &list->next != tail) {
				schedule_timeout_interruptible(1);
				next = list->next;
			}
			debug_rcu_head_unqueue(list);
			local_bh_disable();
			if (__rcu_reclaim(rdp->rsp->name, list))
				cl++;
			c++;
			local_bh_enable();
			list = next;
		}
		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
		ACCESS_ONCE(rdp->nocb_p_count) -= c;
		ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
		rdp->n_nocbs_invoked += c;
	}
	return 0;
}

/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
	rdp->nocb_tail = &rdp->nocb_head;
	init_waitqueue_head(&rdp->nocb_wq);
}

/* Create a kthread for each RCU flavor for each no-CBs CPU. */
static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
{
	int cpu;
	struct rcu_data *rdp;
	struct task_struct *t;

	if (rcu_nocb_mask == NULL)
		return;
	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		t = kthread_run(rcu_nocb_kthread, rdp,
				"rcuo%c/%d", rsp->abbr, cpu);
		BUG_ON(IS_ERR(t));
		ACCESS_ONCE(rdp->nocb_kthread) = t;
	}
}

/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	if (rcu_nocb_mask == NULL ||
	    !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
		return false;
	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
	return true;
}

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static int rcu_nocb_needs_gp(struct rcu_state *rsp)
{
	return 0;
}

static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
}

static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy)
{
	return 0;
}

static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
						     struct rcu_data *rdp)
{
	return 0;
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
{
}

static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	return false;
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * An adaptive-ticks CPU can potentially execute in kernel mode for an
 * arbitrarily long period of time with the scheduling-clock tick turned
 * off.  RCU will be paying attention to this CPU because it is in the
 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
 * machine because the scheduling-clock tick has been disabled.  Therefore,
 * if an adaptive-ticks CPU is failing to respond to the current grace
 * period and has not been idle from an RCU perspective, kick it.
 */
static void rcu_kick_nohz_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(cpu))
		smp_send_reschedule(cpu);
#endif /* #ifdef CONFIG_NO_HZ_FULL */
}

#ifdef CONFIG_NO_HZ_FULL_SYSIDLE

/*
 * Define RCU flavor that holds sysidle state.  This needs to be the
 * most active flavor of RCU.
 */
#ifdef CONFIG_PREEMPT_RCU
static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state;
#else /* #ifdef CONFIG_PREEMPT_RCU */
static struct rcu_state *rcu_sysidle_state = &rcu_sched_state;
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

static int full_sysidle_state;		/* Current system-idle state. */
#define RCU_SYSIDLE_NOT		0	/* Some CPU is not idle. */
#define RCU_SYSIDLE_SHORT	1	/* All CPUs idle for brief period. */
#define RCU_SYSIDLE_LONG	2	/* All CPUs idle for long enough. */
#define RCU_SYSIDLE_FULL	3	/* All CPUs idle, ready for sysidle. */
#define RCU_SYSIDLE_FULL_NOTED	4	/* Actually entered sysidle state. */

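/*
 * Invoked to note exit from irq or task transition to idle.  Note that
 * usermode execution does -not- count as idle here!  After all, we want
 * to detect full-system idle states, not RCU quiescent states and grace
 * periods.  The caller must have disabled interrupts.
 */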
static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
{
	unsigned long j;

	/* Adjust nesting, check for fully idle. */
	if (irq) {
		rdtp->dynticks_idle_nesting--;
		WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
		if (rdtp->dynticks_idle_nesting != 0)
			return;  /* Still not fully idle. */
	} else {
		if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
		    DYNTICK_TASK_NEST_VALUE) {
			rdtp->dynticks_idle_nesting = 0;
		} else {
			rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
			WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
			return;  /* Still not fully idle. */
		}
	}

	/* Record start of fully idle period. */
	j = jiffies;
	ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
	smp_mb__before_atomic_inc();
	atomic_inc(&rdtp->dynticks_idle);
	smp_mb__after_atomic_inc();
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
}

/*
 * Unconditionally force exit from full system-idle state.  This is
 * invoked when a normal CPU exits idle, but must be called separately
 * for the timekeeping CPU (tick_do_timer_cpu).  The reason for this
 * is that the timekeeping CPU is permitted to take scheduling-clock
 * interrupts while the system is in system-idle state, and of course
 * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
 * interrupt from any other type of interrupt.
 */
void rcu_sysidle_force_exit(void)
{
	int oldstate = ACCESS_ONCE(full_sysidle_state);
	int newoldstate;

	/*
	 * Each pass through the following loop attempts to exit full
	 * system-idle state.  If contention proves to be a problem,
	 * a trylock-based contention tree could be used here.
	 */
	while (oldstate > RCU_SYSIDLE_SHORT) {
		newoldstate = cmpxchg(&full_sysidle_state,
				      oldstate, RCU_SYSIDLE_NOT);
		if (oldstate == newoldstate &&
		    oldstate == RCU_SYSIDLE_FULL_NOTED) {
			rcu_kick_nohz_cpu(tick_do_timer_cpu);
			return; /* We cleared it, done! */
		}
		oldstate = newoldstate;
	}
	smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
}

/*
 * Invoked to note entry to irq or task transition from idle.  Note that
 * usermode execution does -not- count as idle here!  The caller must
 * have disabled interrupts.
 */
static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
{
	/* Adjust nesting, check for already non-idle. */
	if (irq) {
		rdtp->dynticks_idle_nesting++;
		WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
		if (rdtp->dynticks_idle_nesting != 1)
			return; /* Already non-idle. */
	} else {
		/*
		 * Allow for irq misnesting.  Yes, it really is possible
		 * to enter an irq handler then never leave it, and maybe
		 * also vice versa.  Handle both possibilities.
		 */
		if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
			rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
			WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
			return; /* Already non-idle. */
		} else {
			rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
		}
	}

	/* Record end of idle period. */
	smp_mb__before_atomic_inc();
	atomic_inc(&rdtp->dynticks_idle);
	smp_mb__after_atomic_inc();
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));

	/*
	 * If we are the timekeeping CPU, we are permitted to be non-idle
	 * during a system-idle state.  This must be the case, because
	 * the timekeeping CPU has to take scheduling-clock interrupts
	 * during the time that the system is transitioning to full
	 * system-idle state.  This means that the timekeeping CPU must
	 * invoke rcu_sysidle_force_exit() directly if it does anything
	 * more than take a scheduling-clock interrupt.
	 */
	if (smp_processor_id() == tick_do_timer_cpu)
		return;

	/* Update system-idle state: We are clearly no longer fully idle! */
	rcu_sysidle_force_exit();
}

/*
 * Check to see if the current CPU is idle.  Note that usermode execution
 * does not count as idle.  The caller must have disabled interrupts.
 */
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj)
{
	int cur;
	unsigned long j;
	struct rcu_dynticks *rdtp = rdp->dynticks;

	/*
	 * If some other CPU has already reported non-idle, if this is
	 * not the flavor of RCU that tracks sysidle state, or if this
	 * is an offline or the timekeeping CPU, nothing to do.
	 */
	if (!*isidle || rdp->rsp != rcu_sysidle_state ||
	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
		return;
	if (rcu_gp_in_progress(rdp->rsp))
		WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);

	/* Pick up current idle and NMI-nesting counter and check. */
	cur = atomic_read(&rdtp->dynticks_idle);
	if (cur & 0x1) {
		*isidle = false; /* We are not idle! */
		return;
	}
	smp_mb(); /* Read counters before timestamps. */

	/* Pick up timestamps. */
	j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
	/* If this CPU entered idle more recently, update maxj timestamp. */
	if (ULONG_CMP_LT(*maxj, j))
		*maxj = j;
}

/*
 * Is this the flavor of RCU that is handling full-system idle?
 */
static bool is_sysidle_rcu_state(struct rcu_state *rsp)
{
	return rsp == rcu_sysidle_state;
}

/*
 * Bind the grace-period kthread for the sysidle flavor of RCU to the
 * timekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	int cpu = ACCESS_ONCE(tick_do_timer_cpu);

	if (cpu < 0 || cpu >= nr_cpu_ids)
		return;
	if (raw_smp_processor_id() != cpu)
		set_cpus_allowed_ptr(current, cpumask_of(cpu));
}

/*
 * Return a delay in jiffies based on the number of CPUs, rcu_node
 * leaf fanout, and jiffies tick rate.  The idea is to allow larger
 * systems more time to transition to full-idle state in order to
 * avoid the cache thrashing that otherwise occurs on the state variable.
 * Really small systems (less than a couple of tens of CPUs) should
 * instead use a single global atomically incremented counter, and later
 * versions of this will automatically reconfigure themselves accordingly.
 */
static unsigned long rcu_sysidle_delay(void)
{
	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
		return 0;
	return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
}

/*
 * Advance the full-system-idle state.  This is invoked when all of
 * the non-timekeeping CPUs are idle.
 */
static void rcu_sysidle(unsigned long j)
{
	/* Check the current state. */
	switch (ACCESS_ONCE(full_sysidle_state)) {
	case RCU_SYSIDLE_NOT:

		/* First time all are idle, so note a short idle period. */
		ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
		break;

	case RCU_SYSIDLE_SHORT:

		/*
		 * Idle for a bit, so enter state RCU_SYSIDLE_LONG
		 * after a decent incubation period.
		 */
		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
			(void)cmpxchg(&full_sysidle_state,
				      RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
		break;

	case RCU_SYSIDLE_LONG:

		/*
		 * Do an additional check pass before advancing to full
		 * system-idle state.
		 */
		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
			(void)cmpxchg(&full_sysidle_state,
				      RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
		break;

	default:
		break;
	}
}

/*
 * Found a non-idle non-timekeeping CPU, so kick the system-idle state
 * back to the beginning.
 */
static void rcu_sysidle_cancel(void)
{
	smp_mb();
	ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
}

/*
 * Update the sysidle state based on the results of a force-quiescent-state
 * scan of the CPUs' dyntick-idle state.
 */
static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
			       unsigned long maxj, bool gpkt)
{
	if (rsp != rcu_sysidle_state)
		return;  /* Wrong flavor, ignore. */
	if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
		return;  /* Running state machine from timekeeping CPU. */
	if (isidle)
		rcu_sysidle(maxj);    /* More idle! */
	else
		rcu_sysidle_cancel(); /* Idle is over. */
}

/*
 * Wrapper for rcu_sysidle_report() when called from the grace-period
 * kthread's context.
 */
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj)
{
	rcu_sysidle_report(rsp, isidle, maxj, true);
}

/* Callback and function for forcing an RCU grace period. */
struct rcu_sysidle_head {
	struct rcu_head rh;
	int inuse;
};

static void rcu_sysidle_cb(struct rcu_head *rhp)
{
	struct rcu_sysidle_head *rshp;

	/*
	 * The following memory barrier is needed to replace the
	 * memory barriers that would normally be in the memory
	 * allocator.
	 */
	smp_mb();  /* System state update before callback-structure reuse. */

	rshp = container_of(rhp, struct rcu_sysidle_head, rh);
	ACCESS_ONCE(rshp->inuse) = 0;
}

/*
 * Check to see if the system is fully idle, other than the timekeeping CPU.
 * The caller must have disabled interrupts.
 */
bool rcu_sys_is_idle(void)
{
	static struct rcu_sysidle_head rsh;
	int rss = ACCESS_ONCE(full_sysidle_state);

	if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
		return false;

	/* Handle small-system case by doing a full scan of CPUs. */
	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
		int oldrss = rss - 1;

		/*
		 * One pass to advance to each state up to _FULL.
		 * Give up if any pass fails to advance the state.
		 */
		while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
			int cpu;
			bool isidle = true;
			unsigned long maxj = jiffies - ULONG_MAX / 4;
			struct rcu_data *rdp;

			/* Scan all the CPUs looking for nonidle CPUs. */
			for_each_possible_cpu(cpu) {
				rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu);
				rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
				if (!isidle)
					break;
			}
			rcu_sysidle_report(rcu_sysidle_state,
					   isidle, maxj, false);
			oldrss = rss;
			rss = ACCESS_ONCE(full_sysidle_state);
		}
	}

	/* If this is the first observation of an idle period, record it. */
	if (rss == RCU_SYSIDLE_FULL) {
		rss = cmpxchg(&full_sysidle_state,
			      RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
		return rss == RCU_SYSIDLE_FULL;
	}

	smp_mb(); /* ensure rss load happens before later caller actions. */

	/* If already fully idle, tell the caller (in case of races). */
	if (rss == RCU_SYSIDLE_FULL_NOTED)
		return true;

	/*
	 * If we aren't there yet, and a grace period is not in flight,
	 * initiate a grace period.  Either way, tell the caller that
	 * we are not there yet.  We use an xchg() rather than an assignment
	 * to make up for the memory barriers that would otherwise be
	 * provided by the memory allocator.
	 */
	if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
	    !rcu_gp_in_progress(rcu_sysidle_state) &&
	    !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
		call_rcu(&rsh.rh, rcu_sysidle_cb);
	return false;
}

/*
 * Initialize dynticks sysidle state for CPUs coming online.
 */
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
{
	rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
}

#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
{
}

static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
{
}

static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj)
{
}

static bool is_sysidle_rcu_state(struct rcu_state *rsp)
{
	return false;
}

static void rcu_bind_gp_kthread(void)
{
}

static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj)
{
}

static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
{
}

#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */