/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include "../time/tick-internal.h"

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

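/*
 * Control variables for per-CPU and per-rcu_node kthreads.  These
 * handle all flavors of RCU.
 */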
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#else /* #ifdef CONFIG_RCU_BOOST */

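/*
 * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
 * all uses are in dead code.  Provide a definition to keep the compiler
 * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
 */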
#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	/* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll; /* Offload kthreads poll for CBs. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

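/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */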
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU debugfs-based tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
	if (IS_ENABLED(CONFIG_RCU_BOOST))
		pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
}

#ifdef CONFIG_PREEMPT_RCU

RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;

static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake);

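/*
 * Tell them what RCU they are running.
 */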
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1

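/*
 * Queue the incoming preempted reader in the proper location on the
 * rcu_node structure's ->blkd_tasks list.  Readers blocking the current
 * normal and/or expedited grace period must be queued where the
 * ->gp_tasks and/or ->exp_tasks pointers will find them, so the choice
 * of position depends on which grace periods are in progress and which
 * of them this reader is blocking.  Called with rnp->lock held, which
 * this function releases.
 */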
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock)
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	/*
	 * Decide where to queue the newly blocked task.  In theory,
	 * this could be an if-statement.  In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case                RCU_EXP_TASKS:
	case                RCU_EXP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS + RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or, if blocking one of them,
		 * not the first task to block it: queue at the head
		 * of the list.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case                                              RCU_EXP_BLKD:
	case                                RCU_GP_BLKD:
	case                                RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS +                               RCU_EXP_BLKD:
	case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:

		/*
		 * First task blocking either or both grace periods:
		 * queue at the tail of the list, where the ->gp_tasks
		 * and/or ->exp_tasks pointers will be set below.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
	case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP:
		 * queue just after the first task blocking it.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS +                 RCU_GP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP:
		 * queue just after the first task blocking it.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task.  If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked task.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
		rnp->gp_tasks = &t->rcu_node_entry;
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		rnp->exp_tasks = &t->rcu_node_entry;
	raw_spin_unlock_rcu_node(rnp);

	/*
	 * Report the quiescent state for the expedited GP.  This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP.  (Though we are
	 * still in a quiescent state in any case.)
	 */
	if (blkd_state & RCU_EXP_BLKD &&
	    t->rcu_read_unlock_special.b.exp_need_qs) {
		t->rcu_read_unlock_special.b.exp_need_qs = false;
		rcu_report_exp_rdp(rdp->rsp, rdp, true);
	} else {
		WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
	}
}

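/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 */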
static void rcu_preempt_qs(void)
{
	if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data_p->gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
		current->rcu_read_unlock_special.b.need_qs = false;
	}
}

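/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */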
static void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = this_cpu_ptr(rcu_state_p->rda);
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing and expedited grace periods.
		 */
		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special.s) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_qs();
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

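/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the
 * RCU read-side critical section.
 */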
void rcu_read_unlock_special(struct task_struct *t)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	unsigned long flags;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited.  Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	if (special.b.need_qs) {
		rcu_preempt_qs();
		t->rcu_read_unlock_special.b.need_qs = false;
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
			return;
		}
	}

	/*
	 * Respond to a request for an expedited grace period, but only if
	 * we were not preempted, meaning that we were running on the same
	 * CPU throughout.  If we were preempted, the exp_need_qs flag
	 * would have been cleared at the time of the first preemption,
	 * and the quiescent state would be reported when we were dequeued.
	 */
	if (special.b.exp_need_qs) {
		WARN_ON_ONCE(special.b.blocked);
		t->rcu_read_unlock_special.b.exp_need_qs = false;
		rdp = this_cpu_ptr(rcu_state_p->rda);
		rcu_report_exp_rdp(rcu_state_p, rdp, true);
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
			return;
		}
	}

	/* Hardware IRQ handlers cannot block, complain if they get here. */
	if (in_irq() || in_serving_softirq()) {
		lockdep_rcu_suspicious(__FILE__, __LINE__,
				       "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
		pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
			 t->rcu_read_unlock_special.s,
			 t->rcu_read_unlock_special.b.blocked,
			 t->rcu_read_unlock_special.b.exp_need_qs,
			 t->rcu_read_unlock_special.b.need_qs);
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {
		t->rcu_read_unlock_special.b.blocked = false;

		/*
		 * Remove this task from the list it blocked on.  The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp);
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = sync_rcu_preempt_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			if (&t->rcu_node_entry == rnp->boost_tasks)
				rnp->boost_tasks = np;
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state before
		 * dropping the lock.
		 */
		empty_exp_now = sync_rcu_preempt_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_unlock(&rnp->boost_mtx);

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rcu_state_p, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	pr_cont("\n");
}

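/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */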
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rnp->exp_tasks)
		return 0;
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

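/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */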
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (rcu_preempt_has_tasks(rnp))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs();
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    __this_cpu_read(rcu_data_p->core_needs_qs) &&
	    __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
		t->rcu_read_unlock_special.b.need_qs = true;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, rcu_state_p, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);

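/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */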
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	if (rcu_gp_is_expedited())
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(rcu_state_p);
}

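/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */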
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special.b.blocked = true;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

static struct rcu_state *const rcu_state_p = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(void)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (!rcu_preempt_has_tasks(rnp))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

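/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */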
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/*
	 * Recheck under the lock: all tasks might have been dequeued
	 * between the time we checked and the time we took the lock.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost the task by acquiring an rt_mutex that is proxy-locked
	 * on the task's behalf and then blocking on that mutex.  The task
	 * inherits our priority until it releases the mutex in
	 * rcu_read_unlock_special(), at which point our rt_mutex_lock()
	 * returns and the boost is over.  The task cannot exit before
	 * then: it must first dequeue itself from the ->blkd_tasks list,
	 * and that requires acquiring ->lock, which we hold.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Lock only for the duration of the boost; then unboost. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx);

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread, one per leaf rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_interruptible(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

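/*
 * Check to see if it is time to start boosting RCU readers blocking the
 * current grace period, and, if so, tell the per-rcu_node kthread to
 * start boosting them.  If there is an expedited grace period in
 * progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 */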
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __this_cpu_read(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (rcu_state_p != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __this_cpu_read(rcu_cpu_has_work);
}

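/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in configurations of RCU that do not support RCU
 * priority boosting.
 */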
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
	schedule_timeout_interruptible(2);
	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
	*statusp = RCU_KTHREAD_WAITING;
}

/*
 * Set the per-rcu_node kthread's affinity to cover the CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rcu_rnp_online_cpus(rnp);
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for_each_leaf_node_possible_cpu(rnp, cpu)
		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
		    cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0)
		cpumask_setall(cm);
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};

/*
 * Spawn boost kthreads -- called as soon as the scheduler is running.
 */
static void __init rcu_spawn_boost_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rcu_for_each_leaf_node(rcu_state_p, rnp)
		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}

static void rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void __init rcu_spawn_boost_kthreads(void)
{
}

static void rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

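/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs RCU's attention at all, which it does not on no-CBs CPUs.
 */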
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
	       ? 0 : rcu_cpu_has_callbacks(NULL);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(void)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(void)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

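/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.
 *
 * The following preprocessor symbols control this:
 *
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Setting this too
 *	high can delay grace periods and thus degrade responsiveness.
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */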
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */

static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
module_param(rcu_idle_gp_delay, int, 0644);
static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
module_param(rcu_idle_lazy_gp_delay, int, 0644);

/*
 * Try to advance callbacks for all flavors of RCU on the current CPU, but
 * only if it has been awhile since the last time we did so.  Afterwards,
 * if there are any callbacks ready for immediate invocation, return true.
 */
static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
	bool cbs_ready = false;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	struct rcu_node *rnp;
	struct rcu_state *rsp;

	/* Exit early if we advanced recently. */
	if (jiffies == rdtp->last_advance_all)
		return false;
	rdtp->last_advance_all = jiffies;

	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		rnp = rdp->mynode;

		/*
		 * Don't bother checking unless a grace period has
		 * completed since we last checked and there are
		 * callbacks not yet ready to invoke.
		 */
		if ((rdp->completed != rnp->completed ||
		     unlikely(READ_ONCE(rdp->gpwrap))) &&
		    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
			note_gp_changes(rsp, rdp);

		if (cpu_has_callbacks_ready_to_invoke(rdp))
			cbs_ready = true;
	}
	return cbs_ready;
}

/*
 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
 * caller to set the timeout based on whether or not there are non-lazy
 * callbacks.
 *
 * The caller must have disabled interrupts.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	unsigned long dj;

	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)) {
		*nextevt = KTIME_MAX;
		return 0;
	}

	/* Snapshot to detect later posting of non-lazy callback. */
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;

	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
		*nextevt = KTIME_MAX;
		return 0;
	}

	/* Attempt to advance callbacks. */
	if (rcu_try_advance_all_cbs()) {
		/* Some ready to invoke, so initiate later invocation. */
		invoke_rcu_core();
		return 1;
	}
	rdtp->last_accelerate = jiffies;

	/* Request timer delay depending on laziness, and round. */
	if (!rdtp->all_lazy) {
		dj = round_up(rcu_idle_gp_delay + jiffies,
			      rcu_idle_gp_delay) - jiffies;
	} else {
		dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
	}
	*nextevt = basemono + dj * TICK_NSEC;
	return 0;
}

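/*
 * Prepare a CPU for idle from an RCU perspective.  The first major task
 * is to sense whether nohz mode has been enabled or disabled via sysfs.
 * The second major task is to check to see if a non-lazy callback has
 * arrived at a CPU that previously had only lazy callbacks.  The third
 * major task is to accelerate (that is, assign grace-period numbers to)
 * any recently arrived callbacks.
 *
 * The caller must have disabled interrupts.
 */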
static void rcu_prepare_for_idle(void)
{
	bool needwake;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	struct rcu_node *rnp;
	struct rcu_state *rsp;
	int tne;

	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
	    rcu_is_nocb_cpu(smp_processor_id()))
		return;

	/* Handle nohz enablement switches conservatively. */
	tne = READ_ONCE(tick_nohz_active);
	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(NULL))
			invoke_rcu_core(); /* force nohz to see update. */
		rdtp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/*
	 * If a non-lazy callback arrived at a CPU having only lazy
	 * callbacks, invoke RCU core for the side-effect of recalculating
	 * idle duration on re-entry to idle.
	 */
	if (rdtp->all_lazy &&
	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
		rdtp->all_lazy = false;
		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
		invoke_rcu_core();
		return;
	}

	/*
	 * If we have not yet accelerated this jiffy, accelerate all
	 * callbacks on this CPU.
	 */
	if (rdtp->last_accelerate == jiffies)
		return;
	rdtp->last_accelerate = jiffies;
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		if (!*rdp->nxttail[RCU_DONE_TAIL])
			continue;
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
		if (needwake)
			rcu_gp_kthread_wake(rsp);
	}
}

/*
 * Clean up for exit from idle.  Attempt to advance callbacks based on
 * any grace periods that elapsed while the CPU was idle, and if any
 * callbacks are now ready to invoke, initiate invocation.
 */
static void rcu_cleanup_after_idle(void)
{
	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
	    rcu_is_nocb_cpu(smp_processor_id()))
		return;
	if (rcu_try_advance_all_cbs())
		invoke_rcu_core();
}

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}

/*
 * Data for flushing lazy RCU callbacks at OOM time.
 */
static atomic_t oom_callback_count;
static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);

/*
 * RCU OOM callback -- decrement the outstanding count and deliver the
 * wake-up if we are the last one.
 */
static void rcu_oom_callback(struct rcu_head *rhp)
{
	if (atomic_dec_and_test(&oom_callback_count))
		wake_up(&oom_callback_wq);
}

/*
 * Post an rcu_oom_notify callback on the current CPU if it has at
 * least one lazy callback.  This will unnecessarily post callbacks
 * to CPUs that already have a non-lazy callback at the end of their
 * callback list, but this is an infrequent operation, so accept some
 * extra overhead to keep things simple.
 */
static void rcu_oom_notify_cpu(void *unused)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;

	for_each_rcu_flavor(rsp) {
		rdp = raw_cpu_ptr(rsp->rda);
		if (rdp->qlen_lazy != 0) {
			atomic_inc(&oom_callback_count);
			rsp->call(&rdp->oom_head, rcu_oom_callback);
		}
	}
}

/*
 * If low on memory, ensure that each CPU has a non-lazy callback.
 * This will wake up CPUs that have only lazy callbacks, in turn
 * ensuring that they free up the corresponding memory in a timely
 * manner.  Because an uncertain amount of memory will be freed in
 * some uncertain timeframe, we do not claim to have freed anything.
 */
static int rcu_oom_notify(struct notifier_block *self,
			  unsigned long notused, void *nfreed)
{
	int cpu;

	/* Wait for callbacks from earlier instance to complete. */
	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
	smp_mb(); /* Pairs with atomic_dec_and_test() in rcu_oom_callback(). */

	/*
	 * Prevent premature wakeup: ensure that all increments happen
	 * before there is a chance of the counter reaching zero.
	 */
	atomic_set(&oom_callback_count, 1);

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
		cond_resched_rcu_qs();
	}

	/* Unconditionally decrement: no need to wake ourselves up. */
	atomic_dec(&oom_callback_count);

	return NOTIFY_OK;
}

static struct notifier_block rcu_oom_nb = {
	.notifier_call = rcu_oom_notify
};

static int __init rcu_register_oom_notifier(void)
{
	register_oom_notifier(&rcu_oom_nb);
	return 0;
}
early_initcall(rcu_register_oom_notifier);

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;

	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
		rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
		ulong2long(nlpd),
		rdtp->all_lazy ? 'L' : '.',
		rdtp->tick_nohz_enabled_snap ? '.' : 'D');
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	pr_cont("\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	pr_err("\t%d-%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
	       fast_no_hz);
}

/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	pr_err("\t");
}

/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
}

/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		raw_cpu_inc(rsp->rda->ticks_this_gp);
}

#ifdef CONFIG_RCU_NOCB_CPU

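/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * given by rcu_nocb_mask.  For each CPU in the set, a kthread ("rcuo")
 * is created that pulls the callbacks from the corresponding CPU, waits
 * for a grace period to elapse, and then invokes the callbacks.  These
 * kthreads are created at boot time regardless of whether the indicated
 * CPUs are online; kthreads for offline CPUs simply never see any
 * callbacks to process.
 *
 * Parse the boot-time rcu_nocbs= CPU list.
 */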
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	have_rcu_nocb_mask = true;
	cpulist_parse(str, rcu_nocb_mask);
	return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);

static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = 1;
	return 0;
}
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);

/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
	swake_up_all(sq);
}

/*
 * Set the root rcu_node structure's ->need_future_gp field
 * based on the sum of those of all rcu_node structures.  This does
 * double-count the root rcu_node structure's requests, but this
 * is necessary to handle the possibility of a rcu_nocb_kthread()
 * calling rcu_nocb_gp_get() before the root rcu_node structure
 * has been set up.
 */
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{
	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return &rnp->nocb_gp_wq[rnp->completed & 0x1];
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
}

#ifndef CONFIG_RCU_NOCB_CPU_ALL
/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
	if (have_rcu_nocb_mask)
		return cpumask_test_cpu(cpu, rcu_nocb_mask);
	return false;
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */

/*
 * Kick the leader kthread for this NOCB group.
 */
static void wake_nocb_leader(struct rcu_data *rdp, bool force)
{
	struct rcu_data *rdp_leader = rdp->nocb_leader;

	if (!READ_ONCE(rdp_leader->nocb_kthread))
		return;
	if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
		swake_up(&rdp_leader->nocb_wq);
	}
}

/*
 * Does the specified CPU need an RCU callback for the specified flavor
 * of rcu_barrier()?
 */
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	unsigned long ret;
#ifdef CONFIG_PROVE_RCU
	struct rcu_head *rhp;
#endif /* #ifdef CONFIG_PROVE_RCU */

	/*
	 * Check count of all no-CBs callbacks awaiting invocation.
	 * There needs to be a barrier before this function is called,
	 * but associated with a prior determination that no more
	 * callbacks would be posted.  In the worst case, the first
	 * barrier in _rcu_barrier() suffices (but the caller cannot
	 * necessarily rely on this, not a substitute for the caller
	 * getting the concurrency design right!).  There must also be
	 * a barrier between the following load and posting of a callback
	 * (if a callback is in fact needed).  This is associated with an
	 * atomic_inc() in the caller.
	 */
	ret = atomic_long_read(&rdp->nocb_q_count);

#ifdef CONFIG_PROVE_RCU
	rhp = READ_ONCE(rdp->nocb_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_gp_head);
	if (!rhp)
		rhp = READ_ONCE(rdp->nocb_follower_head);

	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
	if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
	    rcu_scheduler_fully_active) {
		/* RCU callback enqueued before CPU first came online??? */
		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
		       cpu, rhp->func);
		WARN_ON_ONCE(1);
	}
#endif /* #ifdef CONFIG_PROVE_RCU */

	return !!ret;
}

/*
 * Enqueue the specified string of rcu_head structures onto the specified
 * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
 * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
 * counts are supplied by rhcount and rhcount_lazy.
 *
 * If warranted, also wake up the kthread servicing this CPU's queues.
 */
static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
				    struct rcu_head *rhp,
				    struct rcu_head **rhtp,
				    int rhcount, int rhcount_lazy,
				    unsigned long flags)
{
	int len;
	struct rcu_head **old_rhpp;
	struct task_struct *t;

	/* Enqueue the callback on the nocb list and update counts. */
	atomic_long_add(rhcount, &rdp->nocb_q_count);
	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
	WRITE_ONCE(*old_rhpp, rhp);
	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */

	/* If we are not being polled and there is a kthread, awaken it ... */
	t = READ_ONCE(rdp->nocb_kthread);
	if (rcu_nocb_poll || !t) {
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	len = atomic_long_read(&rdp->nocb_q_count);
	if (old_rhpp == &rdp->nocb_head) {
		if (!irqs_disabled_flags(flags)) {
			/* ... if queue was empty ... */
			wake_nocb_leader(rdp, false);
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeEmpty"));
		} else {
			rdp->nocb_defer_wakeup = RCU_NOGP_WAKE;
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeEmptyIsDeferred"));
		}
		rdp->qlen_last_fqs_check = 0;
	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
		/* ... or if many callbacks queued. */
		if (!irqs_disabled_flags(flags)) {
			wake_nocb_leader(rdp, true);
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeOvf"));
		} else {
			rdp->nocb_defer_wakeup = RCU_NOGP_WAKE_FORCE;
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    TPS("WakeOvfIsDeferred"));
		}
		rdp->qlen_last_fqs_check = LONG_MAX / 2;
	} else {
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
	}
	return;
}

/*
 * This is a helper for __call_rcu(), which invokes this when the normal
 * callback queue is inoperable.  If this is not a no-CBs CPU, this
 * function returns failure back to __call_rcu(), which can complain
 * appropriately.
 *
 * Otherwise, this function queues the callback where the corresponding
 * "rcuo" kthread can find it.
 */
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags)
{

	if (!rcu_is_nocb_cpu(rdp->cpu))
		return false;
	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
					 (unsigned long)rhp->func,
					 -atomic_long_read(&rdp->nocb_q_count_lazy),
					 -atomic_long_read(&rdp->nocb_q_count));
	else
		trace_rcu_callback(rdp->rsp->name, rhp,
				   -atomic_long_read(&rdp->nocb_q_count_lazy),
				   -atomic_long_read(&rdp->nocb_q_count));

	/*
	 * If called from an extended quiescent state with interrupts
	 * disabled, invoke the RCU core in order to allow the idle-entry
	 * deferred-wakeup check to function.
	 */
	if (irqs_disabled_flags(flags) &&
	    !rcu_is_watching() &&
	    cpu_online(smp_processor_id()))
		invoke_rcu_core();

	return true;
}

/*
 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
 * not a no-CBs CPU.
 */
static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
						     struct rcu_data *rdp,
						     unsigned long flags)
{
	long ql = rsp->qlen;
	long qll = rsp->qlen_lazy;

	/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
	if (!rcu_is_nocb_cpu(smp_processor_id()))
		return false;
	rsp->qlen = 0;
	rsp->qlen_lazy = 0;

	/* First, enqueue the donelist, if any.  This preserves CB ordering. */
	if (rsp->orphan_donelist != NULL) {
		__call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
					rsp->orphan_donetail, ql, qll, flags);
		ql = qll = 0;
		rsp->orphan_donelist = NULL;
		rsp->orphan_donetail = &rsp->orphan_donelist;
	}
	if (rsp->orphan_nxtlist != NULL) {
		__call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
					rsp->orphan_nxttail, ql, qll, flags);
		ql = qll = 0;
		rsp->orphan_nxtlist = NULL;
		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
	}
	return true;
}

/*
 * If necessary, kick off a new grace period, and either way wait
 * for a subsequent grace period to complete.
 */
static void rcu_nocb_wait_gp(struct rcu_data *rdp)
{
	unsigned long c;
	bool d;
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp = rdp->mynode;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	needwake = rcu_start_future_gp(rnp, rdp, &c);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	if (needwake)
		rcu_gp_kthread_wake(rdp->rsp);

	/*
	 * Wait for the grace period.  Do so interruptibly to avoid messing
	 * up the load average.
	 */
	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
	for (;;) {
		swait_event_interruptible(
			rnp->nocb_gp_wq[c & 0x1],
			(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
		if (likely(d))
			break;
		WARN_ON(signal_pending(current));
		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
	}
	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
	smp_mb(); /* Ensure that CB invocation happens after GP end. */
}

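/*
 * Leaders come here to wait for additional callbacks to show up.
 * This function does not return until callbacks appear.
 */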
static void nocb_leader_wait(struct rcu_data *my_rdp)
{
	bool firsttime = true;
	bool gotcbs;
	struct rcu_data *rdp;
	struct rcu_head **tail;

wait_again:

	/* Wait for callbacks to appear. */
	if (!rcu_nocb_poll) {
		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
		swait_event_interruptible(my_rdp->nocb_wq,
				!READ_ONCE(my_rdp->nocb_leader_sleep));
		/* Memory barrier handled by smp_mb() calls below and repoll. */
	} else if (firsttime) {
		firsttime = false; /* Don't drown trace log with "Poll"! */
		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll");
	}

	/*
	 * Each pass through the following loop checks a follower for CBs.
	 * We are our own first follower.  Any CBs found are moved to
	 * nocb_gp_head, where they await a grace period.
	 */
	gotcbs = false;
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
		rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
		if (!rdp->nocb_gp_head)
			continue;  /* No CBs here, try next follower. */

		/* Move callbacks to wait-for-GP list, which is empty. */
		WRITE_ONCE(rdp->nocb_head, NULL);
		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
		gotcbs = true;
	}

	/*
	 * If there were no callbacks, sleep a bit, rescan after a
	 * memory barrier, and go retry.
	 */
	if (unlikely(!gotcbs)) {
		if (!rcu_nocb_poll)
			trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
					    "WokeEmpty");
		WARN_ON(signal_pending(current));
		schedule_timeout_interruptible(1);

		/* Rescan in case we were a victim of memory ordering. */
		my_rdp->nocb_leader_sleep = true;
		smp_mb();  /* Ensure _sleep true before scan. */
		for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
			if (READ_ONCE(rdp->nocb_head)) {
				/* Found CB, so short-circuit next wait. */
				my_rdp->nocb_leader_sleep = false;
				break;
			}
		goto wait_again;
	}

	/* Wait for one grace period. */
	rcu_nocb_wait_gp(my_rdp);

	/*
	 * We left ->nocb_leader_sleep unset to reduce cache thrashing.
	 * We set it now, but recheck for new callbacks while
	 * traversing our follower list.
	 */
	my_rdp->nocb_leader_sleep = true;
	smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */

	/* Each pass through the following loop wakes a follower, if needed. */
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
		if (READ_ONCE(rdp->nocb_head))
			my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
		if (!rdp->nocb_gp_head)
			continue; /* No CBs, so no need to wake follower. */

		/* Append callbacks to follower's "done" list. */
		tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
		*tail = rdp->nocb_gp_head;
		smp_mb__after_atomic(); /* Store *tail before wakeup. */
		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
			/*
			 * List was empty, wake up the follower.
			 * Memory barriers supplied by atomic_long_add().
			 */
			swake_up(&rdp->nocb_wq);
		}
	}

	/* If we (the leader) don't have CBs, go wait some more. */
	if (!my_rdp->nocb_follower_head)
		goto wait_again;
}

/*
 * Followers come here to wait for additional callbacks to show up.
 * This function does not return until callbacks appear.
 */
static void nocb_follower_wait(struct rcu_data *rdp)
{
	bool firsttime = true;

	for (;;) {
		if (!rcu_nocb_poll) {
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    "FollowerSleep");
			swait_event_interruptible(rdp->nocb_wq,
						 READ_ONCE(rdp->nocb_follower_head));
		} else if (firsttime) {
			/* Don't drown trace log with "Poll"! */
			firsttime = false;
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
		}
		if (smp_load_acquire(&rdp->nocb_follower_head)) {
			/* ^^^ Ensure CB invocation follows _head test. */
			return;
		}
		if (!rcu_nocb_poll)
			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
					    "WokeEmpty");
		WARN_ON(signal_pending(current));
		schedule_timeout_interruptible(1);
	}
}

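/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
 * callbacks queued by the corresponding no-CBs CPU, however, there is
 * an optional leader-follower relationship so that the grace-period
 * kthreads don't have to do quite so many wakeups.
 */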
static int rcu_nocb_kthread(void *arg)
{
	int c, cl;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_head **tail;
	struct rcu_data *rdp = arg;

	/* Each pass through this loop invokes one batch of callbacks */
	for (;;) {
		/* Wait for callbacks. */
		if (rdp->nocb_leader == rdp)
			nocb_leader_wait(rdp);
		else
			nocb_follower_wait(rdp);

		/* Pull the ready-to-invoke callbacks onto local list. */
		list = READ_ONCE(rdp->nocb_follower_head);
		BUG_ON(!list);
		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
		WRITE_ONCE(rdp->nocb_follower_head, NULL);
		tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);

		/* Each pass through the following loop invokes a callback. */
		trace_rcu_batch_start(rdp->rsp->name,
				      atomic_long_read(&rdp->nocb_q_count_lazy),
				      atomic_long_read(&rdp->nocb_q_count), -1);
		c = cl = 0;
		while (list) {
			next = list->next;
			/* Wait for enqueuing to complete, if needed. */
			while (next == NULL && &list->next != tail) {
				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
						    TPS("WaitQueue"));
				schedule_timeout_interruptible(1);
				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
						    TPS("WokeQueue"));
				next = list->next;
			}
			debug_rcu_head_unqueue(list);
			local_bh_disable();
			if (__rcu_reclaim(rdp->rsp->name, list))
				cl++;
			c++;
			local_bh_enable();
			cond_resched_rcu_qs();
			list = next;
		}
		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
		smp_mb__before_atomic();  /* _add after CB invocation. */
		atomic_long_add(-c, &rdp->nocb_q_count);
		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
		rdp->n_nocbs_invoked += c;
	}
	return 0;
}

/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	return READ_ONCE(rdp->nocb_defer_wakeup);
}

/* Do a deferred wakeup of rcu_nocb_kthread(). */
static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	int ndw;

	if (!rcu_nocb_need_deferred_wakeup(rdp))
		return;
	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_NOT);
	wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
}

void __init rcu_init_nohz(void)
{
	int cpu;
	bool need_rcu_nocb_mask = true;
	struct rcu_state *rsp;

#ifdef CONFIG_RCU_NOCB_CPU_NONE
	need_rcu_nocb_mask = false;
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_NONE */

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
		need_rcu_nocb_mask = true;
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

	if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
			return;
		}
		have_rcu_nocb_mask = true;
	}
	if (!have_rcu_nocb_mask)
		return;

#ifdef CONFIG_RCU_NOCB_CPU_ZERO
	pr_info("\tOffload RCU callbacks from CPU 0\n");
	cpumask_set_cpu(0, rcu_nocb_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
#ifdef CONFIG_RCU_NOCB_CPU_ALL
	pr_info("\tOffload RCU callbacks from all CPUs\n");
	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running)
		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
#endif /* #if defined(CONFIG_NO_HZ_FULL) */

	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
			    rcu_nocb_mask);
	}
	pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
		cpumask_pr_args(rcu_nocb_mask));
	if (rcu_nocb_poll)
		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");

	for_each_rcu_flavor(rsp) {
		for_each_cpu(cpu, rcu_nocb_mask)
			init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
		rcu_organize_nocb_kthreads(rsp);
	}
}

/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
	rdp->nocb_tail = &rdp->nocb_head;
	init_swait_queue_head(&rdp->nocb_wq);
	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
 * brought online out of order, this can require re-organizing the
 * leader-follower relationships.
 */
static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
{
	struct rcu_data *rdp;
	struct rcu_data *rdp_last;
	struct rcu_data *rdp_old_leader;
	struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
	struct task_struct *t;

	/*
	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
	 * then nothing to do.
	 */
	if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
		return;

	/* If we didn't spawn the leader first, reorganize! */
	rdp_old_leader = rdp_spawn->nocb_leader;
	if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
		rdp_last = NULL;
		rdp = rdp_old_leader;
		do {
			rdp->nocb_leader = rdp_spawn;
			if (rdp_last && rdp != rdp_spawn)
				rdp_last->nocb_next_follower = rdp;
			if (rdp == rdp_spawn) {
				rdp = rdp->nocb_next_follower;
			} else {
				rdp_last = rdp;
				rdp = rdp->nocb_next_follower;
				rdp_last->nocb_next_follower = NULL;
			}
		} while (rdp);
		rdp_spawn->nocb_next_follower = rdp_old_leader;
	}

	/* Spawn the kthread for this CPU and RCU flavor. */
	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
			"rcuo%c/%d", rsp->abbr, cpu);
	BUG_ON(IS_ERR(t));
	WRITE_ONCE(rdp_spawn->nocb_kthread, t);
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthreads, spawn them.
 */
static void rcu_spawn_all_nocb_kthreads(int cpu)
{
	struct rcu_state *rsp;

	if (rcu_scheduler_fully_active)
		for_each_rcu_flavor(rsp)
			rcu_spawn_one_nocb_kthread(rsp, cpu);
}

/*
 * Once the scheduler is running, spawn rcuo kthreads for all online
 * no-CBs CPUs.  This assumes that the early_initcall()s happen before
 * non-boot CPUs come online -- if this changes, we will need to add
 * some mutual exclusion.
 */
static void __init rcu_spawn_nocb_kthreads(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		rcu_spawn_all_nocb_kthreads(cpu);
}

/* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_leader_stride = -1;
module_param(rcu_nocb_leader_stride, int, 0444);

/*
 * Initialize leader-follower relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
{
	int cpu;
	int ls = rcu_nocb_leader_stride;
	int nl = 0;  /* Next leader. */
	struct rcu_data *rdp;
	struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
	struct rcu_data *rdp_prev = NULL;

	if (!have_rcu_nocb_mask)
		return;
	if (ls == -1) {
		ls = int_sqrt(nr_cpu_ids);
		rcu_nocb_leader_stride = ls;
	}

	/*
	 * Each pass through this loop sets up one rcu_data structure,
	 * designating leaders every "ls" CPUs.
	 */
	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (rdp->cpu >= nl) {
			/* New leader, set up for followers & next leader. */
			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
			rdp->nocb_leader = rdp;
			rdp_leader = rdp;
		} else {
			/* Another follower, link to previous leader. */
			rdp->nocb_leader = rdp_leader;
			rdp_prev->nocb_next_follower = rdp;
		}
		rdp_prev = rdp;
	}
}

/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs. */
static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	if (!rcu_is_nocb_cpu(rdp->cpu))
		return false;

	/* If there are early-boot callbacks, move them to nocb lists. */
	if (rdp->nxtlist) {
		rdp->nocb_head = rdp->nxtlist;
		rdp->nocb_tail = rdp->nxttail[RCU_NEXT_TAIL];
		atomic_long_set(&rdp->nocb_q_count, rdp->qlen);
		atomic_long_set(&rdp->nocb_q_count_lazy, rdp->qlen_lazy);
		rdp->nxtlist = NULL;
		rdp->qlen = 0;
		rdp->qlen_lazy = 0;
	}
	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
	return true;
}

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
{
	WARN_ON_ONCE(1); /* Should be dead code. */
	return false;
}

static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
}

static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return NULL;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags)
{
	return false;
}

static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
						     struct rcu_data *rdp,
						     unsigned long flags)
{
	return false;
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
{
	return false;
}

static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
}

static void rcu_spawn_all_nocb_kthreads(int cpu)
{
}

static void __init rcu_spawn_nocb_kthreads(void)
{
}

static bool init_nocb_callback_list(struct rcu_data *rdp)
{
	return false;
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * An adaptive-ticks CPU can potentially execute in kernel mode for an
 * arbitrarily long period of time without the tick firing.  Because of
 * this, it is possible for such a CPU to fail to pass through a
 * quiescent state.  Kick such a CPU by sending it a reschedule IPI,
 * which will get it out of any extended execution in kernel mode.
 */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(cpu))
		smp_send_reschedule(cpu);
#endif /* #ifdef CONFIG_NO_HZ_FULL */
}

#ifdef CONFIG_NO_HZ_FULL_SYSIDLE

static int full_sysidle_state;		/* Current system-idle state. */
#define RCU_SYSIDLE_NOT		0	/* Some CPU is not idle. */
#define RCU_SYSIDLE_SHORT	1	/* All CPUs idle for brief period. */
#define RCU_SYSIDLE_LONG	2	/* All CPUs idle for long enough. */
#define RCU_SYSIDLE_FULL	3	/* All CPUs idle, ready for sysidle. */
#define RCU_SYSIDLE_FULL_NOTED	4	/* Actually entered sysidle state. */

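/*
 * Invoked to note exit from irq or task transition to idle.  Note that
 * usermode execution does -not- count as idle here!  After all, we want
 * to detect full-system idle states, not RCU quiescent states and grace
 * periods.  The caller must have disabled interrupts.
 */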
static void rcu_sysidle_enter(int irq)
{
	unsigned long j;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/* If there are no nohz_full= CPUs, no need to track this. */
	if (!tick_nohz_full_enabled())
		return;

	/* Adjust nesting, check for fully idle. */
	if (irq) {
		rdtp->dynticks_idle_nesting--;
		WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
		if (rdtp->dynticks_idle_nesting != 0)
			return;  /* Still not fully idle. */
	} else {
		if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
		    DYNTICK_TASK_NEST_VALUE) {
			rdtp->dynticks_idle_nesting = 0;
		} else {
			rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
			WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
			return;  /* Still not fully idle. */
		}
	}

	/* Record start of fully idle period. */
	j = jiffies;
	WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);
	smp_mb__before_atomic();
	atomic_inc(&rdtp->dynticks_idle);
	smp_mb__after_atomic();
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
}

/*
 * Unconditionally force exit from full system-idle state.  This is
 * invoked when a normal CPU exits idle, but must be called separately
 * for the timekeeping CPU (tick_do_timer_cpu).  The reason for this
 * is that the timekeeping CPU is permitted to take scheduling-clock
 * interrupts while the system is in system-idle state, and of course
 * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
 * interrupt from any other type of interrupt.
 */
void rcu_sysidle_force_exit(void)
{
	int oldstate = READ_ONCE(full_sysidle_state);
	int newoldstate;

	/*
	 * Each pass through the following loop attempts to exit full
	 * system-idle state.  If contention proves to be a problem,
	 * a trylock-based contention tree could be used here.
	 */
	while (oldstate > RCU_SYSIDLE_SHORT) {
		newoldstate = cmpxchg(&full_sysidle_state,
				      oldstate, RCU_SYSIDLE_NOT);
		if (oldstate == newoldstate &&
		    oldstate == RCU_SYSIDLE_FULL_NOTED) {
			rcu_kick_nohz_cpu(tick_do_timer_cpu);
			return; /* We cleared it, done! */
		}
		oldstate = newoldstate;
	}
	smp_mb(); /* Order initial oldstate with subsequent usage. */
}

/*
 * Invoked to note entry to irq or task transition from idle.  Note that
 * usermode execution does -not- count as idle here!  The caller must
 * have disabled interrupts.
 */
static void rcu_sysidle_exit(int irq)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/* If there are no nohz_full= CPUs, no need to track this. */
	if (!tick_nohz_full_enabled())
		return;

	/* Adjust nesting, check for already non-idle. */
	if (irq) {
		rdtp->dynticks_idle_nesting++;
		WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
		if (rdtp->dynticks_idle_nesting != 1)
			return; /* Already non-idle. */
	} else {
		/*
		 * Allow for irq misnesting.  Yes, it really is possible
		 * to enter an irq handler then never leave it, and maybe
		 * also vice versa.  Handle both possibilities.
		 */
		if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
			rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
			WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
			return; /* Already non-idle. */
		} else {
			rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
		}
	}

	/* Record end of idle period. */
	smp_mb__before_atomic();
	atomic_inc(&rdtp->dynticks_idle);
	smp_mb__after_atomic();
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));

	/*
	 * If we are the timekeeping CPU, we are permitted to be non-idle
	 * during a system-idle state.  This must be the case, because
	 * the timekeeping CPU has to take scheduling-clock interrupts
	 * during the time that the system is transitioning to full
	 * system-idle state.  This means that the timekeeping CPU must
	 * invoke rcu_sysidle_force_exit() directly if it does anything
	 * more than take a scheduling-clock interrupt.
	 */
	if (smp_processor_id() == tick_do_timer_cpu)
		return;

	/* Update system-idle state: we are clearly no longer fully idle! */
	rcu_sysidle_force_exit();
}

/*
 * Check to see if the current CPU is idle.  Note that usermode execution
 * does not count as idle.  The caller must have disabled interrupts,
 * and must be running on tick_do_timer_cpu.
 */
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj)
{
	int cur;
	unsigned long j;
	struct rcu_dynticks *rdtp = rdp->dynticks;

	/* If there are no nohz_full= CPUs, don't check system-wide idleness. */
	if (!tick_nohz_full_enabled())
		return;

	/*
	 * If some other CPU has already reported non-idle, if this is
	 * not the flavor of RCU that tracks sysidle state, or if this
	 * is an offline or the timekeeping CPU, nothing to do.
	 */
	if (!*isidle || rdp->rsp != rcu_state_p ||
	    cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
		return;
	/* Verify affinity of current kthread: must run on tick_do_timer_cpu. */
	WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);

	/* Pick up current idle and NMI-nesting counter and check. */
	cur = atomic_read(&rdtp->dynticks_idle);
	if (cur & 0x1) {
		*isidle = false; /* We are not idle! */
		return;
	}
	smp_mb(); /* Read counters before timestamps. */

	/* Pick up timestamps. */
	j = READ_ONCE(rdtp->dynticks_idle_jiffies);
	/* If this CPU entered idle more recently, update maxj timestamp. */
	if (ULONG_CMP_LT(*maxj, j))
		*maxj = j;
}

/*
 * Is this the flavor of RCU that is handling full-system idle?
 */
static bool is_sysidle_rcu_state(struct rcu_state *rsp)
{
	return rsp == rcu_state_p;
}

/*
 * Return a delay in jiffies based on the number of CPUs, rcu_node
 * leaf fanout, and jiffies tick rate.  The idea is to allow larger
 * systems more time to transition to full-idle state in order to
 * avoid the cache thrashing that otherwise occurs on the state variable.
 * Really small systems (less than a couple of tens of CPUs) should
 * instead use a single global atomically incremented counter, and later
 * versions of this will automatically reconfigure themselves accordingly.
 */
static unsigned long rcu_sysidle_delay(void)
{
	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
		return 0;
	return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
}

/*
 * Advance the full-system-idle state.  This is invoked when all of
 * the non-timekeeping CPUs are idle.
 */
static void rcu_sysidle(unsigned long j)
{
	/* Check the current state. */
	switch (READ_ONCE(full_sysidle_state)) {
	case RCU_SYSIDLE_NOT:

		/* First time all are idle, so note a short idle period. */
		WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_SHORT);
		break;

	case RCU_SYSIDLE_SHORT:

		/*
		 * Idle for a bit, time to advance to next state?
		 * cmpxchg failure means race with non-idle, let them win.
		 */
		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
			(void)cmpxchg(&full_sysidle_state,
				      RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
		break;

	case RCU_SYSIDLE_LONG:

		/*
		 * Do an additional check pass before advancing to full.
		 * cmpxchg failure means race with non-idle, let them win.
		 */
		if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
			(void)cmpxchg(&full_sysidle_state,
				      RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
		break;

	default:
		break;
	}
}

/*
 * Found a non-idle non-timekeeping CPU, so kick the system-idle state
 * back to the beginning.
 */
static void rcu_sysidle_cancel(void)
{
	smp_mb();
	if (full_sysidle_state > RCU_SYSIDLE_SHORT)
		WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_NOT);
}

/*
 * Update the sysidle state based on the results of a force-quiescent-state
 * scan of the CPUs' dyntick-idle state.
 */
static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
			       unsigned long maxj, bool gpkt)
{
	if (rsp != rcu_state_p)
		return;  /* Wrong flavor, ignore. */
	if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
		return;  /* Running state machine from timekeeping CPU. */
	if (isidle)
		rcu_sysidle(maxj);    /* More idle! */
	else
		rcu_sysidle_cancel(); /* Idle is over. */
}

/*
 * Wrapper for rcu_sysidle_report() when called from the grace-period
 * kthread's context.
 */
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj)
{
	/* If there are no nohz_full= CPUs, no need to track this. */
	if (!tick_nohz_full_enabled())
		return;

	rcu_sysidle_report(rsp, isidle, maxj, true);
}

/* Callback and function for forcing an RCU grace period. */
struct rcu_sysidle_head {
	struct rcu_head rh;
	int inuse;
};

static void rcu_sysidle_cb(struct rcu_head *rhp)
{
	struct rcu_sysidle_head *rshp;

	/*
	 * The following memory barrier is needed to replace the
	 * memory barriers that would normally be in the memory
	 * allocator.
	 */
	smp_mb();  /* grace period precedes setting inuse. */

	rshp = container_of(rhp, struct rcu_sysidle_head, rh);
	WRITE_ONCE(rshp->inuse, 0);
}

/*
 * Check to see if the system is fully idle, other than the timekeeping CPU.
 * The caller must have disabled interrupts.  This is not intended to be
 * called unless tick_nohz_full_enabled().
 */
bool rcu_sys_is_idle(void)
{
	static struct rcu_sysidle_head rsh;
	int rss = READ_ONCE(full_sysidle_state);

	if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
		return false;

	/* Handle small-system case by doing a full scan of CPUs. */
	if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
		int oldrss = rss - 1;

		/*
		 * One pass to advance to each state up to _FULL.
		 * Give up if any pass fails to advance the state.
		 */
		while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
			int cpu;
			bool isidle = true;
			unsigned long maxj = jiffies - ULONG_MAX / 4;
			struct rcu_data *rdp;

			/* Scan all the CPUs looking for nonidle CPUs. */
			for_each_possible_cpu(cpu) {
				rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
				rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
				if (!isidle)
					break;
			}
			rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
			oldrss = rss;
			rss = READ_ONCE(full_sysidle_state);
		}
	}

	/* If this is the first observation of an idle period, record it. */
	if (rss == RCU_SYSIDLE_FULL) {
		rss = cmpxchg(&full_sysidle_state,
			      RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
		return rss == RCU_SYSIDLE_FULL;
	}

	smp_mb(); /* ensure rss load happens before later caller actions. */

	/* If already fully idle, tell the caller (in case of races). */
	if (rss == RCU_SYSIDLE_FULL_NOTED)
		return true;

	/*
	 * If we aren't there yet, and a grace period is not in flight,
	 * make sure that a grace period will soon be in flight.  Doing
	 * this in an RCU callback keeps the overhead down while ensuring
	 * repeated retries while the system remains non-idle.
	 */
	if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
	    !rcu_gp_in_progress(rcu_state_p) &&
	    !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
		call_rcu(&rsh.rh, rcu_sysidle_cb);
	return false;
}

/*
 * Initialize dynticks sysidle state for CPUs coming online.
 */
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
{
	rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
}

#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

static void rcu_sysidle_enter(int irq)
{
}

static void rcu_sysidle_exit(int irq)
{
}

static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj)
{
}

static bool is_sysidle_rcu_state(struct rcu_state *rsp)
{
	return false;
}

static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj)
{
}

static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
{
}

#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

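/*
 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
 * grace-period kthread will do force_quiescent_state() processing?
 * The idea is to avoid waking up RCU core processing on such a
 * CPU unless the grace period has extended for too long.
 *
 * This code relies on the fact that all NO_HZ_FULL CPUs are also
 * CONFIG_RCU_NOCB_CPU CPUs.
 */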
static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress(rsp) ||
	     ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}

/*
 * Bind the grace-period kthread for the sysidle flavor of RCU to the
 * timekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	int __maybe_unused cpu;

	if (!tick_nohz_full_enabled())
		return;
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	cpu = tick_do_timer_cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids)
		set_cpus_allowed_ptr(current, cpumask_of(cpu));
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
	housekeeping_affine(current);
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
}

/* Record the current task on dyntick-idle entry. */
static void rcu_dynticks_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Record no current task on dyntick-idle exit. */
static void rcu_dynticks_task_exit(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}