/*
 * Read-Copy Update mechanism for mutual exclusion, tree-based
 * ("hierarchical") version.
 *
 * This file implements the rcu_sched and rcu_bh flavors of RCU over a
 * hierarchy of rcu_node structures, which keeps lock contention bounded
 * on large SMP systems.  The preemptable flavor is layered on top via
 * rcutree_plugin.h, included at the end of this file.
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 * Documentation/RCU in the kernel source tree.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>

#include "rcutree.h"

/* Data structures. */

#define RCU_STATE_INITIALIZER(name) { \
	.level = { &name.node[0] }, \
	.levelcnt = { \
		NUM_RCU_LVL_0,  /* root of hierarchy. */ \
		NUM_RCU_LVL_1, \
		NUM_RCU_LVL_2, \
		NUM_RCU_LVL_3,  /* == MAX_RCU_LVLS */ \
	}, \
	.signaled = RCU_GP_IDLE, \
	.gpnum = -300, \
	.completed = -300, \
	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
	.orphan_cbs_list = NULL, \
	.orphan_cbs_tail = &name.orphan_cbs_list, \
	.orphan_qlen = 0, \
	.fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
	.n_force_qs = 0, \
	.n_force_qs_ngp = 0, \
}

struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);

struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);

/*
 * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
}
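
/*
 * For illustration: ->gpnum counts the most recently started grace period
 * and ->completed the most recently completed one, so the two are equal
 * exactly when RCU is idle.  Starting grace period n+1 advances ->gpnum
 * to n+1 while ->completed stays at n (see rcu_start_gp() below), and
 * rcu_gp_in_progress() returns 1 until cpu_quiet_msk_finish() advances
 * ->completed to match.
 */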

/*
 * Note a quiescent state.  Because we do not need to know how many
 * quiescent states passed, just if there was at least one since the
 * start of the grace period, this just sets a flag.
 */
void rcu_sched_qs(int cpu)
{
	struct rcu_data *rdp;

	rdp = &per_cpu(rcu_sched_data, cpu);
	rdp->passed_quiesc_completed = rdp->completed;
	barrier();
	rdp->passed_quiesc = 1;
	rcu_preempt_note_context_switch(cpu);
}

void rcu_bh_qs(int cpu)
{
	struct rcu_data *rdp;

	rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc_completed = rdp->completed;
	barrier();
	rdp->passed_quiesc = 1;
}
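
/*
 * Illustrative timeline for the flag protocol above (a sketch, assuming
 * the usual call site in the scheduler; exact call chains vary by
 * kernel version):
 *
 *	schedule()
 *	  -> rcu_sched_qs(cpu)	which records which grace period the
 *				quiescent state belongs to, then sets
 *				->passed_quiesc
 *
 * The barrier() keeps the compiler from reordering the two stores, so
 * the RCU softirq never sees ->passed_quiesc set while
 * ->passed_quiesc_completed still holds a stale value.
 */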

#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = 1,
	.dynticks = 1,
};
#endif /* #ifdef CONFIG_NO_HZ */

static int blimit = 10;		/* Maximum callbacks per softirq. */
static int qhimark = 10000;	/* If this many pending, ignore blimit. */
static int qlowmark = 100;	/* Once only this many pending, use blimit. */

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);

static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
static int rcu_pending(int cpu);

/*
 * Return the number of RCU-sched batches processed thus far for debug & stats.
 */
long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches processed thus far for debug & stats.
 */
long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
}

/*
 * Does the current CPU require a yet-as-unscheduled grace period?
 */
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
}
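
/*
 * For illustration: each CPU's callbacks live on the single linked list
 * ->nxtlist, carved into four segments by the ->nxttail[] array of
 * pointers to ->next pointers:
 *
 *	->nxtlist -> [DONE] -> [WAIT] -> [NEXT_READY] -> [NEXT] -> NULL
 *
 * ->nxttail[RCU_DONE_TAIL] marks the end of the callbacks whose grace
 * period has already elapsed (ready to invoke), ->nxttail[RCU_WAIT_TAIL]
 * the end of those waiting on the current grace period,
 * ->nxttail[RCU_NEXT_READY_TAIL] the end of those that will wait on the
 * next grace period, and ->nxttail[RCU_NEXT_TAIL] the end of the list.
 * A segment is empty when its tail pointer equals the preceding one,
 * which is why the two functions above are simple pointer comparisons.
 */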

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}
#ifdef CONFIG_SMP

/*
 * If the specified CPU is offline, tell the caller that it is in
 * a quiescent state.  Otherwise, whack it with a reschedule IPI.
 * Grace periods can end up waiting on an offline CPU when that
 * CPU is in the idle loop and not taking interrupts.  This can
 * happen while the grace-period machinery has not yet noticed that
 * the CPU went offline, so checking here is cheap insurance.
 */
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
	/*
	 * If the CPU is offline, it is in a quiescent state.  We can
	 * trust its state not to change because interrupts are disabled.
	 */
	if (cpu_is_offline(rdp->cpu)) {
		rdp->offline_fqs++;
		return 1;
	}

	/* If preemptable RCU, no point in sending reschedule IPI. */
	if (rdp->preemptable)
		return 0;

	/* The CPU is online, so send it a reschedule IPI. */
	if (rdp->cpu != smp_processor_id())
		smp_send_reschedule(rdp->cpu);
	else
		set_need_resched();
	rdp->resched_ipi++;
	return 0;
}

#endif /* #ifdef CONFIG_SMP */

#ifdef CONFIG_NO_HZ

/**
 * rcu_enter_nohz - inform RCU that current CPU is entering nohz
 *
 * Enter nohz mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in nohz mode, a possibility
 * handled by rcu_irq_enter() and rcu_irq_exit()).
 */
void rcu_enter_nohz(void)
{
	unsigned long flags;
	struct rcu_dynticks *rdtp;

	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	rdtp->dynticks++;
	rdtp->dynticks_nesting--;
	WARN_ON_ONCE(rdtp->dynticks & 0x1);
	local_irq_restore(flags);
}

/*
 * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
 *
 * Exit nohz mode, in other words, -enter- the mode in which RCU
 * read-side critical sections normally occur.
 */
void rcu_exit_nohz(void)
{
	unsigned long flags;
	struct rcu_dynticks *rdtp;

	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	rdtp->dynticks++;
	rdtp->dynticks_nesting++;
	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
	local_irq_restore(flags);
	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is active.
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (rdtp->dynticks & 0x1)
		return;
	rdtp->dynticks_nmi++;
	WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is no longer active.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (rdtp->dynticks & 0x1)
		return;
	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	rdtp->dynticks_nmi++;
	WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
}

/**
 * rcu_irq_enter - inform RCU of entry to hard irq context
 *
 * If the CPU was idle with dynamic ticks active, this updates the
 * rdtp->dynticks to let the RCU handling know that the CPU is active.
 */
void rcu_irq_enter(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (rdtp->dynticks_nesting++)
		return;
	rdtp->dynticks++;
	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}

/**
 * rcu_irq_exit - inform RCU of exit from hard irq context
 *
 * If the CPU was idle with dynamic ticks active, update the rdtp->dynticks
 * to let the RCU handling be aware that the CPU is going back to idle
 * with no ticks.
 */
void rcu_irq_exit(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (--rdtp->dynticks_nesting)
		return;
	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	rdtp->dynticks++;
	WARN_ON_ONCE(rdtp->dynticks & 0x1);

	/* If the interrupt queued a callback, get out of dyntick mode. */
	if (__get_cpu_var(rcu_sched_data).nxtlist ||
	    __get_cpu_var(rcu_bh_data).nxtlist)
		set_need_resched();
}
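
/*
 * Worked example of the counting protocol above (illustrative values
 * only): ->dynticks starts at 1 (odd), meaning "not in dynticks-idle
 * mode".  Every nohz transition increments it, so the counter is even
 * exactly while the CPU is in dynticks-idle mode:
 *
 *	boot:			dynticks == 1	(odd:  RCU watching)
 *	rcu_enter_nohz():	dynticks == 2	(even: idle, implicit QS)
 *	rcu_irq_enter():	dynticks == 3	(odd:  irq may use RCU)
 *	rcu_irq_exit():		dynticks == 4	(even: idle again)
 *
 * force_quiescent_state() can therefore credit a CPU with a quiescent
 * state either by sampling an even value or by later observing that the
 * counter changed.  The ->dynticks_nmi counter plays the same game for
 * NMI handlers.
 */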

/*
 * Record the specified "completed" value, which is later used to
 * validate dynticks counter manipulations.
 */
static void dyntick_record_completed(struct rcu_state *rsp, long comp)
{
	rsp->dynticks_completed = comp;
}

#ifdef CONFIG_SMP

/*
 * Recall the previously recorded value of the completion for dynticks.
 */
static long dyntick_recall_completed(struct rcu_state *rsp)
{
	return rsp->dynticks_completed;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit it with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	int ret;
	int snap;
	int snap_nmi;

	snap = rdp->dynticks->dynticks;
	snap_nmi = rdp->dynticks->dynticks_nmi;
	smp_mb();	/* Order sampling of snap with end of grace period. */
	rdp->dynticks_snap = snap;
	rdp->dynticks_nmi_snap = snap_nmi;
	ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
	if (ret)
		rdp->dynticks_fqs++;
	return ret;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	long curr;
	long curr_nmi;
	long snap;
	long snap_nmi;

	curr = rdp->dynticks->dynticks;
	snap = rdp->dynticks_snap;
	curr_nmi = rdp->dynticks->dynticks_nmi;
	snap_nmi = rdp->dynticks_nmi_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the
	 * CPU already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if ((curr != snap || (curr & 0x1) == 0) &&
	    (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
		rdp->dynticks_fqs++;
		return 1;
	}

	/* Go check for the CPU being offline. */
	return rcu_implicit_offline_qs(rdp);
}

#endif /* #ifdef CONFIG_SMP */

#else /* #ifdef CONFIG_NO_HZ */

static void dyntick_record_completed(struct rcu_state *rsp, long comp)
{
}

#ifdef CONFIG_SMP

/*
 * If there are no dynticks, then the only way that a CPU can passively
 * be in a quiescent state is to be offline.  Otherwise, whack it with
 * a reschedule IPI.
 */
static long dyntick_recall_completed(struct rcu_state *rsp)
{
	return rsp->completed;
}

static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	return 0;
}

static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	return rcu_implicit_offline_qs(rdp);
}

#endif /* #ifdef CONFIG_SMP */

#endif /* #else #ifdef CONFIG_NO_HZ */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	rsp->gp_start = jiffies;
	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void print_other_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	long delta;
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Only let one CPU complain about others per time interval. */

	spin_lock_irqsave(&rnp->lock, flags);
	delta = jiffies - rsp->jiffies_stall;
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;

	/*
	 * Now rat on any tasks that got kicked up to the root rcu_node
	 * due to CPU offlining.
	 */
	rcu_print_task_stall(rnp);
	spin_unlock_irqrestore(&rnp->lock, flags);

	/* OK, time to rat on our buddy... */

	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
	rcu_for_each_leaf_node(rsp, rnp) {
		rcu_print_task_stall(rnp);
		if (rnp->qsmask == 0)
			continue;
		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
			if (rnp->qsmask & (1UL << cpu))
				printk(" %d", rnp->grplo + cpu);
	}
	printk(" (detected by %d, t=%ld jiffies)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
	trigger_all_cpu_backtrace();

	force_quiescent_state(rsp, 0);  /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
	       smp_processor_id(), jiffies - rsp->gp_start);
	trigger_all_cpu_backtrace();

	spin_lock_irqsave(&rnp->lock, flags);
	if ((long)(jiffies - rsp->jiffies_stall) >= 0)
		rsp->jiffies_stall =
			jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	spin_unlock_irqrestore(&rnp->lock, flags);

	set_need_resched();  /* kick ourselves to get things going. */
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
	long delta;
	struct rcu_node *rnp;

	delta = jiffies - rsp->jiffies_stall;
	rnp = rdp->mynode;
	if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {

		/* They had two time units to dump stack, so complain. */
		print_other_cpu_stall(rsp);
	}
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Update CPU-local rcu_data state to record the newly noticed grace period.
 * This is used both when we started the grace period and when we notice
 * that someone else started the grace period.
 */
static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
{
	rdp->qs_pending = 1;
	rdp->passed_quiesc = 0;
	rdp->gpnum = rsp->gpnum;
}

/*
 * Did someone else start a new RCU grace period since we last checked?
 * Update local state appropriately if so.  Must be called on the CPU
 * corresponding to rdp.
 */
static int
check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	int ret = 0;

	local_irq_save(flags);
	if (rdp->gpnum != rsp->gpnum) {
		note_new_gpnum(rsp, rdp);
		ret = 1;
	}
	local_irq_restore(flags);
	return ret;
}

/*
 * Start a new RCU grace period if warranted, re-initializing the hierarchy
 * in preparation for detecting the next grace period.  The caller must hold
 * the root node's ->lock, which is released before return.  Hard irqs must
 * be disabled.
 */
static void
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
	struct rcu_node *rnp = rcu_get_root(rsp);

	if (!cpu_needs_another_gp(rsp, rdp)) {
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	/* Advance to a new grace period and initialize state. */
	rsp->gpnum++;
	WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
	record_gp_stall_check_time(rsp);
	dyntick_record_completed(rsp, rsp->completed - 1);
	note_new_gpnum(rsp, rdp);

	/*
	 * Because this CPU just now started the new grace period, we know
	 * that all of its callbacks will be covered by this upcoming grace
	 * period, even the ones that were registered arbitrarily recently.
	 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
	 *
	 * Other CPUs cannot be sure exactly when the grace period started.
	 * Therefore, their recently registered callbacks must pass through
	 * an additional RCU_NEXT_READY stage, so that they will be handled
	 * by the next RCU grace period.
	 */
	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

	/* Special-case the common single-level case. */
	if (NUM_RCU_NODES == 1) {
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		rnp->gpnum = rsp->gpnum;
		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	spin_unlock(&rnp->lock);  /* leave irqs disabled. */

	/* Exclude any concurrent CPU-hotplug operations. */
	spin_lock(&rsp->onofflock);  /* irqs already disabled. */

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first
	 * order, starting from the root rcu_node structure.  This
	 * operation relies on the layout of the hierarchy within the
	 * rsp->node[] array.  Note that other CPUs will access only
	 * the leaves of the hierarchy, which still indicate that no
	 * grace period is in progress, at least until the corresponding
	 * leaf node has been initialized.  In addition, we have excluded
	 * CPU-hotplug operations.
	 *
	 * Note that the grace period cannot complete until we finish
	 * the initialization process, as there will be at least one
	 * qsmask bit set in the root node until that time, namely the
	 * one corresponding to this CPU, due to the fact that we have
	 * irqs disabled.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		spin_lock(&rnp->lock);		/* irqs already disabled. */
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		rnp->gpnum = rsp->gpnum;
		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	}

	rnp = rcu_get_root(rsp);
	spin_lock(&rnp->lock);			/* irqs already disabled. */
	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
	spin_unlock_irqrestore(&rsp->onofflock, flags);
}

/*
 * Advance this CPU's callbacks, but only if the current grace period
 * has ended.  This may be called only from the CPU to whom the rdp
 * belongs.
 */
static void
rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
{
	long completed_snap;
	unsigned long flags;

	local_irq_save(flags);
	completed_snap = ACCESS_ONCE(rsp->completed);	/* outside of lock. */

	/* Did another grace period end? */
	if (rdp->completed != completed_snap) {

		/* Advance callbacks.  No harm if list empty. */
		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

		/* Remember that we saw this grace-period completion. */
		rdp->completed = completed_snap;
	}
	local_irq_restore(flags);
}

/*
 * Clean up after the prior grace period and let rcu_start_gp() start up
 * the next grace period if one is needed.  Note that the caller must
 * hold rnp->lock, as required by rcu_start_gp(), which will release it.
 */
static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
	rsp->completed = rsp->gpnum;
	rsp->signaled = RCU_GP_IDLE;
	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
}

/*
 * Similar to cpu_quiet(), for which it is a helper function.  Allows
 * a group of CPUs to be quieted at one go, though all the CPUs in the
 * group must be represented by the same leaf rcu_node structure.
 * That structure's lock must be held upon entry, and it is released
 * before return.
 */
static void
cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
	      unsigned long flags)
	__releases(rnp->lock)
{
	struct rcu_node *rnp_c;

	/* Walk up the rcu_node hierarchy. */
	for (;;) {
		if (!(rnp->qsmask & mask)) {

			/* Our bit has already been cleared, so done. */
			spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		rnp->qsmask &= ~mask;
		if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {

			/* Other bits still set at this level, so done. */
			spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		mask = rnp->grpmask;
		if (rnp->parent == NULL) {

			/* No more levels.  Exit loop holding root lock. */

			break;
		}
		spin_unlock_irqrestore(&rnp->lock, flags);
		rnp_c = rnp;
		rnp = rnp->parent;
		spin_lock_irqsave(&rnp->lock, flags);
		WARN_ON_ONCE(rnp_c->qsmask);
	}

	/*
	 * Get here if we are the last CPU to pass through a quiescent
	 * state for this grace period.  Invoke cpu_quiet_msk_finish()
	 * to clean up and start the next grace period if one is needed.
	 */
	cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */
}
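
/*
 * Worked example of the walk above (illustrative configuration only):
 * on a 128-CPU system with 64-way fanout there are two levels, 64-CPU
 * leaf nodes plus the root.  When the last CPU of a leaf reports its
 * quiescent state, that leaf's ->qsmask drops to zero, so the loop
 * climbs to the root and clears the leaf's bit there.  Only when the
 * root's ->qsmask also reaches zero (and no preempted readers remain)
 * does cpu_quiet_msk_finish() declare the grace period over.
 */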

/*
 * Record a quiescent state for the specified CPU.  The lastcomp argument
 * is used to make sure we are still in the grace period of interest --
 * we don't want to end the current grace period based on quiescent
 * states detected in an earlier grace period!
 */
static void
cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;

	rnp = rdp->mynode;
	spin_lock_irqsave(&rnp->lock, flags);
	if (lastcomp != ACCESS_ONCE(rsp->completed)) {

		/*
		 * Someone beat us to it for this grace period, so leave.
		 * The race with GP start is resolved by the fact that we
		 * hold the leaf rcu_node lock, so that the per-CPU bits
		 * cannot yet be initialized -- so we would simply find our
		 * CPU's bit already cleared in cpu_quiet_msk() if this race
		 * occurred.
		 */
		rdp->passed_quiesc = 0;	/* try again later! */
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	mask = rdp->grpmask;
	if ((rnp->qsmask & mask) == 0) {
		spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rdp->qs_pending = 0;

		/*
		 * This GP can't end until cpu checks in, so all of our
		 * callbacks can be processed during the next GP.
		 */
		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

		cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
	}
}

/*
 * Check to see if there is a new grace period of which this CPU
 * is not yet aware, and if so, set up local rcu_data state for it.
 * Otherwise, see if this CPU has just passed through its first
 * quiescent state for this grace period, and record that fact if so.
 */
static void
rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
{
	/* If there is now a new grace period, record and return. */
	if (check_for_new_grace_period(rsp, rdp))
		return;

	/*
	 * Does this CPU still need to do its part for current grace period?
	 * If no, return; otherwise, check its quiescent state.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;

	/* Tell RCU we are done (but cpu_quiet() will be the judge of that). */
	cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
 * specified flavor of RCU.  The callbacks will be adopted by the next
 * _rcu_barrier() invocation or by the next CPU_DEAD event, whichever
 * comes first.  Because this is invoked from the CPU_DYING notifier,
 * irqs are already disabled.
 */
static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
{
	int i;
	struct rcu_data *rdp = rsp->rda[smp_processor_id()];

	if (rdp->nxtlist == NULL)
		return;  /* irqs disabled, so comparison is stable. */
	spin_lock(&rsp->onofflock);  /* irqs already disabled. */
	*rsp->orphan_cbs_tail = rdp->nxtlist;
	rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
	rsp->orphan_qlen += rdp->qlen;
	rdp->qlen = 0;
	spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
}

/*
 * Adopt previously orphaned RCU callbacks.
 */
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp;

	spin_lock_irqsave(&rsp->onofflock, flags);
	rdp = rsp->rda[smp_processor_id()];
	if (rsp->orphan_cbs_list == NULL) {
		spin_unlock_irqrestore(&rsp->onofflock, flags);
		return;
	}
	*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
	rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
	rdp->qlen += rsp->orphan_qlen;
	rsp->orphan_cbs_list = NULL;
	rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
	rsp->orphan_qlen = 0;
	spin_unlock_irqrestore(&rsp->onofflock, flags);
}

/*
 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
 * and adopt any callbacks that it might still have queued.
 */
static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	long lastcomp;
	unsigned long mask;
	struct rcu_data *rdp = rsp->rda[cpu];
	struct rcu_node *rnp;

	/* Exclude any attempts to start a new grace period. */
	spin_lock_irqsave(&rsp->onofflock, flags);

	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
	rnp = rdp->mynode;	/* this is the outgoing CPU's rnp. */
	mask = rdp->grpmask;	/* rnp->grplo is constant. */
	do {
		spin_lock(&rnp->lock);		/* irqs already disabled. */
		rnp->qsmaskinit &= ~mask;
		if (rnp->qsmaskinit != 0) {
			spin_unlock(&rnp->lock); /* irqs remain disabled. */
			break;
		}

		/*
		 * If there were tasks blocking the current grace period
		 * on this leaf rcu_node structure, move them up to the
		 * root, and restore this leaf's bit so that the grace
		 * period continues to wait on the moved tasks.
		 */
		if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
			rnp->qsmask |= mask;

		mask = rnp->grpmask;
		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
		rnp = rnp->parent;
	} while (rnp != NULL);
	lastcomp = rsp->completed;

	spin_unlock_irqrestore(&rsp->onofflock, flags);

	rcu_adopt_orphan_cbs(rsp);
}

/*
 * Remove the specified CPU from the RCU hierarchy and move any pending
 * callbacks that it might have to the current CPU.  This code assumes
 * that at least one CPU in the system will remain running at all times.
 * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
 */
static void rcu_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_sched_state);
	__rcu_offline_cpu(cpu, &rcu_bh_state);
	rcu_preempt_offline_cpu(cpu);
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
{
}

static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
{
}

static void rcu_offline_cpu(int cpu)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */
static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_head *next, *list, **tail;
	int count;

	/* If no callbacks are ready, just return. */
	if (!cpu_has_callbacks_ready_to_invoke(rdp))
		return;

	/*
	 * Extract the list of ready callbacks, disabling irqs to prevent
	 * races with call_rcu() from interrupt handlers.
	 */
	local_irq_save(flags);
	list = rdp->nxtlist;
	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
	tail = rdp->nxttail[RCU_DONE_TAIL];
	for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
		if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
			rdp->nxttail[count] = &rdp->nxtlist;
	local_irq_restore(flags);

	/* Invoke callbacks. */
	count = 0;
	while (list) {
		next = list->next;
		prefetch(next);
		list->func(list);
		list = next;
		if (++count >= rdp->blimit)
			break;
	}

	local_irq_save(flags);

	/* Update count, and requeue any remaining callbacks. */
	rdp->qlen -= count;
	if (list != NULL) {
		*tail = rdp->nxtlist;
		rdp->nxtlist = list;
		for (count = 0; count < RCU_NEXT_SIZE; count++)
			if (&rdp->nxtlist == rdp->nxttail[count])
				rdp->nxttail[count] = tail;
			else
				break;
	}

	/* Reinstate batch limit if we have worked down the excess. */
	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;

	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
		rdp->qlen_last_fqs_check = 0;
		rdp->n_force_qs_snap = rsp->n_force_qs;
	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
		rdp->qlen_last_fqs_check = rdp->qlen;

	local_irq_restore(flags);

	/* Re-raise the RCU softirq if there are callbacks remaining. */
	if (cpu_has_callbacks_ready_to_invoke(rdp))
		raise_softirq(RCU_SOFTIRQ);
}
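
/*
 * Worked example of the throttling above (illustrative numbers): with
 * the default blimit of 10, a CPU holding, say, 37 ready callbacks
 * invokes only 10 per RCU_SOFTIRQ pass and re-raises the softirq three
 * more times to drain the rest, bounding softirq latency.  Only when
 * more than qhimark (10000) callbacks pile up does __call_rcu() lift
 * the limit to LONG_MAX, and the "qlen <= qlowmark" test above restores
 * the normal limit once the backlog has drained.
 */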

/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule the RCU softirq handler.
 *
 * This function must be called with hardirqs disabled.  It is normally
 * invoked from the scheduling-clock interrupt, and returns immediately
 * if there is nothing for RCU to do on this CPU.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (!rcu_pending(cpu))
		return;
	if (user ||
	    (idle_cpu(cpu) && rcu_scheduler_active &&
	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so note it.
		 *
		 * No memory barrier is required here because both
		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
		 * variables that other CPUs neither access nor modify,
		 * at least not while the corresponding CPU is online.
		 */

		rcu_sched_qs(cpu);
		rcu_bh_qs(cpu);

	} else if (!in_softirq()) {

		/*
		 * Get here if this CPU did not take its interrupt from
		 * softirq, in other words, if it is not interrupting
		 * a rcu_bh read-side critical section.  This is an _bh
		 * critical section, so note it.
		 */

		rcu_bh_qs(cpu);
	}
	rcu_preempt_check_callbacks(cpu);
	raise_softirq(RCU_SOFTIRQ);
}

#ifdef CONFIG_SMP

/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
 * Returns 1 if the current grace period ends while scanning (possibly
 * because we made it end).
 */
static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
			       int (*f)(struct rcu_data *))
{
	unsigned long bit;
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		mask = 0;
		spin_lock_irqsave(&rnp->lock, flags);
		if (rsp->completed != lastcomp) {
			spin_unlock_irqrestore(&rnp->lock, flags);
			return 1;
		}
		if (rnp->qsmask == 0) {
			spin_unlock_irqrestore(&rnp->lock, flags);
			continue;
		}
		cpu = rnp->grplo;
		bit = 1;
		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
			if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
				mask |= bit;
		}
		if (mask != 0 && rsp->completed == lastcomp) {

			/* cpu_quiet_msk() releases rnp->lock. */
			cpu_quiet_msk(mask, rsp, rnp, flags);
			continue;
		}
		spin_unlock_irqrestore(&rnp->lock, flags);
	}
	return 0;
}

/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */
static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
	unsigned long flags;
	long lastcomp;
	struct rcu_node *rnp = rcu_get_root(rsp);
	u8 signaled;

	if (!rcu_gp_in_progress(rsp))
		return;  /* No grace period in progress, nothing to force. */
	if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
		return;	/* Someone else is already on the job. */
	}
	if (relaxed &&
	    (long)(rsp->jiffies_force_qs - jiffies) >= 0)
		goto unlock_ret; /* no emergency and done recently. */
	rsp->n_force_qs++;
	spin_lock(&rnp->lock);
	lastcomp = rsp->completed;
	signaled = rsp->signaled;
	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
	if (lastcomp == rsp->gpnum) {
		rsp->n_force_qs_ngp++;
		spin_unlock(&rnp->lock);
		goto unlock_ret;  /* no GP in progress, time updated. */
	}
	spin_unlock(&rnp->lock);
	switch (signaled) {
	case RCU_GP_IDLE:
	case RCU_GP_INIT:

		break; /* grace period idle or initializing, ignore. */

	case RCU_SAVE_DYNTICK:

		if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
			break; /* So gcc recognizes the dead code. */

		/* Record dyntick-idle state. */
		if (rcu_process_dyntick(rsp, lastcomp,
					dyntick_save_progress_counter))
			goto unlock_ret;

		/* Update state, record completion counter. */
		spin_lock(&rnp->lock);
		if (lastcomp == rsp->completed &&
		    rsp->signaled == RCU_SAVE_DYNTICK) {
			rsp->signaled = RCU_FORCE_QS;
			dyntick_record_completed(rsp, lastcomp);
		}
		spin_unlock(&rnp->lock);
		break;

	case RCU_FORCE_QS:

		/* Check dyntick-idle state, send IPI to laggarts. */
		if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp),
					rcu_implicit_dynticks_qs))
			goto unlock_ret;

		/* Leave state in case more forcing is required. */

		break;
	}
unlock_ret:
	spin_unlock_irqrestore(&rsp->fqslock, flags);
}
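
/*
 * Summary of the ->signaled state machine driven above (a sketch, as
 * the exact encoding lives in rcutree.h): grace-period start advances
 * RCU_GP_IDLE -> RCU_GP_INIT -> RCU_SIGNAL_INIT (see rcu_start_gp()),
 * after which force_quiescent_state() steps RCU_SAVE_DYNTICK ->
 * RCU_FORCE_QS: the first pass snapshots every holdout CPU's dynticks
 * counters, and later passes compare against those snapshots, falling
 * back to reschedule IPIs for CPUs that are neither idle nor offline.
 */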

#else /* #ifdef CONFIG_SMP */

static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
	set_need_resched();
}

#endif /* #else #ifdef CONFIG_SMP */

/*
 * This does the RCU processing work from softirq context for the
 * specified rcu_state and rcu_data structures.  This may be called
 * only from the CPU to whom the rdp belongs.
 */
static void
__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;

	WARN_ON_ONCE(rdp->beenonline == 0);

	/*
	 * If an RCU GP has gone long enough, go check for dyntick
	 * idle CPUs and, if needed, send resched IPIs.
	 */
	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
		force_quiescent_state(rsp, 1);

	/*
	 * Advance callbacks in response to end of earlier grace
	 * period that some other CPU ended.
	 */
	rcu_process_gp_end(rsp, rdp);

	/* Update RCU state based on any recent quiescent states. */
	rcu_check_quiescent_state(rsp, rdp);

	/* Does this CPU require a not-yet-started grace period? */
	if (cpu_needs_another_gp(rsp, rdp)) {
		spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
		rcu_start_gp(rsp, flags);  /* releases above lock */
	}

	/* If there are callbacks ready, invoke them. */
	rcu_do_batch(rsp, rdp);
}

/*
 * Do softirq processing for the current CPU.
 */
static void rcu_process_callbacks(struct softirq_action *unused)
{
	/*
	 * Memory references from any prior RCU read-side critical sections
	 * executed by the interrupted code must be seen before any RCU
	 * grace-period manipulations below.
	 */
	smp_mb(); /* See above block comment. */

	__rcu_process_callbacks(&rcu_sched_state,
				&__get_cpu_var(rcu_sched_data));
	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_process_callbacks();

	/*
	 * Memory references from any later RCU read-side critical sections
	 * executed by the interrupted code must be seen after any RCU
	 * grace-period manipulations above.
	 */
	smp_mb(); /* See above block comment. */
}

static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
	   struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;

	smp_mb(); /* Ensure RCU update seen before callback registry. */

	/*
	 * Opportunistically note grace-period endings and beginnings.
	 * Note that we might see a beginning right after we see an
	 * end, but never vice versa, since this CPU has to pass through
	 * a quiescent state betweentimes.
	 */
	local_irq_save(flags);
	rdp = rsp->rda[smp_processor_id()];
	rcu_process_gp_end(rsp, rdp);
	check_for_new_grace_period(rsp, rdp);

	/* Add the callback to our list. */
	*rdp->nxttail[RCU_NEXT_TAIL] = head;
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;

	/* Start a new grace period if one not already started. */
	if (!rcu_gp_in_progress(rsp)) {
		unsigned long nestflag;
		struct rcu_node *rnp_root = rcu_get_root(rsp);

		spin_lock_irqsave(&rnp_root->lock, nestflag);
		rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
	}

	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke force_quiescent_state()
	 * if some other CPU has recently done so.  Also, don't bother
	 * invoking force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */
	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
		rdp->blimit = LONG_MAX;
		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
		    *rdp->nxttail[RCU_DONE_TAIL] != head)
			force_quiescent_state(rsp, 0);
		rdp->n_force_qs_snap = rsp->n_force_qs;
		rdp->qlen_last_fqs_check = rdp->qlen;
	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
		force_quiescent_state(rsp, 1);
	local_irq_restore(flags);
}

/*
 * Queue an RCU-sched callback for invocation after a grace period.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Queue an RCU callback for invocation after a quicker grace period.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_state);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
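
/*
 * Usage sketch for the two entry points above (illustrative only;
 * struct foo, foo_reclaim(), and fp are hypothetical names, not part
 * of this file).  A caller embeds a struct rcu_head in its own
 * structure and reclaims the enclosing object after a grace period:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	// after unlinking fp from all RCU-protected lists:
 *	call_rcu_sched(&fp->rcu, foo_reclaim);
 *
 * Readers that found fp while in an RCU-sched read-side critical
 * section (e.g. with preemption disabled) are guaranteed to see it
 * intact until the grace period elapses and foo_reclaim() runs.
 */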

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
	rdp->n_rcu_pending++;

	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rsp, rdp);

	/* Is the RCU core waiting for a quiescent state from this CPU? */
	if (rdp->qs_pending) {
		rdp->n_rp_qs_pending++;
		return 1;
	}

	/* Does this CPU have callbacks ready to invoke? */
	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
		rdp->n_rp_cb_ready++;
		return 1;
	}

	/* Has RCU gone idle with this CPU needing another grace period? */
	if (cpu_needs_another_gp(rsp, rdp)) {
		rdp->n_rp_cpu_needs_gp++;
		return 1;
	}

	/* Has another RCU grace period completed? */
	if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */
		rdp->n_rp_gp_completed++;
		return 1;
	}

	/* Has a new RCU grace period started? */
	if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */
		rdp->n_rp_gp_started++;
		return 1;
	}

	/* Has an RCU GP gone long enough to send resched IPIs &c? */
	if (rcu_gp_in_progress(rsp) &&
	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
		rdp->n_rp_need_fqs++;
		return 1;
	}

	/* nothing to do */
	rdp->n_rp_need_nothing++;
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
static int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_pending(cpu);
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	/* RCU callbacks either ready or pending? */
	return per_cpu(rcu_sched_data, cpu).nxtlist ||
	       per_cpu(rcu_bh_data, cpu).nxtlist ||
	       rcu_preempt_needs_cpu(cpu);
}

static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;

static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
	int cpu = smp_processor_id();
	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
	void (*call_rcu_func)(struct rcu_head *head,
			      void (*func)(struct rcu_head *head));

	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu_func = type;
	call_rcu_func(head, rcu_barrier_callback);
}

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
static void _rcu_barrier(struct rcu_state *rsp,
			 void (*call_rcu_func)(struct rcu_head *head,
					       void (*func)(struct rcu_head *head)))
{
	BUG_ON(in_interrupt());
	/* Take mutex to serialize concurrent rcu_barrier() requests. */
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	/*
	 * Initialize rcu_barrier_cpu_count to 1, then invoke
	 * rcu_barrier_func() on each CPU, so that each CPU also has
	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
	 * might complete its grace period before all of the other CPUs
	 * did their increment, causing this function to return too
	 * early.
	 */
	atomic_set(&rcu_barrier_cpu_count, 1);
	preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */
	rcu_adopt_orphan_cbs(rsp);
	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
	preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
}

/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
	_rcu_barrier(&rcu_sched_state, call_rcu_sched);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
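
/*
 * Usage note: the barriers above wait only for callbacks that are
 * already queued, so a module posting callbacks must first stop
 * queueing new ones and only then wait.  A hypothetical sketch
 * (foo_exit(), foo_stop_queueing_callbacks(), and unregister_foo()
 * are made-up names):
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_queueing_callbacks();
 *		rcu_barrier_sched();	// all foo callbacks now done
 *		unregister_foo();
 *	}
 *
 * Without the barrier, a callback could run after the module's text
 * has been unloaded.
 */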

/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */
static void __init
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	int i;
	struct rcu_data *rdp = rsp->rda[cpu];
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	spin_lock_irqsave(&rnp->lock, flags);
	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
	rdp->qlen = 0;
#ifdef CONFIG_NO_HZ
	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
#endif /* #ifdef CONFIG_NO_HZ */
	rdp->cpu = cpu;
	spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
static void __cpuinit
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
{
	unsigned long flags;
	long lastcomp;
	unsigned long mask;
	struct rcu_data *rdp = rsp->rda[cpu];
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	spin_lock_irqsave(&rnp->lock, flags);
	lastcomp = rsp->completed;
	rdp->completed = lastcomp;
	rdp->gpnum = lastcomp;
	rdp->passed_quiesc = 0;  /* We could be racing with new GP, */
	rdp->qs_pending = 1;	 /*  so set up to respond to current GP. */
	rdp->beenonline = 1;	 /* We have now been online. */
	rdp->preemptable = preemptable;
	rdp->passed_quiesc_completed = lastcomp - 1;
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rsp->n_force_qs;
	rdp->blimit = blimit;
	spin_unlock(&rnp->lock);		/* irqs remain disabled. */

	/*
	 * A new grace period might start here.  If so, we won't be part
	 * of it, but that is OK, as we are currently in a quiescent state.
	 */

	/* Exclude any attempts to start a new GP on large systems. */
	spin_lock(&rsp->onofflock);		/* irqs already disabled. */

	/* Add CPU to rcu_node bitmasks. */
	rnp = rdp->mynode;
	mask = rdp->grpmask;
	do {
		/* Exclude any attempts to start a new GP on small systems. */
		spin_lock(&rnp->lock);		/* irqs already disabled. */
		rnp->qsmaskinit |= mask;
		mask = rnp->grpmask;
		spin_unlock(&rnp->lock);	/* irqs already disabled. */
		rnp = rnp->parent;
	} while (rnp != NULL && !(rnp->qsmaskinit & mask));

	spin_unlock_irqrestore(&rsp->onofflock, flags);
}

static void __cpuinit rcu_online_cpu(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
	rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
	rcu_preempt_init_percpu_data(cpu);
}

/*
 * Handle CPU online/offline notification events.
 */
int __cpuinit rcu_cpu_notify(struct notifier_block *self,
			     unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_online_cpu(cpu);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/*
		 * preempt_disable() in _rcu_barrier() prevents stop_machine(),
		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
		 * returns, all online cpus have queued rcu_barrier_func().
		 * The dying CPU clears its cpu_online_mask bit and
		 * moves all of its RCU callbacks to ->orphan_cbs_list
		 * in the context of stop_machine(), so subsequent calls
		 * to _rcu_barrier() will adopt these callbacks and only
		 * then queue rcu_barrier_func() on all remaining CPUs.
		 */
		rcu_send_cbs_to_orphanage(&rcu_bh_state);
		rcu_send_cbs_to_orphanage(&rcu_sched_state);
		rcu_preempt_send_cbs_to_orphanage();
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

/*
 * Compute the per-rcu_node fanout ("levelspread"), either using the
 * exact fanout specified or balancing the tree, depending on
 * CONFIG_RCU_FANOUT_EXACT.
 */
#ifdef CONFIG_RCU_FANOUT_EXACT
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
	int i;

	for (i = NUM_RCU_LVLS - 1; i >= 0; i--)
		rsp->levelspread[i] = CONFIG_RCU_FANOUT;
}
#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
	int ccur;
	int cprv;
	int i;

	cprv = NR_CPUS;
	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		ccur = rsp->levelcnt[i];
		rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
		cprv = ccur;
	}
}
#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
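
/*
 * Worked example of the balanced computation above (illustrative
 * configuration: NR_CPUS == 1024 with a two-level tree of one root
 * over 16 leaf nodes).  The loop runs bottom-up:
 *
 *	i == 1 (leaves): cprv = 1024, ccur = 16 -> levelspread[1] = 64
 *	i == 0 (root):   cprv = 16,   ccur = 1  -> levelspread[0] = 16
 *
 * so each leaf covers 64 CPUs while the root fans out to the 16 leaves;
 * the "+ ccur - 1" makes the integer division round up so that every
 * CPU is covered even when the counts do not divide evenly.
 */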

/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
static void __init rcu_init_one(struct rcu_state *rsp)
{
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < NUM_RCU_LVLS; i++)
		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
	rcu_init_levelspread(rsp);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		cpustride *= rsp->levelspread[i];
		rnp = rsp->level[i];
		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
			if (rnp != rcu_get_root(rsp))
				spin_lock_init(&rnp->lock);
			rnp->gpnum = 0;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= NR_CPUS)
				rnp->grphi = NR_CPUS - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % rsp->levelspread[i - 1];
				rnp->grpmask = 1UL << rnp->grpnum;
				rnp->parent = rsp->level[i - 1] +
					      j / rsp->levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
			INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
		}
	}
	spin_lock_init(&rcu_get_root(rsp)->lock);
}

/*
 * Helper macro for __rcu_init().  To be used nowhere else!
 * Assigns leaf node pointers as well as each CPU's rcu_data structure
 * to the corresponding flavor of RCU.
 */
#define RCU_INIT_FLAVOR(rsp, rcu_data) \
do { \
	int i; \
	int j; \
	struct rcu_node *rnp; \
	\
	rcu_init_one(rsp); \
	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
	j = 0; \
	for_each_possible_cpu(i) { \
		if (i > rnp[j].grphi) \
			j++; \
		per_cpu(rcu_data, i).mynode = &rnp[j]; \
		(rsp)->rda[i] = &per_cpu(rcu_data, i); \
		rcu_boot_init_percpu_data(i, rsp); \
	} \
} while (0)

void __init __rcu_init(void)
{
	rcu_bootup_announce();
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
	RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
	RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
	__rcu_init_preempt();
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}

#include "rcutree_plugin.h"