/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 */

#include <linux/delay.h>
#include <linux/stop_machine.h>

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anomalies, such as non-default Kconfig settings that
 * affect RCU behavior.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
        printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
        printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
               CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
        printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
        printk(KERN_INFO
               "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
        printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
        printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#ifndef CONFIG_RCU_CPU_STALL_DETECTOR
        printk(KERN_INFO
               "\tRCU-based detection of stalled CPUs is disabled.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
        printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
        printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
        return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
        force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in an RCU read-side critical section.  There might be any number
 * of tasks blocked while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

        rdp->passed_quiesc_completed = rdp->gpnum - 1;
        barrier();
        rdp->passed_quiesc = 1;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the corresponding blocked_tasks[] entry empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
        struct task_struct *t = current;
        unsigned long flags;
        int phase;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        if (t->rcu_read_lock_nesting &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

                /* Possibly blocking in an RCU read-side critical section. */
                rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                t->rcu_blocked_node = rnp;

                /*
                 * If this CPU has already checked in, then this task
                 * will hold up the next grace period rather than the
                 * current grace period.  Queue the task accordingly.
                 * If the task is queued for the current grace period
                 * (i.e., this CPU has not yet passed through a quiescent
                 * state for the current grace period), then as long as
                 * that task remains queued, the current grace period
                 * cannot end.
                 *
                 * But first, note that the current CPU must still be
                 * on line!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
                list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally.  Either way, we can now note a quiescent state
         * for this CPU.  Again, if we were in an RCU read-side critical
         * section blocking the current grace period, the fact that the
         * task has been enqueued means that we continue to block the
         * current grace period.
         */
        local_irq_save(flags);
        rcu_preempt_qs(cpu);
        local_irq_restore(flags);
}

/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
        int phase = rnp->gpnum & 0x1;

        return !list_empty(&rnp->blocked_tasks[phase]) ||
               !list_empty(&rnp->blocked_tasks[phase + 2]);
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;
        struct rcu_node *rnp_p;

        if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;  /* Still need more quiescent states! */
        }

        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
                 * Either there is only one rcu_node in the tree,
                 * or tasks were kicked up to root rcu_node due to
                 * CPUs going offline.
                 */
                rcu_report_qs_rsp(&rcu_preempt_state, flags);
                return;
        }

        /* Report up the rest of the hierarchy. */
        mask = rnp->grpmask;
        raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
        raw_spin_lock(&rnp_p->lock);    /* irqs remain disabled. */
        rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the
 * RCU read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        int empty_exp;
        unsigned long flags;
        struct rcu_node *rnp;
        int special;

        /* NMI handlers cannot block and cannot safely manipulate state. */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit its critical
         * section, report the fact that it has exited.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS) {
                rcu_preempt_qs(smp_processor_id());
        }

        /* Hardware IRQ handlers cannot block. */
        if (in_irq()) {
                local_irq_restore(flags);
                return;
        }

        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the list it blocked on.  The
                 * task can migrate while we acquire the lock, but at
                 * most once, so at most two passes through the loop.
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
                        raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
                        if (rnp == t->rcu_blocked_node)
                                break;
                        raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
                }
                empty = !rcu_preempted_readers(rnp);
                empty_exp = !rcu_preempted_readers_exp(rnp);
                smp_mb();  /* ensure expedited fastpath sees end of RCU c-s. */
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;

                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent
                 * state.  Note that rcu_report_unblock_qs_rnp() releases
                 * rnp->lock.
                 */
                if (empty)
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                else
                        rcu_report_unblock_qs_rnp(rnp, flags);

                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
                if (!empty_exp && !rcu_preempted_readers_exp(rnp))
                        rcu_report_exp_rnp(&rcu_preempt_state, rnp);
        } else {
                local_irq_restore(flags);
        }
}

/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
        --t->rcu_read_lock_nesting;
        barrier();  /* decrement before load of ->rcu_read_unlock_special */
        if (t->rcu_read_lock_nesting == 0 &&
            unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
        WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
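
/*
 * Illustrative reader-side usage sketch (not part of this file): callers
 * normally use the rcu_read_lock()/rcu_read_unlock() wrappers together
 * with rcu_dereference().  The structure and pointer names below are
 * hypothetical, chosen only for the example.
 *
 *      struct foo { int a; struct rcu_head rh; };
 *      struct foo __rcu *gp;           // RCU-protected global pointer
 *
 *      int read_a(void)
 *      {
 *              struct foo *p;
 *              int a = -1;
 *
 *              rcu_read_lock();                // begin read-side critical section
 *              p = rcu_dereference(gp);        // fetch with dependency ordering
 *              if (p != NULL)
 *                      a = p->a;
 *              rcu_read_unlock();              // may invoke rcu_read_unlock_special()
 *              return a;
 *      }
 */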

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct list_head *lp;
        int phase;
        struct task_struct *t;

        if (rcu_preempted_readers(rnp)) {
                raw_spin_lock_irqsave(&rnp->lock, flags);
                phase = rnp->gpnum & 0x1;
                lp = &rnp->blocked_tasks[phase];
                list_for_each_entry(t, lp, rcu_node_entry)
                        sched_show_task(t);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
        struct rcu_node *rnp = rcu_get_root(rsp);

        rcu_print_detail_task_stall_rnp(rnp);
        rcu_for_each_leaf_node(rsp, rnp)
                rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
        struct list_head *lp;
        int phase;
        struct task_struct *t;

        if (rcu_preempted_readers(rnp)) {
                phase = rnp->gpnum & 0x1;
                lp = &rnp->blocked_tasks[phase];
                list_for_each_entry(t, lp, rcu_node_entry)
                        printk(" P%d", t->pid);
        }
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * stall-warning deadline far into the future.
 */
static void rcu_preempt_stall_reset(void)
{
        rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->qsmask
 * must be zero.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rcu_preempted_readers(rnp));
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for the case in which all CPUs covered by
 * the specified rcu_node have gone offline.  Move the blocked tasks up
 * to the root rcu_node.  The reason for not just moving them to the
 * immediate parent is to remove the need for rcu_read_unlock_special()
 * to make more than two attempts to acquire the target rcu_node's ->lock.
 *
 * Returns a bitmask indicating whether tasks on this rcu_node were
 * blocking the current normal and/or expedited grace periods.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        int i;
        struct list_head *lp;
        struct list_head *lp_root;
        int retval = 0;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        struct task_struct *tp;

        if (rnp == rnp_root) {
                WARN_ONCE(1, "Last CPU thought to be offlined?");
                return 0;  /* Shouldn't happen: at least one CPU online. */
        }
        WARN_ON_ONCE(rnp != rdp->mynode &&
                     (!list_empty(&rnp->blocked_tasks[0]) ||
                      !list_empty(&rnp->blocked_tasks[1]) ||
                      !list_empty(&rnp->blocked_tasks[2]) ||
                      !list_empty(&rnp->blocked_tasks[3])));

        /*
         * Note whether tasks here block the current normal and/or
         * expedited grace period, then move each blocked_tasks[] list
         * to the corresponding list on the root rcu_node, element by
         * element.
         */
        if (rcu_preempted_readers(rnp))
                retval |= RCU_OFL_TASKS_NORM_GP;
        if (rcu_preempted_readers_exp(rnp))
                retval |= RCU_OFL_TASKS_EXP_GP;
        for (i = 0; i < 4; i++) {
                lp = &rnp->blocked_tasks[i];
                lp_root = &rnp_root->blocked_tasks[i];
                while (!list_empty(lp)) {
                        tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
                        raw_spin_lock(&rnp_root->lock);  /* irqs already disabled. */
                        list_del(&tp->rcu_node_entry);
                        tp->rcu_blocked_node = rnp_root;
                        list_add(&tp->rcu_node_entry, lp_root);
                        raw_spin_unlock(&rnp_root->lock);  /* irqs remain disabled. */
                }
        }
        return retval;
}

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
        __rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                rcu_preempt_qs(cpu);
                return;
        }
        if (per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
        __rcu_process_callbacks(&rcu_preempt_state,
                                &__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        if (!rcu_scheduler_active)
                return;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
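
/*
 * Illustrative updater-side usage sketch (not part of this file): an
 * updater typically unlinks an RCU-protected structure, waits for a
 * grace period (or uses call_rcu() to defer the free), and only then
 * reclaims the memory.  The structure and variable names below are
 * hypothetical, chosen only for the example.
 *
 *      struct foo { int a; struct rcu_head rh; };
 *      struct foo __rcu *gp;
 *      DEFINE_SPINLOCK(gp_lock);
 *
 *      void update_a(struct foo *newp, int a)
 *      {
 *              struct foo *oldp;
 *
 *              newp->a = a;
 *              spin_lock(&gp_lock);
 *              oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *              rcu_assign_pointer(gp, newp);   // publish the new version
 *              spin_unlock(&gp_lock);
 *              synchronize_rcu();              // wait for pre-existing readers
 *              kfree(oldp);                    // now safe to free the old version
 *      }
 */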

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * The caller must hold the corresponding rcu_node's ->lock.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
        return !list_empty(&rnp->blocked_tasks[2]) ||
               !list_empty(&rnp->blocked_tasks[3]);
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        return !rcu_preempted_readers_exp(rnp) &&
               ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from an RCU read-side critical section for the last
 * task that queued itself during or before the current expedited
 * preemptible-RCU grace period.  This event is reported either to the
 * rcu_node structure on which the task was queued or to one of that
 * rcu_node structure's ancestors, iterating up the tree.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
        unsigned long flags;
        unsigned long mask;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp))
                        break;
                if (rnp->parent == NULL) {
                        wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
                rnp = rnp->parent;
                raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
                rnp->expmask &= ~mask;
        }
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU
 * expedited grace period for the specified rcu_node structure.  If
 * there are no such tasks, report this up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
        int must_wait;

        raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
        list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
        list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
        must_wait = rcu_preempted_readers_exp(rnp);
        raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
        if (!must_wait)
                rcu_report_exp_rnp(rsp, rnp);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks onto
 * the ->blocked_tasks[] lists, move all entries from the first set of
 * ->blocked_tasks[] lists to the second set, and finally wait for this
 * second set to drain.
 */
void synchronize_rcu_expedited(void)
{
        unsigned long flags;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_preempt_state;
        long snap;
        int trycount = 0;

        smp_mb();  /* Caller's modifications seen first by other CPUs. */
        snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
        smp_mb();  /* Above access cannot bleed into critical section. */

        /*
         * Acquire the lock, falling back to synchronize_rcu() if there
         * are too many lock-acquisition failures.  Of course, if someone
         * does the expedited grace period for us, just leave.
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                if (trycount++ < 10)
                        udelay(trycount * num_online_cpus());
                else {
                        synchronize_rcu();
                        return;
                }
                if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                        goto mb_ret;  /* Others did our work for us. */
        }
        if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                goto unlock_mb_ret;  /* Others did our work for us. */

        /* Force all RCU readers onto blocked_tasks[]. */
        synchronize_sched_expedited();

        raw_spin_lock_irqsave(&rsp->onofflock, flags);

        /* Initialize ->expmask for all non-leaf rcu_node structures. */
        rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
                raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
                rnp->expmask = rnp->qsmaskinit;
                raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
        }

        /* Snapshot current state of ->blocked_tasks[] lists. */
        rcu_for_each_leaf_node(rsp, rnp)
                sync_rcu_preempt_exp_init(rsp, rnp);
        if (NUM_RCU_NODES > 1)
                sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

        /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
        rnp = rcu_get_root(rsp);
        wait_event(sync_rcu_preempt_exp_wq,
                   sync_rcu_preempt_exp_done(rnp));

        /* Clean up and exit. */
        smp_mb();  /* ensure expedited GP seen before counter increment. */
        ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
        smp_mb();  /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
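
/*
 * Illustrative usage note (not part of this file): synchronize_rcu_expedited()
 * trades CPU time and cross-CPU disturbance for lower grace-period latency,
 * so it is normally reserved for latency-sensitive slow paths.  The names
 * below are hypothetical, chosen only for the sketch.
 *
 *      void teardown_cfg(struct cfg *oldcfg)
 *      {
 *              rcu_assign_pointer(active_cfg, NULL);
 *              synchronize_rcu_expedited();    // readers drained quickly, at extra cost
 *              kfree(oldcfg);
 *      }
 */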

/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
        return __rcu_pending(&rcu_preempt_state,
                             &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
        rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from the dying CPU to an online CPU.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
        rcu_send_cbs_to_online(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
        rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings, as
 * debug_check_no_locks_held() already does this if lockdep is enabled.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0)
                return;
        t->rcu_read_lock_nesting = 1;
        rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no
 * preemptible RCU, becomes the same as a quiescent state for RCU-sched.
 */
void rcu_force_quiescent_state(void)
{
        rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to record
 * task blocking at context-switch time.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any
 * preempted RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the
 * current grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * Because preemptible RCU does not exist, map onto rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
        synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
        return;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
        return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
        rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifndef CONFIG_SMP

/*
 * On a single-CPU system, a context switch is a full grace period for
 * RCU-sched, so a simple reschedule suffices.
 */
void synchronize_sched_expedited(void)
{
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#else /* #ifndef CONFIG_SMP */

static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);

static int synchronize_sched_expedited_cpu_stop(void *data)
{
        /*
         * There must be a full memory barrier on each affected CPU
         * between the time that try_stop_cpus() is called and the
         * time that it returns.
         *
         * In the current initial implementation of cpu_stop, the
         * above condition is already met when control reaches this
         * point and the following smp_mb() is not strictly necessary.
         * Do smp_mb() anyway for documentation and robustness against
         * future implementation changes.
         */
        smp_mb();  /* See above comment block. */
        return 0;
}

/*
 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is thus not recommended for any
 * sort of common-case code.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  Failing to observe this
 * restriction will result in deadlock.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with sync_sched_expedited_started and
 * sync_sched_expedited_done taking on the roles of the halves of the
 * ticket-lock word.  Each task atomically increments
 * sync_sched_expedited_started upon entry, snapshotting the old value,
 * then attempts to stop all the CPUs.  If this succeeds, then each CPU
 * has executed a context switch, resulting in an RCU-sched grace
 * period.  We are then done, so we use atomic_cmpxchg() to update
 * sync_sched_expedited_done to match our snapshot -- but only if
 * someone else has not already advanced past our snapshot.
 *
 * On the other hand, if try_stop_cpus() fails, we check the value of
 * sync_sched_expedited_done.  If it has advanced past our initial
 * snapshot, then someone else must have forced a grace period some
 * time after we took our snapshot.  In this case, our work is done
 * for us, and we can simply return.
 */
void synchronize_sched_expedited(void)
{
        int firstsnap, s, snap, trycount = 0;

        /* Note that atomic_inc_return() implies full memory barrier. */
        firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
        get_online_cpus();

        /*
         * Each pass through the following loop attempts to force a
         * context switch on each CPU.
         */
        while (try_stop_cpus(cpu_online_mask,
                             synchronize_sched_expedited_cpu_stop,
                             NULL) == -EAGAIN) {
                put_online_cpus();

                /* No joy, try again later.  Or just synchronize_sched(). */
                if (trycount++ < 10)
                        udelay(trycount * num_online_cpus());
                else {
                        synchronize_sched();
                        return;
                }

                /* Check to see if someone else did our work for us. */
                s = atomic_read(&sync_sched_expedited_done);
                if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
                        smp_mb();  /* ensure test happens before caller kfree */
                        return;
                }

                /*
                 * Refetching sync_sched_expedited_started allows later
                 * callers to piggyback on our grace period.  We subtract
                 * 1 because the atomic_inc_return() above already counted
                 * this attempt.
                 */
                get_online_cpus();
                snap = atomic_read(&sync_sched_expedited_started) - 1;
                smp_mb();  /* ensure read is before try_stop_cpus(). */
        }

        /*
         * Everyone up to our most recent fetch is covered by our grace
         * period.  Update the counter, but only if our work is still
         * relevant -- which it won't be if someone who started later
         * than we did already did their update.
         */
        do {
                s = atomic_read(&sync_sched_expedited_done);
                if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
                        smp_mb();  /* ensure test happens before caller kfree */
                        break;
                }
        } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

        put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#endif /* #else #ifndef CONFIG_SMP */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because CONFIG_RCU_FAST_NO_HZ is disabled, just check whether any
 * RCU flavor still needs this CPU.
 */
int rcu_needs_cpu(int cpu)
{
        return rcu_needs_cpu_quick_check(cpu);
}

/*
 * Because CONFIG_RCU_FAST_NO_HZ is disabled, there is no need to flush
 * remaining callbacks before this CPU enters dyntick-idle mode.
 */
static void rcu_needs_cpu_flush(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#define RCU_NEEDS_CPU_FLUSHES 5
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Attempt to accelerate any current grace periods so that RCU no longer
 * needs this CPU, but only if all other CPUs are already in dynticks-idle
 * mode.  This allows the CPU cores to be powered down immediately, as
 * opposed to after waiting many milliseconds for grace periods to elapse.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do a
 * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
 * The per-CPU rcu_dyntick_drain variable controls the sequencing.
 */
int rcu_needs_cpu(int cpu)
{
        int c = 0;
        int snap;
        int snap_nmi;
        int thatcpu;

        /* Check for being in the holdoff period. */
        if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
                return rcu_needs_cpu_quick_check(cpu);

        /* Don't bother unless we are the last non-dyntick-idle CPU. */
        for_each_online_cpu(thatcpu) {
                if (thatcpu == cpu)
                        continue;
                snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
                snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
                smp_mb();  /* Order sampling of snap with end of grace period. */
                if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
                        per_cpu(rcu_dyntick_drain, cpu) = 0;
                        per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
                        return rcu_needs_cpu_quick_check(cpu);
                }
        }

        /* Check and update the rcu_dyntick_drain sequencing. */
        if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* First time through, initialize the counter. */
                per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* We have hit the limit, so time to give up. */
                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
                return rcu_needs_cpu_quick_check(cpu);
        }

        /* Do one step pushing remaining RCU callbacks through. */
        if (per_cpu(rcu_sched_data, cpu).nxtlist) {
                rcu_sched_qs(cpu);
                force_quiescent_state(&rcu_sched_state, 0);
                c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
        }
        if (per_cpu(rcu_bh_data, cpu).nxtlist) {
                rcu_bh_qs(cpu);
                force_quiescent_state(&rcu_bh_state, 0);
                c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
        }

        /* If RCU callbacks are still pending, RCU still needs this CPU. */
        if (c)
                raise_softirq(RCU_SOFTIRQ);
        return c;
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.
 */
static void rcu_needs_cpu_flush(void)
{
        int cpu = smp_processor_id();
        unsigned long flags;

        if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
                return;
        local_irq_save(flags);
        (void)rcu_needs_cpu(cpu);
        local_irq_restore(flags);
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */