/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 */

#include <linux/delay.h>
#include <linux/stop_machine.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

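/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */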
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

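/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state; there might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */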
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

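/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */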
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * Queue this task on the blocked-tasks list for the
		 * rcu_node structure corresponding to this CPU.  If the
		 * current grace period is still waiting on this CPU
		 * (and there is at least one task already blocking it),
		 * queue the task just before rnp->gp_tasks so that it
		 * blocks the current grace period; otherwise queue it
		 * at the head of the list, where it blocks only later
		 * grace periods.
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

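/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */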
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold rnp->lock with irqs disabled,
 * and this lock is released before returning.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Only one rcu_node structure in the tree, so don't
		 * try to report up to its nonexistent parent!
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs remain disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance to the next entry in the blocked-tasks list, returning NULL if
 * at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

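/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the
 * RCU read-side critical section.
 */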
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.  Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state before
		 * doing so.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else
			raw_spin_unlock_irqrestore(&rnp->lock, flags);

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

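/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section.  While that special processing
 * runs, ->rcu_read_lock_nesting is temporarily set to INT_MIN so that
 * nested or interrupt-level rcu_read_lock()/rcu_read_unlock() pairs,
 * and the context-switch path, can tell that the outermost unlock is
 * in progress.
 */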
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;
	else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load. */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign. */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each and returning the count.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * stall-warning time a half ULONG_MAX into the future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for the case in which all CPUs covered by
 * the specified rcu_node have gone offline.  Move the blocked tasks up
 * to the root rcu_node.  The reason for not just moving them to the
 * immediate parent is to remove the need for rcu_read_unlock_special()
 * to make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns zero if no moved tasks were blocking a grace period, or a set
 * of RCU_OFL_TASKS_*_GP bits indicating which grace periods (normal
 * and/or expedited) the moved tasks were blocking.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to the root rcu_node.  Don't try to get fancy
	 * for this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root
	 * node's ->gp_tasks and ->exp_tasks pointers to those of this
	 * node's, if non-NULL.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled. */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled. */
	}

#ifdef CONFIG_RCU_BOOST
	/* In case root is being boosted and leaf was not. */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled. */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled. */
#endif

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

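/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */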
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);

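/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */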
void synchronize_rcu(void)
{
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

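/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * iteratively up the tree.
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */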
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
 * CPU-hotplug operations.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks))
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

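/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.
 */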
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire the lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* Force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from the dying CPU to an online CPU.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
	rcu_send_cbs_to_online(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no
 * preemptible RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/* Because preemptible RCU does not exist, no quiescent-state checking. */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/* Because preemptible RCU does not exist, there are never blocked readers. */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/* Because preemptible RCU does not exist, no tasks can be stalled. */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/* Because preemptible RCU does not exist, no tasks can be stalled. */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/* Because preemptible RCU does not exist, no stall warnings to suppress. */
static void rcu_preempt_stall_reset(void)
{
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/* Because preemptible RCU does not exist, no CPU-offline processing. */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/* Because preemptible RCU does not exist, it has no callbacks to check. */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/* Because preemptible RCU does not exist, it has no callbacks to process. */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/* Because preemptible RCU does not exist, it never has any work to do. */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/* Because preemptible RCU does not exist, it never needs any CPU. */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/* Because preemptible RCU does not exist, no per-CPU data to initialize. */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/* Because there is no preemptible RCU, there are no callbacks to move. */
static void rcu_preempt_send_cbs_to_online(void)
{
}

/* Because preemptible RCU does not exist, it need not be initialized. */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

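/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */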
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Timer handler to initiate waking up of boost kthreads that have yielded
 * the CPU due to excessive numbers of tasks to boost.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
	invoke_rcu_node_kthread((struct rcu_node *)arg);
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			trace_rcu_utilization("End boost kthread@rcu_yield");
			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task))
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm)
{
	struct task_struct *t;

	t = rnp->boost_kthread_task;
	if (t != NULL)
		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index)
{
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;
	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop the CPU's kthread when the CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
	struct task_struct *t;

	/* Stop the CPU's kthread. */
	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t != NULL) {
		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
		kthread_stop(t);
	}
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}

/*
 * Wake up the specified per-rcu_node-structure kthread.
 * Because the per-rcu_node kthreads are immortal, we don't need
 * to worry about their existence.
 */
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
	struct task_struct *t;

	t = rnp->node_kthread_task;
	if (t != NULL)
		wake_up_process(t);
}

/*
 * Set the specified CPU's kthread to run RT or not, as specified by
 * the to_rt argument.  The CPU-hotplug locks are held, so the task
 * is not going away.
 */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
	int policy;
	struct sched_param sp;
	struct task_struct *t;

	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t == NULL)
		return;
	if (to_rt) {
		policy = SCHED_FIFO;
		sp.sched_priority = RCU_KTHREAD_PRIO;
	} else {
		policy = SCHED_NORMAL;
		sp.sched_priority = 0;
	}
	sched_setscheduler_nocheck(t, policy, &sp);
}

/*
 * Timer handler to initiate the waking up of per-CPU kthreads that
 * have yielded the CPU due to excess numbers of RCU callbacks.
 * We wake up the per-rcu_node kthread, which in turn will wake up
 * the booster kthread.
 */
static void rcu_cpu_kthread_timer(unsigned long arg)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
	struct rcu_node *rnp = rdp->mynode;

	atomic_or(rdp->grpmask, &rnp->wakemask);
	invoke_rcu_node_kthread(rnp);
}

/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted.  Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
	struct sched_param sp;
	struct timer_list yield_timer;
	int prio = current->rt_priority;

	setup_timer_on_stack(&yield_timer, f, arg);
	mod_timer(&yield_timer, jiffies + 2);
	sp.sched_priority = 0;
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
	set_user_nice(current, 19);
	schedule();
	set_user_nice(current, 0);
	sp.sched_priority = prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
	del_timer(&yield_timer);
}

/*
 * Handle cases in which the rcu_cpu_kthread() ends up on a CPU other
 * than the one it was intended for (for example, because that CPU
 * went offline).  If the kthread should stop, return 1; otherwise
 * rebind it to its intended CPU and return 0.
 *
 * Called with bh disabled; may momentarily enable bh while waiting
 * to be migrated back to the intended CPU.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
	while (cpu_is_offline(cpu) ||
	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
	       smp_processor_id() != cpu) {
		if (kthread_should_stop())
			return 1;
		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
		local_bh_enable();
		schedule_timeout_uninterruptible(1);
		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
		local_bh_disable();
	}
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	return 0;
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks on behalf of the
 * CPU to which it is bound.
 */
static int rcu_cpu_kthread(void *arg)
{
	int cpu = (int)(long)arg;
	unsigned long flags;
	int spincnt = 0;
	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
	char work;
	char *workp = &per_cpu(rcu_cpu_has_work, cpu);

	trace_rcu_utilization("Start CPU kthread@init");
	for (;;) {
		*statusp = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End CPU kthread@rcu_wait");
		rcu_wait(*workp != 0 || kthread_should_stop());
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		if (rcu_cpu_kthread_should_stop(cpu)) {
			local_bh_enable();
			break;
		}
		*statusp = RCU_KTHREAD_RUNNING;
		per_cpu(rcu_cpu_kthread_loops, cpu)++;
		local_irq_save(flags);
		work = *workp;
		*workp = 0;
		local_irq_restore(flags);
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp != 0)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			*statusp = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End CPU kthread@rcu_yield");
			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
			trace_rcu_utilization("Start CPU kthread@rcu_yield");
			spincnt = 0;
		}
	}
	*statusp = RCU_KTHREAD_STOPPED;
	trace_rcu_utilization("End CPU kthread@term");
	return 0;
}

/*
 * Spawn a per-CPU kthread, setting up priority and binding it to its
 * CPU.  Because the CPU-hotplug lock is held, no other CPU will be
 * attempting to manipulate this kthread.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  Requests to wake up kthreads for offline CPUs
 * are ignored.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		mask = atomic_xchg(&rnp->wakemask, 0);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}

/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can occur, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun/%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifndef CONFIG_SMP

void synchronize_sched_expedited(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#else /* #ifndef CONFIG_SMP */

static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);

static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when control reaches this
	 * point, so the following smp_mb() is not strictly necessary.
	 * Do it anyway for documentation and for robustness against
	 * future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
	return 0;
}

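/*
 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so it is not recommended for common-case code.
 *
 * This implementation can be thought of as an application of ticket
 * locking to expedited grace periods, with sync_sched_expedited_started
 * taking the role of the ticket and sync_sched_expedited_done taking the
 * role of the "now serving" counter: a successful try_stop_cpus() forces
 * a context switch on every online CPU, after which the done counter is
 * advanced so that callers that started earlier can piggyback on that
 * grace period instead of forcing their own.
 */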
void synchronize_sched_expedited(void)
{
	int firstsnap, s, snap, trycount = 0;

	/* Note that atomic_inc_return() implies full memory barrier. */
	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
	get_online_cpus();

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each online CPU.
	 */
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();

		/* No joy, try again later.  Or just synchronize_sched(). */
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}

		/* Check to see if someone else did our work for us. */
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period.  We retry
		 * after they started, so our grace period works for them,
		 * and they started after our first try, so their grace
		 * period works for us.
		 */
		get_online_cpus();
		snap = atomic_read(&sync_sched_expedited_started);
		smp_mb(); /* ensure read is before try_stop_cpus(). */
	}

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period.  Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did already did their update.
	 */
	do {
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			break;
		}
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#endif /* #else #ifndef CONFIG_SMP */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we have no idea what callbacks will be queued in the future,
 * it is safe to assume that there will be some, so the CPU cannot enter
 * dyntick-idle mode if it has any callbacks.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because
 * CONFIG_RCU_FAST_NO_HZ=n, is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by the state
 * machine implemented by rcu_prepare_for_idle() below.
 *
 * The following preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of attempts to satisfy RCU
 *	before giving up and entering a holdoff period.
 * RCU_IDLE_OPT_FLUSHES gives the number of those attempts that may
 *	instead permit dyntick-idle entry with callbacks still queued,
 *	posting an hrtimer to recheck later.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that the hrtimer waits.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */

static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
static ktime_t rcu_idle_gp_wait;

/*
 * Does RCU need the current CPU to stay out of dyntick-idle mode?
 */
int rcu_needs_cpu(int cpu)
{
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu))
		return 0;
	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
}

/*
 * Timer handler used to force the CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 */
static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
{
	trace_rcu_prep_idle("Timer");
	return HRTIMER_NORESTART;
}

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	static int firsttime = 1;
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtp->function = rcu_idle_gp_timer_func;
	if (firsttime) {
		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);

		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
		firsttime = 0;
	}
}

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
}

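/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, do it or schedule a softirq to get it done.  This function
 * is part of the RCU implementation; it is -not- an exported member of
 * the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be
 * permitted to enter dyntick-idle mode.  Each pass either forces one
 * step of progress on the flavors that still have callbacks queued or,
 * after too many failed attempts, starts a one-jiffy holdoff period
 * during which the CPU is kept out of dyntick-idle mode.
 */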
static void rcu_prepare_for_idle(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		local_irq_restore(flags);
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu)) {
		/* Can we go dyntick-idle despite still having callbacks? */
		trace_rcu_prep_idle("Dyntick with callbacks");
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
			      rcu_idle_gp_wait, HRTIMER_MODE_REL);
		return; /* Nothing more to do immediately. */
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time for the holdoff. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		local_irq_restore(flags);
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
		local_irq_save(flags);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		local_irq_save(flags);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		local_irq_save(flags);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So, nudge the CPU out of dyntick-idle via the softirq.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		local_irq_restore(flags);
		trace_rcu_prep_idle("Callbacks drained");
	}
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */