/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
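
/*
 * Illustration (not kernel code): the four helpers above drive a
 * compressed sequence counter whose two low-order bits say whether a
 * grace period is in progress and whose upper bits count grace periods.
 * The stand-alone model below sketches the arithmetic behind
 * rcu_seq_snap()/rcu_seq_done() from kernel/rcu/rcu.h; the seq_*
 * names are hypothetical, and the kernel additionally uses wrap-safe
 * comparisons (ULONG_CMP_GE()) rather than this plain >=.
 */
#if 0
#include <stdio.h>

#define SEQ_STATE_MASK	0x3UL	/* low bits nonzero: GP in progress */

static unsigned long seq;	/* models rcu_state.expedited_sequence */

static void seq_start(void) { seq++; }			/* GP begins */
static void seq_end(void) { seq = (seq | SEQ_STATE_MASK) + 1; } /* GP ends */

/* Earliest counter value proving a full GP elapsed after this call. */
static unsigned long seq_snap(void)
{
	return (seq + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

static int seq_done(unsigned long s) { return seq >= s; }

int main(void)
{
	unsigned long s;

	seq_start();			/* a GP is already running... */
	s = seq_snap();			/* ...so the snapshot skips past it */
	seq_end();
	printf("one GP:  done=%d\n", seq_done(s));	/* 0: not enough */
	seq_start();
	seq_end();
	printf("two GPs: done=%d\n", seq_done(s));	/* 1: full GP elapsed */
	return 0;
}
#endif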

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last GP, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs
 * and tasks covered by the specified rcu_node structure have done their
 * bit for the current expedited grace period.  Works only for
 * preemptible RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);

	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, and will acquire and release the
 * lock itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_preempt_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}
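
/*
 * Illustration (not kernel code): how a quiescent-state report
 * propagates.  The stand-alone sketch below models the loop in
 * __rcu_report_exp_rnp() on a hypothetical two-leaf tree, with no
 * locking and no blocked tasks: clearing the last bit in a node clears
 * that node's bit in its parent, and emptying the root ends the
 * expedited grace period.
 */
#if 0
#include <stdio.h>

struct node {
	unsigned long mask;	/* models rcu_node ->expmask */
	unsigned long grpmask;	/* this node's bit within its parent */
	struct node *parent;
};

static void report_up(struct node *np, unsigned long mask)
{
	while (np) {
		np->mask &= ~mask;
		if (np->mask)
			return;		/* siblings still pending */
		if (!np->parent) {
			printf("expedited GP complete\n");
			return;		/* root empty: wake up waiters */
		}
		mask = np->grpmask;	/* clear our bit one level up */
		np = np->parent;
	}
}

int main(void)
{
	struct node root  = { .mask = 0x3 };
	struct node leaf0 = { .mask = 0x3, .grpmask = 0x1, .parent = &root };
	struct node leaf1 = { .mask = 0x1, .grpmask = 0x2, .parent = &root };

	report_up(&leaf1, 0x1);	/* leaf1 empties, clears bit 1 of root */
	report_up(&leaf0, 0x1);	/* leaf0 still waits on one more CPU */
	report_up(&leaf0, 0x2);	/* leaf0 empties, root empties: done */
	return 0;
}
#endif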

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->deferred_qs, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for it. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
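
/*
 * Illustration (not kernel code): exp_funnel_lock() compares sequence
 * numbers with ULONG_CMP_LT()/ULONG_CMP_GE() from include/linux/rcupdate.h
 * rather than with plain < or >=, so the comparisons remain correct when
 * the counter wraps around.  A stand-alone model:
 */
#if 0
#include <limits.h>
#include <stdio.h>

#define CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))	/* as ULONG_CMP_LT() */

int main(void)
{
	unsigned long snap = ULONG_MAX - 4;	/* taken just before wrap */
	unsigned long now = 8;			/* counter has since wrapped */

	printf("naive snap < now: %d\n", snap < now);		/* 0: wrong */
	printf("wrap-safe:        %d\n", CMP_LT(snap, now));	/* 1: right */
	return 0;
}
#endif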

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		rnp->exp_tasks = rnp->blkd_tasks.next;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

		if (!(mask_ofl_ipi & mask))
			continue;
retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		if (!ret) {
			mask_ofl_ipi &= ~mask;
			continue;
		}
		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_uninterruptible(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we can ignore it. */
		if (!(rnp->expmask & mask))
			mask_ofl_ipi &= ~mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	mask_ofl_test |= mask_ofl_ipi;
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}
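
/*
 * Illustration (not kernel code): the mask arithmetic above in
 * miniature.  Of the CPUs in ->expmask, those that are offline or in an
 * extended quiescent state (and, in the real code, the current CPU) can
 * be reported immediately; the rest must be IPIed.  All values below
 * are hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long expmask = 0xf;	/* CPUs the expedited GP waits for */
	unsigned long online  = 0x7;	/* models ->qsmaskinitnext */
	unsigned long idle    = 0x4;	/* CPUs whose snapshot shows an EQS */
	unsigned long mask_ofl_test = expmask & (~online | idle);
	unsigned long mask_ofl_ipi  = expmask & ~mask_ofl_test;

	/* CPUs 2 and 3 get immediate QS reports; CPUs 0 and 1 get IPIs. */
	printf("immediate: %#lx  IPI: %#lx\n", mask_ofl_test, mask_ofl_ipi);
	return 0;
}
#endif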

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait on.
 */
static void sync_rcu_exp_select_cpus(void)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Nothing to wait for on this leaf. */
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No workqueues yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		cpu = find_first_bit(&rnp->ffmask, BITS_PER_LONG);
		/* If all offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi - rnp->grplo))
			cpu = WORK_CPU_UNBOUND;
		else
			cpu += rnp->grplo;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * stall warnings along the way.
 */
static void synchronize_sched_expedited_wait(void)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();
	int ret;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout_exclusive(
				rcu_state.expedited_wq,
				sync_rcu_preempt_exp_done_unlocked(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
			return;
		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait();
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rcu_state.exp_wake_mutex);

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state and do any
 * required notifications.  Otherwise, directly report the quiescent
 * state.
 */
static void rcu_exp_handler(void *unused)
{
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!t->rcu_read_lock_nesting) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
			rdp->deferred_qs = true;
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask first.
	 */
	if (t->rcu_read_lock_nesting > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			rdp->deferred_qs = true;
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	/*
	 * The final and least likely case is where the interrupted
	 * code was just about to or just finished exiting the RCU-preempt
	 * read-side critical section, and no, we cannot tell which.
	 * So either way, set ->deferred_qs to flag later code that
	 * a quiescent state might be needed.
	 *
	 * If the CPU is fully enabled (or if some buggy RCU-preempt
	 * read-side critical section is being used from idle), just
	 * invoke rcu_preempt_deferred_qs() to immediately report the
	 * quiescent state.  We cannot use rcu_read_unlock_special()
	 * because we are in an interrupt handler, which will cause that
	 * function to take an early exit without doing anything.
	 *
	 * Otherwise, force a context switch after the CPU enables everything.
	 */
	rdp->deferred_qs = true;
	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
		rcu_preempt_deferred_qs(t);
	} else {
		set_tsk_need_resched(t);
		set_preempt_need_resched();
	}
}

/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rnp->exp_tasks)
		return 0;
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU read-side critical section, and if so, it sets a
 * flag that causes the outermost rcu_read_unlock() to report the
 * quiescent state for RCU-preempt or asks the scheduler for help for
 * RCU-sched.  On the other hand, if the CPU is not in an RCU read-side
 * critical section, the IPI handler reports the quiescent state
 * immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than)
 * synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp())
		return;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	/* Take a snapshot of the sequence number.  */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
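
/*
 * Illustration (not kernel code): a minimal sketch of the classic
 * unpublish-wait-reclaim updater pattern using the expedited primitive.
 * The foo type, foo_lock, and gbl_foo below are hypothetical.  As the
 * header comment above advises, batch updates behind a single
 * synchronize_rcu() whenever latency permits.
 */
#if 0
struct foo {
	int a;
};

static DEFINE_SPINLOCK(foo_lock);
static struct foo __rcu *gbl_foo;

static void foo_update(struct foo *new_fp)
{
	struct foo *old_fp;

	spin_lock(&foo_lock);
	old_fp = rcu_dereference_protected(gbl_foo,
					   lockdep_is_held(&foo_lock));
	rcu_assign_pointer(gbl_foo, new_fp);	/* publish the replacement */
	spin_unlock(&foo_lock);
	synchronize_rcu_expedited();	/* wait out pre-existing readers */
	kfree(old_fp);			/* no reader can still see old_fp */
}
#endif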