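/*
 * RCU expedited grace periods
 *
 * Machinery behind synchronize_rcu_expedited(): expedited-sequence
 * bookkeeping, funnel locking, per-leaf CPU selection and IPIs,
 * expedited-stall reporting, and waiter wakeups.
 */
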
#include <linux/lockdep.h>

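/*
 * Record the start of an expedited grace period in the sequence counter.
 */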
static void rcu_exp_gp_seq_start(void)
{
        rcu_seq_start(&rcu_state.expedited_sequence);
}

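/*
 * Return the value that the expedited-grace-period counter will take
 * once the current expedited grace period completes.
 */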
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
        return rcu_seq_endval(&rcu_state.expedited_sequence);
}

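/*
 * Record the end of an expedited grace period.
 */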
static void rcu_exp_gp_seq_end(void)
{
        rcu_seq_end(&rcu_state.expedited_sequence);
        smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

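/*
 * Take a snapshot of the expedited-grace-period counter.  A later call
 * to rcu_exp_gp_seq_done() with this value tells whether a full
 * expedited grace period has elapsed since the snapshot.
 */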
static unsigned long rcu_exp_gp_seq_snap(void)
{
        unsigned long s;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        s = rcu_seq_snap(&rcu_state.expedited_sequence);
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
        return s;
}

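/*
 * Return true if the expedited grace period corresponding to the
 * snapshot s has completed.
 */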
static bool rcu_exp_gp_seq_done(unsigned long s)
{
        return rcu_seq_done(&rcu_state.expedited_sequence, s);
}

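/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  These masks are not cleared when CPUs
 * go offline, so the common case is that nothing has changed and the
 * early-exit fastpath is taken.
 */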
static void sync_exp_reset_tree_hotplug(void)
{
        bool done;
        unsigned long flags;
        unsigned long mask;
        unsigned long oldmask;
        int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
        struct rcu_node *rnp;
        struct rcu_node *rnp_up;

        /* If no new CPUs onlined since last time, nothing to do. */
        if (likely(ncpus == rcu_state.ncpus_snap))
                return;
        rcu_state.ncpus_snap = ncpus;

        /*
         * Each pass through the following loop propagates newly onlined
         * CPUs for the current rcu_node structure up the rcu_node tree.
         */
        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }

                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* If was already nonzero, nothing to propagate. */
                if (oldmask)
                        continue;

                /* Propagate the new CPU up the tree. */
                mask = rnp->grpmask;
                rnp_up = rnp->parent;
                done = false;
                while (rnp_up) {
                        raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
                        rnp_up = rnp_up->parent;
                }
        }
}

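/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */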
static void __maybe_unused sync_exp_reset_tree(void)
{
        unsigned long flags;
        struct rcu_node *rnp;

        sync_exp_reset_tree_hotplug();
        rcu_for_each_node_breadth_first(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                rnp->expmask = rnp->expmaskinit;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

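/*
 * Return true if the specified rcu_node structure has no tasks and no
 * CPUs still blocking the current expedited grace period.  Caller must
 * hold the specified rcu_node structure's ->lock.
 */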
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        raw_lockdep_assert_held_rcu_node(rnp);

        return rnp->exp_tasks == NULL &&
               READ_ONCE(rnp->expmask) == 0;
}

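/*
 * Like sync_rcu_preempt_exp_done(), but for callers that do not hold
 * the specified rcu_node structure's ->lock.
 */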
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
        unsigned long flags;
        bool ret;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        ret = sync_rcu_preempt_exp_done(rnp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        return ret;
}

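/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited grace period.
 * This event is reported either to the rcu_node structure on which the
 * task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  Caller must hold the specified rcu_node
 * structure's ->lock, which this function releases.
 */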
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;

        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up(&rcu_state.expedited_wq);
                        }
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
                rnp->expmask &= ~mask;
        }
}

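/*
 * Report expedited quiescent state for the specified rcu_node structure,
 * acquiring its ->lock and then handing off to __rcu_report_exp_rnp(),
 * which releases it.
 */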
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        __rcu_report_exp_rnp(rnp, wake, flags);
}

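/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */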
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        rnp->expmask &= ~mask;
        __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

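/*
 * Report expedited quiescent state for the specified rcu_data (CPU).
 */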
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
        WRITE_ONCE(rdp->deferred_qs, false);
        rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

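/* Common code for work-done checking. */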
static bool sync_exp_work_done(unsigned long s)
{
        if (rcu_exp_gp_seq_done(s)) {
                trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
                /* Ensure test happens before caller kfree(). */
                smp_mb__before_atomic();
                return true;
        }
        return false;
}

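/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true if
 * some other task already completed an expedited grace period that this
 * task can piggy-back on, and with no mutex held.  Otherwise, returns
 * false with rcu_state.exp_mutex held, indicating that the caller must
 * actually do the expedited grace period.
 */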
static bool exp_funnel_lock(unsigned long s)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        struct rcu_node *rnp = rdp->mynode;
        struct rcu_node *rnp_root = rcu_get_root();

        /* Low-contention fastpath. */
        if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
            (rnp == rnp_root ||
             ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
            mutex_trylock(&rcu_state.exp_mutex))
                goto fastpath;

        /*
         * Each pass through the following loop works its way up
         * the rcu_node tree, returning if someone else has already done
         * the needed grace period or sleeping on a wait queue if someone
         * else is already requesting one at this level or above.
         */
        for (; rnp != NULL; rnp = rnp->parent) {
                if (sync_exp_work_done(s))
                        return true;

                /* Work not done, either wait here or go up. */
                spin_lock(&rnp->exp_lock);
                if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

                        /* Someone else doing GP, so wait for it. */
                        spin_unlock(&rnp->exp_lock);
                        trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("wait"));
                        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                                   sync_exp_work_done(s));
                        return true;
                }
                rnp->exp_seq_rq = s; /* Followers can wait on us. */
                spin_unlock(&rnp->exp_lock);
                trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                          rnp->grplo, rnp->grphi, TPS("nxtlvl"));
        }
        mutex_lock(&rcu_state.exp_mutex);
fastpath:
        if (sync_exp_work_done(s)) {
                mutex_unlock(&rcu_state.exp_mutex);
                return true;
        }
        rcu_exp_gp_seq_start();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
        return false;
}

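/*
 * Select the CPUs within the specified leaf rcu_node structure that the
 * upcoming expedited grace period needs to wait for, and IPI those that
 * need it.
 */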
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
        int cpu;
        unsigned long flags;
        smp_call_func_t func;
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        int ret;
        struct rcu_exp_work *rewp =
                container_of(wp, struct rcu_exp_work, rew_work);
        struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

        func = rewp->rew_func;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);

        /* Each pass checks a CPU for identity, offline, and idle. */
        mask_ofl_test = 0;
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
                int snap;

                if (raw_smp_processor_id() == cpu ||
                    !(rnp->qsmaskinitnext & mask)) {
                        mask_ofl_test |= mask;
                } else {
                        snap = rcu_dynticks_snap(rdp);
                        if (rcu_dynticks_in_eqs(snap))
                                mask_ofl_test |= mask;
                        else
                                rdp->exp_dynticks_snap = snap;
                }
        }
        mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

        /*
         * Need to wait for any blocked tasks as well.  Note that
         * additional blocking tasks will also block the expedited GP
         * until such time as the ->expmask bits are cleared.
         */
        if (rcu_preempt_has_tasks(rnp))
                rnp->exp_tasks = rnp->blkd_tasks.next;
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        /* IPI the remaining CPUs for expedited quiescent state. */
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

                if (!(mask_ofl_ipi & mask))
                        continue;
retry_ipi:
                if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
                        mask_ofl_test |= mask;
                        continue;
                }
                ret = smp_call_function_single(cpu, func, NULL, 0);
                if (!ret) {
                        mask_ofl_ipi &= ~mask;
                        continue;
                }
                /* Failed, raced with CPU hotplug operation. */
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if ((rnp->qsmaskinitnext & mask) &&
                    (rnp->expmask & mask)) {
                        /* Online, so delay for a bit and try again. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
                        schedule_timeout_uninterruptible(1);
                        goto retry_ipi;
                }
                /* CPU really is offline, so we can ignore it. */
                if (!(rnp->expmask & mask))
                        mask_ofl_ipi &= ~mask;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
        /* Report quiescent states for those that went offline. */
        mask_ofl_test |= mask_ofl_ipi;
        if (mask_ofl_test)
                rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

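/*
 * Select the nodes that the upcoming expedited grace period needs to wait
 * on, distributing the per-leaf work across workqueues where possible.
 */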
static void sync_rcu_exp_select_cpus(smp_call_func_t func)
{
        int cpu;
        struct rcu_node *rnp;

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
        sync_exp_reset_tree();
        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

        /* Schedule work for each leaf rcu_node structure. */
        rcu_for_each_leaf_node(rnp) {
                rnp->exp_need_flush = false;
                if (!READ_ONCE(rnp->expmask))
                        continue; /* Avoid early boot non-existent wq. */
                rnp->rew.rew_func = func;
                if (!READ_ONCE(rcu_par_gp_wq) ||
                    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
                    rcu_is_last_leaf_node(rnp)) {
                        /* No workqueues yet or last leaf, do direct call. */
                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                        continue;
                }
                INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
                preempt_disable();
                cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
                /* If all offline, queue the work on an unbound CPU. */
                if (unlikely(cpu > rnp->grphi))
                        cpu = WORK_CPU_UNBOUND;
                queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
                preempt_enable();
                rnp->exp_need_flush = true;
        }

        /* Wait for workqueue jobs (if any) to complete. */
        rcu_for_each_leaf_node(rnp)
                if (rnp->exp_need_flush)
                        flush_work(&rnp->rew.rew_work);
}

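/*
 * Wait for the current expedited grace period to complete, emitting
 * expedited-stall warnings if it takes too long.
 */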
static void synchronize_sched_expedited_wait(void)
{
        int cpu;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
        unsigned long mask;
        int ndetected;
        struct rcu_node *rnp;
        struct rcu_node *rnp_root = rcu_get_root();
        int ret;

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
        jiffies_stall = rcu_jiffies_till_stall_check();
        jiffies_start = jiffies;

        for (;;) {
                ret = swait_event_timeout(
                                rcu_state.expedited_wq,
                                sync_rcu_preempt_exp_done_unlocked(rnp_root),
                                jiffies_stall);
                if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
                        return;
                WARN_ON(ret < 0);  /* workqueues should not be signaled. */
                if (rcu_cpu_stall_suppress)
                        continue;
                panic_on_rcu_stall();
                pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                       rcu_state.name);
                ndetected = 0;
                rcu_for_each_leaf_node(rnp) {
                        ndetected += rcu_print_task_exp_stall(rnp);
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                struct rcu_data *rdp;

                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(rnp->expmask & mask))
                                        continue;
                                ndetected++;
                                rdp = per_cpu_ptr(&rcu_data, cpu);
                                pr_cont(" %d-%c%c%c", cpu,
                                        "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
                        }
                }
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rcu_state.expedited_sequence,
                        rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
                if (ndetected) {
                        pr_err("blocking rcu_node structures:");
                        rcu_for_each_node_breadth_first(rnp) {
                                if (rnp == rnp_root)
                                        continue; /* printed unconditionally */
                                if (sync_rcu_preempt_exp_done_unlocked(rnp))
                                        continue;
                                pr_cont(" l=%u:%d-%d:%#lx/%c",
                                        rnp->level, rnp->grplo, rnp->grphi,
                                        rnp->expmask,
                                        ".T"[!!rnp->exp_tasks]);
                        }
                        pr_cont("\n");
                }
                rcu_for_each_leaf_node(rnp) {
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(rnp->expmask & mask))
                                        continue;
                                dump_cpu_task(cpu);
                        }
                }
                jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
        }
}

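/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */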
static void rcu_exp_wait_wake(unsigned long s)
{
        struct rcu_node *rnp;

        synchronize_sched_expedited_wait();
        rcu_exp_gp_seq_end();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

        /*
         * Switch over to wakeup mode, allowing the next GP, but -only- the
         * next GP, to proceed.
         */
        mutex_lock(&rcu_state.exp_wake_mutex);

        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                        spin_lock(&rnp->exp_lock);
                        /* Recheck, avoid hang in case someone just arrived. */
                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
                                rnp->exp_seq_rq = s;
                        spin_unlock(&rnp->exp_lock);
                }
                smp_mb(); /* All above changes before wakeup. */
                wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
        }
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
        mutex_unlock(&rcu_state.exp_wake_mutex);
}

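/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */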
static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
{
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus(func);

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(s);
}

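/*
 * Work-queue handler to drive an expedited grace period forward.
 */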
static void wait_rcu_exp_gp(struct work_struct *wp)
{
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
}

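/*
 * Given a smp_call_function() handler, kick off the specified
 * implementation of expedited grace period.
 */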
static void _synchronize_rcu_expedited(smp_call_func_t func)
{
        struct rcu_data *rdp;
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(call_rcu);
                return;
        }

        /* Take a snapshot of the sequence number. */
        s = rcu_exp_gp_seq_snap();
        if (exp_funnel_lock(s))
                return;  /* Someone else did our work for us. */

        /* Ensure that load happens before action based on it. */
        if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
                /* Direct call during scheduler init and early_initcalls(). */
                rcu_exp_sel_wait_wake(func, s);
        } else {
                /* Marshall arguments and schedule the expedited grace period. */
                rew.rew_func = func;
                rew.rew_s = s;
                INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
                queue_work(rcu_gp_wq, &rew.rew_work);
        }

        /* Wait for expedited grace period to complete. */
        rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        rnp = rcu_get_root();
        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                   sync_exp_work_done(s));
        smp_mb(); /* Workqueue actions happen before return. */

        /* Let the next expedited grace period start. */
        mutex_unlock(&rcu_state.exp_mutex);
}

#ifdef CONFIG_PREEMPT_RCU

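/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the next
 * rcu_read_unlock() record the quiescent state.  Otherwise, report
 * the quiescent state immediately if it is safe to do so from this
 * context, and defer it otherwise.
 */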
static void sync_rcu_exp_handler(void *unused)
{
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
        struct task_struct *t = current;

        /*
         * First, the common case of not being in an RCU read-side
         * critical section.  If also enabled or idle, immediately
         * report the quiescent state, otherwise defer.
         */
        if (!t->rcu_read_lock_nesting) {
                if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
                    rcu_dynticks_curr_cpu_in_eqs()) {
                        rcu_report_exp_rdp(rdp);
                } else {
                        rdp->deferred_qs = true;
                        set_tsk_need_resched(t);
                        set_preempt_need_resched();
                }
                return;
        }

        /*
         * Second, the less-common case of being in an RCU read-side
         * critical section.  In this case we can count on a future
         * rcu_read_unlock(), possibly on some other CPU after a context
         * switch.  Either way, if the expedited grace period is still
         * waiting on this CPU, set ->deferred_qs so that the eventual
         * quiescent state will be reported.  Races may already have
         * reported this quiescent state, hence the ->expmask check.
         */
        if (t->rcu_read_lock_nesting > 0) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmask & rdp->grpmask)
                        rdp->deferred_qs = true;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }

        /*
         * Finally, the least-likely case: the interrupted code was just
         * about to or just finished exiting the RCU-preempt read-side
         * critical section (negative nesting), and we cannot tell which.
         * Either way, set ->deferred_qs to flag that a quiescent state is
         * required.  If it is safe to report it from here, do so via
         * rcu_preempt_deferred_qs(); otherwise force a context switch so
         * that it gets reported soon.
         */
        rdp->deferred_qs = true;
        if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
            WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
                rcu_preempt_deferred_qs(t);
        } else {
                set_tsk_need_resched(t);
                set_preempt_need_resched();
        }
}

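/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */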
static void sync_sched_exp_online_cleanup(int cpu)
{
}

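/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it by IPIing all non-idle
 * online CPUs that the grace period must wait on.  The IPI handler checks
 * whether the CPU is in an RCU read-side critical section; if so it
 * arranges for the outermost rcu_read_unlock() to report the quiescent
 * state, and otherwise it reports the quiescent state immediately.
 *
 * This is unfriendly to real-time workloads, so it is not recommended
 * for common-case code.  Callers that would otherwise invoke it in a
 * loop should batch their updates and use a single synchronize_rcu().
 */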
void synchronize_rcu_expedited(void)
{
        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

        /* Early boot: only the one boot task, so nothing to wait for. */
        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        _synchronize_rcu_expedited(sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#else /* #ifdef CONFIG_PREEMPT_RCU */

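/* Invoked on each online non-idle CPU for expedited quiescent state. */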
static void sync_sched_exp_handler(void *unused)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        rdp = this_cpu_ptr(&rcu_data);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
                return;
        if (rcu_is_cpu_rrupt_from_idle()) {
                rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
                return;
        }
        __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
        /* Store .exp before .rcu_urgent_qs. */
        smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
        set_tsk_need_resched(current);
        set_preempt_need_resched();
}

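/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */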
static void sync_sched_exp_online_cleanup(int cpu)
{
        struct rcu_data *rdp;
        int ret;
        struct rcu_node *rnp;

        rdp = per_cpu_ptr(&rcu_data, cpu);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
                return;
        ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
        WARN_ON_ONCE(ret);
}

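/*
 * Because a context switch is a grace period for !PREEMPT RCU, any
 * blocking grace-period wait automatically implies a grace period if
 * there is only one CPU online.  The online-CPU check is done with
 * preemption disabled.
 */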
static int rcu_blocking_is_gp(void)
{
        int ret;

        might_sleep();  /* Check for RCU read-side critical section. */
        preempt_disable();
        ret = num_online_cpus() <= 1;
        preempt_enable();
        return ret;
}

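/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  IPIs are sent to all
 * online CPUs that the grace period must wait on; the handler either
 * reports the quiescent state immediately (when interrupted from idle)
 * or forces a context switch, which is a quiescent state for !PREEMPT RCU.
 */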
void synchronize_rcu_expedited(void)
{
        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

        /* If only one CPU, this is automatically a grace period. */
        if (rcu_blocking_is_gp())
                return;

        _synchronize_rcu_expedited(sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */