/*
 * RCU expedited grace periods.
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * once the current expedited grace period ends.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
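
/*
 * Illustrative sketch (usage pattern only, not a new API): the two
 * helpers above form a snapshot/poll pair.  A caller records the
 * sequence value that will indicate completion, then later polls it:
 *
 *	unsigned long s;
 *
 *	s = rcu_exp_gp_seq_snap();
 *	// ...time passes, possibly another task drives the grace period...
 *	if (rcu_exp_gp_seq_done(s))
 *		;  // a full expedited grace period elapsed since the snap
 *
 * sync_exp_work_done() and exp_funnel_lock() below are the in-file
 * users of exactly this pattern.
 */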

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-hotplug activity.  Note that this cannot exclude
 * concurrent CPU-hotplug operations, so it must deal with races
 * against such operations.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs
 * and tasks covered by that rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited
 * preemptible-RCU grace period.  This event is reported either to the
 * rcu_node structure on which the task was queued or to one of that
 * rcu_node structure's ancestors, iteratively up the tree.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->exp_deferred_qs, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure GP ends before subsequent accesses. */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up the
	 * rcu_node tree, returning if someone else did our work for us,
	 * or falling through to acquire ->exp_mutex if we need to do the
	 * work ourselves.  At each level, tasks needing a grace period
	 * that is already requested are funneled onto a wait queue so
	 * that only one task per needed grace period climbs the tree.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for it. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
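
/*
 * Illustrative sketch (not a new API, mirrors the caller at the end of
 * this file): a typical user of the funnel lock snapshots the counter
 * first and only does the work itself if nobody else has it covered:
 *
 *	s = rcu_exp_gp_seq_snap();
 *	if (exp_funnel_lock(s))
 *		return;  // an overlapping expedited GP already did our work
 *	// ...drive the expedited grace period, then release exp_mutex...
 *
 * See synchronize_rcu_expedited() below for the real caller.
 */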

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No workqueue yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
		/* If all CPUs are offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi - rnp->grplo))
			cpu = WORK_CPU_UNBOUND;
		else
			cpu += rnp->grplo;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0);  /* workqueues should not be signaled. */
	return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * stall warnings along the way.  On nohz_full systems, also set a tick
 * dependency on CPUs that are slow to report a quiescent state.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
		if (synchronize_rcu_expedited_wait_once(1))
			return;
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
			}
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
		WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT));
	}

	for (;;) {
		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;
		panic_on_rcu_stall();
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			data_race(rnp_root->expmask),
			".T"[!!data_race(rnp_root->exp_tasks)]);
		if (ndetected) {
			pr_err("blocking rcu_node structures (internal RCU debug):");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					data_race(rnp->expmask),
					".T"[!!data_race(rnp->exp_tasks)]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	/*
	 * Switch over to wakeup mode, allowing the next grace period to
	 * proceed, but only after this one's end has been recorded under
	 * ->exp_wake_mutex, so that wakeups are not lost.
	 */
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}
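
/*
 * Note on the wait/wake pairing (illustrative, derived from the code
 * above and below): tasks that piggyback on an expedited grace period
 * sleep on one of four per-node wait queues selected by the low-order
 * bits of the sequence counter, for example:
 *
 *	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
 *		   sync_exp_work_done(s));
 *
 * rcu_exp_wait_wake() wakes exactly that slot once the grace period
 * numbered by @s has ended, which is why the slot index must be
 * computed the same way on both sides.
 */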

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the next
 * rcu_read_unlock() report the quiescent state via the deferred
 * quiescent-state mechanism.  Otherwise, report the quiescent state
 * immediately.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
			rdp->exp_deferred_qs = true;
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, that rcu_read_unlock() might be
	 * in the interrupted code, so make sure the interrupted code is
	 * aware that the grace period is waiting on it by setting
	 * ->exp_hint, which causes rcu_read_unlock() to invoke
	 * rcu_read_unlock_special(), which in turn reports the
	 * quiescent state.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			rdp->exp_deferred_qs = true;
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	/* Finally, negative nesting depth should not happen. */
	WARN_ON_ONCE(1);
}
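
/*
 * Illustrative note (a reader-side sketch, not code from this file):
 * with ->exp_hint set by the handler above, an interrupted reader
 * reports the expedited quiescent state when it leaves its critical
 * section, roughly:
 *
 *	rcu_read_lock();
 *	// ...IPI arrives here, handler sets ->exp_hint...
 *	rcu_read_unlock();	// takes the rcu_read_unlock_special()
 *				// slow path, reporting the deferred QS
 *
 * This sketch assumes the standard preemptible-RCU unlock slow path;
 * see rcu_read_unlock_special() for the real code.
 */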

/*
 * With CONFIG_PREEMPT_RCU, there is no PREEMPT=n expedited grace-period
 * state to clean up when a CPU comes online, so this is a no-op.
 */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	if (!READ_ONCE(rnp->exp_tasks))
		return 0;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU read-side critical section, and if so, it sets a
 * flag that causes the outermost rcu_read_unlock() to report the
 * quiescent state for RCU-preempt or asks the scheduler for help for
 * RCU-sched.  On the other hand, if the CPU is not in an RCU read-side
 * critical section, the IPI handler reports the quiescent state
 * immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than)
 * synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period in itself? */
	if (rcu_blocking_is_gp())
		return;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	/* Take a snapshot of the sequence number.  */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	if (unlikely(boottime)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Work actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);

	if (likely(!boottime))
		destroy_work_on_stack(&rew.rew_work);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
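
/*
 * Illustrative usage sketch (assumption: a made-up driver-style updater,
 * not taken from this file): a writer typically unlinks an RCU-protected
 * item, waits for an expedited grace period, and only then frees it:
 *
 *	spin_lock(&mylock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&mylock);
 *	synchronize_rcu_expedited();	// all pre-existing readers are done
 *	kfree(p);
 *
 * As the kerneldoc above notes, prefer synchronize_rcu() (or batching
 * updates behind a single grace period) unless normal grace-period
 * latency is truly a problem.
 */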