/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
	rcu_seq_start(&rsp->expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
	rcu_seq_end(&rsp->expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rsp->expedited_sequence);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
	return rcu_seq_done(&rsp->expedited_sequence, s);
}
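
/*
 * A minimal sketch of how the two helpers above pair up (illustration
 * only, not part of this file; do_full_gp() is a hypothetical stand-in
 * for the grace-period machinery below):
 *
 *	s = rcu_exp_gp_seq_snap(rsp);	// GP needed as of this instant.
 *	do_full_gp(rsp);		// Start, or share, a grace period.
 *	WARN_ON(!rcu_exp_gp_seq_done(rsp, s));
 *
 * Because rcu_seq_snap() rounds up to the end of the next full grace
 * period, any grace period that both starts and ends after the snapshot
 * satisfies it, which is what allows later requests to piggyback on
 * grace periods started by earlier ones.
 */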

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rsp->ncpus_snap))
		return;
	rsp->ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug(rsp);
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the rcu_node's exp_mutex.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the rcu_node's exp_mutex and the specified rcu_node
 * structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up(&rsp->expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 *
 * Caller must hold the rcu_node's exp_mutex.
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
					      struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rsp, rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.  Caller must hold the rcu_node's
 * exp_mutex.
 */
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
			       bool wake)
{
	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}

/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
			       unsigned long s)
{
	if (rcu_exp_gp_seq_done(rsp, s)) {
		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		atomic_long_inc(stat);
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root(rsp);

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rsp->exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire rsp->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(rsp,
						      &rdp->exp_workdone2, s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
					  rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rsp->exp_mutex);
fastpath:
	if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
		mutex_unlock(&rsp->exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
	return false;
}
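
/*
 * Funnel-lock illustration (not from the original source): with one
 * leaf rcu_node covering CPUs 0-15 and another covering CPUs 16-31,
 * concurrent callers on CPUs 1 and 2 both funnel through the first
 * leaf.  Whichever records its snapshot in that leaf's ->exp_seq_rq
 * first proceeds toward the root; the other sees ULONG_CMP_GE() hold
 * and sleeps on the leaf's wait queue until sync_exp_work_done()
 * reports the shared grace period complete.  Contention on
 * rsp->exp_mutex is thus limited to roughly one winner per leaf.
 */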

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *data)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_state *rsp = data;

	rdp = this_cpu_ptr(rsp->rda);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(&rcu_sched_state,
				   this_cpu_ptr(&rcu_sched_data), true);
		return;
	}
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
	resched_cpu(smp_processor_id());
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_sched_state;

	rdp = per_cpu_ptr(rsp->rda, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
	WARN_ON_ONCE(ret);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
				     smp_call_func_t func)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp;

	sync_exp_reset_tree(rsp);
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);

		/* Each pass checks a CPU for identity, offline, and idle. */
		mask_ofl_test = 0;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

			rdp->exp_dynticks_snap =
				rcu_dynticks_snap(rdp->dynticks);
			if (raw_smp_processor_id() == cpu ||
			    rcu_dynticks_in_eqs(rdp->exp_dynticks_snap) ||
			    !(rnp->qsmaskinitnext & rdp->grpmask))
				mask_ofl_test |= rdp->grpmask;
		}
		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

		/*
		 * Need to wait for any blocked tasks as well.  Note that
		 * additional blocking tasks will also block the expedited
		 * GP until such time as the ->expmask bits are cleared.
		 */
		if (rcu_preempt_has_tasks(rnp))
			rnp->exp_tasks = rnp->blkd_tasks.next;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* IPI the remaining CPUs for expedited quiescent state. */
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);

			if (!(mask_ofl_ipi & mask))
				continue;
retry_ipi:
			if (rcu_dynticks_in_eqs_since(rdp->dynticks,
						      rdp->exp_dynticks_snap)) {
				mask_ofl_test |= mask;
				continue;
			}
			ret = smp_call_function_single(cpu, func, rsp, 0);
			if (!ret) {
				mask_ofl_ipi &= ~mask;
				continue;
			}
			/* Failed, raced with CPU hotplug operation. */
			raw_spin_lock_irqsave_rcu_node(rnp, flags);
			if ((rnp->qsmaskinitnext & mask) &&
			    (rnp->expmask & mask)) {
				/* Online, so delay for a bit and try again. */
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				schedule_timeout_uninterruptible(1);
				goto retry_ipi;
			}
			/* CPU really is gone, so we need not wait for it. */
			if (!(rnp->expmask & mask))
				mask_ofl_ipi &= ~mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}
		/* Report quiescent states for those that went offline or idle. */
		mask_ofl_test |= mask_ofl_ipi;
		if (mask_ofl_test)
			rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
	}
}

static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	int ret;

	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout(
				rsp->expedited_wq,
				sync_rcu_preempt_exp_done(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
			return;
		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rsp->name);
		ndetected = 0;
		rcu_for_each_leaf_node(rsp, rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(rsp->rda, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rsp->expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rsp, rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rsp, rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait(rsp);
	rcu_exp_gp_seq_end(rsp);
	trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rsp->exp_wake_mutex);

	rcu_for_each_node_breadth_first(rsp, rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
	}
	trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
	mutex_unlock(&rsp->exp_wake_mutex);
}

/* Track the state needed by a workqueue-driven expedited grace period. */
struct rcu_exp_work {
	smp_call_func_t rew_func;
	struct rcu_state *rew_rsp;
	unsigned long rew_s;
	struct work_struct rew_work;
};

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
				  smp_call_func_t func, unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus(rsp, func);

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(rsp, s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
}

/*
 * Given an rcu_state pointer and a smp_call_function() handler, kick
 * off the specified flavor of expedited grace period.
 */
static void _synchronize_rcu_expedited(struct rcu_state *rsp,
				       smp_call_func_t func)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(rsp->call);
		return;
	}

	/* Take a snapshot of the sequence number.  */
	s = rcu_exp_gp_seq_snap(rsp);
	if (exp_funnel_lock(rsp, s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(rsp, func, s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_func = func;
		rew.rew_rsp = rsp;
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		schedule_work(&rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
	rnp = rcu_get_root(rsp);
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(rsp, &rdp->exp_workdone0, s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rsp->exp_mutex);
}

/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */
void synchronize_sched_expedited(void)
{
	struct rcu_state *rsp = &rcu_sched_state;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched_expedited() in RCU read-side critical section");

	/* If only one CPU, this is automatically a grace period. */
	if (rcu_blocking_is_gp())
		return;

	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
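
/*
 * A minimal usage sketch, illustration only and not part of this file:
 * example_update(), example_lock, example_gp, and struct example_data
 * are all hypothetical names.
 */
#if 0	/* Example: swap a pointer, then expedite reader completion. */
static void example_update(struct example_data *newp)
{
	struct example_data *oldp;

	spin_lock(&example_lock);
	oldp = rcu_dereference_protected(example_gp,
					 lockdep_is_held(&example_lock));
	rcu_assign_pointer(example_gp, newp);	/* Publish the new version. */
	spin_unlock(&example_lock);
	synchronize_sched_expedited();	/* Wait for pre-existing readers. */
	kfree(oldp);		/* Now safe: no reader can still hold oldp. */
}
#endif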

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *info)
{
	struct rcu_data *rdp;
	struct rcu_state *rsp = info;
	struct task_struct *t = current;

	/*
	 * Within an RCU read-side critical section, request that the next
	 * rcu_read_unlock() report.  Unless this RCU read-side critical
	 * section has already blocked, in which case it is already set
	 * up for the expedited grace period to wait on it.
	 */
	if (t->rcu_read_lock_nesting > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {
		t->rcu_read_unlock_special.b.exp_need_qs = true;
		return;
	}

	/*
	 * We are either exiting an RCU read-side critical section (negative
	 * values of t->rcu_read_lock_nesting) or are not in one at all
	 * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
	 * read-side critical section that blocked before this expedited
	 * grace period started.  Either way, we can immediately report
	 * the quiescent state.
	 */
	rdp = this_cpu_ptr(rsp->rda);
	rcu_report_exp_rdp(rsp, rdp, true);
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU read-side critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_state *rsp = rcu_state_p;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */