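/*
 * RCU expedited grace periods.
 */

/*
 * Record the start of an expedited grace period.
 */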
static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
{
        rcu_seq_start(&rsp->expedited_sequence);
}
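
/*
 * Record the end of an expedited grace period.
 */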
static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
{
        rcu_seq_end(&rsp->expedited_sequence);
        smp_mb(); /* Ensure that consecutive grace periods serialize. */
}
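
/*
 * Take a snapshot of the expedited-grace-period counter.
 */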
static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
{
        unsigned long s;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        s = rcu_seq_snap(&rsp->expedited_sequence);
        trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
        return s;
}
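
/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */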
static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
{
        return rcu_seq_done(&rsp->expedited_sequence, s);
}
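
/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */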
static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
{
        bool done;
        unsigned long flags;
        unsigned long mask;
        unsigned long oldmask;
        int ncpus = READ_ONCE(rsp->ncpus);
        struct rcu_node *rnp;
        struct rcu_node *rnp_up;

        /* If no new CPUs onlined since last time, nothing to do. */
        if (likely(ncpus == rsp->ncpus_snap))
                return;
        rsp->ncpus_snap = ncpus;

        /*
         * Each pass through the following loop propagates newly onlined
         * CPUs for the current rcu_node structure up the rcu_node tree.
         */
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }

                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* If was already nonzero, nothing to propagate. */
                if (oldmask)
                        continue;

                /* Propagate the new CPU up the tree. */
                mask = rnp->grpmask;
                rnp_up = rnp->parent;
                done = false;
                while (rnp_up) {
                        raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
                        rnp_up = rnp_up->parent;
                }
        }
}
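
/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */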
static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
{
        unsigned long flags;
        struct rcu_node *rnp;

        sync_exp_reset_tree_hotplug(rsp);
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                rnp->expmask = rnp->expmaskinit;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}
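
/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */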
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        return rnp->exp_tasks == NULL &&
               READ_ONCE(rnp->expmask) == 0;
}
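
/*
 * Report the exit from an RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * iterating up the tree.  Caller must hold the specified rcu_node structure's
 * ->lock, which this function releases.
 */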
static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;

        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up(&rsp->expedited_wq);
                        }
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
                rnp->expmask &= ~mask;
        }
}
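
/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */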
static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
                                              struct rcu_node *rnp, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        __rcu_report_exp_rnp(rsp, rnp, wake, flags);
}
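
/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */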
static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        rnp->expmask &= ~mask;
        __rcu_report_exp_rnp(rsp, rnp, wake, flags);
}
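
/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */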
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
                               bool wake)
{
        rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}
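
/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */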
static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
                               unsigned long s)
{
        if (rcu_exp_gp_seq_done(rsp, s)) {
                trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
                /* Ensure test happens before caller kfree(). */
                smp_mb__before_atomic();
                atomic_long_inc(stat);
                return true;
        }
        return false;
}
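
/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */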
static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
{
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
        struct rcu_node *rnp = rdp->mynode;
        struct rcu_node *rnp_root = rcu_get_root(rsp);

        /* Low-contention fastpath. */
        if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
            (rnp == rnp_root ||
             ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
            mutex_trylock(&rsp->exp_mutex))
                goto fastpath;

        /*
         * Each pass through the following loop works its way up
         * the rcu_node tree, returning if someone else has already done
         * the work or if the expedited grace period has already completed.
         */
        for (; rnp != NULL; rnp = rnp->parent) {
                if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
                        return true;

                /* Work not done, either wait here or go up. */
                spin_lock(&rnp->exp_lock);
                if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

                        /* Someone else doing GP, so wait for them. */
                        spin_unlock(&rnp->exp_lock);
                        trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("wait"));
                        wait_event(rnp->exp_wq[(s >> 1) & 0x3],
                                   sync_exp_work_done(rsp,
                                                      &rdp->exp_workdone2, s));
                        return true;
                }
                rnp->exp_seq_rq = s; /* Followers can wait on us. */
                spin_unlock(&rnp->exp_lock);
                trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
                                          rnp->grphi, TPS("nxtlvl"));
        }
        mutex_lock(&rsp->exp_mutex);
fastpath:
        if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
                mutex_unlock(&rsp->exp_mutex);
                return true;
        }
        rcu_exp_gp_seq_start(rsp);
        trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
        return false;
}
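
/* Invoked on each online non-idle CPU for expedited quiescent state. */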
static void sync_sched_exp_handler(void *data)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        struct rcu_state *rsp = data;

        rdp = this_cpu_ptr(rsp->rda);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
                return;
        if (rcu_is_cpu_rrupt_from_idle()) {
                rcu_report_exp_rdp(&rcu_sched_state,
                                   this_cpu_ptr(&rcu_sched_data), true);
                return;
        }
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
        resched_cpu(smp_processor_id());
}
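
/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */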
static void sync_sched_exp_online_cleanup(int cpu)
{
        struct rcu_data *rdp;
        int ret;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_sched_state;

        rdp = per_cpu_ptr(rsp->rda, cpu);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
                return;
        ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
        WARN_ON_ONCE(ret);
}
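
/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */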
static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                                     smp_call_func_t func)
{
        int cpu;
        unsigned long flags;
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        int ret;
        struct rcu_node *rnp;

        sync_exp_reset_tree(rsp);
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);

                /* Each pass checks a CPU for identity, offline, and idle. */
                mask_ofl_test = 0;
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
                        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

                        if (raw_smp_processor_id() == cpu ||
                            !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
                                mask_ofl_test |= rdp->grpmask;
                }
                mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

                /*
                 * Need to wait for any blocked tasks as well.  Note that
                 * additional blocking tasks will also block the expedited
                 * GP until such time as the ->expmask bits are cleared.
                 */
                if (rcu_preempt_has_tasks(rnp))
                        rnp->exp_tasks = rnp->blkd_tasks.next;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* IPI the remaining CPUs for expedited quiescent state. */
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                        if (!(mask_ofl_ipi & mask))
                                continue;
retry_ipi:
                        ret = smp_call_function_single(cpu, func, rsp, 0);
                        if (!ret) {
                                mask_ofl_ipi &= ~mask;
                                continue;
                        }
                        /* Failed, raced with CPU hotplug operation. */
                        raw_spin_lock_irqsave_rcu_node(rnp, flags);
                        if (cpu_online(cpu) &&
                            (rnp->expmask & mask)) {
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                                schedule_timeout_uninterruptible(1);
                                if (cpu_online(cpu) &&
                                    (rnp->expmask & mask))
                                        goto retry_ipi;
                                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                        }
                        if (!(rnp->expmask & mask))
                                mask_ofl_ipi &= ~mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
                /* Report quiescent states for those that went offline. */
                mask_ofl_test |= mask_ofl_ipi;
                if (mask_ofl_test)
                        rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
        }
}
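
/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */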
static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
{
        int cpu;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
        unsigned long mask;
        int ndetected;
        struct rcu_node *rnp;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        int ret;

        jiffies_stall = rcu_jiffies_till_stall_check();
        jiffies_start = jiffies;

        for (;;) {
                ret = swait_event_timeout(
                                rsp->expedited_wq,
                                sync_rcu_preempt_exp_done(rnp_root),
                                jiffies_stall);
                if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
                        return;
                if (ret < 0) {
                        /* Hit a signal, disable CPU stall warnings. */
                        swait_event(rsp->expedited_wq,
                                    sync_rcu_preempt_exp_done(rnp_root));
                        return;
                }
                pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                       rsp->name);
                ndetected = 0;
                rcu_for_each_leaf_node(rsp, rnp) {
                        ndetected += rcu_print_task_exp_stall(rnp);
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                struct rcu_data *rdp;

                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(rnp->expmask & mask))
                                        continue;
                                ndetected++;
                                rdp = per_cpu_ptr(rsp->rda, cpu);
                                pr_cont(" %d-%c%c%c", cpu,
                                        "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
                        }
                }
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rsp->expedited_sequence,
                        rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
                if (ndetected) {
                        pr_err("blocking rcu_node structures:");
                        rcu_for_each_node_breadth_first(rsp, rnp) {
                                if (rnp == rnp_root)
                                        continue;
                                if (sync_rcu_preempt_exp_done(rnp))
                                        continue;
                                pr_cont(" l=%u:%d-%d:%#lx/%c",
                                        rnp->level, rnp->grplo, rnp->grphi,
                                        rnp->expmask,
                                        ".T"[!!rnp->exp_tasks]);
                        }
                        pr_cont("\n");
                }
                rcu_for_each_leaf_node(rsp, rnp) {
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(rnp->expmask & mask))
                                        continue;
                                dump_cpu_task(cpu);
                        }
                }
                jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
        }
}
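
/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */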
static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
{
        struct rcu_node *rnp;

        synchronize_sched_expedited_wait(rsp);
        rcu_exp_gp_seq_end(rsp);
        trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));

        /*
         * Switch over to wakeup mode, allowing the next grace period,
         * but only the next grace period, to proceed.
         */
        mutex_lock(&rsp->exp_wake_mutex);
        mutex_unlock(&rsp->exp_mutex);

        rcu_for_each_node_breadth_first(rsp, rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                        spin_lock(&rnp->exp_lock);
                        /* Recheck, avoid hang in case someone just arrived. */
                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
                                rnp->exp_seq_rq = s;
                        spin_unlock(&rnp->exp_lock);
                }
                wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
        }
        trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
        mutex_unlock(&rsp->exp_wake_mutex);
}
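
/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.  In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of sequence
 * locking to expedited grace periods, but using the sequence counter to
 * determine when someone else has already done the work instead of for
 * retrying readers.
 */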
void synchronize_sched_expedited(void)
{
        unsigned long s;
        struct rcu_state *rsp = &rcu_sched_state;

        /* If only one CPU, this is automatically a grace period. */
        if (rcu_blocking_is_gp())
                return;

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(call_rcu_sched);
                return;
        }

        /* Take a snapshot of the sequence number. */
        s = rcu_exp_gp_seq_snap(rsp);
        if (exp_funnel_lock(rsp, s))
                return;  /* Someone else did our work for us. */

        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(rsp, s);
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#ifdef CONFIG_PREEMPT_RCU
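
/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() report the quiescent state for the
 * expedited grace period.  Otherwise, report the quiescent state
 * immediately.
 */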
static void sync_rcu_exp_handler(void *info)
{
        struct rcu_data *rdp;
        struct rcu_state *rsp = info;
        struct task_struct *t = current;

        /*
         * Within an RCU read-side critical section, request that the next
         * rcu_read_unlock() report.  Unless this RCU read-side critical
         * section has already blocked, in which case it is already set
         * up for the expedited grace period to wait on it.
         */
        if (t->rcu_read_lock_nesting > 0 &&
            !t->rcu_read_unlock_special.b.blocked) {
                t->rcu_read_unlock_special.b.exp_need_qs = true;
                return;
        }

        /*
         * We are either exiting an RCU read-side critical section (negative
         * values of t->rcu_read_lock_nesting) or are not in one at all
         * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
         * read-side critical section that blocked before this expedited
         * grace period started.  Either way, we can immediately report
         * the quiescent state.
         */
        rdp = this_cpu_ptr(rsp->rda);
        rcu_report_exp_rdp(rsp, rdp, true);
}
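
/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state.  On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 */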
void synchronize_rcu_expedited(void)
{
        struct rcu_state *rsp = rcu_state_p;
        unsigned long s;

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(call_rcu);
                return;
        }

        s = rcu_exp_gp_seq_snap(rsp);
        if (exp_funnel_lock(rsp, s))
                return;  /* Someone else did our work for us. */

        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(rsp, s);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#else /* #ifdef CONFIG_PREEMPT_RCU */
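
/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */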
void synchronize_rcu_expedited(void)
{
        synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #ifdef CONFIG_PREEMPT_RCU */