// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods.
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
int sysctl_max_rcu_stall_to_panic __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif
#define RCU_STALL_MIGHT_DIV	8
#define RCU_STALL_MIGHT_MIN	(2 * HZ)

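/* Limit-check stall timeouts specified at boottime and runtime. */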
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits for
	 * RCU_CPU_STALL_TIMEOUT (3..300 seconds).
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);

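/*
 * Is it likely that the current grace period is stalled?  Returns true if
 * a grace period is in progress and has been running for at least one
 * eighth of the stall-warning timeout (but no less than
 * RCU_STALL_MIGHT_MIN jiffies).  The memory barriers below order the
 * sampling of jiffies, ->gp_seq, and ->gp_start so that racing
 * grace-period starts and ends do not produce false positives.
 */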
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); /* jiffies before ->gp_seq to avoid false positives. */
	if (!rcu_gp_in_progress())
		return false;
	smp_mb(); /* ->gp_seq before ->gp_start. */
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}

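/* Don't do RCU CPU stall warnings during long sysrq printouts. */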
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

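/* Suppress RCU CPU stall warnings while a panic is in progress. */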
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

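/* Panic (if so configured via sysctl) once enough stalls have been reported. */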
static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return;

	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

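/*
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Pushes the stall-warning deadline far enough into the future that the
 * current grace period will not produce (further) stall warnings.
 */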
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

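/* Record the start of a new grace period along with its stall deadlines. */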
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); /* ->gp_start before ->jiffies_stall and caller's ->gp_seq. */
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

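/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */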
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

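/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */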
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

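/*
 * irq_work handler used by the stall-warning machinery: record that this
 * CPU took the interrupt by noting the current grace-period number and
 * clearing the pending flag, complaining if no request was outstanding.
 */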
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

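/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */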
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

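/* Communicate task state back to the RCU CPU stall-warning request. */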
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

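/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */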
static bool check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return false;
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return true;
}

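/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */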
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		/* The caller relies on this function to release rnp->lock. */
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	while (i) {
		t = ts[--i]; /* Count down to ts[0] so no reference is leaked. */
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, tasks cannot be blocked within
 * RCU read-side critical sections, so there is nothing to dump.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, there are never any stalled
 * tasks to report; just release the lock handed in by the caller.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

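/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to dump_cpu_task() if the NMI-based backtrace
 * fails.  The NMI-triggered stack traces are more accurate because
 * they are printed by the target CPU.
 */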
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		!!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

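/* Convert a ->gp_state value to a character string. */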
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

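/* Is the RCU grace-period kthread being starved of CPU time? */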
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

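/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle-entry information.
 */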
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz,
	       falsepositive ? " (false positive?)" : "");
}

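/* Complain about starvation of the grace-period kthread. */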
static void rcu_check_gp_kthread_starvation(void)
{
	int cpu;
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		cpu = gpk ? task_cpu(gpk) : -1;
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, cpu);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			if (cpu >= 0) {
				if (cpu_is_offline(cpu)) {
					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
				} else {
					pr_err("Stack dump where RCU GP kthread last ran:\n");
					if (!trigger_single_cpu_backtrace(cpu))
						dump_cpu_task(cpu);
				}
			}
			wake_up_process(gpk);
		}
	}
}

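/* Complain about a missing wakeup from the expired fqs wait timer. */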
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	short gp_state;
	unsigned long jiffies_fqs;
	int cpu;

	/*
	 * Order the read of ->gp_state before the read of
	 * ->jiffies_force_qs, matching the ordering on the update side
	 * in the grace-period kthread.
	 */
	gp_state = smp_load_acquire(&rcu_state.gp_state);
	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);

	if (gp_state == RCU_GP_WAIT_FQS &&
	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
	    gpk && !READ_ONCE(gpk->on_rq)) {
		cpu = task_cpu(gpk);
		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx\n",
		       rcu_state.name, (jiffies - jiffies_fqs),
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
		       gpk->state);
		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
	}
}

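/* Report stalls detected on other CPUs and on tasks blocking the grace period. */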
static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); /* Releases rnp->lock. */
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - gps),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

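/* Report a stall detected on the current CPU. */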
static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 * A context switch on this CPU allows the scheduler to report the
	 * quiescent state that the stalled grace period is waiting for.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

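/*
 * Check whether the current grace period has stalled and, if so, print a
 * warning.  Updating ->jiffies_stall via cmpxchg() throttles both the
 * warnings themselves and racing reports from multiple CPUs.
 */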
static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
	}
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including callback invocation.

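/*
 * Show the state of the grace-period kthreads.
 */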
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(rcu_state.gp_activity);
	jr = j - data_race(rcu_state.gp_req_activity);
	jw = j - data_race(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state, t ? t->state : 0x1ffffL,
		ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
		(long)data_race(rcu_state.gp_seq),
		(long)data_race(rcu_get_root()->gp_seq_needed),
		data_race(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
				 READ_ONCE(rnp->gp_seq_needed)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)data_race(rnp->gp_seq),
			(long)data_race(rnp->gp_seq_needed));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(rdp->gp_seq_needed));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(rdp->n_cbs_invoked);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

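/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */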
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) /* Get stable kthread. */
		return;
	j = jiffies;
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	/* Hold onto the leaf lock to make others see warned==1. */
	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

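/*
 * Do a forward-progress check: report the age of the current grace period
 * (or the time since the last one ended), check for unhonored grace-period
 * requests, and print per-CPU callback counts.  The argument "j" gives the
 * time period during which a needed grace period may go unstarted before
 * this is reported.
 */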
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

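/* Commandeer a sysrq key to dump RCU's tree. */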
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

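/* Dump grace-period-request information due to commandeered sysrq. */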
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);