/*
 *  HW NMI watchdog support
 *
 *  started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 *  Arch specific calls to support NMI watchdog
 *
 *  Bits copied from original nmi.c file
 *
 */
#include <asm/apic.h>
#include <asm/nmi.h>

#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/seq_buf.h>

#ifdef CONFIG_HARDLOCKUP_DETECTOR
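/*
 * The hardlockup detector runs off a perf NMI event that counts CPU
 * cycles; its sample period is simply watchdog_thresh seconds expressed
 * in cycles (cpu_khz * 1000 cycles per second).
 */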
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return (u64)(cpu_khz) * 1000 * watchdog_thresh;
}
#endif

#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
static cpumask_t printtrace_mask;

#define NMI_BUF_SIZE		8192

struct nmi_seq_buf {
	unsigned char		buffer[NMI_BUF_SIZE];
	struct seq_buf		seq;
};

/* Safe printing in NMI context */
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

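/* Emit s->buffer[start..end], inclusive, as a single printk(). */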
static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
{
	const char *buf = s->buffer + start;

	printk("%.*s", (end - start) + 1, buf);
}

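/*
 * Send an NMI to all (or all other) online CPUs.  Each targeted CPU dumps
 * its backtrace into its per-cpu seq_buf from NMI context; once every CPU
 * has responded or 10 seconds have passed, the buffers are flushed to the
 * console from this (non-NMI) context.
 */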
void arch_trigger_all_cpu_backtrace(bool include_self)
{
	struct nmi_seq_buf *s;
	int len;
	int cpu;
	int i;
	int this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
	if (!include_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));
	/*
	 * Set up per_cpu seq_buf buffers that the NMIs running on the other
	 * CPUs will write to.
	 */
	for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
		s = &per_cpu(nmi_print_seq, cpu);
		seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
	}

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("sending NMI to %s CPUs:\n",
			(include_self ? "all" : "other"));
		apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		touch_softlockup_watchdog();
	}

	/*
	 * Now that all the NMIs have triggered, we can dump out their
	 * back traces safely to the console.
	 */
	for_each_cpu(cpu, &printtrace_mask) {
		int last_i = 0;

		s = &per_cpu(nmi_print_seq, cpu);
		len = seq_buf_used(&s->seq);
		if (!len)
			continue;

		/* Print line by line. */
		for (i = 0; i < len; i++) {
			if (s->buffer[i] == '\n') {
				print_seq_line(s, last_i, i);
				last_i = i + 1;
			}
		}
		/* Check if there was a partial line. */
		if (last_i < len) {
			print_seq_line(s, last_i, len - 1);
			pr_cont("\n");
		}
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_atomic();
	put_cpu();
}

/*
 * It is not safe to call printk() directly from NMI handlers.
 * It may be fine if the NMI detected a lock up and we have no choice
 * but to do so, but doing a NMI on all other CPUs to get a back trace
 * can be done with a sysrq-l. We don't want that to lock up, which
 * can happen if the NMI interrupts a printk in progress.
 *
 * Instead, we redirect the vprintk() to this nmi_vprintk() that writes
 * the content into a per cpu seq_buf buffer. Then when the NMIs are
 * all done, we can safely dump the contents of the seq_buf to a printk()
 * from a non NMI context.
 */
static int nmi_vprintk(const char *fmt, va_list args)
{
	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
	unsigned int len = seq_buf_used(&s->seq);

	seq_buf_vprintf(&s->seq, fmt, args);
	return seq_buf_used(&s->seq) - len;
}

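/*
 * NMI handler: if this CPU was asked for a backtrace, redirect printk()
 * into the per-cpu seq_buf, dump the registers and stack, then clear this
 * CPU from backtrace_mask to signal completion.
 */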
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		printk_func_t printk_func_save = this_cpu_read(printk_func);

		/* Replace printk to write into the NMI seq */
		this_cpu_write(printk_func, nmi_vprintk);
		if (regs && cpu_in_idle(instruction_pointer(regs))) {
			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
				cpu, instruction_pointer(regs));
		} else {
			printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
			show_regs(regs);
		}
		this_cpu_write(printk_func, printk_func_save);

		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return NMI_HANDLED;
	}

	return NMI_DONE;
}

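/* Hook the backtrace handler into the NMI_LOCAL chain at early boot. */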
static int __init register_trigger_all_cpu_backtrace(void)
{
	register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
				0, "arch_bt");
	return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);
#endif