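/*
 *  NMI watchdog support on APIC systems.
 *
 *  Periodic NMIs (driven by the local APIC performance counter or an
 *  IO-APIC routed timer) are used to detect CPUs whose timer interrupt
 *  counts stop advancing, i.e. CPUs that are hard-locked with interrupts
 *  disabled.
 */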
#include <asm/apic.h>

#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/percpu.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kernel_stat.h>
#include <linux/kdebug.h>
#include <linux/smp.h>

#include <asm/i8259.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/timer.h>

#include <asm/mce.h>

#include <asm/mach_traps.h>
int unknown_nmi_panic;
int nmi_watchdog_enabled;

/* CPUs that still owe us an NMI backtrace; see arch_trigger_all_cpu_backtrace() */
static cpumask_t backtrace_mask __read_mostly;
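
/*
 * nmi_active:
 *  >0: the NMI watchdog is active on at least one CPU and may be disabled
 *   0: the NMI watchdog is currently disabled but may be (re)enabled
 *  <0: the NMI watchdog could not be set up and cannot be enabled
 */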
atomic_t nmi_active = ATOMIC_INIT(0);
EXPORT_SYMBOL(nmi_active);

unsigned int nmi_watchdog = NMI_NONE;
EXPORT_SYMBOL(nmi_watchdog);

static int panic_on_timeout;

static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;

static inline unsigned int get_nmi_count(int cpu)
{
	return per_cpu(irq_stat, cpu).__nmi_count;
}

static inline int mce_in_progress(void)
{
#if defined(CONFIG_X86_MCE)
	return atomic_read(&mce_entry) > 0;
#endif
	return 0;
}

/*
 * Take both the local APIC timer and IRQ0 into account; with
 * highres/dyntick we do not know which of the two is driving the tick.
 */
static inline unsigned int get_timer_irqs(int cpu)
{
	return per_cpu(irq_stat, cpu).apic_timer_irqs +
		per_cpu(irq_stat, cpu).irq0_irqs;
}

#ifdef CONFIG_SMP
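/*
 * The performance counters used by the local-APIC NMI watchdog do not
 * tick while a CPU is idle, so keep all CPUs busy while the watchdog
 * is being tested.
 */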
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
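	/*
	 * Deliberately avoid cpu_relax() here: the performance counter
	 * should keep ticking even if a simulator or hypervisor traps
	 * the pause instruction, and the other CPUs are only running
	 * delay loops anyway.
	 */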
	while (endflag == 0)
		mb();
}
#endif

static void report_broken_nmi(int cpu, unsigned int *prev_nmi_count)
{
	printk(KERN_CONT "\n");

	printk(KERN_WARNING
		"WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			cpu, prev_nmi_count[cpu], get_nmi_count(cpu));

	printk(KERN_WARNING
		"Please report this to bugzilla.kernel.org,\n");
	printk(KERN_WARNING
		"and attach the output of the 'dmesg' command.\n");

	per_cpu(wd_enabled, cpu) = 0;
	atomic_dec(&nmi_active);
}

static void __acpi_nmi_disable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}
int __init check_nmi_watchdog(void)
{
	unsigned int *prev_nmi_count;
	int cpu;

	if (!nmi_watchdog_active() || !atomic_read(&nmi_active))
		return 0;

	prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
	if (!prev_nmi_count)
		goto error;

	printk(KERN_INFO "Testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
#endif

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = get_nmi_count(cpu);
	local_irq_enable();
	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
			report_broken_nmi(cpu, prev_nmi_count);
	}
	endflag = 1;
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		atomic_set(&nmi_active, -1);
		goto error;
	}
	printk("OK.\n");
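
	/*
	 * Now that we know the watchdog works, lower the NMI frequency to
	 * something more reasonable.
	 */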
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = lapic_adjust_nmi_hz(1);

	kfree(prev_nmi_count);
	return 0;
error:
	if (nmi_watchdog == NMI_IO_APIC) {
		if (!timer_through_8259)
			disable_8259A_irq(0);
		on_each_cpu(__acpi_nmi_disable, NULL, 1);
	}

#ifdef CONFIG_X86_32
	timer_ack = 0;
#endif
	return -1;
}

static int __init setup_nmi_watchdog(char *str)
{
	unsigned int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	if (!strncmp(str, "lapic", 5))
		nmi_watchdog = NMI_LOCAL_APIC;
	else if (!strncmp(str, "ioapic", 6))
		nmi_watchdog = NMI_IO_APIC;
	else {
		get_option(&str, &nmi);
		if (nmi >= NMI_INVALID)
			return 0;
		nmi_watchdog = nmi;
	}

	return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
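
/*
 * Suspend/resume support for the local APIC NMI watchdog (sysdev based).
 */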
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, the other CPUs should be offline already */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, the other CPUs should be offline already */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	.name		= "lapic_nmi",
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/*
	 * This would arguably be a BUG_ON, but since we run from an
	 * initcall just return quietly instead.
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}

/* must run after the local APIC has been set up, hence late_initcall */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

static void __acpi_nmi_enable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI);
}
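
/*
 * Enable timer-based NMIs on all CPUs:
 */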
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 1);
}
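
/*
 * Disable timer-based NMIs on all CPUs:
 */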
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 1);
}
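
/*
 * Called by the LAPIC NMI watchdog driver once this CPU's watchdog is fully
 * set up and ready to treat incoming NMIs as watchdog ticks.
 */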
void cpu_nmi_set_wd_enabled(void)
{
	__get_cpu_var(wd_enabled) = 1;
}

void setup_apic_nmi_watchdog(void *unused)
{
	if (__get_cpu_var(wd_enabled))
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active, neither should the other cpus be */
	if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
		return;

	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		if (lapic_watchdog_init(nmi_hz) < 0) {
			__get_cpu_var(wd_enabled) = 0;
			return;
		}
		/* FALL THROUGH */
	case NMI_IO_APIC:
		__get_cpu_var(wd_enabled) = 1;
		atomic_inc(&nmi_active);
	}
}

void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if (!nmi_watchdog_active())
		return;
	if (__get_cpu_var(wd_enabled) == 0)
		return;
	if (nmi_watchdog == NMI_LOCAL_APIC)
		lapic_watchdog_stop();
	else
		__acpi_nmi_disable(NULL);
	__get_cpu_var(wd_enabled) = 0;
	atomic_dec(&nmi_active);
}
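
/*
 * The best way to detect whether a CPU has a 'hard lockup' problem is to
 * check its local APIC timer IRQ count: if it stops changing, that CPU is
 * stuck with interrupts disabled.  Since the watchdog NMI fires on every
 * CPU, each CPU only has to check itself.
 *
 * NMIs don't respect any locks, so be extremely careful about the state
 * touched from nmi_watchdog_tick(), in particular anything that may take
 * the console/printk locks.
 */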
static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)
{
	if (nmi_watchdog_active()) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu) {
			if (per_cpu(nmi_touch, cpu) != 1)
				per_cpu(nmi_touch, cpu) = 1;
		}
	}

	/*
	 * Tickle the softlockup detector too:
	 */
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

notrace __kprobes int
nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = get_timer_irqs(cpu);

	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	/* were we asked (via arch_trigger_all_cpu_backtrace) to dump our stack? */
	if (cpumask_test_cpu(cpu, &backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		dump_stack();
		spin_unlock(&lock);
		cpumask_clear_cpu(cpu, &backtrace_mask);

		rc = 1;
	}

	/* Could check oops_in_progress here too, but it's safer not to */
	if (mce_in_progress())
		touched = 1;

	/* if none of the timers is firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			/*
			 * die_nmi() returns ONLY if NOTIFY_STOP happens..
			 */
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/*
		 * don't know how to accurately check for this.
		 * just assume it was a watchdog timer interrupt.
		 * This matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}

#ifdef CONFIG_SYSCTL

static void enable_ioapic_nmi_watchdog_single(void *unused)
{
	__get_cpu_var(wd_enabled) = 1;
	atomic_inc(&nmi_active);
	__acpi_nmi_enable(NULL);
}

static void enable_ioapic_nmi_watchdog(void)
{
	on_each_cpu(enable_ioapic_nmi_watchdog_single, NULL, 1);
	touch_nmi_watchdog();
}

static void disable_ioapic_nmi_watchdog(void)
{
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
}

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1);
	return 0;
}
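
/*
 * Sysctl/proc handler that enables or disables the NMI watchdog at runtime.
 */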
int proc_nmi_enabled(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
		printk(KERN_WARNING
			"NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else if (nmi_watchdog == NMI_IO_APIC) {
		if (nmi_watchdog_enabled)
			enable_ioapic_nmi_watchdog();
		else
			disable_ioapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

#endif

int do_nmi_callback(struct pt_regs *regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}

void arch_trigger_all_cpu_backtrace(void)
{
	int i;

	cpumask_copy(&backtrace_mask, cpu_online_mask);

	printk(KERN_INFO "sending NMI to all CPUs:\n");
	apic->send_IPI_all(NMI_VECTOR);

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(&backtrace_mask))
			break;
		mdelay(1);
	}
}