linux/arch/x86/kernel/apic/hw_nmi.c
/*
 *  HW NMI watchdog support
 *
 *  started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 *  Arch specific calls to support NMI watchdog
 *
 *  Bits copied from original nmi.c file
 *
 */
#include <asm/apic.h>
#include <asm/nmi.h>

#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/delay.h>

#ifdef CONFIG_HARDLOCKUP_DETECTOR
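/*
 * Convert the hardlockup watchdog threshold (in seconds) into a perf
 * sample period in CPU cycles: cpu_khz * 1000 cycles per second times
 * the threshold, so the watchdog's perf NMI fires roughly once per
 * watchdog_thresh seconds.
 */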
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
        return (u64)(cpu_khz) * 1000 * watchdog_thresh;
}
#endif

#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

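/*
 * Ask every online CPU to dump its stack from NMI context.  Typically
 * reached via trigger_all_cpu_backtrace(), e.g. from the SysRq 'l'
 * handler.  Only one backtrace run is allowed at a time; backtrace_flag
 * keeps concurrent callers from interleaving their output.
 */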
void arch_trigger_all_cpu_backtrace(void)
{
        int i;

        if (test_and_set_bit(0, &backtrace_flag))
                /*
                 * If there is already a trigger_all_cpu_backtrace() in
                 * progress (backtrace_flag is set), don't dump the CPU
                 * backtraces a second time.
                 */
                return;

        cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

        printk(KERN_INFO "sending NMI to all CPUs:\n");
        apic->send_IPI_all(NMI_VECTOR);

        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
        }

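        /*
         * Allow the next caller in.  clear_bit() is not a full memory
         * barrier, so follow it with smp_mb__after_clear_bit().
         */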
        clear_bit(0, &backtrace_flag);
        smp_mb__after_clear_bit();
}

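/*
 * NMI handler, registered on the NMI_LOCAL chain below.  Runs on every
 * CPU that receives the backtrace NMI: if this CPU is still marked in
 * backtrace_mask, it dumps its registers via show_regs() under a raw
 * arch spinlock (so output from different CPUs does not interleave),
 * clears its bit and claims the NMI with NMI_HANDLED.  Otherwise it
 * returns NMI_DONE so other NMI handlers can examine the event.
 */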
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
        int cpu;

        cpu = smp_processor_id();

        if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

                arch_spin_lock(&lock);
                printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
                show_regs(regs);
                arch_spin_unlock(&lock);
                cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
                return NMI_HANDLED;
        }

        return NMI_DONE;
}

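/* Register the backtrace handler on the NMI_LOCAL chain early during boot. */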
static int __init register_trigger_all_cpu_backtrace(void)
{
        register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
                                0, "arch_bt");
        return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);
#endif