/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
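
/*
 * Illustrative sketch (not part of this header): the watchdog code tests
 * these bits in 'watchdog_enabled' to decide which detectors to run, e.g.:
 *
 *	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
 *		... run the hard lockup detector ...
 *	if (watchdog_enabled & SOFT_WATCHDOG_ENABLED)
 *		... run the soft lockup detector ...
 */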

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
        touch_softlockup_watchdog();
}
#endif
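
/*
 * Illustrative usage sketch (not from this file): a polling loop that runs
 * for a long time with interrupts disabled should pet the watchdog on each
 * iteration so the hard lockup detector does not fire, e.g.:
 *
 *	while (!device_ready(dev)) {	// device_ready() is hypothetical
 *		touch_nmi_watchdog();
 *		udelay(100);
 *	}
 */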

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
static inline void hardlockup_detector_disable(void) {}
#endif
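
/*
 * Illustrative sketch (an assumption, not from this file): callers that know
 * the hard lockup detector cannot work in their environment - for example,
 * paravirtualized guest setup code - may switch it off early, e.g.:
 *
 *	if (running_as_guest())		// hypothetical predicate
 *		hardlockup_detector_disable();
 */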

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
        arch_trigger_cpumask_backtrace(cpu_online_mask, false);
        return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
        arch_trigger_cpumask_backtrace(cpu_online_mask, true);
        return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
        arch_trigger_cpumask_backtrace(mask, false);
        return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
        arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
        return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
                                   bool exclude_self,
                                   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
        return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
        return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
        return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
        return false;
}
#endif
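
/*
 * Illustrative fallback pattern (a sketch, not from this file): callers use
 * the boolean return value to fall back to some other mechanism when no
 * arch support is available, e.g.:
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();	// at least trace the current CPU
 */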

#ifdef CONFIG_LOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern unsigned long *watchdog_cpumask_bits;
extern atomic_t watchdog_park_in_progress;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
                         void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
                             void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
                              void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
                                void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
                                 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
        return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif
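
/*
 * Illustrative pairing sketch (not from this file): a caller that must keep
 * the lockup detectors quiet across a sensitive operation suspends them and
 * resumes only if the suspend succeeded (a zero return), e.g.:
 *
 *	if (!lockup_detector_suspend()) {
 *		do_sensitive_work();	// hypothetical helper
 *		lockup_detector_resume();
 *	}
 */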

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif