/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#endif

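/*
 * Allow early arch code to turn the hard lockup detector off, for example
 * when the perf-based NMI watchdog is known to be unreliable on the
 * platform; this is a no-op without CONFIG_HARDLOCKUP_DETECTOR.
 */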
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}

/* generic implementation */
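/*
 * nmi_trigger_cpumask_backtrace() is the generic helper behind the arch
 * hook: the architecture supplies a raise() callback that sends an NMI
 * (or a comparable IPI) to every CPU in @mask, and each interrupted CPU
 * calls nmi_cpu_backtrace() from its handler to dump its stack.
 */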
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif

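/*
 * Sysctl knobs and /proc/sys/kernel handlers for the soft and hard lockup
 * detectors in kernel/watchdog.c.  hw_nmi_get_sample_period() converts
 * watchdog_thresh (in seconds) into the perf sample period used by the
 * hard lockup detector.
 */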
#ifdef CONFIG_LOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long *watchdog_cpumask_bits;
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
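/*
 * Temporarily stop and later restart the watchdog, e.g. while hardware
 * state it depends on is being reconfigured.  Calls nest: every
 * successful suspend must be paired with a resume.
 */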
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
	return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif

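/*
 * On architectures where ACPI APEI error sources (e.g. GHES) can notify
 * the kernel via NMI, the arch NMI definitions are needed here as well.
 */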
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif