1
2#ifndef _ASM_X86_MCE_H
3#define _ASM_X86_MCE_H
4
5#include <uapi/asm/mce.h>
6
7
8
9
10
11
/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK 0xff         /* Number of Banks */
#define MCG_CTL_P (1ULL<<8)           /* MCG_CTL register available */
#define MCG_EXT_P (1ULL<<9)           /* Extended registers available */
#define MCG_CMCI_P (1ULL<<10)         /* CMCI supported */
#define MCG_EXT_CNT_MASK 0xff0000     /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P (1ULL<<24)          /* MCA recovery/new status bits */
#define MCG_ELOG_P (1ULL<<26)         /* Extended error log supported */
#define MCG_LMCE_P (1ULL<<27)         /* Local machine check supported */
22
23
/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV (1ULL<<0)     /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1)     /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2)     /* machine check in progress */
#define MCG_STATUS_LMCES (1ULL<<3)    /* LMCE signaled */

/* MCG_EXT_CTL register defines */
#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Enable LMCE */
32
/* MCi_STATUS register defines */
#define MCI_STATUS_VAL (1ULL<<63)     /* valid error */
#define MCI_STATUS_OVER (1ULL<<62)    /* previous errors lost */
#define MCI_STATUS_UC (1ULL<<61)      /* uncorrected error */
#define MCI_STATUS_EN (1ULL<<60)      /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59)   /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58)   /* addr reg. valid */
#define MCI_STATUS_PCC (1ULL<<57)     /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56)       /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55)      /* Action required */

/* AMD-specific status bits */
#define MCI_STATUS_TCC (1ULL<<55)     /* Task context corrupt */
#define MCI_STATUS_SYNDV (1ULL<<53)   /* synd reg. valid */
#define MCI_STATUS_DEFERRED (1ULL<<44) /* uncorrected error, deferred exception */
#define MCI_STATUS_POISON (1ULL<<43)  /* access poisonous data */
48
49
50
51
52
53
54
55
/* AMD SMCA per-bank CONFIG/IPID register fields */
#define MCI_CONFIG_MCAX 0x1           /* MCA extension (MCAX) enabled in MCA_CONFIG */
#define MCI_IPID_MCATYPE 0xFFFF0000   /* McaType field of MCA_IPID */
#define MCI_IPID_HWID 0xFFF           /* HardwareID field of MCA_IPID */
59
60
61
62
63
64
65
66
67
/* MCA error code field of MCi_STATUS (bit 12 is reserved/filtered out) */
#define MCACOD 0xefff

/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB 0x00C0           /* 0xC0-0xCF Memory Scrubbing */
#define MCACOD_SCRUBMSK 0xeff0        /* Skip bit 12 ('F' bit) */
#define MCACOD_L3WB 0x017A            /* L3 Explicit Writeback */
#define MCACOD_DATA 0x0134            /* Data Load */
#define MCACOD_INSTR 0x0150           /* Instruction Fetch */

/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)        /* recoverable address LSB */
#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)   /* address mode field */
#define MCI_MISC_ADDR_SEGOFF 0        /* segment offset */
#define MCI_MISC_ADDR_LINEAR 1        /* linear address */
#define MCI_MISC_ADDR_PHYS 2          /* physical address */
#define MCI_MISC_ADDR_MEM 3           /* memory address */
#define MCI_MISC_ADDR_GENERIC 7       /* generic */
85
86
/* MCi_CTL2 register defines (CMCI enable and threshold) */
#define MCI_CTL2_CMCI_EN (1ULL << 30)
#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
89
/* Software error-injection (mce-inject) flags, stored in mce.inject_flags */
#define MCJ_CTX_MASK 3
#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
#define MCJ_CTX_RANDOM 0              /* inject context: random */
#define MCJ_CTX_PROCESS 0x1           /* inject only banks in process context */
#define MCJ_CTX_IRQ 0x2               /* inject only banks in IRQ context */
#define MCJ_NMI_BROADCAST 0x4         /* do NMI broadcasting */
#define MCJ_EXCEPTION 0x8             /* raise as exception */
#define MCJ_IRQ_BROADCAST 0x10        /* do IRQ broadcasting */

#define MCE_OVERFLOW 0                /* bit 0 in flags means overflow */

/* Legacy /dev/mcelog ring buffer parameters */
#define MCE_LOG_LEN 32
#define MCE_LOG_SIGNATURE "MACHINECHECK"
103
104
/*
 * AMD Scalable MCA (SMCA) per-bank MSRs.
 * Bank 0 registers live at 0xc0002000; each successive bank's register
 * block is 0x10 MSRs higher.
 */
#define MSR_AMD64_SMCA_MC0_CTL 0xc0002000
#define MSR_AMD64_SMCA_MC0_STATUS 0xc0002001
#define MSR_AMD64_SMCA_MC0_ADDR 0xc0002002
#define MSR_AMD64_SMCA_MC0_MISC0 0xc0002003
#define MSR_AMD64_SMCA_MC0_CONFIG 0xc0002004
#define MSR_AMD64_SMCA_MC0_IPID 0xc0002005
#define MSR_AMD64_SMCA_MC0_SYND 0xc0002006
#define MSR_AMD64_SMCA_MC0_DESTAT 0xc0002008
#define MSR_AMD64_SMCA_MC0_DEADDR 0xc0002009
#define MSR_AMD64_SMCA_MC0_MISC1 0xc000200a
/* Accessors for bank x; the bank's block is offset by 0x10*(x). */
#define MSR_AMD64_SMCA_MCx_CTL(x) (MSR_AMD64_SMCA_MC0_CTL + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_STATUS(x) (MSR_AMD64_SMCA_MC0_STATUS + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_ADDR(x) (MSR_AMD64_SMCA_MC0_ADDR + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_MISC(x) (MSR_AMD64_SMCA_MC0_MISC0 + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_CONFIG(x) (MSR_AMD64_SMCA_MC0_CONFIG + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_IPID(x) (MSR_AMD64_SMCA_MC0_IPID + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_SYND(x) (MSR_AMD64_SMCA_MC0_SYND + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_DESTAT(x) (MSR_AMD64_SMCA_MC0_DESTAT + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_DEADDR(x) (MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x))
/*
 * Parenthesize (y): a compound argument such as "1 << 1" would otherwise
 * bind to the surrounding '+' with the wrong precedence.
 */
#define MSR_AMD64_SMCA_MCx_MISCy(x, y) ((MSR_AMD64_SMCA_MC0_MISC1 + (y)) + (0x10*(x)))
125
126
127
128
129
130
131
/*
 * Legacy in-kernel MCE log ring buffer (read by the /dev/mcelog interface).
 */
struct mce_log_buffer {
	char signature[12];           /* MCE_LOG_SIGNATURE ("MACHINECHECK") */
	unsigned len;                 /* number of entries (= MCE_LOG_LEN) */
	unsigned next;                /* index of next free slot */
	unsigned flags;               /* e.g. bit MCE_OVERFLOW when records were lost */
	unsigned recordlen;           /* length of one struct mce record */
	struct mce entry[MCE_LOG_LEN];
};
140
/*
 * Global MCA configuration, populated from boot parameters and CPUID/MSR
 * feature detection.
 */
struct mca_config {
	bool dont_log_ce;             /* don't log corrected errors */
	bool cmci_disabled;           /* CMCI disabled (boot option) */
	bool lmce_disabled;           /* local MCE disabled (boot option) */
	bool ignore_ce;               /* ignore corrected errors entirely */
	bool disabled;                /* machine check support disabled */
	bool ser;                     /* software error recovery (MCG_SER_P set) */
	bool recovery;                /* force-enable recovery features */
	bool bios_cmci_threshold;     /* use BIOS-set CMCI threshold */
	u8 banks;                     /* number of MCA banks detected */
	s8 bootlog;                   /* log boot-time errors: -1 = default */
	int tolerant;                 /* error tolerance level */
	int monarch_timeout;          /* usecs to wait for other CPUs on MCE */
	int panic_timeout;            /* seconds before panic reboot */
	u32 rip_msr;                  /* MSR to read the faulting IP from, if any */
};
157
struct mce_vendor_flags {
	/*
	 * Indicates that overflow conditions are not fatal, when set.
	 */
	__u64 overflow_recov : 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and deferred
	 * error interrupts.
	 */
	succor : 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
	 * the register space for each MCA bank and also increases number of
	 * banks. Also, to accommodate the new banks and registers, the MCA
	 * register space is moved to a new MSR range.
	 */
	smca : 1,

	__reserved_0 : 61;
};
181
/*
 * Per-vendor accessors returning the MSR number of a given MCA bank's
 * control/status/address/misc register (legacy vs. SMCA layouts differ).
 */
struct mca_msr_regs {
	u32 (*ctl) (int bank);
	u32 (*status) (int bank);
	u32 (*addr) (int bank);
	u32 (*misc) (int bank);
};
188
189extern struct mce_vendor_flags mce_flags;
190
191extern struct mca_msr_regs msr_ops;
192
/* Decode-chain notifier priorities: higher value runs earlier. */
enum mce_notifier_prios {
	MCE_PRIO_FIRST = INT_MAX,
	MCE_PRIO_SRAO = INT_MAX - 1,
	MCE_PRIO_EXTLOG = INT_MAX - 2,
	MCE_PRIO_NFIT = INT_MAX - 3,
	MCE_PRIO_EDAC = INT_MAX - 4,
	MCE_PRIO_MCELOG = 1,
	MCE_PRIO_LOWEST = 0,
};
202
203extern void mce_register_decode_chain(struct notifier_block *nb);
204extern void mce_unregister_decode_chain(struct notifier_block *nb);
205
206#include <linux/percpu.h>
207#include <linux/atomic.h>
208
209extern int mce_p5_enabled;
210
#ifdef CONFIG_X86_MCE
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
void mcheck_cpu_clear(struct cpuinfo_x86 *c);
void mcheck_vendor_init_severity(void);
#else
/* No-op stubs when machine check support is compiled out. */
static inline int mcheck_init(void) { return 0; }
static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
static inline void mcheck_vendor_init_severity(void) {}
#endif
222
#ifdef CONFIG_X86_ANCIENT_MCE
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
/* Enable the P5 machine check path (sets the flag read by the P5 handler). */
static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
#else
/* No-op stubs when ancient (P5/WinChip) MCE support is compiled out. */
static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void enable_p5_mce(void) {}
#endif
232
233void mce_setup(struct mce *m);
234void mce_log(struct mce *m);
235DECLARE_PER_CPU(struct device *, mce_device);
236
237
238
239
240
241
/* Maximum number of MCA banks supported per CPU. */
#define MAX_NR_BANKS 32
243
#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
void mce_intel_feature_clear(struct cpuinfo_x86 *c);
void cmci_clear(void);
void cmci_reenable(void);
void cmci_rediscover(void);
void cmci_recheck(void);
#else
/* No-op stubs when Intel-specific MCE support is compiled out. */
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { }
static inline void cmci_clear(void) {}
static inline void cmci_reenable(void) {}
static inline void cmci_rediscover(void) {}
static inline void cmci_recheck(void) {}
#endif
259
260#ifdef CONFIG_X86_MCE_AMD
261void mce_amd_feature_init(struct cpuinfo_x86 *c);
262int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
263#else
264static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
265static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
266#endif
267
268int mce_available(struct cpuinfo_x86 *c);
269bool mce_is_memory_error(struct mce *m);
270
271DECLARE_PER_CPU(unsigned, mce_exception_count);
272DECLARE_PER_CPU(unsigned, mce_poll_count);
273
274typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
275DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
276
/* Flags for machine_check_poll() */
enum mcp_flags {
	MCP_TIMESTAMP = BIT(0),       /* log time stamp */
	MCP_UC = BIT(1),              /* log uncorrected errors */
	MCP_DONTLOG = BIT(2),         /* only clear, don't log */
};
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
283
284int mce_notify_irq(void);
285
286DECLARE_PER_CPU(struct mce, injectm);
287
288
289extern void mce_disable_bank(int bank);
290
291
292
293
294
295
296extern void (*machine_check_vector)(struct pt_regs *, long error_code);
297void do_machine_check(struct pt_regs *, long);
298
299
300
301
302extern void (*mce_threshold_vector)(void);
303
304
305extern void (*deferred_error_int_vector)(void);
306
307
308
309
310
311void intel_init_thermal(struct cpuinfo_x86 *c);
312
313
314extern int (*platform_thermal_notify)(__u64 msr_val);
315
316
317extern int (*platform_thermal_package_notify)(__u64 msr_val);
318
319
320
321extern bool (*platform_thermal_package_rate_control)(void);
322
323#ifdef CONFIG_X86_THERMAL_VECTOR
324extern void mcheck_intel_therm_init(void);
325#else
326static inline void mcheck_intel_therm_init(void) { }
327#endif
328
329
330
331
332
333struct cper_sec_mem_err;
334extern void apei_mce_report_mem_error(int corrected,
335 struct cper_sec_mem_err *mem_err);
336
337
338
339
340
341#ifdef CONFIG_X86_MCE_AMD
342
343
/* SMCA bank types; a type may be shared by multiple HWID/McaType pairs. */
enum smca_bank_types {
	SMCA_LS = 0,                  /* Load Store */
	SMCA_IF,                      /* Instruction Fetch */
	SMCA_L2_CACHE,                /* L2 Cache */
	SMCA_DE,                      /* Decoder Unit */
	SMCA_EX,                      /* Execution Unit */
	SMCA_FP,                      /* Floating Point */
	SMCA_L3_CACHE,                /* L3 Cache */
	SMCA_CS,                      /* Coherent Slave */
	SMCA_PIE,                     /* Power, Interrupts, etc. */
	SMCA_UMC,                     /* Unified Memory Controller */
	SMCA_PB,                      /* Parameter Block */
	SMCA_PSP,                     /* Platform Security Processor */
	SMCA_SMU,                     /* System Management Unit */
	N_SMCA_BANK_TYPES
};
360
361#define HWID_MCATYPE(hwid, mcatype) (((hwid) << 16) | (mcatype))
362
/* Describes one SMCA hardware-ID/McaType combination. */
struct smca_hwid {
	unsigned int bank_type;       /* Use with smca_bank_types for easy indexing. */
	u32 hwid_mcatype;             /* (hwid,mcatype) tuple, see HWID_MCATYPE() */
	u32 xec_bitmap;               /* Bitmap of valid ExtErrorCodes for this type. */
	u8 count;                     /* Number of instances. */
};
369
/* Per-bank SMCA state. */
struct smca_bank {
	struct smca_hwid *hwid;       /* hardware ID entry this bank matched */
	u32 id;                       /* Value of MCA_IPID[InstanceId]. */
	u8 sysfs_id;                  /* Value used for sysfs name. */
};
375
376extern struct smca_bank smca_banks[MAX_NR_BANKS];
377
378extern const char *smca_get_long_name(enum smca_bank_types t);
379extern bool amd_mce_is_memory_error(struct mce *m);
380
381extern int mce_threshold_create_device(unsigned int cpu);
382extern int mce_threshold_remove_device(unsigned int cpu);
383
384#else
385
/*
 * No-op stubs when CONFIG_X86_MCE_AMD is not enabled. The stray ';' after
 * each function body was removed: a file-scope empty declaration is invalid
 * in strict ISO C (-Wpedantic).
 */
static inline int mce_threshold_create_device(unsigned int cpu) { return 0; }
static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; }
static inline bool amd_mce_is_memory_error(struct mce *m) { return false; }
389
390#endif
391
392#endif
393