1#ifndef _ASM_X86_MCE_H
2#define _ASM_X86_MCE_H
3
4#include <uapi/asm/mce.h>
5
6
7
8
9
10
/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK 0xff /* Number of reporting banks */
#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
#define MCG_ELOG_P (1ULL<<26) /* Extended error log supported */
#define MCG_LMCE_P (1ULL<<27) /* Local machine check supported */

/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
#define MCG_STATUS_LMCES (1ULL<<3) /* LMCE signaled */

/* MCG_EXT_CTL register defines */
#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Enable LMCE */
30
31
/* MCi_STATUS register defines */
#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */

/* AMD-specific bits */
#define MCI_STATUS_TCC (1ULL<<55) /* Task context corrupt */
#define MCI_STATUS_SYNDV (1ULL<<53) /* synd reg. valid */
#define MCI_STATUS_DEFERRED (1ULL<<44) /* uncorrected error, deferred exception */
#define MCI_STATUS_POISON (1ULL<<43) /* access poisonous data */

/*
 * AMD Scalable MCA (SMCA) per-bank register fields.
 * MCI_CONFIG_MCAX: McaX field of MCi_CONFIG — the bank uses the extended
 * (SMCA) register layout when set.
 * MCI_IPID_*: fields of the MCi_IPID register identifying the reporting
 * hardware block (HWID) and error type (McaType).
 */
#define MCI_CONFIG_MCAX 0x1
#define MCI_IPID_MCATYPE 0xFFFF0000
#define MCI_IPID_HWID 0xFFF
58
59
60
61
62
63
64
65
66
/* MCA error code mask: architecturally defined codes (SDM Vol. 3B ch. 15) */
#define MCACOD 0xefff

/* Specific MCACOD values of interest */
#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
#define MCACOD_DATA 0x0134 /* Data Load */
#define MCACOD_INSTR 0x0150 /* Instruction Fetch */

/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) /* recoverable address lsb */
#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) /* address mode */
#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
#define MCI_MISC_ADDR_PHYS 2 /* physical address */
#define MCI_MISC_ADDR_MEM 3 /* memory address */
#define MCI_MISC_ADDR_GENERIC 7 /* generic */

/* CTL2 register defines */
#define MCI_CTL2_CMCI_EN (1ULL << 30)
#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL

/* Flags for the mce-inject debug facility (struct mce::inject_flags) */
#define MCJ_CTX_MASK 3
#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
#define MCJ_CTX_RANDOM 0 /* inject context: random */
#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
#define MCJ_EXCEPTION 0x8 /* raise as exception */
#define MCJ_IRQ_BROADCAST 0x10 /* do IRQ broadcasting */

#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */

/* Software in-kernel MCE log (see struct mce_log_buffer below) */
#define MCE_LOG_LEN 32
#define MCE_LOG_SIGNATURE "MACHINECHECK"
102
103
/* 'SMCA': AMD64 Scalable MCA — per-bank MSR address space (bank 0 base). */
#define MSR_AMD64_SMCA_MC0_CTL 0xc0002000
#define MSR_AMD64_SMCA_MC0_STATUS 0xc0002001
#define MSR_AMD64_SMCA_MC0_ADDR 0xc0002002
#define MSR_AMD64_SMCA_MC0_MISC0 0xc0002003
#define MSR_AMD64_SMCA_MC0_CONFIG 0xc0002004
#define MSR_AMD64_SMCA_MC0_IPID 0xc0002005
#define MSR_AMD64_SMCA_MC0_SYND 0xc0002006
#define MSR_AMD64_SMCA_MC0_DESTAT 0xc0002008
#define MSR_AMD64_SMCA_MC0_DEADDR 0xc0002009
#define MSR_AMD64_SMCA_MC0_MISC1 0xc000200a
/* Each SMCA bank x occupies a 0x10-MSR window above the MC0 base. */
#define MSR_AMD64_SMCA_MCx_CTL(x) (MSR_AMD64_SMCA_MC0_CTL + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_STATUS(x) (MSR_AMD64_SMCA_MC0_STATUS + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_ADDR(x) (MSR_AMD64_SMCA_MC0_ADDR + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_MISC(x) (MSR_AMD64_SMCA_MC0_MISC0 + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_CONFIG(x) (MSR_AMD64_SMCA_MC0_CONFIG + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_IPID(x) (MSR_AMD64_SMCA_MC0_IPID + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_SYND(x) (MSR_AMD64_SMCA_MC0_SYND + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_DESTAT(x) (MSR_AMD64_SMCA_MC0_DESTAT + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_DEADDR(x) (MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x))
/*
 * Extra MISC register y (1-based) of bank x. 'y' is parenthesized so that
 * expression arguments (e.g. a shift) bind correctly inside the addition.
 */
#define MSR_AMD64_SMCA_MCx_MISCy(x, y) ((MSR_AMD64_SMCA_MC0_MISC1 + (y)) + (0x10*(x)))
124
125
126
127
128
129
130
/*
 * In-kernel ring buffer of MCE records read via /dev/mcelog.
 * NOTE(review): layout appears to be shared with userspace mcelog
 * (fixed signature/recordlen fields) — confirm before reordering fields.
 */
struct mce_log_buffer {
 char signature[12]; /* "MACHINECHECK" (MCE_LOG_SIGNATURE) */
 unsigned len; /* = MCE_LOG_LEN */
 unsigned next; /* next free slot in entry[] */
 unsigned flags; /* bit 0 (MCE_OVERFLOW): buffer overflowed */
 unsigned recordlen; /* length of struct mce */
 struct mce entry[MCE_LOG_LEN];
};
139
/*
 * Global MCA configuration, mostly set from "mce=" boot parameters and
 * sysfs — presumably mirrors the mce= kernel-parameter options; confirm
 * against the parser in mce.c.
 */
struct mca_config {
 bool dont_log_ce; /* do not log corrected errors */
 bool cmci_disabled; /* CMCI (corrected-error interrupt) off */
 bool lmce_disabled; /* local MCE delivery off */
 bool ignore_ce; /* ignore corrected errors entirely */
 bool disabled; /* machine check support disabled */
 bool ser; /* software error recovery (MCG_SER_P) */
 bool recovery; /* force-enable recovery code */
 bool bios_cmci_threshold; /* keep BIOS-set CMCI threshold */
 u8 banks; /* number of MCA banks on this CPU */
 s8 bootlog; /* log boot-time (left-over) errors: -1 = default */
 int tolerant; /* tolerance level for MCE severity handling */
 int monarch_timeout; /* usecs to wait for other CPUs on broadcast MCE */
 int panic_timeout; /* seconds before rebooting after an MCE panic */
 u32 rip_msr; /* MSR holding the faulting IP, if any */
};
156
/* Vendor-specific MCA capability bits, filled in during CPU init. */
struct mce_vendor_flags {
 /*
  * Indicates that overflow conditions are not fatal, when set.
  */
 __u64 overflow_recov : 1,

 /*
  * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
  * Recovery. It indicates support for data poisoning in HW and deferred
  * error interrupts.
  */
 succor : 1,

 /*
  * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
  * the register space for each MCA bank and also increases number of
  * banks. Also, to accommodate the new banks and registers, the MCA
  * register space is moved to a new MSR range.
  */
 smca : 1,

 __reserved_0 : 61;
};
180
/*
 * Indirection for per-vendor MSR addressing: each hook maps a bank number
 * to the corresponding MSR address (legacy MCi_* vs. AMD SMCA layout).
 */
struct mca_msr_regs {
 u32 (*ctl) (int bank);
 u32 (*status) (int bank);
 u32 (*addr) (int bank);
 u32 (*misc) (int bank);
};
187
extern struct mce_vendor_flags mce_flags;

extern struct mca_config mca_cfg;
extern struct mca_msr_regs msr_ops;

/*
 * Priorities for the MCE decode notifier chain; higher-priority
 * consumers see each record first.
 */
enum mce_notifier_prios {
 MCE_PRIO_FIRST = INT_MAX,
 MCE_PRIO_SRAO = INT_MAX - 1,
 MCE_PRIO_EXTLOG = INT_MAX - 2,
 MCE_PRIO_NFIT = INT_MAX - 3,
 MCE_PRIO_EDAC = INT_MAX - 4,
 MCE_PRIO_MCELOG = 1,
 MCE_PRIO_LOWEST = 0,
};

/* Register/unregister a consumer on the MCE decode notifier chain. */
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);

#include <linux/percpu.h>
#include <linux/atomic.h>

extern int mce_p5_enabled;
210
/* Core MCE init entry points; no-op stubs when CONFIG_X86_MCE is off. */
#ifdef CONFIG_X86_MCE
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
void mcheck_cpu_clear(struct cpuinfo_x86 *c);
void mcheck_vendor_init_severity(void);
#else
static inline int mcheck_init(void) { return 0; }
static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
static inline void mcheck_vendor_init_severity(void) {}
#endif
222
/* Legacy (P5/WinChip) machine check support. */
#ifdef CONFIG_X86_ANCIENT_MCE
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
#else
static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void enable_p5_mce(void) {}
#endif
232
/* Fill in the common fields of an MCE record. */
void mce_setup(struct mce *m);
/* Log an MCE record and feed it to the decode notifier chain. */
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct device *, mce_device);

/* Maximum number of MCA banks per CPU. */
#define MAX_NR_BANKS 32
243
/* Intel-specific MCE features (CMCI etc.); stubs when support is off. */
#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
void mce_intel_feature_clear(struct cpuinfo_x86 *c);
void cmci_clear(void);
void cmci_reenable(void);
void cmci_rediscover(void);
void cmci_recheck(void);
#else
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { }
static inline void cmci_clear(void) {}
static inline void cmci_reenable(void) {}
static inline void cmci_rediscover(void) {}
static inline void cmci_recheck(void) {}
#endif
259
260#ifdef CONFIG_X86_MCE_AMD
261void mce_amd_feature_init(struct cpuinfo_x86 *c);
262int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
263#else
264static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
265static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
266#endif
267
/* Is machine check support usable on this CPU? */
int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);

DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);

/* Bitmap of banks to poll, one bit per bank. */
typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);

enum mcp_flags {
 MCP_TIMESTAMP = BIT(0), /* log time stamp */
 MCP_UC = BIT(1), /* log uncorrected errors */
 MCP_DONTLOG = BIT(2), /* only clear, don't log */
};
/* Poll the given banks for (mostly corrected) errors; returns whether any were found. */
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);

int mce_notify_irq(void);

/* Per-CPU injected-error record used by the mce-inject facility. */
DECLARE_PER_CPU(struct mce, injectm);

/* Disable reporting for one MCA bank on this CPU. */
extern void mce_disable_bank(int bank);
290
291
292
293
294
295
/*
 * The installed machine check handler for this CPU setup
 * (P5/WinChip/modern); invoked from the #MC exception entry.
 */
extern void (*machine_check_vector)(struct pt_regs *, long error_code);
void do_machine_check(struct pt_regs *, long);

/* AMD threshold interrupt handler hook. */
extern void (*mce_threshold_vector)(void);

/* AMD deferred-error interrupt handler hook. */
extern void (*deferred_error_int_vector)(void);

/* Thermal monitoring setup (Intel); shared by MCE and thermal code. */
void intel_init_thermal(struct cpuinfo_x86 *c);

/* Interrupt handler hook for core thermal thresholds. */
extern int (*platform_thermal_notify)(__u64 msr_val);

/* Interrupt handler hook for package thermal thresholds. */
extern int (*platform_thermal_package_notify)(__u64 msr_val);

/*
 * Rate-control callback: returning true means the platform driver
 * controls notification rate, so the default throttling is skipped.
 */
extern bool (*platform_thermal_package_rate_control)(void);

#ifdef CONFIG_X86_THERMAL_VECTOR
extern void mcheck_intel_therm_init(void);
#else
static inline void mcheck_intel_therm_init(void) { }
#endif
328
329
330
331
332
/*
 * Report a memory error described by an APEI/CPER record through the MCE
 * machinery; 'corrected' carries the severity (nonzero = corrected error)
 * — NOTE(review): semantics inferred from the name, confirm in apei.c.
 */
struct cper_sec_mem_err;
extern void apei_mce_report_mem_error(int corrected,
 struct cper_sec_mem_err *mem_err);
336
337
338
339
340
#ifdef CONFIG_X86_MCE_AMD

/* Scalable MCA bank types; these may be shared by multiple SMCA hardware IDs. */
enum smca_bank_types {
 SMCA_LS = 0, /* Load Store */
 SMCA_IF, /* Instruction Fetch */
 SMCA_L2_CACHE, /* L2 Cache */
 SMCA_DE, /* Decoder Unit */
 SMCA_EX, /* Execution Unit */
 SMCA_FP, /* Floating Point */
 SMCA_L3_CACHE, /* L3 Cache */
 SMCA_CS, /* Coherent Slave */
 SMCA_PIE, /* Power, Interrupts, etc. */
 SMCA_UMC, /* Unified Memory Controller */
 SMCA_PB, /* Parameter Block */
 SMCA_PSP, /* Platform Security Processor */
 SMCA_SMU, /* System Management Unit */
 N_SMCA_BANK_TYPES
};

/* Pack a (hardware ID, MCA type) pair in the MCi_IPID field layout. */
#define HWID_MCATYPE(hwid, mcatype) (((hwid) << 16) | (mcatype))

struct smca_hwid {
 unsigned int bank_type; /* Use with smca_bank_types for easy indexing */
 u32 hwid_mcatype; /* (hwid, mcatype) tuple */
 u32 xec_bitmap; /* Bitmap of valid extended error codes */
 u8 count; /* Number of instances of this HWID */
};

struct smca_bank {
 struct smca_hwid *hwid;
 u32 id; /* value of MCA_IPID[InstanceId] */
 u8 sysfs_id; /* value used for sysfs naming */
};

extern struct smca_bank smca_banks[MAX_NR_BANKS];

extern const char *smca_get_long_name(enum smca_bank_types t);

extern int mce_threshold_create_device(unsigned int cpu);
extern int mce_threshold_remove_device(unsigned int cpu);

#else

/*
 * Threshold sysfs devices do not exist without CONFIG_X86_MCE_AMD.
 * Stray ';' after each stub body removed: a file-scope empty declaration
 * is an ISO C constraint violation (rejected under -Wpedantic).
 */
static inline int mce_threshold_create_device(unsigned int cpu) { return 0; }
static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; }

#endif
389
390#endif
391