/*
 * Machine Check Exception (MCE) support for x86: architectural MSR bit
 * layouts (MCG_*, MCi_*), vendor feature interfaces (Intel CMCI/LMCE,
 * AMD Scalable MCA) and the core machine-check APIs.
 */
#ifndef _ASM_X86_MCE_H
#define _ASM_X86_MCE_H

#include <uapi/asm/mce.h>

/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK 0xff /* Number of reporting banks */
#define MCG_CTL_P BIT_ULL(8) /* MCG_CTL register available */
#define MCG_EXT_P BIT_ULL(9) /* Extended registers available */
#define MCG_CMCI_P BIT_ULL(10) /* CMCI supported */
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P BIT_ULL(24) /* MCA recovery/new status bits */
#define MCG_ELOG_P BIT_ULL(26) /* Extended error log supported */
#define MCG_LMCE_P BIT_ULL(27) /* Local machine check supported */

/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV BIT_ULL(0) /* restart ip valid */
#define MCG_STATUS_EIPV BIT_ULL(1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP BIT_ULL(2) /* machine check in progress */
#define MCG_STATUS_LMCES BIT_ULL(3) /* LMCE signaled */

/* MCG_EXT_CTL register defines */
#define MCG_EXT_CTL_LMCE_EN BIT_ULL(0) /* Enable LMCE */


/* MCi_STATUS register defines */
#define MCI_STATUS_VAL BIT_ULL(63) /* valid error */
#define MCI_STATUS_OVER BIT_ULL(62) /* previous errors lost */
#define MCI_STATUS_UC BIT_ULL(61) /* uncorrected error */
#define MCI_STATUS_EN BIT_ULL(60) /* error enabled */
#define MCI_STATUS_MISCV BIT_ULL(59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV BIT_ULL(58) /* addr reg. valid */
#define MCI_STATUS_PCC BIT_ULL(57) /* processor context corrupt */
#define MCI_STATUS_S BIT_ULL(56) /* Signaled machine check */
#define MCI_STATUS_AR BIT_ULL(55) /* Action required */
#define MCI_STATUS_CEC_SHIFT 38 /* Corrected Error Count, bits 52:38 */
#define MCI_STATUS_CEC_MASK GENMASK_ULL(52,38)
#define MCI_STATUS_CEC(c) (((c) & MCI_STATUS_CEC_MASK) >> MCI_STATUS_CEC_SHIFT)

/* AMD-specific status bits */
#define MCI_STATUS_TCC BIT_ULL(55) /* Task context corrupt */
#define MCI_STATUS_SYNDV BIT_ULL(53) /* synd reg. valid */
#define MCI_STATUS_DEFERRED BIT_ULL(44) /* uncorrected error, deferred exception */
#define MCI_STATUS_POISON BIT_ULL(43) /* access poisonous data */


/*
 * AMD Scalable MCA per-bank register fields (MCA_CONFIG / MCA_IPID).
 * NOTE(review): field meanings inferred from names and SMCA usage
 * elsewhere in the kernel — confirm against the AMD PPR.
 */
#define MCI_CONFIG_MCAX 0x1 /* bank implements the MCAX extensions */
#define MCI_IPID_MCATYPE 0xFFFF0000 /* MCA type field of MCA_IPID */
#define MCI_IPID_HWID 0xFFF /* hardware ID field of MCA_IPID */


/*
 * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
 * bits 15:0. But bit 12 is the 'F' bit, defined for corrected
 * errors, so bit 12 is masked out here.
 */
#define MCACOD 0xefff /* MCA Error Code */

/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
#define MCACOD_DATA 0x0134 /* Data Load */
#define MCACOD_INSTR 0x0150 /* Instruction Fetch */

/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) /* LSB of recoverable address */
#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7) /* address mode, values below */
#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
#define MCI_MISC_ADDR_PHYS 2 /* physical address */
#define MCI_MISC_ADDR_MEM 3 /* memory address */
#define MCI_MISC_ADDR_GENERIC 7 /* generic */


/* MCi_CTL2 register defines */
#define MCI_CTL2_CMCI_EN BIT_ULL(30) /* enable CMCI for this bank */
#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL /* corrected-error count threshold */

/* Software error-injection (mce-inject) flags, stored in mce.inject_flags */
#define MCJ_CTX_MASK 3
#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
#define MCJ_CTX_RANDOM 0 /* inject context: random */
#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
#define MCJ_EXCEPTION 0x8 /* raise as exception */
#define MCJ_IRQ_BROADCAST 0x10 /* do IRQ broadcasting */

#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */

#define MCE_LOG_LEN 32 /* number of entries in the MCE log buffer */
#define MCE_LOG_SIGNATURE "MACHINECHECK"


/*
 * AMD Scalable MCA per-bank MSRs. Banks are spaced 0x10 MSRs apart,
 * so each MCx accessor adds 0x10 * bank to the MC0 base address.
 */
#define MSR_AMD64_SMCA_MC0_CTL 0xc0002000
#define MSR_AMD64_SMCA_MC0_STATUS 0xc0002001
#define MSR_AMD64_SMCA_MC0_ADDR 0xc0002002
#define MSR_AMD64_SMCA_MC0_MISC0 0xc0002003
#define MSR_AMD64_SMCA_MC0_CONFIG 0xc0002004
#define MSR_AMD64_SMCA_MC0_IPID 0xc0002005
#define MSR_AMD64_SMCA_MC0_SYND 0xc0002006
#define MSR_AMD64_SMCA_MC0_DESTAT 0xc0002008
#define MSR_AMD64_SMCA_MC0_DEADDR 0xc0002009
#define MSR_AMD64_SMCA_MC0_MISC1 0xc000200a
#define MSR_AMD64_SMCA_MCx_CTL(x) (MSR_AMD64_SMCA_MC0_CTL + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_STATUS(x) (MSR_AMD64_SMCA_MC0_STATUS + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_ADDR(x) (MSR_AMD64_SMCA_MC0_ADDR + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_MISC(x) (MSR_AMD64_SMCA_MC0_MISC0 + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_CONFIG(x) (MSR_AMD64_SMCA_MC0_CONFIG + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_IPID(x) (MSR_AMD64_SMCA_MC0_IPID + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_SYND(x) (MSR_AMD64_SMCA_MC0_SYND + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_DESTAT(x) (MSR_AMD64_SMCA_MC0_DESTAT + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_DEADDR(x) (MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x))
/* (y) is parenthesized so compound expressions expand with correct precedence */
#define MSR_AMD64_SMCA_MCx_MISCy(x, y) ((MSR_AMD64_SMCA_MC0_MISC1 + (y)) + (0x10*(x)))

/*
 * In-kernel MCE log buffer. Carries a signature ("MACHINECHECK") to make
 * it easier for external debugging tools to locate in a memory dump.
 */
struct mce_log {
	char signature[12]; /* MCE_LOG_SIGNATURE */
	unsigned len; /* = MCE_LOG_LEN */
	unsigned next; /* next free entry slot */
	unsigned flags; /* MCE_OVERFLOW when records were dropped */
	unsigned recordlen; /* length of struct mce */
	struct mce entry[MCE_LOG_LEN];
};

/*
 * MCA subsystem configuration (see the single instance mca_cfg below).
 * NOTE(review): per-field meanings are inferred from the field names and
 * common kernel usage — confirm against the code that sets them.
 */
struct mca_config {
	bool dont_log_ce; /* don't log corrected errors */
	bool cmci_disabled; /* CMCI signaling disabled */
	bool lmce_disabled; /* local MCE disabled */
	bool ignore_ce; /* ignore corrected errors entirely */
	bool disabled; /* MCA support disabled */
	bool ser; /* software error recovery supported (MCG_SER_P) */
	bool recovery; /* force-enable recovery code */
	bool bios_cmci_threshold; /* keep the BIOS-set CMCI threshold */
	u8 banks; /* number of MCA banks */
	s8 bootlog; /* log boot-time errors */
	int tolerant; /* severity tolerance level */
	int monarch_timeout; /* how long to wait for other CPUs on MCE */
	int panic_timeout; /* delay before panic reboot */
	u32 rip_msr; /* MSR holding the faulting IP, if any */
};

struct mce_vendor_flags {
	/*
	 * Indicates that overflow conditions are not fatal, when set.
	 */
	__u64 overflow_recov : 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and
	 * deferred error interrupts.
	 */
	succor : 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA which
	 * expands the register space for each MCA bank and also increases
	 * the number of banks. To accommodate the new banks and registers,
	 * the MCA register space is moved to a new MSR range.
	 */
	smca : 1,

	__reserved_0 : 61;
};

/*
 * Per-register lookup of the MSR number for a given bank. Indirection
 * allows alternate MSR layouts (presumably SMCA's relocated range —
 * NOTE(review): confirm against the code installing msr_ops).
 */
struct mca_msr_regs {
	u32 (*ctl) (int bank);
	u32 (*status) (int bank);
	u32 (*addr) (int bank);
	u32 (*misc) (int bank);
};

extern struct mce_vendor_flags mce_flags;

extern struct mca_config mca_cfg;
extern struct mca_msr_regs msr_ops;

/* Decode-chain notifier priorities: higher value runs earlier */
enum mce_notifier_prios {
	MCE_PRIO_SRAO = INT_MAX,
	MCE_PRIO_EXTLOG = INT_MAX - 1,
	MCE_PRIO_NFIT = INT_MAX - 2,
	MCE_PRIO_EDAC = INT_MAX - 3,
	MCE_PRIO_LOWEST = 0,
};

struct notifier_block;
extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);

#include <linux/percpu.h>
#include <linux/atomic.h>

/* Set when P5-era machine checks are enabled (see enable_p5_mce()) */
extern int mce_p5_enabled;

/* Core machine-check setup; compiled to no-op stubs when CONFIG_X86_MCE=n */
#ifdef CONFIG_X86_MCE
int mcheck_init(void);
void mcheck_cpu_init(struct cpuinfo_x86 *c);
void mcheck_cpu_clear(struct cpuinfo_x86 *c);
void mcheck_vendor_init_severity(void);
#else
static inline int mcheck_init(void) { return 0; }
static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
static inline void mcheck_vendor_init_severity(void) {}
#endif

/* Pre-P6 (P5/WinChip) machine-check support */
#ifdef CONFIG_X86_ANCIENT_MCE
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
#else
static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void enable_p5_mce(void) {}
#endif

/* Fill in basic CPU/timestamp fields of a struct mce */
void mce_setup(struct mce *m);
/* Add a record to the MCE log */
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct device *, mce_device);

/* Maximum number of MCA banks per CPU */
#define MAX_NR_BANKS 32

/* Intel-specific MCE features (CMCI, LMCE); stubs when not configured */
#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
void mce_intel_feature_clear(struct cpuinfo_x86 *c);
void cmci_clear(void);
void cmci_reenable(void);
void cmci_rediscover(void);
void cmci_recheck(void);
void lmce_clear(void);
void lmce_enable(void);
#else
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { }
static inline void cmci_clear(void) {}
static inline void cmci_reenable(void) {}
static inline void cmci_rediscover(void) {}
static inline void cmci_recheck(void) {}
static inline void lmce_clear(void) {}
static inline void lmce_enable(void) {}
#endif

266#ifdef CONFIG_X86_MCE_AMD
267void mce_amd_feature_init(struct cpuinfo_x86 *c);
268int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
269#else
270static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
271static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
272#endif
273
int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
bool mce_is_correctable(struct mce *m);
int mce_usable_address(struct mce *m);

DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);

/* Bitmap of MCA banks polled (rather than signaled) on this CPU */
typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);

enum mcp_flags {
	MCP_TIMESTAMP = BIT(0), /* log time stamp */
	MCP_UC = BIT(1), /* log uncorrected errors */
	MCP_DONTLOG = BIT(2), /* only clear, don't log */
};
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);

int mce_notify_irq(void);

DECLARE_PER_CPU(struct mce, injectm);

/* Install the /dev/mcelog write handler used for error injection */
extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
				    const char __user *ubuf,
				    size_t usize, loff_t *off));


/* Disable MCA bank handling (e.g. for banks claimed by firmware) */
extern void mce_disable_bank(int bank);

/*
 * Exception handler
 */

/* Raw machine check entry point; overridden per CPU family */
extern void (*machine_check_vector)(struct pt_regs *, long error_code);
void do_machine_check(struct pt_regs *, long);

/*
 * Threshold handler
 */
extern void (*mce_threshold_vector)(void);
extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

/* Deferred error interrupt handler */
extern void (*deferred_error_int_vector)(void);


/*
 * Thermal handler
 */
void intel_init_thermal(struct cpuinfo_x86 *c);

/* Platform hook for core thermal threshold interrupts */
extern int (*platform_thermal_notify)(__u64 msr_val);

/* Platform hook for package thermal threshold interrupts */
extern int (*platform_thermal_package_notify)(__u64 msr_val);

/* Returns true if the platform hook performs its own rate control */
extern bool (*platform_thermal_package_rate_control)(void);

#ifdef CONFIG_X86_THERMAL_VECTOR
extern void mcheck_intel_therm_init(void);
#else
static inline void mcheck_intel_therm_init(void) { }
#endif


/*
 * Used by APEI/GHES to report a firmware-decoded memory error
 * (CPER record) through the MCE log.
 */
struct cper_sec_mem_err;
extern void apei_mce_report_mem_error(int corrected,
				      struct cper_sec_mem_err *mem_err);

#ifdef CONFIG_X86_MCE_AMD

/*
 * Enumerate new IP types and HWID values in AMD processors which support
 * Scalable MCA.
 */
enum smca_bank_types {
	SMCA_LS = 0, /* Load Store */
	SMCA_IF, /* Instruction Fetch */
	SMCA_L2_CACHE, /* L2 Cache */
	SMCA_DE, /* Decoder Unit */
	SMCA_RESERVED, /* Reserved */
	SMCA_EX, /* Execution Unit */
	SMCA_FP, /* Floating Point */
	SMCA_L3_CACHE, /* L3 Cache */
	SMCA_CS, /* Coherent Slave */
	SMCA_CS_V2,
	SMCA_PIE, /* Power, Interrupts, etc. */
	SMCA_UMC, /* Unified Memory Controller */
	SMCA_PB, /* Parameter Block */
	SMCA_PSP, /* Platform Security Processor */
	SMCA_PSP_V2,
	SMCA_SMU, /* System Management Unit */
	SMCA_SMU_V2,
	SMCA_MP5, /* Microprocessor 5 Unit */
	SMCA_NBIO, /* Northbridge IO Unit */
	SMCA_PCIE, /* PCI Express Unit */
	N_SMCA_BANK_TYPES
};

/* Pack a (hardware ID, MCA type) pair into the MCA_IPID comparison format */
#define HWID_MCATYPE(hwid, mcatype) (((hwid) << 16) | (mcatype))

struct smca_hwid {
	unsigned int bank_type; /* Use with smca_bank_types for easy indexing */
	u32 hwid_mcatype; /* (hwid,mcatype) tuple */
	u32 xec_bitmap; /* Bitmap of valid extended error codes */
	u8 count; /* Number of instances of this bank type */
};

struct smca_bank {
	struct smca_hwid *hwid;
	u32 id; /* Value of MCA_IPID[InstanceId] */
	u8 sysfs_id; /* Value used for the sysfs name */
};

extern struct smca_bank smca_banks[MAX_NR_BANKS];

extern const char *smca_get_long_name(enum smca_bank_types t);
#endif

#endif /* _ASM_X86_MCE_H */