1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65#include <linux/module.h>
66#include <linux/ctype.h>
67#include <linux/init.h>
68#include <linux/pci.h>
69#include <linux/pci_ids.h>
70#include <linux/slab.h>
71#include <linux/mmzone.h>
72#include <linux/edac.h>
73#include <asm/msr.h>
74#include "edac_core.h"
75#include "mce_amd.h"
76
/*
 * Logging wrappers around edac_printk()/edac_mc_chipset_printk() that tag
 * every message with the "amd64" EDAC module name at the given log level.
 */
#define amd64_debug(fmt, arg...) \
	edac_printk(KERN_DEBUG, "amd64", fmt, ##arg)

#define amd64_info(fmt, arg...) \
	edac_printk(KERN_INFO, "amd64", fmt, ##arg)

#define amd64_notice(fmt, arg...) \
	edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)

#define amd64_warn(fmt, arg...) \
	edac_printk(KERN_WARNING, "amd64", fmt, ##arg)

#define amd64_err(fmt, arg...) \
	edac_printk(KERN_ERR, "amd64", fmt, ##arg)

/* Same, but additionally identify the memory controller instance @mci. */
#define amd64_mc_warn(mci, fmt, arg...) \
	edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)

#define amd64_mc_err(mci, fmt, arg...) \
	edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg)
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
#define EDAC_AMD64_VERSION	"3.4.0"		/* driver version string */
#define EDAC_MOD_STR		"amd64_edac"	/* module name used in log messages */

/* K8 silicon revision identifiers, encoded as bit flags so they can be ORed. */
#define K8_REV_D		1
#define K8_REV_E		2
#define K8_REV_F		4

/* Hardware limits; used as array sizes in the structures below. */
#define NUM_CHIPSELECTS		8	/* chip-selects per DCT */
#define DRAM_RANGES		8	/* DRAM base/limit range pairs per node */

/* Readable aliases for boolean on/off arguments. */
#define ON true
#define OFF false
161
162
163
164
165
166
167
/*
 * Fill a contiguous bit field [lo..hi] (inclusive) with ones.
 *
 * Built as the intersection of "all bits <= hi" and "all bits >= lo"
 * rather than (1ULL << width) - 1: the shift form is undefined behavior
 * when the field is the full 64 bits wide (lo == 0, hi == 63), since
 * shifting a 64-bit value by 64 is UB in C.
 */
#define GENMASK(lo, hi)			((~0ULL >> (63 - (hi))) & (~0ULL << (lo)))
169
170
171
172
/*
 * PCI device IDs of the family 15h/16h northbridge functions F1 and F2
 * used by this driver; defined locally as they are not in <linux/pci_ids.h>.
 */
#define PCI_DEVICE_ID_AMD_15H_NB_F1	0x1601
#define PCI_DEVICE_ID_AMD_15H_NB_F2	0x1602
#define PCI_DEVICE_ID_AMD_16H_NB_F1	0x1531
#define PCI_DEVICE_ID_AMD_16H_NB_F2	0x1532
177
178
179
180
/*
 * Function 1 (address map) register offsets: the per-range DRAM
 * Base/Limit register pairs and the DRAM Hole Address Register (DHAR).
 */
#define DRAM_BASE_LO			0x40
#define DRAM_LIMIT_LO			0x44

/* Field extractors operating on the cached base/limit regs of range @i. */
#define dram_intlv_en(pvt, i)		((u8)((pvt->ranges[i].base.lo >> 8) & 0x7))	/* base.lo bits 10:8 */
#define dram_rw(pvt, i)			((u8)(pvt->ranges[i].base.lo & 0x3))		/* base.lo bits 1:0 */
#define dram_intlv_sel(pvt, i)		((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))	/* lim.lo bits 10:8 */
#define dram_dst_node(pvt, i)		((u8)(pvt->ranges[i].lim.lo & 0x7))		/* lim.lo bits 2:0 */

#define DHAR				0xf0
#define dhar_valid(pvt)			((pvt)->dhar & BIT(0))
#define dhar_mem_hoist_valid(pvt)	((pvt)->dhar & BIT(1))
#define dhar_base(pvt)			((pvt)->dhar & 0xff000000)	/* bits 31:24 */
/* K8 keeps the hoist offset in DHAR bits 15:8; F10h and later also use bit 7. */
#define k8_dhar_offset(pvt)		(((pvt)->dhar & 0x0000ff00) << 16)

#define f10_dhar_offset(pvt)		(((pvt)->dhar & 0x0000ff80) << 16)

#define DCT_CFG_SEL			0x10C

#define DRAM_LOCAL_NODE_BASE		0x120
#define DRAM_LOCAL_NODE_LIM		0x124

/* High halves of the per-range DRAM Base/Limit registers. */
#define DRAM_BASE_HI			0x140
#define DRAM_LIMIT_HI			0x144
205
206
207
208
209
/*
 * Function 2 (DRAM controller) register offsets.  The *0 registers belong
 * to DCT0; the *1 variants (offset +0x100) to DCT1.
 */
#define DCSB0				0x40	/* DRAM CS Base */
#define DCSB1				0x140
#define DCSB_CS_ENABLE			BIT(0)

#define DCSM0				0x60	/* DRAM CS Mask */
#define DCSM1				0x160

/* Is chip-select @i of DCT @dct enabled? */
#define csrow_enabled(i, dct, pvt)	((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)

#define DBAM0				0x80	/* DRAM Bank Address Mapping */
#define DBAM1				0x180

/* Extract the 4-bit cs_mode nibble for DIMM @i from a DBAM value. */
#define DBAM_DIMM(i, reg)		((((reg) >> (4*(i)))) & 0xF)

#define DBAM_MAX_VALUE			11	/* largest valid cs_mode value */

#define DCLR0				0x90	/* DRAM Configuration Low */
#define DCLR1				0x190
#define REVE_WIDTH_128			BIT(16)	/* 128-bit width flag, K8 rev E encoding */
#define WIDTH_128			BIT(11)	/* 128-bit width flag, later encoding */

#define DCHR0				0x94	/* DRAM Configuration High */
#define DCHR1				0x194
#define DDR3_MODE			BIT(8)

#define DCT_SEL_LO			0x110	/* DCT Select Low */
#define dct_sel_baseaddr(pvt)		((pvt)->dct_sel_lo & 0xFFFFF800)	/* bits 31:11 */
#define dct_sel_interleave_addr(pvt)	(((pvt)->dct_sel_lo >> 6) & 0x3)	/* bits 7:6 */
#define dct_high_range_enabled(pvt)	((pvt)->dct_sel_lo & BIT(0))
#define dct_interleave_enabled(pvt)	((pvt)->dct_sel_lo & BIT(2))

/* DCT ganging is only evaluated on family 0x10 parts. */
#define dct_ganging_enabled(pvt)	((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))

#define dct_data_intlv_enabled(pvt)	((pvt)->dct_sel_lo & BIT(5))
#define dct_memory_cleared(pvt)		((pvt)->dct_sel_lo & BIT(10))

#define SWAP_INTLV_REG			0x10c

#define DCT_SEL_HI			0x114
250
251
252
253
/*
 * Function 3 (miscellaneous control) register offsets: MCA NB control and
 * configuration, online spare, and the ECC error-injection array window.
 */
#define NBCTL				0x40

#define NBCFG				0x44
#define NBCFG_CHIPKILL			BIT(23)
#define NBCFG_ECC_ENABLE		BIT(22)

/* MCA NB status field values used while decoding errors. */
#define F10_NBSL_EXT_ERR_ECC		0x8
#define NBSL_PP_OBS			0x2

#define SCRCTRL				0x58	/* scrub control — TODO confirm against BKDG */

#define F10_ONLINE_SPARE		0xB0
/* Per-channel @c online-spare status fields. */
#define online_spare_swap_done(pvt, c)	(((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
#define online_spare_bad_dramcs(pvt, c)	(((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)

#define F10_NB_ARRAY_ADDR		0xB8
#define F10_NB_ARRAY_DRAM		BIT(31)

/* Select one of the four 16-byte sections of the injection cacheline. */
#define SET_NB_ARRAY_ADDR(section)	(((section) & 0x3) << 1)

#define F10_NB_ARRAY_DATA		0xBC
#define F10_NB_ARR_ECC_WR_REQ		BIT(17)
/* Compose the NB array data word for an ECC write / read injection. */
#define SET_NB_DRAM_INJECTION_WRITE(inj) \
	(BIT(((inj.word) & 0xF) + 20) | \
	F10_NB_ARR_ECC_WR_REQ | inj.bit_map)
#define SET_NB_DRAM_INJECTION_READ(inj) \
	(BIT(((inj.word) & 0xF) + 20) | \
	BIT(16) | inj.bit_map)

#define NBCAP				0xE8	/* NB capabilities */
#define NBCAP_CHIPKILL			BIT(4)
#define NBCAP_SECDED			BIT(3)
#define NBCAP_DCT_DUAL			BIT(0)

#define EXT_NB_MCA_CFG			0x180

/* NB machine-check enable bit in the per-bank MCG_CTL MSR. */
#define MSR_MCGCTL_NBE			BIT(4)
295
/* Indices into the per-family descriptor table (struct amd64_family_type). */
enum amd_families {
	K8_CPUS = 0,
	F10_CPUS,
	F15_CPUS,
	F16_CPUS,
	NUM_FAMILIES,	/* must stay last: number of supported families */
};
303
304
/*
 * Parameters of a pending ECC error injection; consumed by the
 * SET_NB_ARRAY_ADDR / SET_NB_DRAM_INJECTION_* macros above.
 */
struct error_injection {
	u32 section;	/* 16-byte section of the cacheline to target */
	u32 word;	/* word within the section */
	u32 bit_map;	/* bitmap of data bits to flip */
};
310
311
/* Low/high halves of a hardware register split across two 32-bit regs. */
struct reg_pair {
	u32 lo, hi;
};
315
316
317
318
/* One DRAM address range: cached DRAM Base and DRAM Limit register pairs. */
struct dram_range {
	struct reg_pair base;
	struct reg_pair lim;
};
323
324
/* Cached chip-select base/mask registers of one DCT. */
struct chip_select {
	u32 csbases[NUM_CHIPSELECTS];	/* DCSB register values */
	u8 b_cnt;			/* number of base registers read */

	u32 csmasks[NUM_CHIPSELECTS];	/* DCSM register values */
	u8 m_cnt;			/* number of mask registers read */
};
332
/* Per-node driver private data: cached northbridge register state. */
struct amd64_pvt {
	struct low_ops *ops;	/* family-specific low-level operations */

	/* pci_device handles for NB functions 1, 2 and 3 */
	struct pci_dev *F1, *F2, *F3;

	u16 mc_node_id;		/* MC index of this MC node */
	int ext_model;		/* extended model value of this node */
	int channel_count;

	/* Raw registers */
	u32 dclr0;		/* DRAM Configuration Low, DCT0 */
	u32 dclr1;		/* DRAM Configuration Low, DCT1 */
	u32 dchr0;		/* DRAM Configuration High, DCT0 */
	u32 dchr1;		/* DRAM Configuration High, DCT1 */
	u32 nbcap;		/* NB Capabilities */
	u32 nbcfg;		/* NB Configuration */
	u32 ext_nbcfg;		/* Extended NB Configuration */
	u32 dhar;		/* DRAM Hole Address Register */
	u32 dbam0;		/* DRAM Bank Address Mapping, DCT0 */
	u32 dbam1;		/* DRAM Bank Address Mapping, DCT1 */

	/* chip-select bases/masks, one entry per DCT */
	struct chip_select csels[2];

	/* DRAM base and limit register pairs */
	struct dram_range ranges[DRAM_RANGES];

	u64 top_mem;		/* TOP_MEM: top of DRAM below 4GB — TODO confirm MSR source */
	u64 top_mem2;		/* TOP_MEM2: top of DRAM above 4GB — TODO confirm MSR source */

	u32 dct_sel_lo;		/* DRAM Controller Select Low */
	u32 dct_sel_hi;		/* DRAM Controller Select High */
	u32 online_spare;	/* On-Line Spare register */

	/* ECC symbol size in use (x4 or x8 syndromes) */
	u8 ecc_sym_sz;

	/* error injection parameters staged before being issued */
	struct error_injection injection;
};
374
/* Result codes of mapping an error address to node/csrow/channel. */
enum err_codes {
	DECODE_OK = 0,
	ERR_NODE = -1,		/* address did not map to a node */
	ERR_CSROW = -2,		/* address did not map to a csrow */
	ERR_CHANNEL = -3,	/* channel could not be determined */
};
381
/* Accumulated result of decoding one memory error. */
struct err_info {
	int err_code;			/* one of enum err_codes */
	struct mem_ctl_info *src_mci;	/* MC instance the error belongs to */
	int csrow;
	int channel;
	u16 syndrome;
	u32 page;			/* faulting page number */
	u32 offset;			/* offset within the page */
};
391
392static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
393{
394 u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
395
396 if (boot_cpu_data.x86 == 0xf)
397 return addr;
398
399 return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
400}
401
402static inline u64 get_dram_limit(struct amd64_pvt *pvt, u8 i)
403{
404 u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
405
406 if (boot_cpu_data.x86 == 0xf)
407 return lim;
408
409 return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
410}
411
412static inline u16 extract_syndrome(u64 status)
413{
414 return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
415}
416
417
418
419
/*
 * Saved northbridge ECC state, used to restore the hardware to its
 * original configuration when the driver is unloaded.
 */
struct ecc_settings {
	u32 old_nbctl;		/* NBCTL value before the driver changed it */
	bool nbctl_valid;	/* old_nbctl holds a valid saved value */

	struct flags {
		unsigned long nb_mce_enable:1;	/* NB MCE reporting was enabled by us */
		unsigned long nb_ecc_prev:1;	/* previous NB ECC enable state */
	} flags;
};
429
430#ifdef CONFIG_EDAC_DEBUG
431int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci);
432void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci);
433
434#else
/* CONFIG_EDAC_DEBUG disabled: debug sysfs files compile away; report success. */
static inline int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci)
{
	return 0;
}
/*
 * CONFIG_EDAC_DEBUG disabled: nothing to remove.
 *
 * Fixed keyword order: was "static void inline"; "static inline void" is
 * the standard form (inline between storage class and type) and matches
 * the sibling stubs in this file.
 */
static inline void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci)
{
}
442#endif
443
444#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
445int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci);
446void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci);
447
448#else
/* Error injection compiled out: injection sysfs files are no-ops; report success. */
static inline int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci)
{
	return 0;
}
/* Error injection compiled out: nothing to remove. */
static inline void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci)
{
}
456#endif
457
458
459
460
461
/*
 * Family-specific low-level operations; one instance is embedded in each
 * struct amd64_family_type descriptor.
 */
struct low_ops {
	/* determine the number of DRAM channels in use */
	int (*early_channel_count) (struct amd64_pvt *pvt);
	/* translate a system address to csrow/channel, filling err_info */
	void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
				      struct err_info *);
	/* convert a DBAM cs_mode value for DCT @dct to a chip-select size */
	int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
	/* read a DCT PCI config register; @func names the caller for logging */
	int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset,
				 u32 *val, const char *func);
};
470
/* Static per-family descriptor: controller name, PCI IDs, low-level ops. */
struct amd64_family_type {
	const char *ctl_name;
	u16 f1_id, f3_id;	/* PCI device IDs of NB functions 1 and 3 */
	struct low_ops ops;
};
476
/*
 * PCI config-space accessors.  The __-prefixed functions take the caller's
 * name for error reporting; the macros below supply __func__ automatically.
 */
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func);
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func);

#define amd64_read_pci_cfg(pdev, offset, val)	\
	__amd64_read_pci_cfg_dword(pdev, offset, val, __func__)

#define amd64_write_pci_cfg(pdev, offset, val)	\
	__amd64_write_pci_cfg_dword(pdev, offset, val, __func__)

/* Dispatch a DCT register read to the family-specific implementation. */
#define amd64_read_dct_pci_cfg(pvt, offset, val) \
	pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)

int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size);

/* Map an embedded struct device back to its containing mem_ctl_info. */
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
495
496
/*
 * Disable caching on the executing CPU: set CR0.CD, then write back and
 * invalidate the caches with wbinvd.  The unused @dummy parameter matches
 * the on_each_cpu()-style callback signature — presumably invoked on every
 * CPU; verify at the call sites.
 */
static inline void disable_caches(void *dummy)
{
	write_cr0(read_cr0() | X86_CR0_CD);
	wbinvd();
}
502
/* Re-enable caching on the executing CPU by clearing CR0.CD. */
static inline void enable_caches(void *dummy)
{
	write_cr0(read_cr0() & ~X86_CR0_CD);
}
507