/*
 * Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
 *
 * This driver supports the memory controllers found on the Intel
 * processor family Sandy Bridge.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_module.h"

/* Static vars */
static LIST_HEAD(sbridge_edac_list);

/*
 * Alter this version for the module when modifications are made
 */
#define SBRIDGE_REVISION	" Ver: 1.1.2 "
#define EDAC_MOD_STR		"sbridge_edac"

/*
 * Debug macros
 */
#define sbridge_printk(level, fmt, arg...)			\
	edac_printk(level, "sbridge", fmt, ##arg)

#define sbridge_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)

/*
 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
 */
#define GET_BITFIELD(v, lo, hi)	\
	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
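
/*
 * Editorial example (not in the original source): GET_BITFIELD(0x00c0, 6, 7)
 * masks bits 7..6 (0xc0) and shifts right by 6, yielding 0x3.
 */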

/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
static const u32 sbridge_dram_rule[] = {
	0x80, 0x88, 0x90, 0x98, 0xa0,
	0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
};

static const u32 ibridge_dram_rule[] = {
	0x60, 0x68, 0x70, 0x78, 0x80,
	0x88, 0x90, 0x98, 0xa0, 0xa8,
	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
};

static const u32 knl_dram_rule[] = {
	0x60, 0x68, 0x70, 0x78, 0x80,
	0x88, 0x90, 0x98, 0xa0, 0xa8,
	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
	0x100, 0x108, 0x110, 0x118,
};

#define DRAM_RULE_ENABLE(reg)	GET_BITFIELD(reg, 0, 0)
#define A7MODE(reg)		GET_BITFIELD(reg, 26, 26)

static char *show_dram_attr(u32 attr)
{
	switch (attr) {
	case 0:
		return "DRAM";
	case 1:
		return "MMCFG";
	case 2:
		return "NXM";
	default:
		return "unknown";
	}
}

static const u32 sbridge_interleave_list[] = {
	0x84, 0x8c, 0x94, 0x9c, 0xa4,
	0xac, 0xb4, 0xbc, 0xc4, 0xcc,
};

static const u32 ibridge_interleave_list[] = {
	0x64, 0x6c, 0x74, 0x7c, 0x84,
	0x8c, 0x94, 0x9c, 0xa4, 0xac,
	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
};

static const u32 knl_interleave_list[] = {
	0x64, 0x6c, 0x74, 0x7c, 0x84,
	0x8c, 0x94, 0x9c, 0xa4, 0xac,
	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
	0x104, 0x10c, 0x114, 0x11c,
};

struct interleave_pkg {
	unsigned char start;
	unsigned char end;
};

static const struct interleave_pkg sbridge_interleave_pkg[] = {
	{ 0, 2 },
	{ 3, 5 },
	{ 8, 10 },
	{ 11, 13 },
	{ 16, 18 },
	{ 19, 21 },
	{ 24, 26 },
	{ 27, 29 },
};

static const struct interleave_pkg ibridge_interleave_pkg[] = {
	{ 0, 3 },
	{ 4, 7 },
	{ 8, 11 },
	{ 12, 15 },
	{ 16, 19 },
	{ 20, 23 },
	{ 24, 27 },
	{ 28, 31 },
};

static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
			  int interleave)
{
	return GET_BITFIELD(reg, table[interleave].start,
			    table[interleave].end);
}
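
/*
 * Editorial example (not in the original source): with the Sandy Bridge
 * table above, sad_pkg(sbridge_interleave_pkg, reg, 1) extracts bits 5:3
 * of the interleave-list register -- the package id programmed into
 * interleave slot 1.
 */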

#define TOLM		0x80
#define TOHM		0x84
#define HASWELL_TOLM	0xd0
#define HASWELL_TOHM_0	0xd4
#define HASWELL_TOHM_1	0xd8
#define KNL_TOLM	0xd0
#define KNL_TOHM_0	0xd4
#define KNL_TOHM_1	0xd8

#define GET_TOLM(reg)		((GET_BITFIELD(reg, 0,  3) << 28) | 0x3ffffff)
#define GET_TOHM(reg)		((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
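
/*
 * Editorial example (not in the original source): if the TOLM register's
 * bits 3:0 read back as 0x8, GET_TOLM() yields (0x8 << 28) | 0x3ffffff =
 * 0x83ffffff, i.e. the top of low memory sits just below 2 GiB + 64 MiB.
 */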

#define SAD_TARGET	0xf0

#define SOURCE_ID(reg)		GET_BITFIELD(reg, 9, 11)

#define SOURCE_ID_KNL(reg)	GET_BITFIELD(reg, 12, 14)

#define SAD_CONTROL	0xf4

static const u32 tad_dram_rule[] = {
	0x40, 0x44, 0x48, 0x4c,
	0x50, 0x54, 0x58, 0x5c,
	0x60, 0x64, 0x68, 0x6c,
};
#define MAX_TAD	ARRAY_SIZE(tad_dram_rule)

#define TAD_LIMIT(reg)		((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
#define TAD_SOCK(reg)		GET_BITFIELD(reg, 10, 11)
#define TAD_CH(reg)		GET_BITFIELD(reg,  8,  9)
#define TAD_TGT3(reg)		GET_BITFIELD(reg,  6,  7)
#define TAD_TGT2(reg)		GET_BITFIELD(reg,  4,  5)
#define TAD_TGT1(reg)		GET_BITFIELD(reg,  2,  3)
#define TAD_TGT0(reg)		GET_BITFIELD(reg,  0,  1)

#define MCMTR			0x7c
#define KNL_MCMTR		0x624

#define IS_ECC_ENABLED(mcmtr)		GET_BITFIELD(mcmtr, 2, 2)
#define IS_LOCKSTEP_ENABLED(mcmtr)	GET_BITFIELD(mcmtr, 1, 1)
#define IS_CLOSE_PG(mcmtr)		GET_BITFIELD(mcmtr, 0, 0)

#define RASENABLES		0xac
#define IS_MIRROR_ENABLED(reg)		GET_BITFIELD(reg, 0, 0)

static const int mtr_regs[] = {
	0x80, 0x84, 0x88,
};

static const int knl_mtr_reg = 0xb60;

#define RANK_DISABLE(mtr)		GET_BITFIELD(mtr, 16, 19)
#define IS_DIMM_PRESENT(mtr)		GET_BITFIELD(mtr, 14, 14)
#define RANK_CNT_BITS(mtr)		GET_BITFIELD(mtr, 12, 13)
#define RANK_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 2, 4)
#define COL_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 0, 1)

static const u32 tad_ch_nilv_offset[] = {
	0x90, 0x94, 0x98, 0x9c,
	0xa0, 0xa4, 0xa8, 0xac,
	0xb0, 0xb4, 0xb8, 0xbc,
};
#define CHN_IDX_OFFSET(reg)	GET_BITFIELD(reg, 28, 29)
#define TAD_OFFSET(reg)		(GET_BITFIELD(reg,  6, 25) << 26)

static const u32 rir_way_limit[] = {
	0x108, 0x10c, 0x110, 0x114, 0x118,
};
#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)

#define IS_RIR_VALID(reg)	GET_BITFIELD(reg, 31, 31)
#define RIR_WAY(reg)		GET_BITFIELD(reg, 28, 29)

#define MAX_RIR_WAY	8

static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
	{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
	{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
	{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
	{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};

#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))

#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))

/* Correctable-error count/threshold registers, per channel */

static const u32 correrrcnt[] = {
	0x104, 0x108, 0x10c, 0x110,
};

#define RANK_ODD_OV(reg)		GET_BITFIELD(reg, 31, 31)
#define RANK_ODD_ERR_CNT(reg)		GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_OV(reg)		GET_BITFIELD(reg, 15, 15)
#define RANK_EVEN_ERR_CNT(reg)		GET_BITFIELD(reg,  0, 14)

static const u32 correrrthrsld[] = {
	0x11c, 0x120, 0x124, 0x128,
};

#define RANK_ODD_ERR_THRSLD(reg)	GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_ERR_THRSLD(reg)	GET_BITFIELD(reg,  0, 14)

#define SB_RANK_CFG_A		0x0328

#define IB_RANK_CFG_A		0x0320

/*
 * sbridge structs
 */

#define NUM_CHANNELS		4	/* Max channels per MC */
#define MAX_DIMMS		3	/* Max DIMMS per channel */
#define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */
#define KNL_MAX_CHANNELS	6	/* KNL max num. of channels */
#define KNL_MAX_EDCS		8	/* Embedded DRAM controllers */
#define CHANNEL_UNSPECIFIED	0xf	/* Intel IA32 SDM 15-14 */

enum type {
	SANDY_BRIDGE,
	IVY_BRIDGE,
	HASWELL,
	BROADWELL,
	KNIGHTS_LANDING,
};

enum domain {
	IMC0 = 0,
	IMC1,
	SOCK,
};

enum mirroring_mode {
	NON_MIRRORING,
	ADDR_RANGE_MIRRORING,
	FULL_MIRRORING,
};

struct sbridge_pvt;
struct sbridge_info {
	enum type	type;
	u32		mcmtr;
	u32		rankcfgr;
	u64		(*get_tolm)(struct sbridge_pvt *pvt);
	u64		(*get_tohm)(struct sbridge_pvt *pvt);
	u64		(*rir_limit)(u32 reg);
	u64		(*sad_limit)(u32 reg);
	u32		(*interleave_mode)(u32 reg);
	u32		(*dram_attr)(u32 reg);
	const u32	*dram_rule;
	const u32	*interleave_list;
	const struct interleave_pkg *interleave_pkg;
	u8		max_sad;
	u8		max_interleave;
	u8		(*get_node_id)(struct sbridge_pvt *pvt);
	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
	enum dev_type	(*get_width)(struct sbridge_pvt *pvt, u32 mtr);
	struct pci_dev	*pci_vtd;
};

struct sbridge_channel {
	u32		ranks;
	u32		dimms;
};

struct pci_id_descr {
	int		dev_id;
	int		optional;
	enum domain	dom;
};

struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs_per_imc;
	int				n_devs_per_sock;
	int				n_imcs_per_sock;
	enum type			type;
};

struct sbridge_dev {
	struct list_head	list;
	u8			bus, mc;
	u8			node_id, source_id;
	struct pci_dev		**pdev;
	enum domain		dom;
	int			n_devs;
	int			i_devs;
	struct mem_ctl_info	*mci;
};

struct knl_pvt {
	struct pci_dev	*pci_cha[KNL_MAX_CHAS];
	struct pci_dev	*pci_channel[KNL_MAX_CHANNELS];
	struct pci_dev	*pci_mc0;
	struct pci_dev	*pci_mc1;
	struct pci_dev	*pci_mc0_misc;
	struct pci_dev	*pci_mc1_misc;
	struct pci_dev	*pci_mc_info; /* tolm, tohm */
};

struct sbridge_pvt {
	/* Devices per socket */
	struct pci_dev		*pci_ddrio;
	struct pci_dev		*pci_sad0, *pci_sad1;
	struct pci_dev		*pci_br0, *pci_br1;
	/* Devices per memory controller */
	struct pci_dev		*pci_ha, *pci_ta, *pci_ras;
	struct pci_dev		*pci_tad[NUM_CHANNELS];

	struct sbridge_dev	*sbridge_dev;

	struct sbridge_info	info;
	struct sbridge_channel	channel[NUM_CHANNELS];

	/* Memory type detection */
	bool			is_cur_addr_mirrored, is_lockstep, is_close_pg;
	bool			is_chan_hash;
	enum mirroring_mode	mirror_mode;

	/* Memory description */
	u64			tolm, tohm;
	struct knl_pvt knl;
};

#define PCI_DESCR(device_id, opt, domain)	\
	.dev_id = (device_id),			\
	.optional = opt,			\
	.dom = domain

static const struct pci_id_descr pci_dev_descr_sbridge[] = {
		/* Processor Home Agent */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0,   0, IMC0) },

		/* Memory controller */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA,    0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0,  0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1,  0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2,  0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3,  0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },

		/* System Address Decoder */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0,      0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1,      0, SOCK) },

		/* Broadcast Registers */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR,        0, SOCK) },
};

#define PCI_ID_TABLE_ENTRY(A, N, M, T) {	\
	.descr = A,				\
	.n_devs_per_imc = N,			\
	.n_devs_per_sock = ARRAY_SIZE(A),	\
	.n_imcs_per_sock = M,			\
	.type = T				\
}

static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
	{0,}			/* 0 terminated list. */
};

/*
 * Ivy Bridge (ibridge) support. The DDRIO device id differs between
 * the one home agent (1HA) and two home agent (2HA) parts.
 */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0	0x0eb8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0	0x0ebc

#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0		0x0ea0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA		0x0ea8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS		0x0e71
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0	0x0eaa
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1	0x0eab
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2	0x0eac
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3	0x0ead
#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD			0x0ec8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0			0x0ec9
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1			0x0eca
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1		0x0e60
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA		0x0e68
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS		0x0e79
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0	0x0e6a
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1	0x0e6b
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2	0x0e6c
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3	0x0e6d

static const struct pci_id_descr pci_dev_descr_ibridge[] = {
		/* Processor Home Agent */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },

		/* Memory controller */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS,    0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },

		/* Optional, mode 2HA */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3,   1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },

		/* System Address Decoder */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD,            0, SOCK) },

		/* Broadcast Registers */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0,            1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1,            0, SOCK) },

};

static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
	{0,}			/* 0 terminated list. */
};

/* Haswell support */
/* EN processor:
 *	- 1 IMC
 *	- 3 DDR3 channels, 2 DPC per channel
 * EP processor:
 *	- 1 or 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *	- 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *	- 2 IMC
 *	- each IMC interfaces with a SMI 2 channel
 *	- each SMI channel interfaces with a scalable memory buffer
 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
#define HASWELL_DDRCRCLKCONTROLS	0xa10
#define HASWELL_HASYSDEFEATURE2		0x84
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC	0x2f28
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0		0x2fa0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1		0x2f60
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA		0x2fa8
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM		0x2f71
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA		0x2f68
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM		0x2f79
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0	0x2ffc
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1	0x2ffd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0	0x2faa
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1	0x2fab
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2	0x2fac
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3	0x2fad
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0	0x2f6a
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1	0x2f6b
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2	0x2f6c
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3	0x2f6d
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0		0x2fbd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1		0x2fbf
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2		0x2fb9
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3		0x2fbb
static const struct pci_id_descr pci_dev_descr_haswell[] = {
	/* first item must be the HA */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0,      0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1,      1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0,   1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1,   1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2,   1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3,   1, SOCK) },
};

static const struct pci_id_table pci_dev_descr_haswell_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
	{0,}			/* 0 terminated list. */
};

/* Knight's Landing Support */
/*
 * KNL's memory channels are swizzled between memory controllers.
 * MC0 is mapped to CH3,4,5, and MC1 is mapped to CH0,1,2
 */
#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
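
/*
 * Editorial example (not in the original source): knl_channel_remap(1, 2)
 * == 2 (MC1 keeps CH0..2), while knl_channel_remap(0, 2) == 5 (MC0 is
 * shifted up to CH3..5).
 */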

/* Memory controller, TAD tables, errors */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC		0x7840

/* DRAM channel stuff; bank addrs, dimmmtr, etc. (6 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN	0x7843

/* TAD limits/offsets, MCMTR (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA		0x7844

/* CHA broadcast registers, dram rules (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0	0x782a

/* SAD target (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1	0x782b

/* Caching / Home Agent */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA		0x782c

/* Device with TOLM and TOHM (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM	0x7810

/*
 * KNL differs from SB, IB, and Haswell in that it has multiple
 * instances of the same device with the same device ID, so we handle that
 * by creating as many copies in the table as we expect to find.
 * (Like device IDs must be grouped together.)
 */
static const struct pci_id_descr pci_dev_descr_knl[] = {
	[0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC,    0, IMC0)},
	[2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN,  0, IMC0) },
	[8]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA,    0, IMC0) },
	[9]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
	[10]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0,  0, SOCK) },
	[11]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1,  0, SOCK) },
	[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA,   0, SOCK) },
};

static const struct pci_id_table pci_dev_descr_knl_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
	{0,}			/* 0 terminated list. */
};

/*
 * Broadwell support
 *
 * DE processor:
 *	- 1 IMC
 *	- 2 DDR3 channels, 2 DPC per channel
 * EP processor:
 *	- 1 or 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *	- 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *	- 2 IMC
 *	- each IMC interfaces with a SMI 2 channel
 *	- each SMI channel interfaces with a scalable memory buffer
 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC	0x6f28
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0		0x6fa0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1		0x6f60
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA	0x6fa8
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM	0x6f71
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA	0x6f68
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM	0x6f79
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0	0x6ffc
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1	0x6ffd
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0	0x6faa
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1	0x6fab
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2	0x6fac
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3	0x6fad
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0	0x6f6a
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1	0x6f6b
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2	0x6f6c
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3	0x6f6d
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0	0x6faf

static const struct pci_id_descr pci_dev_descr_broadwell[] = {
	/* first item must be the HA */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0,      0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1,      1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0,   1, SOCK) },
};

static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
	{0,}			/* 0 terminated list. */
};

/****************************************************************************
			Ancillary status routines
 ****************************************************************************/

static inline int numrank(enum type type, u32 mtr)
{
	int ranks = (1 << RANK_CNT_BITS(mtr));
	int max = 4;

	if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
		max = 8;

	if (ranks > max) {
		edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
			 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
		return -EINVAL;
	}

	return ranks;
}

static inline int numrow(u32 mtr)
{
	int rows = (RANK_WIDTH_BITS(mtr) + 12);

	if (rows < 13 || rows > 18) {
		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}

	return 1 << rows;
}

static inline int numcol(u32 mtr)
{
	int cols = (COL_WIDTH_BITS(mtr) + 10);

	if (cols > 12) {
		edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
			 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}

	return 1 << cols;
}
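
/*
 * Editorial example (not in the original source): for mtr = 0x5012,
 * IS_DIMM_PRESENT(mtr) = 1 (bit 14), RANK_CNT_BITS(mtr) = 1 so
 * numrank() = 2, RANK_WIDTH_BITS(mtr) = 4 so numrow() = 1 << 16, and
 * COL_WIDTH_BITS(mtr) = 2 so numcol() = 1 << 12.
 */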

static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom,
					   int multi_bus,
					   struct sbridge_dev *prev)
{
	struct sbridge_dev *sbridge_dev;

	/*
	 * If we have devices scattered across several busses that pertain
	 * to the same memory controller, we'll lump them all together.
	 */
	if (multi_bus) {
		return list_first_entry_or_null(&sbridge_edac_list,
				struct sbridge_dev, list);
	}

	sbridge_dev = list_entry(prev ? prev->list.next
				      : sbridge_edac_list.next, struct sbridge_dev, list);

	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
		if (sbridge_dev->bus == bus &&
		    (dom == SOCK || dom == sbridge_dev->dom))
			return sbridge_dev;
	}

	return NULL;
}

static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
					     const struct pci_id_table *table)
{
	struct sbridge_dev *sbridge_dev;

	sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
	if (!sbridge_dev)
		return NULL;

	sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
				    sizeof(*sbridge_dev->pdev),
				    GFP_KERNEL);
	if (!sbridge_dev->pdev) {
		kfree(sbridge_dev);
		return NULL;
	}

	sbridge_dev->bus = bus;
	sbridge_dev->dom = dom;
	sbridge_dev->n_devs = table->n_devs_per_imc;
	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);

	return sbridge_dev;
}

static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{
	list_del(&sbridge_dev->list);
	kfree(sbridge_dev->pdev);
	kfree(sbridge_dev);
}

static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	/* Address range is 32:28 */
	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
	return GET_TOLM(reg);
}

static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
	return GET_TOHM(reg);
}

static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);

	return GET_TOLM(reg);
}

static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);

	return GET_TOHM(reg);
}

static u64 rir_limit(u32 reg)
{
	return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
}
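
/*
 * Editorial example (not in the original source): if bits 10:1 of a RIR
 * limit register decode to 4, rir_limit() returns (4ULL << 29) |
 * 0x1fffffff = 0x9fffffff -- the interleave range ends just below 2.5 GiB.
 */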

static u64 sad_limit(u32 reg)
{
	return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
}

static u32 interleave_mode(u32 reg)
{
	return GET_BITFIELD(reg, 1, 1);
}

static u32 dram_attr(u32 reg)
{
	return GET_BITFIELD(reg, 2, 3);
}

static u64 knl_sad_limit(u32 reg)
{
	return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
}

static u32 knl_interleave_mode(u32 reg)
{
	return GET_BITFIELD(reg, 1, 2);
}

static const char * const knl_intlv_mode[] = {
	"[8:6]", "[10:8]", "[14:12]", "[32:30]"
};

static const char *get_intlv_mode_str(u32 reg, enum type t)
{
	if (t == KNIGHTS_LANDING)
		return knl_intlv_mode[knl_interleave_mode(reg)];
	else
		return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
}

static u32 dram_attr_knl(u32 reg)
{
	return GET_BITFIELD(reg, 3, 4);
}


static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
{
	u32 reg;
	enum mem_type mtype;

	if (pvt->pci_ddrio) {
		pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
				      &reg);
		if (GET_BITFIELD(reg, 11, 11))
			/* RDIMM */
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
	} else
		mtype = MEM_UNKNOWN;

	return mtype;
}

static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
{
	u32 reg;
	bool registered = false;
	enum mem_type mtype = MEM_UNKNOWN;

	if (!pvt->pci_ddrio)
		goto out;

	pci_read_config_dword(pvt->pci_ddrio,
			      HASWELL_DDRCRCLKCONTROLS, &reg);
	/* bit 16 set means the DIMMs are registered (RDIMM) */
	if (GET_BITFIELD(reg, 16, 16))
		registered = true;

	pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
	if (GET_BITFIELD(reg, 14, 14)) {
		if (registered)
			mtype = MEM_RDDR4;
		else
			mtype = MEM_DDR4;
	} else {
		if (registered)
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
	}

out:
	return mtype;
}

static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/* for KNL value is fixed */
	return DEV_X16;
}

static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/* there's no way to figure out */
	return DEV_UNKNOWN;
}

static enum dev_type __ibridge_get_width(u32 mtr)
{
	enum dev_type type;

	switch (mtr) {
	case 3:
		type = DEV_UNKNOWN;
		break;
	case 2:
		type = DEV_X16;
		break;
	case 1:
		type = DEV_X8;
		break;
	case 0:
		type = DEV_X4;
		break;
	}

	return type;
}

static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/*
	 * ddr3_width on the documentation but also valid for DDR4 on
	 * Haswell
	 */
	return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
}

static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/* ddr3_width on the documentation but also valid for DDR4 */
	return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
}

static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
{
	/* DDR4 RDIMMS and LRDIMMS are supported */
	return MEM_RDDR4;
}

static u8 get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;
	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 2);
}

static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 3);
}

static u8 knl_get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 2);
}


static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}

static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
{
	u64 rc;
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
	rc = GET_BITFIELD(reg, 26, 31);
	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
	rc = ((reg << 6) | rc) << 26;

	return rc | 0x3ffffff;
}

static u64 knl_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}

static u64 knl_get_tohm(struct sbridge_pvt *pvt)
{
	u64 rc;
	u32 reg_lo, reg_hi;

	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
	rc = ((u64)reg_hi << 32) | reg_lo;
	return rc | 0x3ffffff;
}


static u64 haswell_rir_limit(u32 reg)
{
	return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1;
}

static inline u8 sad_pkg_socket(u8 pkg)
{
	/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
	return ((pkg >> 3) << 2) | (pkg & 0x3);
}

static inline u8 sad_pkg_ha(u8 pkg)
{
	return (pkg >> 2) & 0x1;
}
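
/*
 * Editorial example (not in the original source): for pkg = 5 (binary 101),
 * sad_pkg_socket(5) = ((0) << 2) | 1 = 1 and sad_pkg_ha(5) = 1, i.e.
 * CPU socket 1, home agent 1.
 */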

static int haswell_chan_hash(int idx, u64 addr)
{
	int i;

	/*
	 * XOR even bits from 12:26 to bit0 of idx,
	 *     odd bits from 13:27 to bit1
	 */
	for (i = 12; i < 28; i += 2)
		idx ^= (addr >> i) & 3;

	return idx;
}
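
/*
 * Editorial example (not in the original source): for idx = 0 and
 * addr = 0x1000, only bit pair 13:12 (binary 01) contributes, so the
 * hashed channel index is 0 ^ 1 = 1.
 */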

/* Low bits of TAD limit, and some metadata. */
static const u32 knl_tad_dram_limit_lo[] = {
	0x400, 0x500, 0x600, 0x700,
	0x800, 0x900, 0xa00, 0xb00,
};

/* Low bits of TAD offset. */
static const u32 knl_tad_dram_offset_lo[] = {
	0x404, 0x504, 0x604, 0x704,
	0x804, 0x904, 0xa04, 0xb04,
};

/* High 16 bits of TAD limit and offset. */
static const u32 knl_tad_dram_hi[] = {
	0x408, 0x508, 0x608, 0x708,
	0x808, 0x908, 0xa08, 0xb08,
};

/* Number of ways a tad entry is interleaved. */
static const u32 knl_tad_ways[] = {
	8, 6, 4, 3, 2, 1,
};

/*
 * Retrieve the n'th TAD entry from the requested memory controller.
 *
 * @pvt:	driver private data
 * @entry:	which TAD table entry to read
 * @mc:		which memory controller (0 or 1)
 * @offset:	output TAD range offset
 * @limit:	output address of the last byte covered by this entry
 * @ways:	output number of interleave ways
 *
 * Returns 0 on success, or a negative error code if the entry is
 * disabled or its wayness field is invalid.
 */
static int knl_get_tad(const struct sbridge_pvt *pvt,
		       const int entry,
		       const int mc,
		       u64 *offset,
		       u64 *limit,
		       int *ways)
{
	u32 reg_limit_lo, reg_offset_lo, reg_hi;
	struct pci_dev *pci_mc;
	int way_id;

	switch (mc) {
	case 0:
		pci_mc = pvt->knl.pci_mc0;
		break;
	case 1:
		pci_mc = pvt->knl.pci_mc1;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	pci_read_config_dword(pci_mc,
			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
	pci_read_config_dword(pci_mc,
			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
	pci_read_config_dword(pci_mc,
			knl_tad_dram_hi[entry], &reg_hi);

	/* Is this TAD rule enabled? */
	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
		return -ENODEV;

	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);

	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
		*ways = knl_tad_ways[way_id];
	} else {
		*ways = 0;
		sbridge_printk(KERN_ERR,
				"Unexpected value %d in mc_tad_limit_lo wayness field\n",
				way_id);
		return -ENODEV;
	}

	/*
	 * The least significant 6 bits of base and limit are truncated.
	 * For limit, we fill the missing bits with 1s.
	 */
	*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
				((u64) GET_BITFIELD(reg_hi, 0, 15) << 32);
	*limit = ((u64) GET_BITFIELD(reg_limit_lo, 6, 31) << 6) | 63 |
				((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);

	return 0;
}

/* Determine which memory controller is responsible for a given channel. */
static int knl_channel_mc(int channel)
{
	WARN_ON(channel < 0 || channel >= 6);

	return channel < 3 ? 1 : 0;
}

/*
 * Get the Nth entry from EDC_ROUTE_TABLE register.
 * (This is the per-tile mapping of logical interleave targets to
 * physical EDC modules.)
 *
 * entry 0: 0:2
 *       1: 3:5
 *       2: 6:8
 *       3: 9:11
 *       4: 12:14
 *       5: 15:17
 *       6: 18:20
 *       7: 21:23
 * reserved: 24:31
 */
static u32 knl_get_edc_route(int entry, u32 reg)
{
	WARN_ON(entry >= KNL_MAX_EDCS);
	return GET_BITFIELD(reg, entry*3, (entry*3)+2);
}

/*
 * Get the Nth entry from MC_ROUTE_TABLE register.
 * (This is the per-tile mapping of logical interleave targets to
 * physical memory channels. Each entry is a pair of numbers: the
 * mc and the channel within that mc.)
 *
 * entry 0: mc 0:2   channel 18:19
 *       1: mc 3:5   channel 20:21
 *       2: mc 6:8   channel 22:23
 *       3: mc 9:11  channel 24:25
 *       4: mc 12:14 channel 26:27
 *       5: mc 15:17 channel 28:29
 * reserved: 30:31
 *
 * Though we have 3 bits to identify the MC, we should only see
 * the values 0 or 1.
 */
static u32 knl_get_mc_route(int entry, u32 reg)
{
	int mc, chan;

	WARN_ON(entry >= KNL_MAX_CHANNELS);

	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);

	return knl_channel_remap(mc, chan);
}
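
/*
 * Editorial example (not in the original source): for entry = 1 the mc
 * field is bits 5:3 and the channel field bits 21:20, so with
 * reg = 0x200008 (mc = 1, chan = 2) knl_get_mc_route(1, reg) returns
 * knl_channel_remap(1, 2) == 2.
 */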

/*
 * Render the EDC_ROUTE register in human-readable form.
 * Output string s should be at least KNL_MAX_EDCS*2 bytes.
 */
static void knl_show_edc_route(u32 reg, char *s)
{
	int i;

	for (i = 0; i < KNL_MAX_EDCS; i++) {
		s[i*2] = knl_get_edc_route(i, reg) + '0';
		s[i*2+1] = '-';
	}

	s[KNL_MAX_EDCS*2 - 1] = '\0';
}

/*
 * Render the MC_ROUTE register in human-readable form.
 * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
 */
static void knl_show_mc_route(u32 reg, char *s)
{
	int i;

	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
		s[i*2] = knl_get_mc_route(i, reg) + '0';
		s[i*2+1] = '-';
	}

	s[KNL_MAX_CHANNELS*2 - 1] = '\0';
}

#define KNL_EDC_ROUTE 0xb8
#define KNL_MC_ROUTE 0xb4

/* Is this dram rule backed by regular DRAM in flat mode? */
#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)

/* Is this dram rule cached? */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)

/* Is this rule backed by edc ? */
#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)

/* Is this rule backed by DRAM, cacheable in EDRAM? */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)

/* Is this rule mod3? */
#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)

/*
 * Figure out how big our RAM modules are.
 *
 * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
 * have to figure this out from the SAD rules, interleave lists, route tables,
 * and TAD rules.
 *
 * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
 * inspect the TAD rules to figure out how large the SAD regions really are.
 *
 * When we know the real size of a SAD region and how many ways it's
 * interleaved, we know the individual contribution of each channel to
 * each SAD region.
 *
 * Finally, we have to check whether each channel participates in each SAD
 * region.
 *
 * Fortunately, KNL only supports one DIMM per channel, so once we know how
 * much memory goes with each channel, we know how big each DIMM is.
 *
 * (This only applies to DRAM, not MCDRAM.)
 */
static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
{
	u64 sad_base, sad_size, sad_limit = 0;
	u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
	int sad_rule = 0;
	int tad_rule = 0;
	int intrlv_ways, tad_ways;
	u32 first_pkg, pkg;
	int i;
	u64 sad_actual_size[2];	/* sad size accounting for holes, per mc */
	u32 dram_rule, interleave_reg;
	u32 mc_route_reg[KNL_MAX_CHAS];
	u32 edc_route_reg[KNL_MAX_CHAS];
	int edram_only;
	char edc_route_string[KNL_MAX_EDCS*2];
	char mc_route_string[KNL_MAX_CHANNELS*2];
	int cur_reg_start;
	int mc;
	int channel;
	int way;
	int participants[KNL_MAX_CHANNELS];
	int participant_count = 0;

	for (i = 0; i < KNL_MAX_CHANNELS; i++)
		mc_sizes[i] = 0;

	/* Read the EDC route table in each CHA. */
	cur_reg_start = 0;
	for (i = 0; i < KNL_MAX_CHAS; i++) {
		pci_read_config_dword(pvt->knl.pci_cha[i],
				KNL_EDC_ROUTE, &edc_route_reg[i]);

		if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
			knl_show_edc_route(edc_route_reg[i-1],
					edc_route_string);
			if (cur_reg_start == i-1)
				edac_dbg(0, "edc route table for CHA %d: %s\n",
					cur_reg_start, edc_route_string);
			else
				edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
					cur_reg_start, i-1, edc_route_string);
			cur_reg_start = i;
		}
	}
	knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
	if (cur_reg_start == i-1)
		edac_dbg(0, "edc route table for CHA %d: %s\n",
			cur_reg_start, edc_route_string);
	else
		edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
			cur_reg_start, i-1, edc_route_string);

	/* Read the MC route table in each CHA. */
	cur_reg_start = 0;
	for (i = 0; i < KNL_MAX_CHAS; i++) {
		pci_read_config_dword(pvt->knl.pci_cha[i],
			KNL_MC_ROUTE, &mc_route_reg[i]);

		if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
			knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
			if (cur_reg_start == i-1)
				edac_dbg(0, "mc route table for CHA %d: %s\n",
					cur_reg_start, mc_route_string);
			else
				edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
					cur_reg_start, i-1, mc_route_string);
			cur_reg_start = i;
		}
	}
	knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
	if (cur_reg_start == i-1)
		edac_dbg(0, "mc route table for CHA %d: %s\n",
			cur_reg_start, mc_route_string);
	else
		edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
			cur_reg_start, i-1, mc_route_string);

	/* Process DRAM rules */
	for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
		/* previous limit becomes the new base */
		sad_base = sad_limit;

		pci_read_config_dword(pvt->pci_sad0,
			pvt->info.dram_rule[sad_rule], &dram_rule);

		if (!DRAM_RULE_ENABLE(dram_rule))
			break;

		edram_only = KNL_EDRAM_ONLY(dram_rule);

		sad_limit = pvt->info.sad_limit(dram_rule)+1;
		sad_size = sad_limit - sad_base;

		pci_read_config_dword(pvt->pci_sad0,
			pvt->info.interleave_list[sad_rule], &interleave_reg);

		/*
		 * Find out how many ways this dram rule is interleaved.
		 * We stop when we see the first channel again.
		 */
		first_pkg = sad_pkg(pvt->info.interleave_pkg,
						interleave_reg, 0);
		for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
			pkg = sad_pkg(pvt->info.interleave_pkg,
						interleave_reg, intrlv_ways);

			if ((pkg & 0x8) == 0) {
				/*
				 * 0 bit means memory is non-local,
				 * which KNL doesn't support
				 */
				edac_dbg(0, "Unexpected interleave target %d\n",
					pkg);
				return -1;
			}

			if (pkg == first_pkg)
				break;
		}
		if (KNL_MOD3(dram_rule))
			intrlv_ways *= 3;

		edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
			sad_rule,
			sad_base,
			sad_limit,
			intrlv_ways,
			edram_only ? ", EDRAM" : "");

		/*
		 * Find out how big the SAD region really is by iterating
		 * over TAD tables (SAD regions may contain holes).
		 * Each memory controller might have a different TAD table, so
		 * we have to look at both.
		 *
		 * Livespace is the memory that's mapped in this TAD table,
		 * deadspace is the holes (this could be the MMIO hole, or it
		 * could be memory that's mapped by the other TAD table but
		 * isn't mapped by this one).
		 */
		for (mc = 0; mc < 2; mc++) {
			sad_actual_size[mc] = 0;
			tad_livespace = 0;
			for (tad_rule = 0;
					tad_rule < ARRAY_SIZE(
						knl_tad_dram_limit_lo);
					tad_rule++) {
				if (knl_get_tad(pvt,
						tad_rule,
						mc,
						&tad_deadspace,
						&tad_limit,
						&tad_ways))
					break;

				tad_size = (tad_limit+1) -
					(tad_livespace + tad_deadspace);
				tad_livespace += tad_size;
				tad_base = (tad_limit+1) - tad_size;

				if (tad_base < sad_base) {
					if (tad_limit > sad_base)
						edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
				} else if (tad_base < sad_limit) {
					if (tad_limit+1 > sad_limit) {
						edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
					} else {
						/* TAD region is completely inside SAD region */
						edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
							tad_rule, tad_base,
							tad_limit, tad_size,
							mc);
						sad_actual_size[mc] += tad_size;
					}
				}
				tad_base = tad_limit+1;
			}
		}

		for (mc = 0; mc < 2; mc++) {
			edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
				mc, sad_actual_size[mc], sad_actual_size[mc]);
		}

		/* Ignore EDRAM rule */
		if (edram_only)
			continue;

		/* Figure out which channels participate in interleave. */
		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
			participants[channel] = 0;

		/* For each channel, does at least one CHA route to it? */
		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
			for (way = 0; way < intrlv_ways; way++) {
				int target;
				int cha;

				if (KNL_MOD3(dram_rule))
					target = way;
				else
					target = 0x7 & sad_pkg(
				pvt->info.interleave_pkg, interleave_reg, way);

				for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
					if (knl_get_mc_route(target,
						mc_route_reg[cha]) == channel
						&& !participants[channel]) {
						participant_count++;
						participants[channel] = 1;
						break;
					}
				}
			}
		}

		if (participant_count != intrlv_ways)
			edac_dbg(0, "participant_count (%d) != interleave_ways (%d): DIMM size may be incorrect\n",
				participant_count, intrlv_ways);

		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
			mc = knl_channel_mc(channel);
			if (participants[channel]) {
				edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
					channel,
					sad_actual_size[mc]/intrlv_ways,
					sad_rule);
				mc_sizes[channel] +=
					sad_actual_size[mc]/intrlv_ways;
			}
		}
	}

	return 0;
}

static void get_source_id(struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	u32 reg;

	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
	    pvt->info.type == KNIGHTS_LANDING)
		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
	else
		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);

	if (pvt->info.type == KNIGHTS_LANDING)
		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
	else
		pvt->sbridge_dev->source_id = SOURCE_ID(reg);
}

static int __populate_dimms(struct mem_ctl_info *mci,
			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
			    enum edac_type mode)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
							 : NUM_CHANNELS;
	unsigned int i, j, banks, ranks, rows, cols, npages;
	struct dimm_info *dimm;
	enum mem_type mtype;
	u64 size;

	mtype = pvt->info.get_memory_type(pvt);
	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
		edac_dbg(0, "Memory is registered\n");
	else if (mtype == MEM_UNKNOWN)
		edac_dbg(0, "Cannot determine memory type\n");
	else
		edac_dbg(0, "Memory is unregistered\n");

	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
		banks = 16;
	else
		banks = 8;

	for (i = 0; i < channels; i++) {
		u32 mtr;

		int max_dimms_per_channel;

		if (pvt->info.type == KNIGHTS_LANDING) {
			max_dimms_per_channel = 1;
			if (!pvt->knl.pci_channel[i])
				continue;
		} else {
			max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
			if (!pvt->pci_tad[i])
				continue;
		}

		for (j = 0; j < max_dimms_per_channel; j++) {
			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (pvt->info.type == KNIGHTS_LANDING) {
				pci_read_config_dword(pvt->knl.pci_channel[i],
					knl_mtr_reg, &mtr);
			} else {
				pci_read_config_dword(pvt->pci_tad[i],
					mtr_regs[j], &mtr);
			}
			edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
			if (IS_DIMM_PRESENT(mtr)) {
				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
						       pvt->sbridge_dev->source_id,
						       pvt->sbridge_dev->dom, i);
					return -ENODEV;
				}
				pvt->channel[i].dimms++;

				ranks = numrank(pvt->info.type, mtr);

				if (pvt->info.type == KNIGHTS_LANDING) {
					/* For DDR4, this is fixed. */
					cols = 1 << 10;
					rows = knl_mc_sizes[i] /
						((u64) cols * ranks * banks * 8);
				} else {
					rows = numrow(mtr);
					cols = numcol(mtr);
				}

				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
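				/*
				 * Editorial note: rows * cols * banks * ranks
				 * counts 64-bit (8-byte) words, so the size in
				 * MiB is (count * 8) >> 20 = count >> (20 - 3).
				 */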
				npages = MiB_TO_PAGES(size);

				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
					 size, npages,
					 banks, ranks, rows, cols);

				dimm->nr_pages = npages;
				dimm->grain = 32;
				dimm->dtype = pvt->info.get_width(pvt, mtr);
				dimm->mtype = mtype;
				dimm->edac_mode = mode;
				snprintf(dimm->label, sizeof(dimm->label),
					 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
					 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
			}
		}
	}

	return 0;
}

static int get_dimm_config(struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
	enum edac_type mode;
	u32 reg;

	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
		 pvt->sbridge_dev->mc,
		 pvt->sbridge_dev->node_id,
		 pvt->sbridge_dev->source_id);

	/* KNL doesn't support mirroring or lockstep,
	 * and is always closed page
	 */
	if (pvt->info.type == KNIGHTS_LANDING) {
		mode = EDAC_S4ECD4ED;
		pvt->mirror_mode = NON_MIRRORING;
		pvt->is_cur_addr_mirrored = false;

		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
			return -1;
		if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
			edac_dbg(0, "Failed to read KNL_MCMTR register\n");
			return -ENODEV;
		}
	} else {
		if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
			if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
				edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
				return -ENODEV;
			}
			pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
			if (GET_BITFIELD(reg, 28, 28)) {
				pvt->mirror_mode = ADDR_RANGE_MIRRORING;
				edac_dbg(0, "Address range partial memory mirroring is enabled\n");
				goto next;
			}
		}
		if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
			edac_dbg(0, "Failed to read RASENABLES register\n");
			return -ENODEV;
		}
		if (IS_MIRROR_ENABLED(reg)) {
			pvt->mirror_mode = FULL_MIRRORING;
			edac_dbg(0, "Full memory mirroring is enabled\n");
		} else {
			pvt->mirror_mode = NON_MIRRORING;
			edac_dbg(0, "Memory mirroring is disabled\n");
		}

next:
		if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
			edac_dbg(0, "Failed to read MCMTR register\n");
			return -ENODEV;
		}
		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
			edac_dbg(0, "Lockstep is enabled\n");
			mode = EDAC_S8ECD8ED;
			pvt->is_lockstep = true;
		} else {
			edac_dbg(0, "Lockstep is disabled\n");
			mode = EDAC_S4ECD4ED;
			pvt->is_lockstep = false;
		}
		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
			edac_dbg(0, "address map is on closed page mode\n");
			pvt->is_close_pg = true;
		} else {
			edac_dbg(0, "address map is on open page mode\n");
			pvt->is_close_pg = false;
		}
	}

	return __populate_dimms(mci, knl_mc_sizes, mode);
}

static void get_memory_layout(const struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int i, j, k, n_sads, n_tads, sad_interl;
	u32 reg;
	u64 limit, prv = 0;
	u64 tmp_mb;
	u32 gb, mb;
	u32 rir_way;

	/*
	 * Step 1) Get TOLM/TOHM ranges
	 */

	pvt->tolm = pvt->info.get_tolm(pvt);
	tmp_mb = (1 + pvt->tolm) >> 20;

	gb = div_u64_rem(tmp_mb, 1024, &mb);
	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
		gb, (mb*1000)/1024, (u64)pvt->tolm);

	/* Address range is already 45:25 */
	pvt->tohm = pvt->info.get_tohm(pvt);
	tmp_mb = (1 + pvt->tohm) >> 20;

	gb = div_u64_rem(tmp_mb, 1024, &mb);
	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
		gb, (mb*1000)/1024, (u64)pvt->tohm);

	/*
	 * Step 2) Get SAD range and SAD Interleave list
	 * TAD registers contain the interleave wayness. However, it
	 * seems simpler to just discover it indirectly, with the
	 * algorithm below.
	 */
	prv = 0;
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		/* SAD_LIMIT Address range is 45:26 */
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);
		limit = pvt->info.sad_limit(reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		if (limit <= prv)
			break;

		tmp_mb = (limit + 1) >> 20;
		gb = div_u64_rem(tmp_mb, 1024, &mb);
		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
			 n_sads,
			 show_dram_attr(pvt->info.dram_attr(reg)),
			 gb, (mb*1000)/1024,
			 ((u64)tmp_mb) << 20L,
			 get_intlv_mode_str(reg, pvt->info.type),
			 reg);
		prv = limit;

		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
				      &reg);
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (j = 0; j < 8; j++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
			if (j > 0 && sad_interl == pkg)
				break;

			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
				 n_sads, j, pkg);
		}
	}

	if (pvt->info.type == KNIGHTS_LANDING)
		return;

	/*
	 * Step 3) Get TAD range
	 */
	prv = 0;
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv)
			break;
		tmp_mb = (limit + 1) >> 20;

		gb = div_u64_rem(tmp_mb, 1024, &mb);
		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
			 n_tads, gb, (mb*1000)/1024,
			 ((u64)tmp_mb) << 20L,
			 (u32)(1 << TAD_SOCK(reg)),
			 (u32)TAD_CH(reg) + 1,
			 (u32)TAD_TGT0(reg),
			 (u32)TAD_TGT1(reg),
			 (u32)TAD_TGT2(reg),
			 (u32)TAD_TGT3(reg),
			 reg);
		prv = limit;
	}

	/*
	 * Step 4) Get TAD offsets, per each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < n_tads; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      tad_ch_nilv_offset[j],
					      &reg);
			tmp_mb = TAD_OFFSET(reg) >> 20;
			gb = div_u64_rem(tmp_mb, 1024, &mb);
			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
				 i, j,
				 gb, (mb*1000)/1024,
				 ((u64)tmp_mb) << 20L,
				 reg);
		}
	}

	/*
	 * Step 5) Get RIR Wayness/Limit, per each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < MAX_RIR_RANGES; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      rir_way_limit[j],
					      &reg);

			if (!IS_RIR_VALID(reg))
				continue;

			tmp_mb = pvt->info.rir_limit(reg) >> 20;
			rir_way = 1 << RIR_WAY(reg);
			gb = div_u64_rem(tmp_mb, 1024, &mb);
			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
				 i, j,
				 gb, (mb*1000)/1024,
				 ((u64)tmp_mb) << 20L,
				 rir_way,
				 reg);

			for (k = 0; k < rir_way; k++) {
				pci_read_config_dword(pvt->pci_tad[i],
						      rir_offset[j][k],
						      &reg);
				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;

				gb = div_u64_rem(tmp_mb, 1024, &mb);
				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
					 i, j, k,
					 gb, (mb*1000)/1024,
					 ((u64)tmp_mb) << 20L,
					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
					 reg);
			}
		}
	}
}

static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
{
	struct sbridge_dev *sbridge_dev;

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
			return sbridge_dev->mci;
	}
	return NULL;
}

static int get_memory_error_data(struct mem_ctl_info *mci,
				 u64 addr,
				 u8 *socket, u8 *ha,
				 long *channel_mask,
				 u8 *rank,
				 char **area_type, char *msg)
{
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pci_ha;
	int n_rir, n_sads, n_tads, sad_way, sck_xch;
	int sad_interl, idx, base_ch;
	int interleave_mode, shiftup = 0;
	unsigned sad_interleave[pvt->info.max_interleave];
	u32 reg, dram_rule;
	u8 ch_way, sck_way, pkg, sad_ha = 0;
	u32 tad_offset;
	u32 rir_way;
	u32 mb, gb;
	u64 ch_addr, offset, limit = 0, prv = 0;

	/*
	 * Step 0) Check if the address is at special memory ranges
	 * The check below is probably enough to fill all cases where
	 * the error is not inside a memory, except for the legacy
	 * range (e.g. VGA addresses). It is unlikely, however, that the
	 * memory controller would generate an error on that range.
	 */
	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
		return -EINVAL;
	}
	if (addr >= (u64)pvt->tohm) {
		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
		return -EINVAL;
	}

	/*
	 * Step 1) Get socket
	 */
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		limit = pvt->info.sad_limit(reg);
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory socket");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_sads == pvt->info.max_sad) {
		sprintf(msg, "Can't discover the memory socket");
		return -EINVAL;
	}
	dram_rule = reg;
	*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
	interleave_mode = pvt->info.interleave_mode(dram_rule);

	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
			      &reg);

	if (pvt->info.type == SANDY_BRIDGE) {
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (sad_way = 0; sad_way < 8; sad_way++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
			if (sad_way > 0 && sad_interl == pkg)
				break;
			sad_interleave[sad_way] = pkg;
			edac_dbg(0, "SAD interleave #%d: %d\n",
				 sad_way, sad_interleave[sad_way]);
		}
		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
			 pvt->sbridge_dev->mc,
			 n_sads,
			 addr,
			 limit,
			 sad_way + 7,
			 !interleave_mode ? "" : "XOR[18:16]");
		if (interleave_mode)
			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
		else
			idx = (addr >> 6) & 7;
		switch (sad_way) {
		case 1:
			idx = 0;
			break;
		case 2:
			idx = idx & 1;
			break;
		case 4:
			idx = idx & 3;
			break;
		case 8:
			break;
		default:
			sprintf(msg, "Can't discover socket interleave");
			return -EINVAL;
		}
		*socket = sad_interleave[idx];
		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
			 idx, sad_way, *socket);
	} else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
		int bits, a7mode = A7MODE(dram_rule);

		if (a7mode) {
			/* A7 mode swaps P9 with P6 */
			bits = GET_BITFIELD(addr, 7, 8) << 1;
			bits |= GET_BITFIELD(addr, 9, 9);
		} else
			bits = GET_BITFIELD(addr, 6, 8);

		if (interleave_mode == 0) {
			/* interleave mode will XOR {8,7,6} with {18,17,16} */
			idx = GET_BITFIELD(addr, 16, 18);
			idx ^= bits;
		} else
			idx = bits;

		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
		*socket = sad_pkg_socket(pkg);
		sad_ha = sad_pkg_ha(pkg);

		if (a7mode) {
			/* MCChanShiftUpEnable */
			pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
			shiftup = GET_BITFIELD(reg, 22, 22);
		}

		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
			 idx, *socket, sad_ha, shiftup);
	} else {
		/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
		idx = (addr >> 6) & 7;
		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
		*socket = sad_pkg_socket(pkg);
		sad_ha = sad_pkg_ha(pkg);
		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
			 idx, *socket, sad_ha);
	}

	*ha = sad_ha;

	/*
	 * Move to the proper node structure, in order to access the
	 * right PCI registers
	 */
	new_mci = get_mci_for_node_id(*socket, sad_ha);
	if (!new_mci) {
		sprintf(msg, "Struct for socket #%u wasn't initialized",
			*socket);
		return -EINVAL;
	}
	mci = new_mci;
	pvt = mci->pvt_info;

	/*
	 * Step 2) Get memory channel
	 */
	prv = 0;
	pci_ha = pvt->pci_ha;
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory channel");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_tads == MAX_TAD) {
		sprintf(msg, "Can't discover the memory channel");
		return -EINVAL;
	}

	ch_way = TAD_CH(reg) + 1;
	sck_way = TAD_SOCK(reg);

	if (ch_way == 3)
		idx = addr >> 6;
	else {
		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
		if (pvt->is_chan_hash)
			idx = haswell_chan_hash(idx, addr);
	}
	idx = idx % ch_way;

	/*
	 * Pick the channel that was programmed for this interleave index
	 */
	switch (idx) {
	case 0:
		base_ch = TAD_TGT0(reg);
		break;
	case 1:
		base_ch = TAD_TGT1(reg);
		break;
	case 2:
		base_ch = TAD_TGT2(reg);
		break;
	case 3:
		base_ch = TAD_TGT3(reg);
		break;
	default:
		sprintf(msg, "Can't discover the TAD target");
		return -EINVAL;
	}
	*channel_mask = 1 << base_ch;

	pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);

	if (pvt->mirror_mode == FULL_MIRRORING ||
	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
		*channel_mask |= 1 << ((base_ch + 2) % 4);
		switch(ch_way) {
		case 2:
		case 4:
			sck_xch = (1 << sck_way) * (ch_way >> 1);
			break;
		default:
			sprintf(msg, "Invalid mirror set. Can't decode addr");
			return -EINVAL;
		}

		pvt->is_cur_addr_mirrored = true;
	} else {
		sck_xch = (1 << sck_way) * ch_way;
		pvt->is_cur_addr_mirrored = false;
	}

	if (pvt->is_lockstep)
		*channel_mask |= 1 << ((base_ch + 1) % 4);

	offset = TAD_OFFSET(tad_offset);

	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
		 n_tads,
		 addr,
		 limit,
		 sck_way,
		 ch_way,
		 offset,
		 idx,
		 base_ch,
		 *channel_mask);

	/* Calculate channel address */
	/* Remove the TAD offset */

	if (offset > addr) {
		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
			offset, addr);
		return -EINVAL;
	}

	ch_addr = addr - offset;
	ch_addr >>= (6 + shiftup);
	ch_addr /= sck_xch;
	ch_addr <<= (6 + shiftup);
	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
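
	/*
	 * Editorial example (not in the original source): the sequence above
	 * strips the socket/channel interleave while keeping the low
	 * 6 + shiftup bits. With shiftup = 0, sck_xch = 2, offset = 0 and
	 * addr = 0x10040: ch_addr = (((0x10040 >> 6) / 2) << 6) | 0x00 =
	 * 0x8000.
	 */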
2170
2171
2172
2173
2174 for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
2175 pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], ®);
2176
2177 if (!IS_RIR_VALID(reg))
2178 continue;
2179
2180 limit = pvt->info.rir_limit(reg);
2181 gb = div_u64_rem(limit >> 20, 1024, &mb);
2182 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
2183 n_rir,
2184 gb, (mb*1000)/1024,
2185 limit,
2186 1 << RIR_WAY(reg));
2187 if (ch_addr <= limit)
2188 break;
2189 }
2190 if (n_rir == MAX_RIR_RANGES) {
2191 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
2192 ch_addr);
2193 return -EINVAL;
2194 }
2195 rir_way = RIR_WAY(reg);
2196
2197 if (pvt->is_close_pg)
2198 idx = (ch_addr >> 6);
2199 else
2200 idx = (ch_addr >> 13);
2201 idx %= 1 << rir_way;
2202
2203 pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], ®);
2204 *rank = RIR_RNK_TGT(pvt->info.type, reg);
2205
2206 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2207 n_rir,
2208 ch_addr,
2209 limit,
2210 rir_way,
2211 idx);
2212
2213 return 0;
2214}
2215
2216
2217
2218
2219
2220
2221
2222
2223
static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
{
	int i;

	edac_dbg(0, "\n");
	for (i = 0; i < sbridge_dev->n_devs; i++) {
		struct pci_dev *pdev = sbridge_dev->pdev[i];

		if (!pdev)
			continue;
		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
			 pdev->bus->number,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		pci_dev_put(pdev);
	}
}

static void sbridge_put_all_devices(void)
{
	struct sbridge_dev *sbridge_dev, *tmp;

	list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
		sbridge_put_devices(sbridge_dev);
		free_sbridge_dev(sbridge_dev);
	}
}

static int sbridge_get_onedevice(struct pci_dev **prev,
				 u8 *num_mc,
				 const struct pci_id_table *table,
				 const unsigned devno,
				 const int multi_bus)
{
	struct sbridge_dev *sbridge_dev = NULL;
	const struct pci_id_descr *dev_descr = &table->descr[devno];
	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	int i = 0;

	sbridge_printk(KERN_DEBUG,
		       "Seeking for: PCI ID %04x:%04x\n",
		       PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	if (!pdev) {
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		if (devno == 0)
			return -ENODEV;

		sbridge_printk(KERN_INFO,
			       "Device not found: %04x:%04x\n",
			       PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		return -ENODEV;
	}
	bus = pdev->bus->number;

next_imc:
	sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
	if (!sbridge_dev) {
		if (dev_descr->dom == SOCK)
			goto out_imc;

		sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table);
		if (!sbridge_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		(*num_mc)++;
	}

	if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
		sbridge_printk(KERN_ERR,
			       "Duplicated device for %04x:%04x\n",
			       PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;

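	/* pdev belongs to more than one IMC, do extra gets */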
	if (++i > 1)
		pci_dev_get(pdev);

	if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
		goto next_imc;

out_imc:
	if (unlikely(pci_enable_device(pdev) < 0)) {
		sbridge_printk(KERN_ERR,
			       "Couldn't enable %04x:%04x\n",
			       PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	edac_dbg(0, "Detected %04x:%04x\n",
		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

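	/*
	 * As stated in drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not NULL, so we need an
	 * extra get here to keep the device pinned for later use.
	 */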
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}

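/*
 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
 *			     devices we want to reference for this driver.
 * @num_mc: pointer to the memory controllers count, to be incremented in
 *	    case of success.
 * @table: model specific table
 *
 * returns 0 in case of success or error code
 */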
static int sbridge_get_all_devices(u8 *num_mc,
				   const struct pci_id_table *table)
{
	int i, rc;
	struct pci_dev *pdev = NULL;
	int allow_dups = 0;
	int multi_bus = 0;

	if (table->type == KNIGHTS_LANDING)
		allow_dups = multi_bus = 1;
	while (table && table->descr) {
		for (i = 0; i < table->n_devs_per_sock; i++) {
			if (!allow_dups || i == 0 ||
			    table->descr[i].dev_id !=
				table->descr[i-1].dev_id) {
				pdev = NULL;
			}
			do {
				rc = sbridge_get_onedevice(&pdev, num_mc,
							   table, i, multi_bus);
				if (rc < 0) {
					if (i == 0) {
						i = table->n_devs_per_sock;
						break;
					}
					sbridge_put_all_devices();
					return -ENODEV;
				}
			} while (pdev && !allow_dups);
		}
		table++;
	}

	return 0;
}

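/*
 * TAD_DEV_TO_CHAN maps a TAD PCI device ID to its channel number: the low
 * nibble of the device ID encodes the channel starting at 0xa, so e.g.
 * PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 (low nibble 0xa) yields channel 0
 * and ..._TAD3 (low nibble 0xd) yields channel 3.
 */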
#define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)

static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;
	int i;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
			pvt->pci_sad1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
			pvt->pci_br0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
			pvt->pci_ha = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
		{
			int id = TAD_DEV_TO_CHAN(pdev->device);

			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
			pvt->pci_ddrio = pdev;
			break;
		default:
			goto error;
		}

		edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
			 pdev->vendor, pdev->device,
			 sbridge_dev->bus,
			 pdev);
	}

	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
	    !pvt->pci_ras || !pvt->pci_ta)
		goto enodev;

	if (saw_chan_mask != 0x0f)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;

error:
	sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
		       PCI_VENDOR_ID_INTEL, pdev->device);
	return -EINVAL;
}

static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;
	int i;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
			pvt->pci_ha = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
		{
			int id = TAD_DEV_TO_CHAN(pdev->device);

			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
			pvt->pci_ddrio = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
			pvt->pci_br0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
			pvt->pci_br1 = pdev;
			break;
		default:
			goto error;
		}

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
		goto enodev;

	if (saw_chan_mask != 0x0f &&
	    saw_chan_mask != 0x03)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;

error:
	sbridge_printk(KERN_ERR,
		       "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
		       pdev->device);
	return -EINVAL;
}

static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;
	int i;

	if (pvt->info.pci_vtd == NULL)
		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
						   NULL);

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
			pvt->pci_sad1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
			pvt->pci_ha = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
		{
			int id = TAD_DEV_TO_CHAN(pdev->device);

			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
			if (!pvt->pci_ddrio)
				pvt->pci_ddrio = pdev;
			break;
		default:
			break;
		}

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
	    !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
		goto enodev;

	if (saw_chan_mask != 0x0f &&
	    saw_chan_mask != 0x03)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;
}

static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
				   struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;
	int i;

	if (pvt->info.pci_vtd == NULL)
		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
						   PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
						   NULL);

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
			pvt->pci_sad1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
			pvt->pci_ha = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
		{
			int id = TAD_DEV_TO_CHAN(pdev->device);

			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
			pvt->pci_ddrio = pdev;
			break;
		default:
			break;
		}

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
	    !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
		goto enodev;

	if (saw_chan_mask != 0x0f &&
	    saw_chan_mask != 0x03)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;
}

static int knl_mci_bind_devs(struct mem_ctl_info *mci,
			     struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int dev, func;
	int i;
	int devidx;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		dev = (pdev->devfn >> 3) & 0x1f;
		func = pdev->devfn & 0x7;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
			if (dev == 8) {
				pvt->knl.pci_mc0 = pdev;
			} else if (dev == 9) {
				pvt->knl.pci_mc1 = pdev;
			} else {
				sbridge_printk(KERN_ERR,
					"Memory controller in unexpected place! (dev %d, fn %d)\n",
					dev, func);
				continue;
			}
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
			pvt->pci_sad0 = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
			pvt->pci_sad1 = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
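			/*
			 * There is one CHA per tile. They sit at PCI device
			 * 14-18, function 0-7; fold (dev, func) into a
			 * linear index in 0..KNL_MAX_CHAS-1.
			 */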
			devidx = ((dev - 14) * 8) + func;

			if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
				sbridge_printk(KERN_ERR,
					"Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
					dev, func);
				continue;
			}

			WARN_ON(pvt->knl.pci_cha[devidx] != NULL);

			pvt->knl.pci_cha[devidx] = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
			devidx = -1;
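
			/*
			 * MC0 channels 0-2 are at device 9, functions 2-4;
			 * MC1 channels 3-5 are at device 8, functions 2-4.
			 */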
			if (dev == 9)
				devidx = func - 2;
			else if (dev == 8)
				devidx = 3 + (func - 2);

			if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
				sbridge_printk(KERN_ERR,
					"DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
					dev, func);
				continue;
			}

			WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
			pvt->knl.pci_channel[devidx] = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
			pvt->knl.pci_mc_info = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
			pvt->pci_ta = pdev;
			break;

		default:
			sbridge_printk(KERN_ERR, "Unexpected device %04x\n",
				       pdev->device);
			break;
		}
	}

	if (!pvt->knl.pci_mc0 || !pvt->knl.pci_mc1 ||
	    !pvt->pci_sad0 || !pvt->pci_sad1 ||
	    !pvt->pci_ta) {
		goto enodev;
	}

	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
		if (!pvt->knl.pci_channel[i]) {
			sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
			goto enodev;
		}
	}

	for (i = 0; i < KNL_MAX_CHAS; i++) {
		if (!pvt->knl.pci_cha[i]) {
			sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
			goto enodev;
		}
	}

	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;
}
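/*
 * Error check routines.
 *
 * While Sandy Bridge has error count registers, the BIOS reads and resets
 * them over SMI, so their values are not reliable for the OS to read.
 * We have no option but to trust whatever MCE tells us about the errors.
 */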
static void sbridge_mce_output_error(struct mem_ctl_info *mci,
				     const struct mce *m)
{
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt = mci->pvt_info;
	enum hw_event_mc_err_type tp_event;
	char *type, *optype, msg[256];
	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
	bool overflow = GET_BITFIELD(m->status, 62, 62);
	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
	bool recoverable;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 channel = GET_BITFIELD(m->status, 0, 3);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	long channel_mask, first_channel;
	u8 rank, socket, ha;
	int rc, dimm;
	char *area_type = NULL;

	if (pvt->info.type != SANDY_BRIDGE)
		recoverable = true;
	else
		recoverable = GET_BITFIELD(m->status, 56, 56);

	if (uncorrected_error) {
		if (ripv) {
			type = "FATAL";
			tp_event = HW_EVENT_ERR_FATAL;
		} else {
			type = "NON_FATAL";
			tp_event = HW_EVENT_ERR_UNCORRECTED;
		}
	} else {
		type = "CORRECTED";
		tp_event = HW_EVENT_ERR_CORRECTED;
	}
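	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic.
	 */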
	if ((errcode & 0xef80) != 0x80) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}
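	/* Only decode errors with a valid address (ADDRV, bit 58) */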
	if (!GET_BITFIELD(m->status, 58, 58))
		return;

	if (pvt->info.type == KNIGHTS_LANDING) {
		if (channel == 14) {
			edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
				 overflow ? " OVERFLOW" : "",
				 (uncorrected_error && recoverable)
				 ? " recoverable" : "",
				 mscod, errcode,
				 m->bank);
		} else {
			char A = 'A';
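
			/*
			 * The reported channel is in the 0-2 range, so it
			 * cannot identify the memory controller by itself.
			 * The MC bank that reported the error tells us:
			 * bank 16 means mc1, otherwise mc0.
			 */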
			channel = knl_channel_remap(m->bank == 16, channel);
			channel_mask = 1 << channel;

			snprintf(msg, sizeof(msg),
				"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
				overflow ? " OVERFLOW" : "",
				(uncorrected_error && recoverable)
				? " recoverable" : " ",
				mscod, errcode, channel, A + channel);
			edac_mc_handle_error(tp_event, mci, core_err_cnt,
				m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
				channel, 0, -1,
				optype, msg);
		}
		return;
	} else {
		rc = get_memory_error_data(mci, m->addr, &socket, &ha,
					   &channel_mask, &rank,
					   &area_type, msg);
	}

	if (rc < 0)
		goto err_parsing;
	new_mci = get_mci_for_node_id(socket, ha);
	if (!new_mci) {
		strcpy(msg, "Error: socket got corrupted!");
		goto err_parsing;
	}
	mci = new_mci;
	pvt = mci->pvt_info;

	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);

	if (rank < 4)
		dimm = 0;
	else if (rank < 8)
		dimm = 1;
	else
		dimm = 2;

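	/*
	 * FIXME: On some memory configurations (mirror, lockstep), the
	 * Memory Controller can't point the error to a single DIMM. The
	 * EDAC core should be handling the channel mask, in order to point
	 * to the group of DIMMs where the error may be happening.
	 */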
	if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
		channel = first_channel;

	snprintf(msg, sizeof(msg),
		 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
		 overflow ? " OVERFLOW" : "",
		 (uncorrected_error && recoverable) ? " recoverable" : "",
		 area_type,
		 mscod, errcode,
		 socket, ha,
		 channel_mask,
		 rank);

	edac_dbg(0, "%s\n", msg);

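	/* FIXME: need support for channel mask */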
	if (channel == CHANNEL_UNSPECIFIED)
		channel = -1;

	edac_mc_handle_error(tp_event, mci, core_err_cnt,
			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
			     channel, dimm, -1,
			     optype, msg);
	return;
err_parsing:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
			     -1, -1, -1,
			     msg, "");
}
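/*
 * sbridge_mce_check_error	Retrieve and process an error reported by the
 *				hardware. Called by the core module.
 */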
static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
				   void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct sbridge_pvt *pvt;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = get_mci_for_node_id(mce->socketid, IMC0);
	if (!mci)
		return NOTIFY_DONE;
	pvt = mci->pvt_info;

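	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
	 * Bit 12 has a special meaning.
	 */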
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");

	sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
			  "Bank %d: %016Lx\n", mce->extcpu, type,
			  mce->mcgstatus, mce->bank, mce->status);
	sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
	sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
	sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);

	sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
			  mce->time, mce->socketid, mce->apicid);

	sbridge_mce_output_error(mci, mce);

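	/* Advise mcelog that the errors were handled */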
	return NOTIFY_STOP;
}

static struct notifier_block sbridge_mce_dec = {
	.notifier_call	= sbridge_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};

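/*
 * sbridge_unregister_mci: Uninitialize driver instance
 */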
static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
{
	struct mem_ctl_info *mci = sbridge_dev->mci;
	struct sbridge_pvt *pvt;

	if (unlikely(!mci || !mci->pvt_info)) {
		edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);

		sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	pvt = mci->pvt_info;

	edac_dbg(0, "MC: mci = %p, dev = %p\n",
		 mci, &sbridge_dev->pdev[0]->dev);

	edac_mc_del_mc(mci->pdev);

	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	sbridge_dev->mci = NULL;
}

static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct sbridge_pvt *pvt;
	struct pci_dev *pdev = sbridge_dev->pdev[0];
	int rc;

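	/* Allocate a new MC control structure */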
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = type == KNIGHTS_LANDING ?
		KNL_MAX_CHANNELS : NUM_CHANNELS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
			    sizeof(*pvt));

	if (unlikely(!mci))
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p, dev = %p\n",
		 mci, &pdev->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

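	/* Associate sbridge_dev and mci for future usage */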
	pvt->sbridge_dev = sbridge_dev;
	sbridge_dev->mci = mci;

	mci->mtype_cap = type == KNIGHTS_LANDING ?
		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "sb_edac.c";
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	pvt->info.type = type;
	switch (type) {
	case IVY_BRIDGE:
		pvt->info.rankcfgr = IB_RANK_CFG_A;
		pvt->info.get_tolm = ibridge_get_tolm;
		pvt->info.get_tohm = ibridge_get_tohm;
		pvt->info.dram_rule = ibridge_dram_rule;
		pvt->info.get_memory_type = get_memory_type;
		pvt->info.get_node_id = get_node_id;
		pvt->info.rir_limit = rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
		pvt->info.interleave_list = ibridge_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = ibridge_get_width;

		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		get_source_id(mci);
		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
		break;
	case SANDY_BRIDGE:
		pvt->info.rankcfgr = SB_RANK_CFG_A;
		pvt->info.get_tolm = sbridge_get_tolm;
		pvt->info.get_tohm = sbridge_get_tohm;
		pvt->info.dram_rule = sbridge_dram_rule;
		pvt->info.get_memory_type = get_memory_type;
		pvt->info.get_node_id = get_node_id;
		pvt->info.rir_limit = rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
		pvt->info.interleave_list = sbridge_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
		pvt->info.interleave_pkg = sbridge_interleave_pkg;
		pvt->info.get_width = sbridge_get_width;

		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		get_source_id(mci);
		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
		break;
	case HASWELL:
		pvt->info.get_tolm = haswell_get_tolm;
		pvt->info.get_tohm = haswell_get_tohm;
		pvt->info.dram_rule = ibridge_dram_rule;
		pvt->info.get_memory_type = haswell_get_memory_type;
		pvt->info.get_node_id = haswell_get_node_id;
		pvt->info.rir_limit = haswell_rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
		pvt->info.interleave_list = ibridge_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = ibridge_get_width;

		rc = haswell_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		get_source_id(mci);
		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
		break;
	case BROADWELL:
		pvt->info.get_tolm = haswell_get_tolm;
		pvt->info.get_tohm = haswell_get_tohm;
		pvt->info.dram_rule = ibridge_dram_rule;
		pvt->info.get_memory_type = haswell_get_memory_type;
		pvt->info.get_node_id = haswell_get_node_id;
		pvt->info.rir_limit = haswell_rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
		pvt->info.interleave_list = ibridge_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = broadwell_get_width;

		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		get_source_id(mci);
		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
		break;
	case KNIGHTS_LANDING:
		pvt->info.get_tolm = knl_get_tolm;
		pvt->info.get_tohm = knl_get_tohm;
		pvt->info.dram_rule = knl_dram_rule;
		pvt->info.get_memory_type = knl_get_memory_type;
		pvt->info.get_node_id = knl_get_node_id;
		pvt->info.rir_limit = NULL;
		pvt->info.sad_limit = knl_sad_limit;
		pvt->info.interleave_mode = knl_interleave_mode;
		pvt->info.dram_attr = dram_attr_knl;
		pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
		pvt->info.interleave_list = knl_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list);
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = knl_get_width;

		rc = knl_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		get_source_id(mci);
		mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
		break;
	}

	rc = get_dimm_config(mci);
	if (rc < 0) {
		edac_dbg(0, "MC: failed to get_dimm_config()\n");
		goto fail;
	}
	get_memory_layout(mci);

	mci->pdev = &pdev->dev;

	if (unlikely(edac_mc_add_mc(mci))) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		rc = -EINVAL;
		goto fail;
	}

	return 0;

fail:
	kfree(mci->ctl_name);
fail0:
	edac_mc_free(mci);
	sbridge_dev->mci = NULL;
	return rc;
}

#define ICPU(model, table) \
	{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
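
/*
 * For example, ICPU(INTEL_FAM6_SANDYBRIDGE_X, pci_dev_descr_sbridge_table)
 * expands to an x86_cpu_id entry that matches Intel family 6 parts of the
 * given model and stashes a pointer to the matching PCI device table in
 * driver_data, which sbridge_probe() later retrieves.
 */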
static const struct x86_cpu_id sbridge_cpuids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,	  pci_dev_descr_sbridge_table),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,	  pci_dev_descr_ibridge_table),
	ICPU(INTEL_FAM6_HASWELL_X,	  pci_dev_descr_haswell_table),
	ICPU(INTEL_FAM6_BROADWELL_X,	  pci_dev_descr_broadwell_table),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,	  pci_dev_descr_knl_table),
	ICPU(INTEL_FAM6_XEON_PHI_KNM,	  pci_dev_descr_knl_table),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);

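/*
 * sbridge_probe	Get all devices and register memory controllers
 *			present.
 *	return:
 *		0 for FOUND a device
 *		< 0 for error code
 */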
static int sbridge_probe(const struct x86_cpu_id *id)
{
	int rc = -ENODEV;
	u8 mc, num_mc = 0;
	struct sbridge_dev *sbridge_dev;
	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;

	rc = sbridge_get_all_devices(&num_mc, ptable);

	if (unlikely(rc < 0)) {
		edac_dbg(0, "couldn't get all devices\n");
		goto fail0;
	}

	mc = 0;

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
			 mc, mc + 1, num_mc);

		sbridge_dev->mc = mc++;
		rc = sbridge_register_mci(sbridge_dev, ptable->type);
		if (unlikely(rc < 0))
			goto fail1;
	}

	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);

	return 0;

fail1:
	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	sbridge_put_all_devices();
fail0:
	return rc;
}

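/*
 * sbridge_remove	Unregister the driver instances and release the
 *			PCI devices reserved at probe time.
 */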
static void sbridge_remove(void)
{
	struct sbridge_dev *sbridge_dev;

	edac_dbg(0, "\n");

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	sbridge_put_all_devices();
}

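/*
 * sbridge_init		Module entry function
 *			Try to initialize this module for its devices
 */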
static int __init sbridge_init(void)
{
	const struct x86_cpu_id *id;
	int rc;

	edac_dbg(2, "\n");

	id = x86_match_cpu(sbridge_cpuids);
	if (!id)
		return -ENODEV;

	opstate_init();

	rc = sbridge_probe(id);

	if (rc >= 0) {
		mce_register_decode_chain(&sbridge_mce_dec);
		if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
			sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
		return 0;
	}

	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
		       rc);

	return rc;
}

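/*
 * sbridge_exit()	Module exit function
 *			Unregister the driver
 */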
static void __exit sbridge_exit(void)
{
	edac_dbg(2, "\n");
	sbridge_remove();
	mce_unregister_decode_chain(&sbridge_mce_dec);
}

module_init(sbridge_init);
module_exit(sbridge_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
		   SBRIDGE_REVISION);