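/*
 * Intel Sandy Bridge -EDAC Memory Controller kernel module
 * (Sandy Bridge and successors: Ivy Bridge, Haswell, Broadwell,
 * Knights Landing).
 *
 * Decodes memory errors by walking the SAD (source address decoder),
 * TAD (target address decoder) and RIR (rank interleave register)
 * tables exposed through PCI configuration space.
 */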
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_core.h"

static LIST_HEAD(sbridge_edac_list);

#define SBRIDGE_REVISION	" Ver: 1.1.1 "
#define EDAC_MOD_STR		"sbridge_edac"

#define sbridge_printk(level, fmt, arg...)			\
	edac_printk(level, "sbridge", fmt, ##arg)

#define sbridge_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)

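/*
 * Extract the bit field [lo..hi] (inclusive) of v; for example,
 * GET_BITFIELD(0xabcd, 4, 7) == 0xc.
 */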
#define GET_BITFIELD(v, lo, hi) \
	(((v) & GENMASK_ULL(hi, lo)) >> (lo))

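/* SAD (source address decoder) DRAM rule registers, one offset per rule */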
static const u32 sbridge_dram_rule[] = {
	0x80, 0x88, 0x90, 0x98, 0xa0,
	0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
};

static const u32 ibridge_dram_rule[] = {
	0x60, 0x68, 0x70, 0x78, 0x80,
	0x88, 0x90, 0x98, 0xa0, 0xa8,
	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
};

static const u32 knl_dram_rule[] = {
	0x60, 0x68, 0x70, 0x78, 0x80,
	0x88, 0x90, 0x98, 0xa0, 0xa8,
	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
	0x100, 0x108, 0x110, 0x118,
};

#define DRAM_RULE_ENABLE(reg)	GET_BITFIELD(reg, 0, 0)
#define A7MODE(reg)		GET_BITFIELD(reg, 26, 26)

static char *show_dram_attr(u32 attr)
{
	switch (attr) {
	case 0:
		return "DRAM";
	case 1:
		return "MMCFG";
	case 2:
		return "NXM";
	default:
		return "unknown";
	}
}

static const u32 sbridge_interleave_list[] = {
	0x84, 0x8c, 0x94, 0x9c, 0xa4,
	0xac, 0xb4, 0xbc, 0xc4, 0xcc,
};

static const u32 ibridge_interleave_list[] = {
	0x64, 0x6c, 0x74, 0x7c, 0x84,
	0x8c, 0x94, 0x9c, 0xa4, 0xac,
	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
};

static const u32 knl_interleave_list[] = {
	0x64, 0x6c, 0x74, 0x7c, 0x84,
	0x8c, 0x94, 0x9c, 0xa4, 0xac,
	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
	0x104, 0x10c, 0x114, 0x11c,
};

struct interleave_pkg {
	unsigned char start;
	unsigned char end;
};

static const struct interleave_pkg sbridge_interleave_pkg[] = {
	{ 0, 2 },
	{ 3, 5 },
	{ 8, 10 },
	{ 11, 13 },
	{ 16, 18 },
	{ 19, 21 },
	{ 24, 26 },
	{ 27, 29 },
};

static const struct interleave_pkg ibridge_interleave_pkg[] = {
	{ 0, 3 },
	{ 4, 7 },
	{ 8, 11 },
	{ 12, 15 },
	{ 16, 19 },
	{ 20, 23 },
	{ 24, 27 },
	{ 28, 31 },
};

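/*
 * Fetch the Nth interleave target encoded in an interleave list register;
 * the bit span of each target differs between CPU generations, hence the
 * per-generation interleave_pkg tables above.
 */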
static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
			  int interleave)
{
	return GET_BITFIELD(reg, table[interleave].start,
			    table[interleave].end);
}

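/* TOLM/TOHM: top of low and high memory, read from PCI config space */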
#define TOLM		0x80
#define TOHM		0x84
#define HASWELL_TOLM	0xd0
#define HASWELL_TOHM_0	0xd4
#define HASWELL_TOHM_1	0xd8
#define KNL_TOLM	0xd0
#define KNL_TOHM_0	0xd4
#define KNL_TOHM_1	0xd8

#define GET_TOLM(reg)		((GET_BITFIELD(reg, 0,  3) << 28) | 0x3ffffff)
#define GET_TOHM(reg)		((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)

#define SAD_TARGET	0xf0

#define SOURCE_ID(reg)		GET_BITFIELD(reg, 9, 11)

#define SOURCE_ID_KNL(reg)	GET_BITFIELD(reg, 12, 14)

#define SAD_CONTROL	0xf4

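/* TAD (target address decoder) DRAM rule registers */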
static const u32 tad_dram_rule[] = {
	0x40, 0x44, 0x48, 0x4c,
	0x50, 0x54, 0x58, 0x5c,
	0x60, 0x64, 0x68, 0x6c,
};
#define MAX_TAD	ARRAY_SIZE(tad_dram_rule)

#define TAD_LIMIT(reg)		((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
#define TAD_SOCK(reg)		GET_BITFIELD(reg, 10, 11)
#define TAD_CH(reg)		GET_BITFIELD(reg,  8,  9)
#define TAD_TGT3(reg)		GET_BITFIELD(reg,  6,  7)
#define TAD_TGT2(reg)		GET_BITFIELD(reg,  4,  5)
#define TAD_TGT1(reg)		GET_BITFIELD(reg,  2,  3)
#define TAD_TGT0(reg)		GET_BITFIELD(reg,  0,  1)

#define MCMTR			0x7c
#define KNL_MCMTR		0x624

#define IS_ECC_ENABLED(mcmtr)		GET_BITFIELD(mcmtr, 2, 2)
#define IS_LOCKSTEP_ENABLED(mcmtr)	GET_BITFIELD(mcmtr, 1, 1)
#define IS_CLOSE_PG(mcmtr)		GET_BITFIELD(mcmtr, 0, 0)

#define RASENABLES		0xac
#define IS_MIRROR_ENABLED(reg)		GET_BITFIELD(reg, 0, 0)

static const int mtr_regs[] = {
	0x80, 0x84, 0x88,
};

static const int knl_mtr_reg = 0xb60;

#define RANK_DISABLE(mtr)		GET_BITFIELD(mtr, 16, 19)
#define IS_DIMM_PRESENT(mtr)		GET_BITFIELD(mtr, 14, 14)
#define RANK_CNT_BITS(mtr)		GET_BITFIELD(mtr, 12, 13)
#define RANK_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 2, 4)
#define COL_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 0, 1)

static const u32 tad_ch_nilv_offset[] = {
	0x90, 0x94, 0x98, 0x9c,
	0xa0, 0xa4, 0xa8, 0xac,
	0xb0, 0xb4, 0xb8, 0xbc,
};
#define CHN_IDX_OFFSET(reg)	GET_BITFIELD(reg, 28, 29)
#define TAD_OFFSET(reg)		(GET_BITFIELD(reg, 6, 25) << 26)

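/* RIR (rank interleave register) tables, one set per channel */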
static const u32 rir_way_limit[] = {
	0x108, 0x10c, 0x110, 0x114, 0x118,
};
#define MAX_RIR_RANGES	ARRAY_SIZE(rir_way_limit)

#define IS_RIR_VALID(reg)	GET_BITFIELD(reg, 31, 31)
#define RIR_WAY(reg)		GET_BITFIELD(reg, 28, 29)

#define MAX_RIR_WAY	8

static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
	{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
	{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
	{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
	{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};

#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))

#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
	GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))

static const u32 correrrcnt[] = {
	0x104, 0x108, 0x10c, 0x110,
};

#define RANK_ODD_OV(reg)		GET_BITFIELD(reg, 31, 31)
#define RANK_ODD_ERR_CNT(reg)		GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_OV(reg)		GET_BITFIELD(reg, 15, 15)
#define RANK_EVEN_ERR_CNT(reg)		GET_BITFIELD(reg,  0, 14)

static const u32 correrrthrsld[] = {
	0x11c, 0x120, 0x124, 0x128,
};

#define RANK_ODD_ERR_THRSLD(reg)	GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_ERR_THRSLD(reg)	GET_BITFIELD(reg,  0, 14)

#define SB_RANK_CFG_A		0x0328

#define IB_RANK_CFG_A		0x0320

#define NUM_CHANNELS		8
#define MAX_DIMMS		3
#define KNL_MAX_CHAS		38
#define KNL_MAX_CHANNELS	6
#define KNL_MAX_EDCS		8
#define CHANNEL_UNSPECIFIED	0xf

enum type {
	SANDY_BRIDGE,
	IVY_BRIDGE,
	HASWELL,
	BROADWELL,
	KNIGHTS_LANDING,
};

struct sbridge_pvt;
struct sbridge_info {
	enum type	type;
	u32		mcmtr;
	u32		rankcfgr;
	u64		(*get_tolm)(struct sbridge_pvt *pvt);
	u64		(*get_tohm)(struct sbridge_pvt *pvt);
	u64		(*rir_limit)(u32 reg);
	u64		(*sad_limit)(u32 reg);
	u32		(*interleave_mode)(u32 reg);
	char*		(*show_interleave_mode)(u32 reg);
	u32		(*dram_attr)(u32 reg);
	const u32	*dram_rule;
	const u32	*interleave_list;
	const struct interleave_pkg *interleave_pkg;
	u8		max_sad;
	u8		max_interleave;
	u8		(*get_node_id)(struct sbridge_pvt *pvt);
	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
	enum dev_type	(*get_width)(struct sbridge_pvt *pvt, u32 mtr);
	struct pci_dev	*pci_vtd;
};

struct sbridge_channel {
	u32		ranks;
	u32		dimms;
};

struct pci_id_descr {
	int		dev_id;
	int		optional;
};

struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs;
	enum type			type;
};

struct sbridge_dev {
	struct list_head	list;
	u8			bus, mc;
	u8			node_id, source_id;
	struct pci_dev		**pdev;
	int			n_devs;
	struct mem_ctl_info	*mci;
};

struct knl_pvt {
	struct pci_dev	*pci_cha[KNL_MAX_CHAS];
	struct pci_dev	*pci_channel[KNL_MAX_CHANNELS];
	struct pci_dev	*pci_mc0;
	struct pci_dev	*pci_mc1;
	struct pci_dev	*pci_mc0_misc;
	struct pci_dev	*pci_mc1_misc;
	struct pci_dev	*pci_mc_info;
};

struct sbridge_pvt {
	struct pci_dev		*pci_ta, *pci_ddrio, *pci_ras;
	struct pci_dev		*pci_sad0, *pci_sad1;
	struct pci_dev		*pci_ha0, *pci_ha1;
	struct pci_dev		*pci_br0, *pci_br1;
	struct pci_dev		*pci_ha1_ta;
	struct pci_dev		*pci_tad[NUM_CHANNELS];

	struct sbridge_dev	*sbridge_dev;

	struct sbridge_info	info;
	struct sbridge_channel	channel[NUM_CHANNELS];

	bool			is_mirrored, is_lockstep, is_close_pg;
	bool			is_chan_hash;

	u64			tolm, tohm;
	struct knl_pvt		knl;
};

#define PCI_DESCR(device_id, opt)	\
	.dev_id = (device_id),		\
	.optional = opt

static const struct pci_id_descr pci_dev_descr_sbridge[] = {
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
};

#define PCI_ID_TABLE_ENTRY(A, T) {	\
	.descr = A,			\
	.n_devs = ARRAY_SIZE(A),	\
	.type = T			\
}

static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
	{0,}
};

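/* Ivy Bridge PCI device IDs (not in <linux/pci_ids.h>) */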
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0	0x0eb8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0	0x0ebc

#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0		0x0ea0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA		0x0ea8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS		0x0e71
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0	0x0eaa
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1	0x0eab
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2	0x0eac
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3	0x0ead
#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD			0x0ec8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0			0x0ec9
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1			0x0eca
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1		0x0e60
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA		0x0e68
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS		0x0e79
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0	0x0e6a
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1	0x0e6b
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2	0x0e6c
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3	0x0e6d

static const struct pci_id_descr pci_dev_descr_ibridge[] = {
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) },
#if 0
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
#endif
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) },
};

static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
	{0,}
};

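/* Haswell registers and PCI device IDs */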
#define HASWELL_DDRCRCLKCONTROLS			0xa10
#define HASWELL_HASYSDEFEATURE2				0x84
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC	0x2f28
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0		0x2fa0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1		0x2f60
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA		0x2fa8
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL	0x2f71
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA		0x2f68
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL	0x2f79
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0	0x2ffc
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1	0x2ffd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0	0x2faa
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1	0x2fab
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2	0x2fac
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3	0x2fad
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0	0x2f6a
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1	0x2f6b
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2	0x2f6c
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3	0x2f6d
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0		0x2fbd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1		0x2fbf
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2		0x2fb9
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3		0x2fbb
static const struct pci_id_descr pci_dev_descr_haswell[] = {
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) },
};

static const struct pci_id_table pci_dev_descr_haswell_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
	{0,}
};

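/*
 * Knights Landing support. KNL channels are swizzled between memory
 * controllers: MC0 maps to CH3-5 and MC1 to CH0-2 (see knl_channel_mc()).
 */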
#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)

#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC		0x7840
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL	0x7843
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA		0x7844
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0	0x782a
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1	0x782b
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA		0x782c
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM	0x7810

static const struct pci_id_descr pci_dev_descr_knl[] = {
	[0]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0) },
	[1]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0) },
	[2 ... 3]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0) },
	[4 ... 41]  = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0) },
	[42 ... 47] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL, 0) },
	[48]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0) },
	[49]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0) },
};

static const struct pci_id_table pci_dev_descr_knl_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
	{0,}
};

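/* Broadwell registers and PCI device IDs */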
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC	0x6f28
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0		0x6fa0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1		0x6f60
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA	0x6fa8
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL	0x6f71
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA	0x6f68
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL	0x6f79
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0	0x6ffc
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1	0x6ffd
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0	0x6faa
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1	0x6fab
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2	0x6fac
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3	0x6fad
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0	0x6f6a
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1	0x6f6b
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2	0x6f6c
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3	0x6f6d
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0	0x6faf

static const struct pci_id_descr pci_dev_descr_broadwell[] = {
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1) },
};

static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
	{0,}
};

static inline int numrank(enum type type, u32 mtr)
{
	int ranks = (1 << RANK_CNT_BITS(mtr));
	int max = 4;

	if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
		max = 8;

	if (ranks > max) {
		edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
			 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
		return -EINVAL;
	}

	return ranks;
}

static inline int numrow(u32 mtr)
{
	int rows = (RANK_WIDTH_BITS(mtr) + 12);

	if (rows < 13 || rows > 18) {
		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}

	return 1 << rows;
}

static inline int numcol(u32 mtr)
{
	int cols = (COL_WIDTH_BITS(mtr) + 10);

	if (cols > 12) {
		edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
			 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}

	return 1 << cols;
}

static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus)
{
	struct sbridge_dev *sbridge_dev;

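	/*
	 * If we have devices scattered across several busses that pertain
	 * to the same memory controller, we'll lump them all together.
	 */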
	if (multi_bus) {
		return list_first_entry_or_null(&sbridge_edac_list,
				struct sbridge_dev, list);
	}

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		if (sbridge_dev->bus == bus)
			return sbridge_dev;
	}

	return NULL;
}

static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
					     const struct pci_id_table *table)
{
	struct sbridge_dev *sbridge_dev;

	sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
	if (!sbridge_dev)
		return NULL;

	sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
				    GFP_KERNEL);
	if (!sbridge_dev->pdev) {
		kfree(sbridge_dev);
		return NULL;
	}

	sbridge_dev->bus = bus;
	sbridge_dev->n_devs = table->n_devs;
	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);

	return sbridge_dev;
}

static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{
	list_del(&sbridge_dev->list);
	kfree(sbridge_dev->pdev);
	kfree(sbridge_dev);
}

static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
	return GET_TOLM(reg);
}

static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
	return GET_TOHM(reg);
}

static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);

	return GET_TOLM(reg);
}

static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);

	return GET_TOHM(reg);
}

static u64 rir_limit(u32 reg)
{
	return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
}

static u64 sad_limit(u32 reg)
{
	return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
}

static u32 interleave_mode(u32 reg)
{
	return GET_BITFIELD(reg, 1, 1);
}

static char *show_interleave_mode(u32 reg)
{
	return interleave_mode(reg) ? "8:6" : "[8:6]XOR[18:16]";
}

static u32 dram_attr(u32 reg)
{
	return GET_BITFIELD(reg, 2, 3);
}

static u64 knl_sad_limit(u32 reg)
{
	return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
}

static u32 knl_interleave_mode(u32 reg)
{
	return GET_BITFIELD(reg, 1, 2);
}

static char *knl_show_interleave_mode(u32 reg)
{
	char *s;

	switch (knl_interleave_mode(reg)) {
	case 0:
		s = "use address bits [8:6]";
		break;
	case 1:
		s = "use address bits [10:8]";
		break;
	case 2:
		s = "use address bits [14:12]";
		break;
	case 3:
		s = "use address bits [32:30]";
		break;
	default:
		WARN_ON(1);
		break;
	}

	return s;
}

static u32 dram_attr_knl(u32 reg)
{
	return GET_BITFIELD(reg, 3, 4);
}

static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
{
	u32 reg;
	enum mem_type mtype;

	if (pvt->pci_ddrio) {
		pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
				      &reg);
		if (GET_BITFIELD(reg, 11, 11))
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
	} else
		mtype = MEM_UNKNOWN;

	return mtype;
}

static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
{
	u32 reg;
	bool registered = false;
	enum mem_type mtype = MEM_UNKNOWN;

	if (!pvt->pci_ddrio)
		goto out;

	pci_read_config_dword(pvt->pci_ddrio,
			      HASWELL_DDRCRCLKCONTROLS, &reg);

	if (GET_BITFIELD(reg, 16, 16))
		registered = true;

	pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
	if (GET_BITFIELD(reg, 14, 14)) {
		if (registered)
			mtype = MEM_RDDR4;
		else
			mtype = MEM_DDR4;
	} else {
		if (registered)
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
	}

out:
	return mtype;
}

static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	return DEV_X16;
}

static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	return DEV_UNKNOWN;
}

static enum dev_type __ibridge_get_width(u32 mtr)
{
	enum dev_type type;

	switch (mtr) {
	case 3:
		type = DEV_UNKNOWN;
		break;
	case 2:
		type = DEV_X16;
		break;
	case 1:
		type = DEV_X8;
		break;
	case 0:
		type = DEV_X4;
		break;
	}

	return type;
}

static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
}

static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
}

static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
{
	return MEM_RDDR4;
}

static u8 get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 2);
}

static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 3);
}

static u8 knl_get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 2);
}

static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}

static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
{
	u64 rc;
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
	rc = GET_BITFIELD(reg, 26, 31);
	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
	rc = ((reg << 6) | rc) << 26;

	return rc | 0x1ffffff;
}

static u64 knl_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}

static u64 knl_get_tohm(struct sbridge_pvt *pvt)
{
	u64 rc;
	u32 reg_lo, reg_hi;

	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
	rc = ((u64)reg_hi << 32) | reg_lo;
	return rc | 0x3ffffff;
}

static u64 haswell_rir_limit(u32 reg)
{
	return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1;
}

static inline u8 sad_pkg_socket(u8 pkg)
{
	return ((pkg >> 3) << 2) | (pkg & 0x3);
}

static inline u8 sad_pkg_ha(u8 pkg)
{
	return (pkg >> 2) & 0x1;
}

static int haswell_chan_hash(int idx, u64 addr)
{
	int i;

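	/*
	 * XOR two bits of the channel index at a time with address bit
	 * pairs [13:12] through [27:26]: even address bits fold into
	 * bit 0 of idx, odd address bits into bit 1.
	 */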
	for (i = 12; i < 28; i += 2)
		idx ^= (addr >> i) & 3;

	return idx;
}

static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id)
{
	struct pci_dev *pdev = NULL;

	do {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev);
		if (pdev && pdev->bus->number == bus)
			break;
	} while (pdev);

	return pdev;
}
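
/*
 * check_if_ecc_is_active() - check whether ECC is enabled on the memory
 * controller of the given bus/type. Returns 0 when ECC is active, or
 * -ENODEV when it is disabled or the TA device used to read MCMTR
 * cannot be found.
 */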
static int check_if_ecc_is_active(const u8 bus, enum type type)
{
	struct pci_dev *pdev = NULL;
	u32 mcmtr, id;

	switch (type) {
	case IVY_BRIDGE:
		id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
		break;
	case HASWELL:
		id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
		break;
	case SANDY_BRIDGE:
		id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
		break;
	case BROADWELL:
		id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA;
		break;
	case KNIGHTS_LANDING:
		id = PCI_DEVICE_ID_INTEL_KNL_IMC_TA;
		break;
	default:
		return -ENODEV;
	}

	if (type != KNIGHTS_LANDING)
		pdev = get_pdev_same_bus(bus, id);
	else
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, NULL);

	if (!pdev) {
		sbridge_printk(KERN_ERR, "Couldn't find PCI device "
					"%04x:%04x! on bus %02d\n",
					PCI_VENDOR_ID_INTEL, id, bus);
		return -ENODEV;
	}

	pci_read_config_dword(pdev,
			type == KNIGHTS_LANDING ? KNL_MCMTR : MCMTR, &mcmtr);
	if (!IS_ECC_ENABLED(mcmtr)) {
		sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
		return -ENODEV;
	}
	return 0;
}

static const u32 knl_tad_dram_limit_lo[] = {
	0x400, 0x500, 0x600, 0x700,
	0x800, 0x900, 0xa00, 0xb00,
};

static const u32 knl_tad_dram_offset_lo[] = {
	0x404, 0x504, 0x604, 0x704,
	0x804, 0x904, 0xa04, 0xb04,
};

static const u32 knl_tad_dram_hi[] = {
	0x408, 0x508, 0x608, 0x708,
	0x808, 0x908, 0xa08, 0xb08,
};

static const u32 knl_tad_ways[] = {
	8, 6, 4, 3, 2, 1,
};
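
/*
 * knl_get_tad() - read and decode one TAD entry of one KNL memory
 * controller.
 * @pvt:	driver private data
 * @entry:	which TAD entry to read
 * @mc:		which memory controller (0 or 1)
 * @offset:	output: region offset
 * @limit:	output: inclusive limit of the region
 * @ways:	output: interleave wayness
 *
 * Returns 0 on success, -EINVAL for a bad @mc, or -ENODEV when the
 * entry is disabled or its wayness field is invalid.
 */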
static int knl_get_tad(const struct sbridge_pvt *pvt,
		       const int entry,
		       const int mc,
		       u64 *offset,
		       u64 *limit,
		       int *ways)
{
	u32 reg_limit_lo, reg_offset_lo, reg_hi;
	struct pci_dev *pci_mc;
	int way_id;

	switch (mc) {
	case 0:
		pci_mc = pvt->knl.pci_mc0;
		break;
	case 1:
		pci_mc = pvt->knl.pci_mc1;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	pci_read_config_dword(pci_mc,
			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
	pci_read_config_dword(pci_mc,
			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
	pci_read_config_dword(pci_mc,
			knl_tad_dram_hi[entry], &reg_hi);

	/* Is this TAD entry enabled? */
	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
		return -ENODEV;

	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);

	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
		*ways = knl_tad_ways[way_id];
	} else {
		*ways = 0;
		sbridge_printk(KERN_ERR,
				"Unexpected value %d in mc_tad_limit_lo wayness field\n",
				way_id);
		return -ENODEV;
	}

	/*
	 * The least significant 6 bits of base and limit are truncated.
	 * For limit, we fill the missing bits with 1s.
	 */
	*offset = ((u64)GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
				((u64)GET_BITFIELD(reg_hi, 0, 15) << 32);
	*limit = ((u64)GET_BITFIELD(reg_limit_lo, 6, 31) << 6) | 63 |
				((u64)GET_BITFIELD(reg_hi, 16, 31) << 32);

	return 0;
}

/* Determine which memory controller owns a given channel. */
static int knl_channel_mc(int channel)
{
	WARN_ON(channel < 0 || channel >= 6);

	return channel < 3 ? 1 : 0;
}
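
/*
 * Get the Nth entry from the EDC_ROUTE_TABLE register: each entry is a
 * 3-bit field, so entry N lives in bits [3N+2:3N].
 */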
static u32 knl_get_edc_route(int entry, u32 reg)
{
	WARN_ON(entry >= KNL_MAX_EDCS);
	return GET_BITFIELD(reg, entry*3, (entry*3)+2);
}

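/*
 * Get the Nth entry from the MC_ROUTE_TABLE register: each entry is a
 * pair of a 3-bit field in bits [3N+2:3N] and a 2-bit field in bits
 * [2N+19:2N+18], folded into a 0-5 channel number by knl_channel_remap().
 */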
static u32 knl_get_mc_route(int entry, u32 reg)
{
	int mc, chan;

	WARN_ON(entry >= KNL_MAX_CHANNELS);

	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);

	return knl_channel_remap(mc, chan);
}

/* Render the EDC route table as a human-readable string for debugging. */
static void knl_show_edc_route(u32 reg, char *s)
{
	int i;

	for (i = 0; i < KNL_MAX_EDCS; i++) {
		s[i*2] = knl_get_edc_route(i, reg) + '0';
		s[i*2+1] = '-';
	}

	s[KNL_MAX_EDCS*2 - 1] = '\0';
}

/* Render the MC route table as a human-readable string for debugging. */
static void knl_show_mc_route(u32 reg, char *s)
{
	int i;

	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
		s[i*2] = knl_get_mc_route(i, reg) + '0';
		s[i*2+1] = '-';
	}

	s[KNL_MAX_CHANNELS*2 - 1] = '\0';
}

#define KNL_EDC_ROUTE	0xb8
#define KNL_MC_ROUTE	0xb4

/* Is this DRAM rule backed by regular DRAM in flat mode? */
#define KNL_EDRAM(reg)		GET_BITFIELD(reg, 29, 29)

/* Is this DRAM rule cached in EDRAM? */
#define KNL_CACHEABLE(reg)	GET_BITFIELD(reg, 28, 28)

/* Is this rule backed by EDC only? */
#define KNL_EDRAM_ONLY(reg)	GET_BITFIELD(reg, 29, 29)

/* Is this rule interleaved mod3 across channels? */
#define KNL_MOD3(reg)		GET_BITFIELD(reg, 27, 27)
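
/*
 * knl_get_dimm_capacity() - Estimate the per-channel DRAM size on KNL.
 *
 * There is no direct way to read the DIMM geometry on KNL, so the size
 * is reconstructed by walking the address-decode tables:
 *
 *  1. Read the EDC and MC route tables from every CHA and dump them for
 *     debugging (they are normally identical across CHAs).
 *  2. For each enabled SAD rule, determine the interleave wayness and,
 *     via the TAD tables of both memory controllers, how many bytes of
 *     the rule actually land in DRAM.
 *  3. Credit each participating channel with its share
 *     (sad_actual_size / interleave ways).
 *
 * The per-channel totals are returned in mc_sizes[].
 */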
static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
{
	u64 sad_base, sad_size, sad_limit = 0;
	u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
	int sad_rule = 0;
	int tad_rule = 0;
	int intrlv_ways, tad_ways;
	u32 first_pkg, pkg;
	int i;
	u64 sad_actual_size[2];
	u32 dram_rule, interleave_reg;
	u32 mc_route_reg[KNL_MAX_CHAS];
	u32 edc_route_reg[KNL_MAX_CHAS];
	int edram_only;
	char edc_route_string[KNL_MAX_EDCS*2];
	char mc_route_string[KNL_MAX_CHANNELS*2];
	int cur_reg_start;
	int mc;
	int channel;
	int way;
	int participants[KNL_MAX_CHANNELS];
	int participant_count = 0;

	for (i = 0; i < KNL_MAX_CHANNELS; i++)
		mc_sizes[i] = 0;

	/* Read the EDC route table in each CHA. */
	cur_reg_start = 0;
	for (i = 0; i < KNL_MAX_CHAS; i++) {
		pci_read_config_dword(pvt->knl.pci_cha[i],
				KNL_EDC_ROUTE, &edc_route_reg[i]);

		if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
			knl_show_edc_route(edc_route_reg[i-1],
					edc_route_string);
			if (cur_reg_start == i-1)
				edac_dbg(0, "edc route table for CHA %d: %s\n",
					cur_reg_start, edc_route_string);
			else
				edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
					cur_reg_start, i-1, edc_route_string);
			cur_reg_start = i;
		}
	}
	knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
	if (cur_reg_start == i-1)
		edac_dbg(0, "edc route table for CHA %d: %s\n",
			cur_reg_start, edc_route_string);
	else
		edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
			cur_reg_start, i-1, edc_route_string);

	/* Read the MC route table in each CHA. */
	cur_reg_start = 0;
	for (i = 0; i < KNL_MAX_CHAS; i++) {
		pci_read_config_dword(pvt->knl.pci_cha[i],
			KNL_MC_ROUTE, &mc_route_reg[i]);

		if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
			knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
			if (cur_reg_start == i-1)
				edac_dbg(0, "mc route table for CHA %d: %s\n",
					cur_reg_start, mc_route_string);
			else
				edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
					cur_reg_start, i-1, mc_route_string);
			cur_reg_start = i;
		}
	}
	knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
	if (cur_reg_start == i-1)
		edac_dbg(0, "mc route table for CHA %d: %s\n",
			cur_reg_start, mc_route_string);
	else
		edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
			cur_reg_start, i-1, mc_route_string);

	/* Process each DRAM rule. */
	for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
		/* previous limit becomes the new base */
		sad_base = sad_limit;

		pci_read_config_dword(pvt->pci_sad0,
			pvt->info.dram_rule[sad_rule], &dram_rule);

		if (!DRAM_RULE_ENABLE(dram_rule))
			break;

		edram_only = KNL_EDRAM_ONLY(dram_rule);

		sad_limit = pvt->info.sad_limit(dram_rule)+1;
		sad_size = sad_limit - sad_base;

		pci_read_config_dword(pvt->pci_sad0,
			pvt->info.interleave_list[sad_rule], &interleave_reg);

		/*
		 * Find out how many ways this SAD rule is interleaved:
		 * stop at the first target that repeats the first entry.
		 */
		first_pkg = sad_pkg(pvt->info.interleave_pkg,
						interleave_reg, 0);
		for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
			pkg = sad_pkg(pvt->info.interleave_pkg,
						interleave_reg, intrlv_ways);

			if ((pkg & 0x8) == 0) {
				/*
				 * A 0 bit means the memory is non-local,
				 * which KNL doesn't support.
				 */
				edac_dbg(0, "Unexpected interleave target %d\n",
					pkg);
				return -1;
			}

			if (pkg == first_pkg)
				break;
		}
		if (KNL_MOD3(dram_rule))
			intrlv_ways *= 3;

		edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
			sad_rule,
			sad_base,
			sad_limit,
			intrlv_ways,
			edram_only ? ", EDRAM" : "");

		/*
		 * Find out how big the SAD region really is by iterating
		 * over the TAD tables (SAD regions may contain holes).
		 * Each memory controller might have a different TAD table,
		 * so we have to look at both.
		 *
		 * Livespace is the memory that's mapped in this TAD table,
		 * deadspace is the holes (this could be the MMIO hole, or
		 * memory that's mapped by the other TAD table but not by
		 * this one).
		 */
		for (mc = 0; mc < 2; mc++) {
			sad_actual_size[mc] = 0;
			tad_livespace = 0;
			for (tad_rule = 0;
					tad_rule < ARRAY_SIZE(
						knl_tad_dram_limit_lo);
					tad_rule++) {
				if (knl_get_tad(pvt,
						tad_rule,
						mc,
						&tad_deadspace,
						&tad_limit,
						&tad_ways))
					break;

				tad_size = (tad_limit+1) -
					(tad_livespace + tad_deadspace);
				tad_livespace += tad_size;
				tad_base = (tad_limit+1) - tad_size;

				if (tad_base < sad_base) {
					if (tad_limit > sad_base)
						edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
				} else if (tad_base < sad_limit) {
					if (tad_limit+1 > sad_limit) {
						edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
					} else {
						/* TAD region is completely inside SAD region */
						edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
							tad_rule, tad_base,
							tad_limit, tad_size,
							mc);
						sad_actual_size[mc] += tad_size;
					}
				}
				tad_base = tad_limit+1;
			}
		}

		for (mc = 0; mc < 2; mc++) {
			edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
				mc, sad_actual_size[mc], sad_actual_size[mc]);
		}

		/* Ignore EDRAM-only rules. */
		if (edram_only)
			continue;

		/* Figure out which channels participate in interleave. */
		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
			participants[channel] = 0;
		participant_count = 0;

		/*
		 * A channel participates in this rule if any CHA routes
		 * one of the rule's interleave targets to it.
		 */
		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
			for (way = 0; way < intrlv_ways; way++) {
				int target;
				int cha;

				if (KNL_MOD3(dram_rule))
					target = way;
				else
					target = 0x7 & sad_pkg(
				pvt->info.interleave_pkg, interleave_reg, way);

				for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
					if (knl_get_mc_route(target,
						mc_route_reg[cha]) == channel
						&& !participants[channel]) {
						participant_count++;
						participants[channel] = 1;
						break;
					}
				}
			}
		}

		if (participant_count != intrlv_ways)
			edac_dbg(0, "participant_count (%d) != interleave_ways (%d): DIMM size may be incorrect\n",
				participant_count, intrlv_ways);

		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
			mc = knl_channel_mc(channel);
			if (participants[channel]) {
				edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
					channel,
					sad_actual_size[mc]/intrlv_ways,
					sad_rule);
				mc_sizes[channel] +=
					sad_actual_size[mc]/intrlv_ways;
			}
		}
	}

	return 0;
}

static int get_dimm_config(struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	unsigned i, j, banks, ranks, rows, cols, npages;
	u64 size;
	u32 reg;
	enum edac_type mode;
	enum mem_type mtype;
	int channels = pvt->info.type == KNIGHTS_LANDING ?
		KNL_MAX_CHANNELS : NUM_CHANNELS;
	u64 knl_mc_sizes[KNL_MAX_CHANNELS];

	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
		pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
		pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
	}
	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
			pvt->info.type == KNIGHTS_LANDING)
		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
	else
		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);

	if (pvt->info.type == KNIGHTS_LANDING)
		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
	else
		pvt->sbridge_dev->source_id = SOURCE_ID(reg);

	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
		 pvt->sbridge_dev->mc,
		 pvt->sbridge_dev->node_id,
		 pvt->sbridge_dev->source_id);

	/*
	 * KNL doesn't support mirroring or lockstep,
	 * and is always closed page.
	 */
	if (pvt->info.type == KNIGHTS_LANDING) {
		mode = EDAC_S4ECD4ED;
		pvt->is_mirrored = false;

		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
			return -1;
	} else {
		pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
		if (IS_MIRROR_ENABLED(reg)) {
			edac_dbg(0, "Memory mirror is enabled\n");
			pvt->is_mirrored = true;
		} else {
			edac_dbg(0, "Memory mirror is disabled\n");
			pvt->is_mirrored = false;
		}

		pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
			edac_dbg(0, "Lockstep is enabled\n");
			mode = EDAC_S8ECD8ED;
			pvt->is_lockstep = true;
		} else {
			edac_dbg(0, "Lockstep is disabled\n");
			mode = EDAC_S4ECD4ED;
			pvt->is_lockstep = false;
		}
		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
			edac_dbg(0, "address map is on closed page mode\n");
			pvt->is_close_pg = true;
		} else {
			edac_dbg(0, "address map is on open page mode\n");
			pvt->is_close_pg = false;
		}
	}

	mtype = pvt->info.get_memory_type(pvt);
	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
		edac_dbg(0, "Memory is registered\n");
	else if (mtype == MEM_UNKNOWN)
		edac_dbg(0, "Cannot determine memory type\n");
	else
		edac_dbg(0, "Memory is unregistered\n");

	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
		banks = 16;
	else
		banks = 8;

	for (i = 0; i < channels; i++) {
		u32 mtr;
		int max_dimms_per_channel;

		if (pvt->info.type == KNIGHTS_LANDING) {
			max_dimms_per_channel = 1;
			if (!pvt->knl.pci_channel[i])
				continue;
		} else {
			max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
			if (!pvt->pci_tad[i])
				continue;
		}

		for (j = 0; j < max_dimms_per_channel; j++) {
			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
				       i, j, 0);
			if (pvt->info.type == KNIGHTS_LANDING) {
				pci_read_config_dword(pvt->knl.pci_channel[i],
					knl_mtr_reg, &mtr);
			} else {
				pci_read_config_dword(pvt->pci_tad[i],
					mtr_regs[j], &mtr);
			}
			edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
			if (IS_DIMM_PRESENT(mtr)) {
				pvt->channel[i].dimms++;

				ranks = numrank(pvt->info.type, mtr);

				if (pvt->info.type == KNIGHTS_LANDING) {
					/* For DDR4, the number of columns is fixed. */
					cols = 1 << 10;
					rows = knl_mc_sizes[i] /
						((u64) cols * ranks * banks * 8);
				} else {
					rows = numrow(mtr);
					cols = numcol(mtr);
				}

				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
				npages = MiB_TO_PAGES(size);

				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
					 pvt->sbridge_dev->mc, i/4, i%4, j,
					 size, npages,
					 banks, ranks, rows, cols);

				dimm->nr_pages = npages;
				dimm->grain = 32;
				dimm->dtype = pvt->info.get_width(pvt, mtr);
				dimm->mtype = mtype;
				dimm->edac_mode = mode;
				snprintf(dimm->label, sizeof(dimm->label),
					 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
					 pvt->sbridge_dev->source_id, i/4, i%4, j);
			}
		}
	}

	return 0;
}

static void get_memory_layout(const struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int i, j, k, n_sads, n_tads, sad_interl;
	u32 reg;
	u64 limit, prv = 0;
	u64 tmp_mb;
	u32 gb, mb;
	u32 rir_way;

	/*
	 * Step 1) Get TOLM/TOHM ranges
	 */
	pvt->tolm = pvt->info.get_tolm(pvt);
	tmp_mb = (1 + pvt->tolm) >> 20;

	gb = div_u64_rem(tmp_mb, 1024, &mb);
	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
		gb, (mb*1000)/1024, (u64)pvt->tolm);

	pvt->tohm = pvt->info.get_tohm(pvt);
	tmp_mb = (1 + pvt->tohm) >> 20;

	gb = div_u64_rem(tmp_mb, 1024, &mb);
	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
		gb, (mb*1000)/1024, (u64)pvt->tohm);

	/*
	 * Step 2) Get SAD ranges and interleave mode
	 */
	prv = 0;
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);
		limit = pvt->info.sad_limit(reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		if (limit <= prv)
			break;

		tmp_mb = (limit + 1) >> 20;
		gb = div_u64_rem(tmp_mb, 1024, &mb);
		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
			 n_sads,
			 show_dram_attr(pvt->info.dram_attr(reg)),
			 gb, (mb*1000)/1024,
			 ((u64)tmp_mb) << 20L,
			 pvt->info.show_interleave_mode(reg),
			 reg);
		prv = limit;

		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
				      &reg);
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (j = 0; j < 8; j++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
			if (j > 0 && sad_interl == pkg)
				break;

			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
				 n_sads, j, pkg);
		}
	}

	if (pvt->info.type == KNIGHTS_LANDING)
		return;

	/*
	 * Step 3) Get TAD range
	 */
	prv = 0;
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
				      &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv)
			break;
		tmp_mb = (limit + 1) >> 20;

		gb = div_u64_rem(tmp_mb, 1024, &mb);
		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
			 n_tads, gb, (mb*1000)/1024,
			 ((u64)tmp_mb) << 20L,
			 (u32)(1 << TAD_SOCK(reg)),
			 (u32)TAD_CH(reg) + 1,
			 (u32)TAD_TGT0(reg),
			 (u32)TAD_TGT1(reg),
			 (u32)TAD_TGT2(reg),
			 (u32)TAD_TGT3(reg),
			 reg);
		prv = limit;
	}

	/*
	 * Step 4) Get TAD offsets, per each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < n_tads; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      tad_ch_nilv_offset[j],
					      &reg);
			tmp_mb = TAD_OFFSET(reg) >> 20;
			gb = div_u64_rem(tmp_mb, 1024, &mb);
			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
				 i, j,
				 gb, (mb*1000)/1024,
				 ((u64)tmp_mb) << 20L,
				 reg);
		}
	}

	/*
	 * Step 5) Get RIR wayness/limit, per each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < MAX_RIR_RANGES; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      rir_way_limit[j],
					      &reg);

			if (!IS_RIR_VALID(reg))
				continue;

			tmp_mb = pvt->info.rir_limit(reg) >> 20;
			rir_way = 1 << RIR_WAY(reg);
			gb = div_u64_rem(tmp_mb, 1024, &mb);
			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
				 i, j,
				 gb, (mb*1000)/1024,
				 ((u64)tmp_mb) << 20L,
				 rir_way,
				 reg);

			for (k = 0; k < rir_way; k++) {
				pci_read_config_dword(pvt->pci_tad[i],
						      rir_offset[j][k],
						      &reg);
				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;

				gb = div_u64_rem(tmp_mb, 1024, &mb);
				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
					 i, j, k,
					 gb, (mb*1000)/1024,
					 ((u64)tmp_mb) << 20L,
					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
					 reg);
			}
		}
	}
}

static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
{
	struct sbridge_dev *sbridge_dev;

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		if (sbridge_dev->node_id == node_id)
			return sbridge_dev->mci;
	}
	return NULL;
}

static int get_memory_error_data(struct mem_ctl_info *mci,
				 u64 addr,
				 u8 *socket, u8 *ha,
				 long *channel_mask,
				 u8 *rank,
				 char **area_type, char *msg)
{
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pci_ha;
	int n_rir, n_sads, n_tads, sad_way, sck_xch;
	int sad_interl, idx, base_ch;
	int interleave_mode, shiftup = 0;
	unsigned sad_interleave[pvt->info.max_interleave];
	u32 reg, dram_rule;
	u8 ch_way, sck_way, pkg, sad_ha = 0, ch_add = 0;
	u32 tad_offset;
	u32 rir_way;
	u32 mb, gb;
	u64 ch_addr, offset, limit = 0, prv = 0;

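	/*
	 * Step 0) Check if the address is at special memory ranges.
	 * The checks below are probably enough to cover all cases where
	 * the error is not inside memory, except for the legacy range
	 * (e.g. VGA addresses). It is unlikely, however, that the memory
	 * controller would generate an error on that range.
	 */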
	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
		return -EINVAL;
	}
	if (addr >= (u64)pvt->tohm) {
		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
		return -EINVAL;
	}

	/*
	 * Step 1) Get socket
	 */
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		limit = pvt->info.sad_limit(reg);
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory socket");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_sads == pvt->info.max_sad) {
		sprintf(msg, "Can't discover the memory socket");
		return -EINVAL;
	}
	dram_rule = reg;
	*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
	interleave_mode = pvt->info.interleave_mode(dram_rule);

	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
			      &reg);

	if (pvt->info.type == SANDY_BRIDGE) {
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (sad_way = 0; sad_way < 8; sad_way++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
			if (sad_way > 0 && sad_interl == pkg)
				break;
			sad_interleave[sad_way] = pkg;
			edac_dbg(0, "SAD interleave #%d: %d\n",
				 sad_way, sad_interleave[sad_way]);
		}
		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
			 pvt->sbridge_dev->mc,
			 n_sads,
			 addr,
			 limit,
			 sad_way + 7,
			 !interleave_mode ? "" : "XOR[18:16]");
		if (interleave_mode)
			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
		else
			idx = (addr >> 6) & 7;
		switch (sad_way) {
		case 1:
			idx = 0;
			break;
		case 2:
			idx = idx & 1;
			break;
		case 4:
			idx = idx & 3;
			break;
		case 8:
			break;
		default:
			sprintf(msg, "Can't discover socket interleave");
			return -EINVAL;
		}
		*socket = sad_interleave[idx];
		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
			 idx, sad_way, *socket);
	} else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
		int bits, a7mode = A7MODE(dram_rule);

		if (a7mode) {
			/* A7 mode swaps P9 with P6 */
			bits = GET_BITFIELD(addr, 7, 8) << 1;
			bits |= GET_BITFIELD(addr, 9, 9);
		} else
			bits = GET_BITFIELD(addr, 6, 8);

		if (interleave_mode == 0) {
			/* interleave mode will XOR {8,7,6} with {18,17,16} */
			idx = GET_BITFIELD(addr, 16, 18);
			idx ^= bits;
		} else
			idx = bits;

		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
		*socket = sad_pkg_socket(pkg);
		sad_ha = sad_pkg_ha(pkg);
		if (sad_ha)
			ch_add = 4;

		if (a7mode) {
			/* MCChanShiftUpEnable */
			pci_read_config_dword(pvt->pci_ha0,
					      HASWELL_HASYSDEFEATURE2, &reg);
			shiftup = GET_BITFIELD(reg, 22, 22);
		}

		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
			 idx, *socket, sad_ha, shiftup);
	} else {
		/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
		idx = (addr >> 6) & 7;
		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
		*socket = sad_pkg_socket(pkg);
		sad_ha = sad_pkg_ha(pkg);
		if (sad_ha)
			ch_add = 4;
		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
			 idx, *socket, sad_ha);
	}

	*ha = sad_ha;

	/*
	 * Move to the proper node structure, in order to access the
	 * right PCI registers.
	 */
	new_mci = get_mci_for_node_id(*socket);
	if (!new_mci) {
		sprintf(msg, "Struct for socket #%u wasn't initialized",
			*socket);
		return -EINVAL;
	}
	mci = new_mci;
	pvt = mci->pvt_info;

	/*
	 * Step 2) Get memory channel
	 */
	prv = 0;
	if (pvt->info.type == SANDY_BRIDGE)
		pci_ha = pvt->pci_ha0;
	else {
		if (sad_ha)
			pci_ha = pvt->pci_ha1;
		else
			pci_ha = pvt->pci_ha0;
	}
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory channel");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_tads == MAX_TAD) {
		sprintf(msg, "Can't discover the memory channel");
		return -EINVAL;
	}

	ch_way = TAD_CH(reg) + 1;
	sck_way = TAD_SOCK(reg);

	if (ch_way == 3)
		idx = addr >> 6;
	else {
		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
		if (pvt->is_chan_hash)
			idx = haswell_chan_hash(idx, addr);
	}
	idx = idx % ch_way;

	/* Pick the TAD target channel for this interleave index. */
	switch (idx) {
	case 0:
		base_ch = TAD_TGT0(reg);
		break;
	case 1:
		base_ch = TAD_TGT1(reg);
		break;
	case 2:
		base_ch = TAD_TGT2(reg);
		break;
	case 3:
		base_ch = TAD_TGT3(reg);
		break;
	default:
		sprintf(msg, "Can't discover the TAD target");
		return -EINVAL;
	}
	*channel_mask = 1 << base_ch;

	pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
				tad_ch_nilv_offset[n_tads],
				&tad_offset);

	if (pvt->is_mirrored) {
		*channel_mask |= 1 << ((base_ch + 2) % 4);
		switch (ch_way) {
		case 2:
		case 4:
			sck_xch = (1 << sck_way) * (ch_way >> 1);
			break;
		default:
			sprintf(msg, "Invalid mirror set. Can't decode addr");
			return -EINVAL;
		}
	} else
		sck_xch = (1 << sck_way) * ch_way;

	if (pvt->is_lockstep)
		*channel_mask |= 1 << ((base_ch + 1) % 4);

	offset = TAD_OFFSET(tad_offset);

	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
		 n_tads,
		 addr,
		 limit,
		 sck_way,
		 ch_way,
		 offset,
		 idx,
		 base_ch,
		 *channel_mask);

	/* Calculate channel address: remove the TAD offset. */
	if (offset > addr) {
		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
			offset, addr);
		return -EINVAL;
	}

	ch_addr = addr - offset;
	ch_addr >>= (6 + shiftup);
	ch_addr /= sck_xch;
	ch_addr <<= (6 + shiftup);
	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);

	/*
	 * Step 3) Decode rank
	 */
	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
		pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
				      rir_way_limit[n_rir],
				      &reg);

		if (!IS_RIR_VALID(reg))
			continue;

		limit = pvt->info.rir_limit(reg);
		gb = div_u64_rem(limit >> 20, 1024, &mb);
		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
			 n_rir,
			 gb, (mb*1000)/1024,
			 limit,
			 1 << RIR_WAY(reg));
		if (ch_addr <= limit)
			break;
	}
	if (n_rir == MAX_RIR_RANGES) {
		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
			ch_addr);
		return -EINVAL;
	}
	rir_way = RIR_WAY(reg);

	if (pvt->is_close_pg)
		idx = (ch_addr >> 6);
	else
		idx = (ch_addr >> 13);
	idx %= 1 << rir_way;

	pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
			      rir_offset[n_rir][idx],
			      &reg);
	*rank = RIR_RNK_TGT(pvt->info.type, reg);

	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
		 n_rir,
		 ch_addr,
		 limit,
		 rir_way,
		 idx);

	return 0;
}
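
/****************************************************************************
	Device initialization routines: put/get, init/exit
 ****************************************************************************/

/* 'put' all the devices that we have reserved via 'get'. */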
static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
{
	int i;

	edac_dbg(0, "\n");
	for (i = 0; i < sbridge_dev->n_devs; i++) {
		struct pci_dev *pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;
		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
			 pdev->bus->number,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		pci_dev_put(pdev);
	}
}

static void sbridge_put_all_devices(void)
{
	struct sbridge_dev *sbridge_dev, *tmp;

	list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
		sbridge_put_devices(sbridge_dev);
		free_sbridge_dev(sbridge_dev);
	}
}

2292static int sbridge_get_onedevice(struct pci_dev **prev,
2293 u8 *num_mc,
2294 const struct pci_id_table *table,
2295 const unsigned devno,
2296 const int multi_bus)
2297{
2298 struct sbridge_dev *sbridge_dev;
2299 const struct pci_id_descr *dev_descr = &table->descr[devno];
2300 struct pci_dev *pdev = NULL;
2301 u8 bus = 0;
2302
2303 sbridge_printk(KERN_DEBUG,
2304 "Seeking for: PCI ID %04x:%04x\n",
2305 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2306
2307 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
2308 dev_descr->dev_id, *prev);
2309
2310 if (!pdev) {
2311 if (*prev) {
2312 *prev = pdev;
2313 return 0;
2314 }
2315
2316 if (dev_descr->optional)
2317 return 0;
2318
2319
2320 if (devno == 0)
2321 return -ENODEV;
2322
2323 sbridge_printk(KERN_INFO,
2324 "Device not found: %04x:%04x\n",
2325 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2326
2327
2328 return -ENODEV;
2329 }
2330 bus = pdev->bus->number;
2331
2332 sbridge_dev = get_sbridge_dev(bus, multi_bus);
2333 if (!sbridge_dev) {
2334 sbridge_dev = alloc_sbridge_dev(bus, table);
2335 if (!sbridge_dev) {
2336 pci_dev_put(pdev);
2337 return -ENOMEM;
2338 }
2339 (*num_mc)++;
2340 }
2341
2342 if (sbridge_dev->pdev[devno]) {
2343 sbridge_printk(KERN_ERR,
2344 "Duplicated device for %04x:%04x\n",
2345 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2346 pci_dev_put(pdev);
2347 return -ENODEV;
2348 }
2349
2350 sbridge_dev->pdev[devno] = pdev;
2351
2352
2353 if (unlikely(pci_enable_device(pdev) < 0)) {
2354 sbridge_printk(KERN_ERR,
2355 "Couldn't enable %04x:%04x\n",
2356 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2357 return -ENODEV;
2358 }
2359
2360 edac_dbg(0, "Detected %04x:%04x\n",
2361 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2362
2363
2364
2365
2366
2367
2368 pci_dev_get(pdev);
2369
2370 *prev = pdev;
2371
2372 return 0;
2373}
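/*
 * Summary of the reference counting pattern above (not new behaviour):
 * pci_get_device() returns a referenced device and drops the reference of
 * the *prev device passed in as its starting point, so the extra
 * pci_dev_get() keeps the device cached in sbridge_dev->pdev[] pinned until
 * the matching pci_dev_put() in sbridge_put_devices().
 */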

/*
 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
 *			     devices we want to reference for this driver.
 * @num_mc: pointer to the memory controllers count, to be incremented in
 *	    case of success.
 * @table: model specific table
 *
 * Returns 0 in case of success or error code
 */
static int sbridge_get_all_devices(u8 *num_mc,
				   const struct pci_id_table *table)
{
	int i, rc;
	struct pci_dev *pdev = NULL;
	int allow_dups = 0;
	int multi_bus = 0;

	if (table->type == KNIGHTS_LANDING)
		allow_dups = multi_bus = 1;
	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			if (!allow_dups || i == 0 ||
			    table->descr[i].dev_id !=
				table->descr[i - 1].dev_id) {
				pdev = NULL;
			}
			do {
				rc = sbridge_get_onedevice(&pdev, num_mc,
							   table, i, multi_bus);
				if (rc < 0) {
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					sbridge_put_all_devices();
					return -ENODEV;
				}
			} while (pdev && !allow_dups);
		}
		table++;
	}

	return 0;
}
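/*
 * Note on the dedup logic above: on Knights Landing several consecutive
 * entries of the descriptor table share one PCI device ID, so pdev is only
 * reset to NULL when the ID changes; keeping the previous pdev makes
 * pci_get_device() continue the bus scan instead of returning the same
 * device again for the repeated ID.
 */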

static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;
	int i;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
			pvt->pci_sad1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
			pvt->pci_br0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
			pvt->pci_ha0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
		{
			int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
			pvt->pci_ddrio = pdev;
			break;
		default:
			goto error;
		}

		edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
			 pdev->vendor, pdev->device,
			 sbridge_dev->bus,
			 pdev);
	}

	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
	    !pvt->pci_ras || !pvt->pci_ta)
		goto enodev;

	if (saw_chan_mask != 0x0f)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;

error:
	sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
		       PCI_VENDOR_ID_INTEL, pdev->device);
	return -EINVAL;
}
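/*
 * A minimal sketch of the saw_chan_mask bookkeeping used by all the
 * *_mci_bind_devs() helpers (illustrative values): each TADn device found
 * sets bit n, i.e.
 *
 *	TAD0 -> 0x01, TAD1 -> 0x02, TAD2 -> 0x04, TAD3 -> 0x08,
 *
 * so a fully populated Sandy Bridge home agent accumulates 0x0f; anything
 * else means a channel device is missing and the bind fails with -ENODEV.
 */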

static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;
	int i;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
			pvt->pci_ha0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
		{
			int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0;
			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
			pvt->pci_ddrio = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
			pvt->pci_br0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
			pvt->pci_br1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
			pvt->pci_ha1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
		{
			int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 4;
			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		default:
			goto error;
		}

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	/* Check if everything were registered */
	if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
		goto enodev;

	if (saw_chan_mask != 0x0f &&
	    saw_chan_mask != 0x33 &&
	    saw_chan_mask != 0xff)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;

error:
	sbridge_printk(KERN_ERR,
		       "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
		       pdev->device);
	return -EINVAL;
}
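/*
 * One reading of the three masks accepted above (an interpretation, not an
 * extra check): 0x0f is one home agent with four channels, 0x33 is two home
 * agents with two channels each (the HA1 TADs land on bits 4-7 via the
 * "+ 4" offset), and 0xff is two fully populated home agents.
 */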

static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;
	int i;

	/* there's only one device per system; not tied to any bus */
	if (pvt->info.pci_vtd == NULL)
		/* result will be checked later */
		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
					   PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
					   NULL);

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
			pvt->pci_sad1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
			pvt->pci_ha0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL:
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
		{
			int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0;

			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
		{
			int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 + 4;

			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
			if (!pvt->pci_ddrio)
				pvt->pci_ddrio = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
			pvt->pci_ha1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
			pvt->pci_ha1_ta = pdev;
			break;
		default:
			break;
		}

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	/* Check if everything were registered */
	if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
	    !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
		goto enodev;

	if (saw_chan_mask != 0x0f &&
	    saw_chan_mask != 0x33 &&
	    saw_chan_mask != 0xff)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;
}

static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
				   struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u8 saw_chan_mask = 0;
	int i;

	/* there's only one device per system; not tied to any bus */
	if (pvt->info.pci_vtd == NULL)
		/* result will be checked later */
		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
					   PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
					   NULL);

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
			pvt->pci_sad0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
			pvt->pci_sad1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
			pvt->pci_ha0 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
			pvt->pci_ta = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL:
			pvt->pci_ras = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
		{
			int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0;
			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
		{
			int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 + 4;
			pvt->pci_tad[id] = pdev;
			saw_chan_mask |= 1 << id;
		}
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
			pvt->pci_ddrio = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
			pvt->pci_ha1 = pdev;
			break;
		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
			pvt->pci_ha1_ta = pdev;
			break;
		default:
			break;
		}

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	/* Check if everything were registered */
	if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
	    !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
		goto enodev;

	if (saw_chan_mask != 0x0f &&
	    saw_chan_mask != 0x33 &&
	    saw_chan_mask != 0xff)
		goto enodev;
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;
}

static int knl_mci_bind_devs(struct mem_ctl_info *mci,
			     struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int dev, func;

	int i;
	int devidx;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;

		/* Extract PCI device and function. */
		dev = (pdev->devfn >> 3) & 0x1f;
		func = pdev->devfn & 0x7;

		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
			if (dev == 8)
				pvt->knl.pci_mc0 = pdev;
			else if (dev == 9)
				pvt->knl.pci_mc1 = pdev;
			else {
				sbridge_printk(KERN_ERR,
					"Memory controller in unexpected place! (dev %d, fn %d)\n",
					dev, func);
				continue;
			}
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
			pvt->pci_sad0 = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
			pvt->pci_sad1 = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
			/* There are one of these per tile, and range from
			 * 1.14.0 to 1.18.5.
			 */
			devidx = ((dev - 14) * 8) + func;

			if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
				sbridge_printk(KERN_ERR,
					"Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
					dev, func);
				continue;
			}

			WARN_ON(pvt->knl.pci_cha[devidx] != NULL);

			pvt->knl.pci_cha[devidx] = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL:
			devidx = -1;

			/*
			 * MC0 channels 0-2 are device 9 function 2-4,
			 * MC1 channels 3-5 are device 8 function 2-4.
			 */

			if (dev == 9)
				devidx = func - 2;
			else if (dev == 8)
				devidx = 3 + (func - 2);

			if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
				sbridge_printk(KERN_ERR,
					"DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
					dev, func);
				continue;
			}

			WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
			pvt->knl.pci_channel[devidx] = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
			pvt->knl.pci_mc_info = pdev;
			break;

		case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
			pvt->pci_ta = pdev;
			break;

		default:
			sbridge_printk(KERN_ERR, "Unexpected device %d\n",
				       pdev->device);
			break;
		}
	}

	if (!pvt->knl.pci_mc0 || !pvt->knl.pci_mc1 ||
	    !pvt->pci_sad0 || !pvt->pci_sad1 ||
	    !pvt->pci_ta) {
		goto enodev;
	}

	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
		if (!pvt->knl.pci_channel[i]) {
			sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
			goto enodev;
		}
	}

	for (i = 0; i < KNL_MAX_CHAS; i++) {
		if (!pvt->knl.pci_cha[i]) {
			sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
			goto enodev;
		}
	}

	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;
}
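/*
 * Illustrative only: a hypothetical helper (not used by the driver) showing
 * that the devfn decode open-coded in knl_mci_bind_devs() matches the
 * standard PCI_SLOT()/PCI_FUNC() macros. For a CHA at device 15 function 3
 * it yields ((15 - 14) * 8) + 3 = 11.
 */
static inline int knl_cha_index(unsigned int devfn)
{
	/* hypothetical: maps CHA devices 1.14.0 - 1.18.5 onto pci_cha[] */
	return ((PCI_SLOT(devfn) - 14) * 8) + PCI_FUNC(devfn);
}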

/****************************************************************************
			Error check routines
 ****************************************************************************/

/*
 * While Sandy Bridge has error count registers, SMI BIOS read values from
 * and resets the counters. So, they are not reliable for the OS to read
 * from them. So, we have no option but to just trust on whatever MCE is
 * reporting with MCE register contents.
 */
static void sbridge_mce_output_error(struct mem_ctl_info *mci,
				     const struct mce *m)
{
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt = mci->pvt_info;
	enum hw_event_mc_err_type tp_event;
	char *type, *optype, msg[256];
	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
	bool overflow = GET_BITFIELD(m->status, 62, 62);
	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
	bool recoverable;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 channel = GET_BITFIELD(m->status, 0, 3);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	long channel_mask, first_channel;
	u8 rank, socket, ha;
	int rc, dimm;
	char *area_type = NULL;

	if (pvt->info.type != SANDY_BRIDGE)
		recoverable = true;
	else
		recoverable = GET_BITFIELD(m->status, 56, 56);

	if (uncorrected_error) {
		if (ripv) {
			type = "FATAL";
			tp_event = HW_EVENT_ERR_FATAL;
		} else {
			type = "NON_FATAL";
			tp_event = HW_EVENT_ERR_UNCORRECTED;
		}
	} else {
		type = "CORRECTED";
		tp_event = HW_EVENT_ERR_CORRECTED;
	}

	/*
	 * According with Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if ((errcode & 0xef80) != 0x80) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!GET_BITFIELD(m->status, 58, 58))
		return;

	if (pvt->info.type == KNIGHTS_LANDING) {
		if (channel == 14) {
			edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
				overflow ? " OVERFLOW" : "",
				(uncorrected_error && recoverable)
				? " recoverable" : "",
				mscod, errcode,
				m->bank);
		} else {
			char A = 'A';

			/*
			 * Reported channel is in range 0-2, so we can't map
			 * it back to mc. To figure out mc we check machine
			 * check bank register that reported this error.
			 * bank15 means mc0 and bank16 means mc1.
			 */
			channel = knl_channel_remap(m->bank == 16, channel);
			channel_mask = 1 << channel;

			snprintf(msg, sizeof(msg),
				"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
				overflow ? " OVERFLOW" : "",
				(uncorrected_error && recoverable)
				? " recoverable" : " ",
				mscod, errcode, channel, A + channel);
			edac_mc_handle_error(tp_event, mci, core_err_cnt,
				m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
				channel, 0, -1,
				optype, msg);
		}
		return;
	} else {
		rc = get_memory_error_data(mci, m->addr, &socket, &ha,
					   &channel_mask, &rank,
					   &area_type, msg);
	}

	if (rc < 0)
		goto err_parsing;
	new_mci = get_mci_for_node_id(socket);
	if (!new_mci) {
		strcpy(msg, "Error: socket got corrupted!");
		goto err_parsing;
	}
	mci = new_mci;
	pvt = mci->pvt_info;

	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);

	if (rank < 4)
		dimm = 0;
	else if (rank < 8)
		dimm = 1;
	else
		dimm = 2;

	/*
	 * FIXME: On some memory configurations (mirror, lockstep), the
	 * Memory Controller can't point the error to a single DIMM. The
	 * EDAC core should be handling the channel mask, in order to point
	 * to the group of dimm's where the error may be happening.
	 */
	if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg)
		channel = first_channel;

	snprintf(msg, sizeof(msg),
		 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
		 overflow ? " OVERFLOW" : "",
		 (uncorrected_error && recoverable) ? " recoverable" : "",
		 area_type,
		 mscod, errcode,
		 socket, ha,
		 channel_mask,
		 rank);

	edac_dbg(0, "%s\n", msg);

	/* FIXME: need support for channel mask */

	if (channel == CHANNEL_UNSPECIFIED)
		channel = -1;

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt,
			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
			     4*ha+channel, dimm, -1,
			     optype, msg);
	return;
err_parsing:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
			     -1, -1, -1,
			     msg, "");

}
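/*
 * Worked example of the MCi_STATUS decode above, with a made-up status
 * value: errcode = status[15:0] = 0x009f matches the 000f 0000 1mmm cccc
 * memory error pattern (bit 7 set, bits 8-11 and 13-15 clear), giving
 * optypenum = status[6:4] = 1 ("memory read error") and channel =
 * status[3:0] = 0xf (CHANNEL_UNSPECIFIED).
 */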

/*
 * Check that logging is enabled and that this is the right type
 * of error for us to handle.
 */
static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
				   void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct sbridge_pvt *pvt;
	char *type;

	if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = get_mci_for_node_id(mce->socketid);
	if (!mci)
		return NOTIFY_DONE;
	pvt = mci->pvt_info;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
	 * bit 12 has an special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");

	sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
			  "Bank %d: %016Lx\n", mce->extcpu, type,
			  mce->mcgstatus, mce->bank, mce->status);
	sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
	sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
	sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);

	sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
			  mce->time, mce->socketid, mce->apicid);

	sbridge_mce_output_error(mci, mce);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}
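/*
 * The callback above is wired into the machine check decode chain via the
 * notifier_block that follows; the registration pairing (exactly what
 * sbridge_init()/sbridge_exit() below do) is:
 *
 *	mce_register_decode_chain(&sbridge_mce_dec);
 *	...
 *	mce_unregister_decode_chain(&sbridge_mce_dec);
 *
 * Returning NOTIFY_STOP tells the MCE core that the event was consumed.
 */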

static struct notifier_block sbridge_mce_dec = {
	.notifier_call = sbridge_mce_check_error,
};

/****************************************************************************
			EDAC register/unregister logic
 ****************************************************************************/

static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
{
	struct mem_ctl_info *mci = sbridge_dev->mci;
	struct sbridge_pvt *pvt;

	if (unlikely(!mci || !mci->pvt_info)) {
		edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);

		sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	pvt = mci->pvt_info;

	edac_dbg(0, "MC: mci = %p, dev = %p\n",
		 mci, &sbridge_dev->pdev[0]->dev);

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(mci->pdev);

	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	sbridge_dev->mci = NULL;
}

static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct sbridge_pvt *pvt;
	struct pci_dev *pdev = sbridge_dev->pdev[0];
	int rc;

	/* Check the number of active and not disabled channels */
	rc = check_if_ecc_is_active(sbridge_dev->bus, type);
	if (unlikely(rc < 0))
		return rc;

	/* allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = type == KNIGHTS_LANDING ?
		KNL_MAX_CHANNELS : NUM_CHANNELS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
			    sizeof(*pvt));

	if (unlikely(!mci))
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p, dev = %p\n",
		 mci, &pdev->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/* Associate sbridge_dev and mci for future usage */
	pvt->sbridge_dev = sbridge_dev;
	sbridge_dev->mci = mci;

	mci->mtype_cap = type == KNIGHTS_LANDING ?
		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "sbridge_edac.c";
	mci->mod_ver = SBRIDGE_REVISION;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	pvt->info.type = type;
	switch (type) {
	case IVY_BRIDGE:
		pvt->info.rankcfgr = IB_RANK_CFG_A;
		pvt->info.get_tolm = ibridge_get_tolm;
		pvt->info.get_tohm = ibridge_get_tohm;
		pvt->info.dram_rule = ibridge_dram_rule;
		pvt->info.get_memory_type = get_memory_type;
		pvt->info.get_node_id = get_node_id;
		pvt->info.rir_limit = rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.show_interleave_mode = show_interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
		pvt->info.interleave_list = ibridge_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = ibridge_get_width;
		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);

		/* Store pci devices at mci for faster access */
		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		break;
	case SANDY_BRIDGE:
		pvt->info.rankcfgr = SB_RANK_CFG_A;
		pvt->info.get_tolm = sbridge_get_tolm;
		pvt->info.get_tohm = sbridge_get_tohm;
		pvt->info.dram_rule = sbridge_dram_rule;
		pvt->info.get_memory_type = get_memory_type;
		pvt->info.get_node_id = get_node_id;
		pvt->info.rir_limit = rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.show_interleave_mode = show_interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
		pvt->info.interleave_list = sbridge_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
		pvt->info.interleave_pkg = sbridge_interleave_pkg;
		pvt->info.get_width = sbridge_get_width;
		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);

		/* Store pci devices at mci for faster access */
		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		break;
	case HASWELL:
		/* rankcfgr isn't used */
		pvt->info.get_tolm = haswell_get_tolm;
		pvt->info.get_tohm = haswell_get_tohm;
		pvt->info.dram_rule = ibridge_dram_rule;
		pvt->info.get_memory_type = haswell_get_memory_type;
		pvt->info.get_node_id = haswell_get_node_id;
		pvt->info.rir_limit = haswell_rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.show_interleave_mode = show_interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
		pvt->info.interleave_list = ibridge_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = ibridge_get_width;
		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);

		/* Store pci devices at mci for faster access */
		rc = haswell_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		break;
	case BROADWELL:
		/* rankcfgr isn't used */
		pvt->info.get_tolm = haswell_get_tolm;
		pvt->info.get_tohm = haswell_get_tohm;
		pvt->info.dram_rule = ibridge_dram_rule;
		pvt->info.get_memory_type = haswell_get_memory_type;
		pvt->info.get_node_id = haswell_get_node_id;
		pvt->info.rir_limit = haswell_rir_limit;
		pvt->info.sad_limit = sad_limit;
		pvt->info.interleave_mode = interleave_mode;
		pvt->info.show_interleave_mode = show_interleave_mode;
		pvt->info.dram_attr = dram_attr;
		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
		pvt->info.interleave_list = ibridge_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = broadwell_get_width;
		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx);

		/* Store pci devices at mci for faster access */
		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		break;
	case KNIGHTS_LANDING:
		/* pvt->info.rankcfgr == ??? */
		pvt->info.get_tolm = knl_get_tolm;
		pvt->info.get_tohm = knl_get_tohm;
		pvt->info.dram_rule = knl_dram_rule;
		pvt->info.get_memory_type = knl_get_memory_type;
		pvt->info.get_node_id = knl_get_node_id;
		pvt->info.rir_limit = NULL;
		pvt->info.sad_limit = knl_sad_limit;
		pvt->info.interleave_mode = knl_interleave_mode;
		pvt->info.show_interleave_mode = knl_show_interleave_mode;
		pvt->info.dram_attr = dram_attr_knl;
		pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
		pvt->info.interleave_list = knl_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list);
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		pvt->info.get_width = knl_get_width;
		mci->ctl_name = kasprintf(GFP_KERNEL,
			"Knights Landing Socket#%d", mci->mc_idx);

		rc = knl_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
		break;
	}

	/* Get dimm basic config and the memory layout */
	get_dimm_config(mci);
	get_memory_layout(mci);

	/* record ptr to the generic device */
	mci->pdev = &pdev->dev;

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		rc = -EINVAL;
		goto fail0;
	}

	return 0;

fail0:
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	sbridge_dev->mci = NULL;
	return rc;
}
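/*
 * For reference, the grid registered above (assuming the non-KNL constants
 * defined earlier in this file): layers[0] describes NUM_CHANNELS channels
 * and layers[1] MAX_DIMMS slots per channel, so a single edac_mc_alloc()
 * call sizes the channel x slot grid plus sizeof(*pvt) bytes of
 * driver-private state.
 */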

#define ICPU(model, table) \
	{ X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }

static const struct x86_cpu_id sbridge_cpuids[] = {
	ICPU(0x2d, pci_dev_descr_sbridge_table),	/* SANDY BRIDGE */
	ICPU(0x3e, pci_dev_descr_ibridge_table),	/* IVY BRIDGE */
	ICPU(0x3f, pci_dev_descr_haswell_table),	/* HASWELL */
	ICPU(0x4f, pci_dev_descr_broadwell_table),	/* BROADWELL */
	ICPU(0x56, pci_dev_descr_broadwell_table),	/* BROADWELL-DE */
	ICPU(0x57, pci_dev_descr_knl_table),		/* KNIGHTS LANDING */
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);

/*
 *	sbridge_probe	Get all devices and register memory controllers
 *			present.
 *	return:
 *		0 for FOUND a device
 *		< 0 for error code
 */

static int sbridge_probe(const struct x86_cpu_id *id)
{
	int rc = -ENODEV;
	u8 mc, num_mc = 0;
	struct sbridge_dev *sbridge_dev;
	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;

	/* get the pci devices we want to reserve for our use */
	rc = sbridge_get_all_devices(&num_mc, ptable);

	if (unlikely(rc < 0)) {
		edac_dbg(0, "couldn't get all devices\n");
		goto fail0;
	}

	mc = 0;

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
			 mc, mc + 1, num_mc);

		sbridge_dev->mc = mc++;
		rc = sbridge_register_mci(sbridge_dev, ptable->type);
		if (unlikely(rc < 0))
			goto fail1;
	}

	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);

	return 0;

fail1:
	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	sbridge_put_all_devices();
fail0:
	return rc;
}

/*
 *	sbridge_remove	cleanup
 *
 */
static void sbridge_remove(void)
{
	struct sbridge_dev *sbridge_dev;

	edac_dbg(0, "\n");

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	/* Release PCI resources */
	sbridge_put_all_devices();
}

/*
 *	sbridge_init		Module entry function
 *			Try to initialize this module for its devices
 */
static int __init sbridge_init(void)
{
	const struct x86_cpu_id *id;
	int rc;

	edac_dbg(2, "\n");

	id = x86_match_cpu(sbridge_cpuids);
	if (!id)
		return -ENODEV;

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = sbridge_probe(id);

	if (rc >= 0) {
		mce_register_decode_chain(&sbridge_mce_dec);
		if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
			sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
		return 0;
	}

	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
		       rc);

	return rc;
}

/*
 *	sbridge_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit sbridge_exit(void)
{
	edac_dbg(2, "\n");
	sbridge_remove();
	mce_unregister_decode_chain(&sbridge_mce_dec);
}

module_init(sbridge_init);
module_exit(sbridge_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
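/*
 * Example usage (hypothetical command line): the reporting mode can be
 * chosen at load time, e.g.
 *
 *	modprobe sbridge_edac edac_op_state=1
 *
 * which selects NMI reporting (0 = poll, 1 = NMI, per the parameter
 * description above).
 */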

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
		   SBRIDGE_REVISION);