/*
 * Driver for Pondicherry2 memory controller (Intel Apollo Lake and
 * Denverton SoCs).
 *
 * NOTE(review): the original license/copyright header was garbled in
 * this copy (only line-number residue remained) — restore it from the
 * upstream file before committing.
 */
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/pci_ids.h>
31#include <linux/slab.h>
32#include <linux/delay.h>
33#include <linux/edac.h>
34#include <linux/mmzone.h>
35#include <linux/smp.h>
36#include <linux/bitmap.h>
37#include <linux/math64.h>
38#include <linux/mod_devicetable.h>
39#include <asm/cpu_device_id.h>
40#include <asm/intel-family.h>
41#include <asm/processor.h>
42#include <asm/mce.h>
43
44#include "edac_core.h"
45#include "edac_module.h"
46#include "pnd2_edac.h"
47
48#define APL_NUM_CHANNELS 4
49#define DNV_NUM_CHANNELS 2
50#define DNV_MAX_DIMMS 2
51
/* SoC families handled by this driver. */
enum type {
	APL,	/* Apollo Lake */
	DNV,	/* Denverton */
};
56
/* DRAM coordinates of a decoded error address. */
struct dram_addr {
	int chan;	/* memory channel */
	int dimm;	/* DIMM slot within the channel */
	int rank;
	int bank;
	int row;
	int col;	/* column */
};
65
/* Private data hanging off the EDAC memory controller instance. */
struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];	/* per-channel index into the dimms[] geometry table */
	u64 tolm, tohm;				/* top of low / high memory */
};
70
71
72
73
74
75
76
77
78
79
/*
 * Address ranges carved out of the system address map: the MOT
 * (memory-only-traffic) region and the asymmetric regions as0/as1/as2.
 * A region with enabled == 0 matches nothing.
 */
static struct region {
	u64 base;
	u64 limit;	/* inclusive upper bound */
	u8 enabled;
} mot, as0, as1, as2;
85
/* Per-SoC-family parameters and operations; 'ops' is chosen at probe time. */
static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;	/* right shift applied to the decoded PMI address */
	int pmiidx_shift;	/* right shift applied to the PMI index (channel) */
	int channels;
	int dimms_per_channel;
	/* Read a memory-controller register (sideband, MMIO or PCI config). */
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	/* Translate a PMI address + index into DRAM coordinates. */
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg);
} *ops;
101
102static struct mem_ctl_info *pnd2_mci;
103
104#define PND2_MSG_SIZE 256
105
106
107#define pnd2_printk(level, fmt, arg...) \
108 edac_printk(level, "pnd2", fmt, ##arg)
109
110#define pnd2_mc_printk(mci, level, fmt, arg...) \
111 edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
112
113#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
114#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
115#define SELECTOR_DISABLED (-1)
116#define _4GB (1ul << 32)
117
118#define PMI_ADDRESS_WIDTH 31
119#define PND_MAX_PHYS_BIT 39
120
121#define APL_ASYMSHIFT 28
122#define DNV_ASYMSHIFT 31
123#define CH_HASH_MASK_LSB 6
124#define SLICE_HASH_MASK_LSB 6
125#define MOT_SLC_INTLV_BIT 12
126#define LOG2_PMI_ADDR_GRANULARITY 5
127#define MOT_SHIFT 24
128
129#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
130#define U64_LSHIFT(val, s) ((u64)(val) << (s))
131
132
133
134
135
136
137static struct pci_bus *p2sb_bus;
138#define P2SB_DEVFN PCI_DEVFN(0xd, 0)
139#define P2SB_ADDR_OFF 0xd0
140#define P2SB_DATA_OFF 0xd4
141#define P2SB_STAT_OFF 0xd8
142#define P2SB_ROUT_OFF 0xda
143#define P2SB_EADD_OFF 0xdc
144#define P2SB_HIDE_OFF 0xe1
145
146#define P2SB_BUSY 1
147
148#define P2SB_READ(size, off, ptr) \
149 pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
150#define P2SB_WRITE(size, off, val) \
151 pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
152
153static bool p2sb_is_busy(u16 *status)
154{
155 P2SB_READ(word, P2SB_STAT_OFF, status);
156
157 return !!(*status & P2SB_BUSY);
158}
159
/*
 * Perform one 32-bit read through the P2SB sideband mailbox.
 * BIOS may have hidden the P2SB PCI device; unhide it for the duration
 * of the transaction and restore its previous hidden state on exit.
 * Returns a negative errno, or the response code from status bits [2:1]
 * (0 means success).
 */
static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, if it is hidden. */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	/* Program the target port/offset, then kick off the transaction. */
	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	/* Poll for completion, bounded by 'retries' iterations. */
	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;	/* response code from status bits [2:1] */
out:
	/* Hide the P2SB device again, if it was hidden before. */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}
198
/*
 * Apollo Lake register read: 4- or 8-byte registers via the P2SB
 * mailbox. An 8-byte read is done as two 32-bit accesses, high dword
 * first, then falls through to read the low dword.
 */
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		/* fall through */
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
			    sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}
217
/*
 * Read the memory controller hub (MCHBAR) base address from the host
 * bridge's PCI config space (Intel device id 0x1980, config offsets
 * 0x48/0x4c). Returns 0 if the device is absent or the window is
 * disabled.
 */
static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	/* hi.base supplies bits [63:32], lo.base bits [31:15]. */
	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}
240
/*
 * Return the sideband register window base from BAR0/1 of the P2SB
 * bridge (Intel device id 0x19dd). The device may be hidden by BIOS
 * (config offset 0xe1); unhide it around the BAR reads and restore the
 * previous state. Falls back to 0xfd000000 when the device is not
 * visible at all (presumably the firmware default — confirm).
 */
static u64 get_sideband_reg_base_addr(void)
{
	struct pci_dev *pdev;
	u32 hi, lo;
	u8 hidden;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
	if (pdev) {
		/* Unhide the P2SB device, if it is hidden. */
		pci_read_config_byte(pdev, 0xe1, &hidden);
		if (hidden)
			pci_write_config_byte(pdev, 0xe1, 0);

		pci_read_config_dword(pdev, 0x10, &lo);
		pci_read_config_dword(pdev, 0x14, &hi);
		lo &= 0xfffffff0;	/* strip BAR flag bits */

		/* Hide it again, if it was hidden before. */
		if (hidden)
			pci_write_config_byte(pdev, 0xe1, hidden);

		pci_dev_put(pdev);
		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
	} else {
		return 0xfd000000;
	}
}
268
/*
 * Denverton register read. op == 4 selects a PCI config-space read from
 * the host bridge (device id 0x1980); otherwise the register is memory
 * mapped, either in the memory controller hub window (op == 0 with
 * port 0x4c) or in the sideband window for the given port (one 64 KiB
 * window per port).
 */
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address. */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
		} else {
			/* MMIO via sideband register base address. */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
		}

		base = ioremap((resource_size_t)addr, 0x10000);
		if (!base)
			return -ENODEV;

		/* 8-byte registers are read as two dwords, high part first. */
		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
		 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}
312
313#define RD_REGP(regp, regname, port) \
314 ops->rd_reg(port, \
315 regname##_offset, \
316 regname##_r_opcode, \
317 regp, sizeof(struct regname), \
318 #regname)
319
320#define RD_REG(regp, regname) \
321 ops->rd_reg(regname ## _port, \
322 regname##_offset, \
323 regname##_r_opcode, \
324 regp, sizeof(struct regname), \
325 #regname)
326
/* Address-decode state derived from the registers by get_registers(). */
static u64 top_lm, top_hm;	/* top of low / high memory */
static bool two_slices;		/* both memory slices populated */
static bool two_channels;	/* two channels per slice */

static u8 sym_chan_mask;	/* channels used for symmetric interleave */
static u8 asym_chan_mask;	/* channels used by asymmetric regions */
static u8 chan_mask;		/* all populated channels */

static int slice_selector = -1;	/* address bit selecting the slice (-1 = disabled) */
static int chan_selector = -1;	/* address bit selecting the channel (-1 = disabled) */
static u64 slice_hash_mask;
static u64 chan_hash_mask;
339
340static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
341{
342 rp->enabled = 1;
343 rp->base = base;
344 rp->limit = limit;
345 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
346}
347
/*
 * Build a region from a base + mask pair as programmed in the MOT
 * registers. The mask must be a contiguous run of bits reaching up to
 * PND_MAX_PHYS_BIT and the base must be aligned to it; otherwise the
 * firmware programmed nonsense — complain (FW_BUG) and leave the
 * region disabled.
 */
static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
367
368static bool in_region(struct region *rp, u64 addr)
369{
370 if (!rp->enabled)
371 return false;
372
373 return rp->base <= addr && addr <= rp->limit;
374}
375
376static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
377{
378 int mask = 0;
379
380 if (!p->slice_0_mem_disabled)
381 mask |= p->sym_slice0_channel_enabled;
382
383 if (!p->slice_1_disabled)
384 mask |= p->sym_slice1_channel_enabled << 2;
385
386 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
387 mask &= 0x5;
388
389 return mask;
390}
391
/*
 * Build the asymmetric-channel bitmap: channels claimed by the 2-way
 * interleaved region plus the single-channel asymmetric regions of
 * each slice, then masked down by whatever slices/channels are
 * actually enabled.
 */
static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	/* Channel pairs used by each 2-way interleave mode. */
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;	/* drop slice 0 channels */
	if (p->slice_1_disabled)
		mask &= 0x3;	/* drop slice 1 channels */
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;	/* drop odd channels */

	return mask;
}
415
/* Shadow copies of the address-decode registers, read once at probe. */
static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;
425
426
427
428
429
430
431
/* Apollo Lake dunit: sideband port per channel and per-channel drp0 shadow. */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit: sideband port per channel and register shadows. */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
446
/* Build an asymmetric region from Apollo Lake's base/limit fields (256 MiB granularity). */
static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
		  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
		  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}
456
/* Build an asymmetric region from Denverton's base/limit fields (2 GiB granularity). */
static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
		  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
		  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}
466
/*
 * Read the Apollo Lake specific registers. Individual channel reads
 * may fail (channel not populated); succeed if at least one channel's
 * drp0 reads back.
 */
static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() may fail for unpopulated channels; keep going and
	 * return success if any channel produced a valid drp0.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}
485
/*
 * Read the Denverton specific registers: the scheduler config plus the
 * full set of per-channel ECC/rank/address-map registers. Unlike the
 * Apollo Lake path, every read must succeed.
 */
static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
		    RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
		    RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
		    RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
		    RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
		    RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
		    RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
		    RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}
506
507
508
509
510
511
/*
 * Read all common address-decode registers plus the SoC-specific set,
 * then derive everything sys2pmi() needs: the MOT and asymmetric
 * regions, the slice/channel topology, the interleave selector bits
 * and the hash masks.
 */
static int get_registers(void)
{
	/* Interleave selector bit for each chash.interleave_mode value. */
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
	    RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
	    RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
	    RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
	    RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
	    RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
	    RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
	    RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* Denverton has one channel per slice; force the selects/bitmaps. */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;

		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			       U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			       U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	/* Both slices in use iff each has at least one enabled channel. */
	two_slices = !chash.slice_1_disabled &&
		     !chash.slice_0_mem_disabled &&
		     (chash.sym_slice0_channel_enabled != 0) &&
		     (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
		       !chash.enable_pmi_dual_data_mode &&
		       ((chash.sym_slice0_channel_enabled == 3) ||
			(chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	/* Pick the address bits used to select slice/channel. */
	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	/* Hash masks; the selector bit itself participates when not hashed away. */
	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}
609
610
/* Squeeze out the MMIO hole between top_lm and 4GB to get a contiguous DRAM address. */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}
615
616
617static void remove_addr_bit(u64 *addr, int bitidx)
618{
619 u64 mask;
620
621 if (bitidx == -1)
622 return;
623
624 mask = (1ull << bitidx) - 1;
625 *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
626}
627
628
629static int hash_by_mask(u64 addr, u64 mask)
630{
631 u64 result = addr & mask;
632
633 result = (result >> 32) ^ result;
634 result = (result >> 16) ^ result;
635 result = (result >> 8) ^ result;
636 result = (result >> 4) ^ result;
637 result = (result >> 2) ^ result;
638 result = (result >> 1) ^ result;
639
640 return (int)result & 1;
641}
642
643
644
645
646
647static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
648{
649 u64 contig_addr, contig_base, contig_offset, contig_base_adj;
650 int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
651 MOT_CHAN_INTLV_BIT_1SLC_2CH;
652 int slice_intlv_bit_rm = SELECTOR_DISABLED;
653 int chan_intlv_bit_rm = SELECTOR_DISABLED;
654
655 bool mot_hit = in_region(&mot, addr);
656
657 int sym_channels = hweight8(sym_chan_mask);
658
659
660
661
662
663
664
665 int sym_chan_shift = sym_channels >> 1;
666
667
668 if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
669 (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
670 snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
671 return -EINVAL;
672 }
673
674
675 contig_addr = remove_mmio_gap(addr);
676
677 if (in_region(&as0, addr)) {
678 *pmiidx = asym0.slice0_asym_channel_select;
679
680 contig_base = remove_mmio_gap(as0.base);
681 contig_offset = contig_addr - contig_base;
682 contig_base_adj = (contig_base >> sym_chan_shift) *
683 ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
684 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
685 } else if (in_region(&as1, addr)) {
686 *pmiidx = 2u + asym1.slice1_asym_channel_select;
687
688 contig_base = remove_mmio_gap(as1.base);
689 contig_offset = contig_addr - contig_base;
690 contig_base_adj = (contig_base >> sym_chan_shift) *
691 ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
692 contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
693 } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
694 bool channel1;
695
696 mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
697 *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
698 channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
699 hash_by_mask(contig_addr, chan_hash_mask);
700 *pmiidx |= (u32)channel1;
701
702 contig_base = remove_mmio_gap(as2.base);
703 chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
704 contig_offset = contig_addr - contig_base;
705 remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
706 contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
707 } else {
708
709 *pmiidx = 0u;
710
711 if (two_slices) {
712 bool slice1;
713
714 if (mot_hit) {
715 slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
716 slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
717 } else {
718 slice_intlv_bit_rm = slice_selector;
719 slice1 = hash_by_mask(addr, slice_hash_mask);
720 }
721
722 *pmiidx = (u32)slice1 << 1;
723 }
724
725 if (two_channels) {
726 bool channel1;
727
728 mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
729 MOT_CHAN_INTLV_BIT_1SLC_2CH;
730
731 if (mot_hit) {
732 chan_intlv_bit_rm = mot_intlv_bit;
733 channel1 = (addr >> mot_intlv_bit) & 1;
734 } else {
735 chan_intlv_bit_rm = chan_selector;
736 channel1 = hash_by_mask(contig_addr, chan_hash_mask);
737 }
738
739 *pmiidx |= (u32)channel1;
740 }
741 }
742
743
744 remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
745
746 remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
747 *pmiaddr = contig_addr;
748
749 return 0;
750}
751
752
753#define C(n) (0x10 | (n))
754#define B(n) (0x20 | (n))
755#define R(n) (0x40 | (n))
756#define RS (0x80)
757
758
759#define AMAP_1KB 0
760#define AMAP_2KB 1
761#define AMAP_4KB 2
762#define AMAP_RSVD 3
763
764
765#define DEN_4Gb 0
766#define DEN_8Gb 2
767
768
769#define X8 0
770#define X16 1
771
/*
 * Map of PMI address bit -> DRAM coordinate bit for each supported
 * DIMM geometry (address map x density x device width). Each entry of
 * bits[] encodes one PMI address bit as C(n)/B(n)/R(n) for column,
 * bank or row bit n, RS for the rank select bit, or 0 for unused.
 */
static struct dimm_geometry {
	u8 addrdec;			/* address map: AMAP_1KB/2KB/4KB */
	u8 dden;			/* device density: DEN_4Gb/DEN_8Gb */
	u8 dwid;			/* device width: X8/X16 */
	u8 rowbits, colbits;		/* number of row/column address bits */
	u16 bits[PMI_ADDRESS_WIDTH];	/* per-PMI-bit coordinate encoding */
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	}
};
900
901static int bank_hash(u64 pmiaddr, int idx, int shft)
902{
903 int bhash = 0;
904
905 switch (idx) {
906 case 0:
907 bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
908 break;
909 case 1:
910 bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
911 bhash ^= ((pmiaddr >> 22) & 1) << 1;
912 break;
913 case 2:
914 bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
915 break;
916 }
917
918 return bhash;
919}
920
921static int rank_hash(u64 pmiaddr)
922{
923 return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
924}
925
926
/*
 * Apollo Lake: walk the geometry table for this channel's DIMM and
 * scatter the PMI address bits into column/bank/row/rank coordinates.
 */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On a single-rank DIMM (exactly one of rken0/rken1 set)
		 * the rank-select bit is absent from the PMI address:
		 * skip the RS table entry and use the next one instead.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			/* Unused table entry: any set bit here means the address is bogus. */
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;	/* Apollo Lake: one DIMM per channel */

	return 0;
}
993
994
/* Extract PMI address bit 'in' and place it at position 'out'. */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))

/*
 * Denverton: reassemble rank/dimm/bank/row/column from the PMI address
 * using the per-channel dmap* address-map registers. Register fields
 * hold bit positions; values of 31 (rows) / 0x3f (ca11) mean "bit not
 * used".
 */
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	/* Rank within the channel from the two rank-select bits. */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);

	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Ranks 0/1 normally belong to DIMM 0 and ranks 2/3 to DIMM 1,
	 * unless the "dimmflip" bit swaps the slots.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	/* Bank (and bank group for DDR4), with optional bank XOR hash. */
	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	/* Row address; bits 14-17 only when the register says they exist. */
	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	/* Column address; ca11 only exists on DDR3. */
	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}
1072
1073static int check_channel(int ch)
1074{
1075 if (drp0[ch].dramtype != 0) {
1076 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1077 return 1;
1078 } else if (drp0[ch].eccen == 0) {
1079 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1080 return 1;
1081 }
1082 return 0;
1083}
1084
1085static int apl_check_ecc_active(void)
1086{
1087 int i, ret = 0;
1088
1089
1090 for (i = 0; i < APL_NUM_CHANNELS; i++)
1091 if (chan_mask & BIT(i))
1092 ret += check_channel(i);
1093 return ret ? -EINVAL : 0;
1094}
1095
/* Non-zero when any rank on the channel is enabled (memory present). */
#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

/* Return 1 when channel @ch has memory present but ECC disabled. */
static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}
1108
1109static int dnv_check_ecc_active(void)
1110{
1111 int i, ret = 0;
1112
1113 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1114 ret += check_unit(i);
1115 return ret ? -EINVAL : 0;
1116}
1117
/*
 * Full translation: system address -> PMI address/index -> DRAM
 * coordinates in @daddr. On failure, the reason is left in @msg.
 */
static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
				 struct dram_addr *daddr, char *msg)
{
	u64 pmiaddr;
	u32 pmiidx;
	int ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	/* Scale into the controller's PMI address units (per-SoC shift). */
	pmiaddr >>= ops->pmiaddr_shift;
	/* Pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}
1143
/*
 * Decode one machine-check record targeting this memory controller and
 * report it to the EDAC core. Records without a valid address are
 * silently ignored; records whose address cannot be translated are
 * reported with the translation-failure message instead.
 */
static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	/* Uncorrected and no way to return to the interrupted context: fatal. */
	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
						 HW_EVENT_ERR_CORRECTED;

	/*
	 * Memory errors use the MCA compound error code form
	 * 0000 0000 1MMM CCCC (binary), i.e. (errcode & 0xef80) == 0x80;
	 * anything else is not a memory error. The MMM field (bits 6:4,
	 * 'optypenum') identifies the memory transaction type.
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors that carry a valid address. */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Report the error with fully decoded DRAM coordinates. */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
			     m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	/* msg holds the translation-failure reason from get_memory_error_data(). */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}
1222
1223static void apl_get_dimm_config(struct mem_ctl_info *mci)
1224{
1225 struct pnd2_pvt *pvt = mci->pvt_info;
1226 struct dimm_info *dimm;
1227 struct d_cr_drp0 *d;
1228 u64 capacity;
1229 int i, g;
1230
1231 for (i = 0; i < APL_NUM_CHANNELS; i++) {
1232 if (!(chan_mask & BIT(i)))
1233 continue;
1234
1235 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
1236 if (!dimm) {
1237 edac_dbg(0, "No allocated DIMM for channel %d\n", i);
1238 continue;
1239 }
1240
1241 d = &drp0[i];
1242 for (g = 0; g < ARRAY_SIZE(dimms); g++)
1243 if (dimms[g].addrdec == d->addrdec &&
1244 dimms[g].dden == d->dden &&
1245 dimms[g].dwid == d->dwid)
1246 break;
1247
1248 if (g == ARRAY_SIZE(dimms)) {
1249 edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
1250 continue;
1251 }
1252
1253 pvt->dimm_geom[i] = g;
1254 capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
1255 (1ul << dimms[g].colbits);
1256 edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
1257 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1258 dimm->grain = 32;
1259 dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
1260 dimm->mtype = MEM_DDR3;
1261 dimm->edac_mode = EDAC_SECDED;
1262 snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
1263 }
1264}
1265
/* Map the Denverton dimmdwid* register encoding to EDAC device widths */
static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};
1269
/*
 * Denverton: derive DIMM geometry (row/column bits, banks, ranks) from
 * the channel decode registers and populate the per-DIMM info for up
 * to DNV_MAX_DIMMS DIMMs on each of the two channels.
 */
static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64 capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
		/* DDR3 column width is determined per channel in the loop below */
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		/*
		 * A dmap4 row field of 31 (all ones) appears to mark that row
		 * bit as unmapped; the first unmapped bit gives the row width.
		 */
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			/* CA11 mapped (!= 0x3f) implies 12 column bits, else 10 */
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM 0 uses rank-enable bits 0/1 ... */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* ... DIMM 1 uses rank-enable bits 2/3 */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			/* capacity is in 8-byte units; ">> (20 - 3)" converts to MiB */
			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			/*
			 * NOTE(review): index order looks swapped — DIMM 0 (j==0)
			 * reads dimmdwid1 and DIMM 1 reads dimmdwid0. Confirm
			 * against the register specification.
			 */
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}
1332
1333static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1334{
1335 struct edac_mc_layer layers[2];
1336 struct mem_ctl_info *mci;
1337 struct pnd2_pvt *pvt;
1338 int rc;
1339
1340 rc = ops->check_ecc();
1341 if (rc < 0)
1342 return rc;
1343
1344
1345 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1346 layers[0].size = ops->channels;
1347 layers[0].is_virt_csrow = false;
1348 layers[1].type = EDAC_MC_LAYER_SLOT;
1349 layers[1].size = ops->dimms_per_channel;
1350 layers[1].is_virt_csrow = true;
1351 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1352 if (!mci)
1353 return -ENOMEM;
1354
1355 pvt = mci->pvt_info;
1356 memset(pvt, 0, sizeof(*pvt));
1357
1358 mci->mod_name = "pnd2_edac.c";
1359 mci->dev_name = ops->name;
1360 mci->ctl_name = "Pondicherry2";
1361
1362
1363 ops->get_dimm_config(mci);
1364
1365 if (edac_mc_add_mc(mci)) {
1366 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1367 edac_mc_free(mci);
1368 return -EINVAL;
1369 }
1370
1371 *ppmci = mci;
1372
1373 return 0;
1374}
1375
1376static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1377{
1378 if (unlikely(!mci || !mci->pvt_info)) {
1379 pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1380 return;
1381 }
1382
1383
1384 edac_mc_del_mc(NULL);
1385 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1386 edac_mc_free(mci);
1387}
1388
1389
1390
1391
1392
1393static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1394{
1395 struct mce *mce = (struct mce *)data;
1396 struct mem_ctl_info *mci;
1397 struct dram_addr daddr;
1398 char *type;
1399
1400 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
1401 return NOTIFY_DONE;
1402
1403 mci = pnd2_mci;
1404 if (!mci)
1405 return NOTIFY_DONE;
1406
1407
1408
1409
1410
1411
1412
1413 if ((mce->status & 0xefff) >> 7 != 1)
1414 return NOTIFY_DONE;
1415
1416 if (mce->mcgstatus & MCG_STATUS_MCIP)
1417 type = "Exception";
1418 else
1419 type = "Event";
1420
1421 pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1422 pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1423 mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1424 pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1425 pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1426 pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1427 pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1428 mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1429
1430 pnd2_mce_output_error(mci, mce, &daddr);
1431
1432
1433 return NOTIFY_STOP;
1434}
1435
/* Hook into the x86 MCE decode chain; see pnd2_mce_check_error() */
static struct notifier_block pnd2_mce_dec = {
	.notifier_call = pnd2_mce_check_error,
};
1439
1440
1441
#if 0
/*
 * Debugfs self-test harness (compiled out by default).
 *
 * Writing a physical address to "pnd2_debug_addr" synthesizes a fake
 * machine check for that address; the decoded DRAM coordinates can then
 * be read back from "pnd2_debug_results".
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

/* Debugfs write handler: fake an ADDRV machine check for address @val */
static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV plus a memory-error MCACOD (0x9f) so the decoder accepts it */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
		 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
/* No-op stubs when the debugfs self-test is compiled out */
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif
1492
1493static int pnd2_probe(void)
1494{
1495 int rc;
1496
1497 edac_dbg(2, "\n");
1498 rc = get_registers();
1499 if (rc)
1500 return rc;
1501
1502 return pnd2_register_mci(&pnd2_mci);
1503}
1504
/* Counterpart of pnd2_probe(): tear down the registered mci */
static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}
1510
/* Apollo Lake: 4 channels with 1 DIMM each */
static struct dunit_ops apl_ops = {
	.name = "pnd2/apl",
	.type = APL,
	.pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift = 0,
	.channels = APL_NUM_CHANNELS,
	.dimms_per_channel = 1,
	.rd_reg = apl_rd_reg,
	.get_registers = apl_get_registers,
	.check_ecc = apl_check_ecc_active,
	.mk_region = apl_mk_region,
	.get_dimm_config = apl_get_dimm_config,
	.pmi2mem = apl_pmi2mem,
};
1525
/* Denverton: 2 channels with up to 2 DIMMs each */
static struct dunit_ops dnv_ops = {
	.name = "pnd2/dnv",
	.type = DNV,
	.pmiaddr_shift = 0,
	.pmiidx_shift = 1,
	.channels = DNV_NUM_CHANNELS,
	.dimms_per_channel = 2,
	.rd_reg = dnv_rd_reg,
	.get_registers = dnv_get_registers,
	.check_ecc = dnv_check_ecc_active,
	.mk_region = dnv_mk_region,
	.get_dimm_config = dnv_get_dimm_config,
	.pmi2mem = dnv_pmi2mem,
};
1540
/*
 * Supported CPU models; driver_data selects the matching dunit_ops
 * (Apollo Lake / Goldmont -> apl_ops, Denverton -> dnv_ops).
 */
static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1547
1548static int __init pnd2_init(void)
1549{
1550 const struct x86_cpu_id *id;
1551 int rc;
1552
1553 edac_dbg(2, "\n");
1554
1555 id = x86_match_cpu(pnd2_cpuids);
1556 if (!id)
1557 return -ENODEV;
1558
1559 ops = (struct dunit_ops *)id->driver_data;
1560
1561 if (ops->type == APL) {
1562 p2sb_bus = pci_find_bus(0, 0);
1563 if (!p2sb_bus)
1564 return -ENODEV;
1565 }
1566
1567
1568 opstate_init();
1569
1570 rc = pnd2_probe();
1571 if (rc < 0) {
1572 pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1573 return rc;
1574 }
1575
1576 if (!pnd2_mci)
1577 return -ENODEV;
1578
1579 mce_register_decode_chain(&pnd2_mce_dec);
1580 setup_pnd2_debug();
1581
1582 return 0;
1583}
1584
/* Module exit: undo pnd2_init() in reverse order */
static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}
1592
module_init(pnd2_init);
module_exit(pnd2_exit);

/* Polling vs NMI reporting mode, selectable at load time */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
1602