1#include "amd64_edac.h"
2#include <asm/amd_nb.h>
3
4static struct edac_pci_ctl_info *pci_ctl;
5
6static int report_gart_errors;
7module_param(report_gart_errors, int, 0644);
8
9
10
11
12
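/*
 * Module parameter: when set, force-enable ECC checking on nodes where the
 * BIOS has left it disabled.
 */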
13static int ecc_enable_override;
14module_param(ecc_enable_override, int, 0644);
15
16static struct msr __percpu *msrs;
17
18static struct amd64_family_type *fam_type;
19
20
21static struct ecc_settings **ecc_stngs;
22
23
24
25
26
27
28
29
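/*
 * Memory scrubber control values and the DRAM scrubbing bandwidth (in
 * bytes/sec) each one corresponds to.  The "set" path below picks the first
 * entry whose bandwidth does not exceed the requested rate, e.g. a request
 * of 100000000 bytes/sec selects scrubval 0x05.
 */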
30static const struct scrubrate {
31 u32 scrubval;
32 u32 bandwidth;
33} scrubrates[] = {
34 { 0x01, 1600000000UL},
35 { 0x02, 800000000UL},
36 { 0x03, 400000000UL},
37 { 0x04, 200000000UL},
38 { 0x05, 100000000UL},
39 { 0x06, 50000000UL},
40 { 0x07, 25000000UL},
41 { 0x08, 12284069UL},
42 { 0x09, 6274509UL},
43 { 0x0A, 3121951UL},
44 { 0x0B, 1560975UL},
45 { 0x0C, 781440UL},
46 { 0x0D, 390720UL},
47 { 0x0E, 195300UL},
48 { 0x0F, 97650UL},
49 { 0x10, 48854UL},
50 { 0x11, 24427UL},
51 { 0x12, 12213UL},
52 { 0x13, 6101UL},
53 { 0x14, 3051UL},
54 { 0x15, 1523UL},
55 { 0x16, 761UL},
56 { 0x00, 0UL},
57};
58
59int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
60 u32 *val, const char *func)
61{
62 int err = 0;
63
64 err = pci_read_config_dword(pdev, offset, val);
65 if (err)
66 amd64_warn("%s: error reading F%dx%03x.\n",
67 func, PCI_FUNC(pdev->devfn), offset);
68
69 return err;
70}
71
72int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
73 u32 val, const char *func)
74{
75 int err = 0;
76
77 err = pci_write_config_dword(pdev, offset, val);
78 if (err)
79 amd64_warn("%s: error writing to F%dx%03x.\n",
80 func, PCI_FUNC(pdev->devfn), offset);
81
82 return err;
83}
84
85
86
87
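/*
 * Route DCT-indexed config space accesses to the selected DCT via the
 * DCT_CFG_SEL register (Family 15h).
 */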
88static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
89{
90 u32 reg = 0;
91
	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
93 reg &= (pvt->model == 0x30) ? ~3 : ~1;
94 reg |= dct;
95 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
96}
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
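/*
 * Read a DCT config register.  Depending on the family, the register for the
 * second DCT is either at a fixed offset (+0x100), only reachable after
 * selecting the DCT via DCT_CFG_SEL, or not implemented at all.
 */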
112static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
113 int offset, u32 *val)
114{
115 switch (pvt->fam) {
116 case 0xf:
117 if (dct || offset >= 0x100)
118 return -EINVAL;
119 break;
120
121 case 0x10:
122 if (dct) {
123
124
125
126
127
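			/*
			 * When the DCTs are ganged, reads from most F2x1xx
			 * registers return 0, so skip the access and report
			 * success.
			 */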
128 if (dct_ganging_enabled(pvt))
129 return 0;
130
131 offset += 0x100;
132 }
133 break;
134
135 case 0x15:
136
137
138
139
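		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * Select the DCT via DCT_CFG_SEL before the access instead.
		 */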
140 dct = (dct && pvt->model == 0x30) ? 3 : dct;
141 f15h_select_dct(pvt, dct);
142 break;
143
144 case 0x16:
145 if (dct)
146 return -EINVAL;
147 break;
148
149 default:
150 break;
151 }
152 return amd64_read_pci_cfg(pvt->F2, offset, val);
153}
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
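/*
 * On Family 17h and later the patrol scrubber is programmed through the
 * scrub base/limit registers behind function 6 of the Data Fabric device.
 */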
169static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
170{
171
172
173
174
175
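	/*
	 * Fam17h supports scrub values between 0x5 and 0x14.  Also, the
	 * values are shifted down by 0x5, so scrubval 0x5 is written to the
	 * register as 0x0, scrubval 0x6 as 0x1, etc.
	 */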
176 if (scrubval >= 0x5 && scrubval <= 0x14) {
177 scrubval -= 0x5;
178 pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
179 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
180 } else {
181 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
182 }
183}
184
185
186
187
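/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value
 * and program the corresponding scrub control value.
 */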
188static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
189{
190 u32 scrubval;
191 int i;
192
193
194
195
196
197
198
199
200
201
202 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
203
204
205
206
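		/*
		 * Skip scrub rates below the minimum value allowed for this
		 * family; if no suitable entry is found, the last (off) entry
		 * disables scrubbing.
		 */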
207 if (scrubrates[i].scrubval < min_rate)
208 continue;
209
210 if (scrubrates[i].bandwidth <= new_bw)
211 break;
212 }
213
214 scrubval = scrubrates[i].scrubval;
215
216 if (pvt->umc) {
217 __f17h_set_scrubval(pvt, scrubval);
218 } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
219 f15h_select_dct(pvt, 0);
220 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
221 f15h_select_dct(pvt, 1);
222 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
223 } else {
224 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
225 }
226
227 if (scrubval)
228 return scrubrates[i].bandwidth;
229
230 return 0;
231}
232
233static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
234{
235 struct amd64_pvt *pvt = mci->pvt_info;
236 u32 min_scrubrate = 0x5;
237
238 if (pvt->fam == 0xf)
239 min_scrubrate = 0x0;
240
241 if (pvt->fam == 0x15) {
242
243 if (pvt->model < 0x10)
244 f15h_select_dct(pvt, 0);
245
246 if (pvt->model == 0x60)
247 min_scrubrate = 0x6;
248 }
249 return __set_scrub_rate(pvt, bw, min_scrubrate);
250}
251
252static int get_scrub_rate(struct mem_ctl_info *mci)
253{
254 struct amd64_pvt *pvt = mci->pvt_info;
255 int i, retval = -EINVAL;
256 u32 scrubval = 0;
257
258 if (pvt->umc) {
259 amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
260 if (scrubval & BIT(0)) {
261 amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
262 scrubval &= 0xF;
263 scrubval += 0x5;
264 } else {
265 scrubval = 0;
266 }
267 } else if (pvt->fam == 0x15) {
268
269 if (pvt->model < 0x10)
270 f15h_select_dct(pvt, 0);
271
272 if (pvt->model == 0x60)
273 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
274 } else {
275 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
276 }
277
278 scrubval = scrubval & 0x001F;
279
280 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
281 if (scrubrates[i].scrubval == scrubval) {
282 retval = scrubrates[i].bandwidth;
283 break;
284 }
285 }
286 return retval;
287}
288
289
290
291
292
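/*
 * Return true if the SysAddr given by @sys_addr falls within the DRAM
 * base/limit range associated with node @nid.
 */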
293static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
294{
295 u64 addr;
296
297
298
299
300
301
302
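	/*
	 * Only the low 40 bits form a DRAM address; bits 63-40 may be copies
	 * of the most significant implemented address bit, so discard them
	 * before comparing against the base/limit registers.
	 */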
303 addr = sys_addr & 0x000000ffffffffffull;
304
305 return ((addr >= get_dram_base(pvt, nid)) &&
306 (addr <= get_dram_limit(pvt, nid)));
307}
308
309
310
311
312
313
314
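/*
 * Attempt to map a SysAddr to a node.  On success, return a pointer to the
 * mem_ctl_info structure for that node; on failure, return NULL.
 */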
315static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
316 u64 sys_addr)
317{
318 struct amd64_pvt *pvt;
319 u8 node_id;
320 u32 intlv_en, bits;
321
322
323
324
325
326 pvt = mci->pvt_info;
327
328
329
330
331
332
333 intlv_en = dram_intlv_en(pvt, 0);
334
335 if (intlv_en == 0) {
336 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
337 if (base_limit_match(pvt, sys_addr, node_id))
338 goto found;
339 }
340 goto err_no_match;
341 }
342
343 if (unlikely((intlv_en != 0x01) &&
344 (intlv_en != 0x03) &&
345 (intlv_en != 0x07))) {
346 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
347 return NULL;
348 }
349
350 bits = (((u32) sys_addr) >> 12) & intlv_en;
351
352 for (node_id = 0; ; ) {
353 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
354 break;
355
356 if (++node_id >= DRAM_RANGES)
357 goto err_no_match;
358 }
359
360
361 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
364 __func__, sys_addr, node_id);
365 return NULL;
366 }
367
368found:
369 return edac_mc_find((int)node_id);
370
371err_no_match:
372 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
373 (unsigned long)sys_addr);
374
375 return NULL;
376}
377
378
379
380
381
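/*
 * Compute the chip-select base address and mask of @csrow on DRAM
 * controller @dct.
 */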
382static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
383 u64 *base, u64 *mask)
384{
385 u64 csbase, csmask, base_bits, mask_bits;
386 u8 addr_shift;
387
388 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
389 csbase = pvt->csels[dct].csbases[csrow];
390 csmask = pvt->csels[dct].csmasks[csrow];
391 base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
392 mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
393 addr_shift = 4;
394
395
396
397
398
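	/*
	 * F16h and F15h models 30h and later use two shifts here: the low
	 * part of the base/mask is shifted by 6 and the high part by 8.
	 */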
399 } else if (pvt->fam == 0x16 ||
400 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
401 csbase = pvt->csels[dct].csbases[csrow];
402 csmask = pvt->csels[dct].csmasks[csrow >> 1];
403
404 *base = (csbase & GENMASK_ULL(15, 5)) << 6;
405 *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
406
407 *mask = ~0ULL;
408
409 *mask &= ~((GENMASK_ULL(15, 5) << 6) |
410 (GENMASK_ULL(30, 19) << 8));
411
412 *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
413 *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
414
415 return;
416 } else {
417 csbase = pvt->csels[dct].csbases[csrow];
418 csmask = pvt->csels[dct].csmasks[csrow >> 1];
419 addr_shift = 8;
420
421 if (pvt->fam == 0x15)
422 base_bits = mask_bits =
423 GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
424 else
425 base_bits = mask_bits =
426 GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
427 }
428
429 *base = (csbase & base_bits) << addr_shift;
430
431 *mask = ~0ULL;
432
433 *mask &= ~(mask_bits << addr_shift);
434
435 *mask |= (csmask & mask_bits) << addr_shift;
436}
437
438#define for_each_chip_select(i, dct, pvt) \
439 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
440
441#define chip_select_base(i, dct, pvt) \
442 pvt->csels[dct].csbases[i]
443
444#define for_each_chip_select_mask(i, dct, pvt) \
445 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
446
447#define for_each_umc(i) \
448 for (i = 0; i < fam_type->max_mcs; i++)
449
450
451
452
453
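/*
 * @input_addr is an InputAddr associated with the node given by @mci.
 * Return the csrow that @input_addr maps to, or -1 if no csrow claims it.
 */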
454static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
455{
456 struct amd64_pvt *pvt;
457 int csrow;
458 u64 base, mask;
459
460 pvt = mci->pvt_info;
461
462 for_each_chip_select(csrow, 0, pvt) {
463 if (!csrow_enabled(csrow, 0, pvt))
464 continue;
465
466 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
467
468 mask = ~mask;
469
470 if ((input_addr & mask) == (base & mask)) {
471 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
472 (unsigned long)input_addr, csrow,
473 pvt->mc_node_id);
474
475 return csrow;
476 }
477 }
478 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
479 (unsigned long)input_addr, pvt->mc_node_id);
480
481 return -1;
482}
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
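/*
 * Return the DRAM hole base, offset and size for the node given by @mci,
 * using the DRAM Hole Address Register (DHAR).  Returns 0 on success, or 1
 * if memory hoisting is disabled or unsupported on this node.
 */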
500int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
501 u64 *hole_offset, u64 *hole_size)
502{
503 struct amd64_pvt *pvt = mci->pvt_info;
504
505
506 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
507 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
508 pvt->ext_model, pvt->mc_node_id);
509 return 1;
510 }
511
512
513 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
514 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
515 return 1;
516 }
517
518 if (!dhar_valid(pvt)) {
519 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
520 pvt->mc_node_id);
521 return 1;
522 }
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
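	/*
	 * Memory hoisting is enabled on this node: the hole spans from the
	 * DHAR base up to the 4GB boundary, and the DRAM that would occupy
	 * it is hoisted above 4GB and reached by subtracting the hole offset.
	 */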
542 *hole_base = dhar_base(pvt);
543 *hole_size = (1ULL << 32) - *hole_base;
544
545 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
546 : k8_dhar_offset(pvt);
547
548 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
549 pvt->mc_node_id, (unsigned long)*hole_base,
550 (unsigned long)*hole_offset, (unsigned long)*hole_size);
551
552 return 0;
553}
554EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
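/*
 * Translate the SysAddr given by @sys_addr to a DramAddr for the node
 * associated with @mci, taking the DRAM hole into account when memory
 * hoisting is enabled.
 */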
585static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
586{
587 struct amd64_pvt *pvt = mci->pvt_info;
588 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
589 int ret;
590
591 dram_base = get_dram_base(pvt, pvt->mc_node_id);
592
593 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
594 &hole_size);
595 if (!ret) {
596 if ((sys_addr >= (1ULL << 32)) &&
597 (sys_addr < ((1ULL << 32) + hole_size))) {
598
599 dram_addr = sys_addr - hole_offset;
600
601 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
602 (unsigned long)sys_addr,
603 (unsigned long)dram_addr);
604
605 return dram_addr;
606 }
607 }
608
609
610
611
612
613
614
615
616
617
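	/*
	 * No hoisting adjustment applies here: simply subtract the node's
	 * DRAM base from the low 40 bits of the SysAddr.
	 */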
618 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
619
620 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
621 (unsigned long)sys_addr, (unsigned long)dram_addr);
622 return dram_addr;
623}
624
625
626
627
628
629
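/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register.
 * Return the number of SysAddr bits used for node interleaving.
 */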
630static int num_node_interleave_bits(unsigned intlv_en)
631{
632 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
633 int n;
634
635 BUG_ON(intlv_en > 7);
636 n = intlv_shift_table[intlv_en];
637 return n;
638}
639
640
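/* Translate the DramAddr given by @dram_addr to an InputAddr. */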
641static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
642{
643 struct amd64_pvt *pvt;
644 int intlv_shift;
645 u64 input_addr;
646
647 pvt = mci->pvt_info;
648
649
650
651
652
653 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
654 input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
655 (dram_addr & 0xfff);
656
657 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
658 intlv_shift, (unsigned long)dram_addr,
659 (unsigned long)input_addr);
660
661 return input_addr;
662}
663
664
665
666
667
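/* Translate the SysAddr given by @sys_addr to an InputAddr for @mci's node. */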
668static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
669{
670 u64 input_addr;
671
672 input_addr =
673 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
674
675 edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
676 (unsigned long)sys_addr, (unsigned long)input_addr);
677
678 return input_addr;
679}
680
681
682static inline void error_address_to_page_and_offset(u64 error_address,
683 struct err_info *err)
684{
685 err->page = (u32) (error_address >> PAGE_SHIFT);
686 err->offset = ((u32) error_address) & ~PAGE_MASK;
687}
688
689
690
691
692
693
694
695
696
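/*
 * @sys_addr is an error address (SysAddr) extracted from the MCA registers.
 * Return the csrow it maps to, or -1 on failure.
 */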
697static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
698{
699 int csrow;
700
701 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
702
703 if (csrow == -1)
704 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
705 "address 0x%lx\n", (unsigned long)sys_addr);
706 return csrow;
707}
708
709static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
710
711
712
713
714
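/*
 * Determine whether the DIMMs have ECC enabled.  ECC is reported as enabled
 * only if all in-use DIMMs/UMCs have it enabled.
 */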
715static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
716{
717 unsigned long edac_cap = EDAC_FLAG_NONE;
718 u8 bit;
719
720 if (pvt->umc) {
721 u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
722
723 for_each_umc(i) {
724 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
725 continue;
726
727 umc_en_mask |= BIT(i);
728
729
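			/* UMC Configuration bit 12 (DimmEccEn) */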
730 if (pvt->umc[i].umc_cfg & BIT(12))
731 dimm_ecc_en_mask |= BIT(i);
732 }
733
734 if (umc_en_mask == dimm_ecc_en_mask)
735 edac_cap = EDAC_FLAG_SECDED;
736 } else {
737 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
738 ? 19
739 : 17;
740
741 if (pvt->dclr0 & BIT(bit))
742 edac_cap = EDAC_FLAG_SECDED;
743 }
744
745 return edac_cap;
746}
747
748static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
749
750static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
751{
752 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
753
754 if (pvt->dram_type == MEM_LRDDR3) {
755 u32 dcsm = pvt->csels[chan].csmasks[0];
756
757
758
759
760
761 edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
762 }
763
764 edac_dbg(1, "All DIMMs support ECC:%s\n",
765 (dclr & BIT(19)) ? "yes" : "no");
766
767
768 edac_dbg(1, " PAR/ERR parity: %s\n",
769 (dclr & BIT(8)) ? "enabled" : "disabled");
770
771 if (pvt->fam == 0x10)
772 edac_dbg(1, " DCT 128bit mode width: %s\n",
773 (dclr & BIT(11)) ? "128b" : "64b");
774
775 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
776 (dclr & BIT(12)) ? "yes" : "no",
777 (dclr & BIT(13)) ? "yes" : "no",
778 (dclr & BIT(14)) ? "yes" : "no",
779 (dclr & BIT(15)) ? "yes" : "no");
780}
781
782#define CS_EVEN_PRIMARY BIT(0)
783#define CS_ODD_PRIMARY BIT(1)
784#define CS_EVEN_SECONDARY BIT(2)
785#define CS_ODD_SECONDARY BIT(3)
786
787#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
788#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
789
790static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
791{
792 int cs_mode = 0;
793
794 if (csrow_enabled(2 * dimm, ctrl, pvt))
795 cs_mode |= CS_EVEN_PRIMARY;
796
797 if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
798 cs_mode |= CS_ODD_PRIMARY;
799
800
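	/* Asymmetric dual-rank DIMM support. */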
801 if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
802 cs_mode |= CS_ODD_SECONDARY;
803
804 return cs_mode;
805}
806
807static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
808{
809 int dimm, size0, size1, cs0, cs1, cs_mode;
810
811 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
812
813 for (dimm = 0; dimm < 2; dimm++) {
814 cs0 = dimm * 2;
815 cs1 = dimm * 2 + 1;
816
817 cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
818
819 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
820 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
821
822 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
823 cs0, size0,
824 cs1, size1);
825 }
826}
827
828static void __dump_misc_regs_df(struct amd64_pvt *pvt)
829{
830 struct amd64_umc *umc;
831 u32 i, tmp, umc_base;
832
833 for_each_umc(i) {
834 umc_base = get_umc_base(i);
835 umc = &pvt->umc[i];
836
837 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
838 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
839 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
840 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
841
842 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
843 edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
844
845 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
846 edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
847 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
848
849 edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
850 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
851 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
852 edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
853 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
854 edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
855 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
856 edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
857 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
858
859 if (pvt->dram_type == MEM_LRDDR4) {
860 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
861 edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
862 i, 1 << ((tmp >> 4) & 0x3));
863 }
864
865 debug_display_dimm_sizes_df(pvt, i);
866 }
867
868 edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
869 pvt->dhar, dhar_base(pvt));
870}
871
872
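/* Display and decode various NB registers for debug purposes. */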
873static void __dump_misc_regs(struct amd64_pvt *pvt)
874{
875 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
876
877 edac_dbg(1, " NB two channel DRAM capable: %s\n",
878 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
879
880 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
881 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
882 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
883
884 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
885
886 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
887
888 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
889 pvt->dhar, dhar_base(pvt),
890 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
891 : f10_dhar_offset(pvt));
892
893 debug_display_dimm_sizes(pvt, 0);
894
895
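	/* Everything below this point is Fam10h and above */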
896 if (pvt->fam == 0xf)
897 return;
898
899 debug_display_dimm_sizes(pvt, 1);
900
901
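	/* Only if NOT ganged does dclr1 have valid info */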
902 if (!dct_ganging_enabled(pvt))
903 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
904}
905
906
907static void dump_misc_regs(struct amd64_pvt *pvt)
908{
909 if (pvt->umc)
910 __dump_misc_regs_df(pvt);
911 else
912 __dump_misc_regs(pvt);
913
914 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
915
916 amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
917}
918
919
920
921
922static void prep_chip_selects(struct amd64_pvt *pvt)
923{
924 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
925 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
926 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
927 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
928 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
929 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
930 } else if (pvt->fam >= 0x17) {
931 int umc;
932
933 for_each_umc(umc) {
934 pvt->csels[umc].b_cnt = 4;
935 pvt->csels[umc].m_cnt = 2;
936 }
937
938 } else {
939 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
940 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
941 }
942}
943
944static void read_umc_base_mask(struct amd64_pvt *pvt)
945{
946 u32 umc_base_reg, umc_base_reg_sec;
947 u32 umc_mask_reg, umc_mask_reg_sec;
948 u32 base_reg, base_reg_sec;
949 u32 mask_reg, mask_reg_sec;
950 u32 *base, *base_sec;
951 u32 *mask, *mask_sec;
952 int cs, umc;
953
954 for_each_umc(umc) {
955 umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
956 umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
957
958 for_each_chip_select(cs, umc, pvt) {
959 base = &pvt->csels[umc].csbases[cs];
960 base_sec = &pvt->csels[umc].csbases_sec[cs];
961
962 base_reg = umc_base_reg + (cs * 4);
963 base_reg_sec = umc_base_reg_sec + (cs * 4);
964
965 if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
966 edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
967 umc, cs, *base, base_reg);
968
969 if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
970 edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
971 umc, cs, *base_sec, base_reg_sec);
972 }
973
974 umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
975 umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
976
977 for_each_chip_select_mask(cs, umc, pvt) {
978 mask = &pvt->csels[umc].csmasks[cs];
979 mask_sec = &pvt->csels[umc].csmasks_sec[cs];
980
981 mask_reg = umc_mask_reg + (cs * 4);
982 mask_reg_sec = umc_mask_reg_sec + (cs * 4);
983
984 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
985 edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
986 umc, cs, *mask, mask_reg);
987
988 if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
989 edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
990 umc, cs, *mask_sec, mask_reg_sec);
991 }
992 }
993}
994
995
996
997
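/* Read in the chip-select Base and Mask registers for all DCTs/UMCs. */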
998static void read_dct_base_mask(struct amd64_pvt *pvt)
999{
1000 int cs;
1001
1002 prep_chip_selects(pvt);
1003
1004 if (pvt->umc)
1005 return read_umc_base_mask(pvt);
1006
1007 for_each_chip_select(cs, 0, pvt) {
1008 int reg0 = DCSB0 + (cs * 4);
1009 int reg1 = DCSB1 + (cs * 4);
1010 u32 *base0 = &pvt->csels[0].csbases[cs];
1011 u32 *base1 = &pvt->csels[1].csbases[cs];
1012
1013 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1014 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
1015 cs, *base0, reg0);
1016
1017 if (pvt->fam == 0xf)
1018 continue;
1019
1020 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1021 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
1022 cs, *base1, (pvt->fam == 0x10) ? reg1
1023 : reg0);
1024 }
1025
1026 for_each_chip_select_mask(cs, 0, pvt) {
1027 int reg0 = DCSM0 + (cs * 4);
1028 int reg1 = DCSM1 + (cs * 4);
1029 u32 *mask0 = &pvt->csels[0].csmasks[cs];
1030 u32 *mask1 = &pvt->csels[1].csmasks[cs];
1031
1032 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1033 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
1034 cs, *mask0, reg0);
1035
1036 if (pvt->fam == 0xf)
1037 continue;
1038
1039 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1040 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
1041 cs, *mask1, (pvt->fam == 0x10) ? reg1
1042 : reg0);
1043 }
1044}
1045
1046static void determine_memory_type(struct amd64_pvt *pvt)
1047{
1048 u32 dram_ctrl, dcsm;
1049
1050 if (pvt->umc) {
1051 if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1052 pvt->dram_type = MEM_LRDDR4;
1053 else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1054 pvt->dram_type = MEM_RDDR4;
1055 else
1056 pvt->dram_type = MEM_DDR4;
1057 return;
1058 }
1059
1060 switch (pvt->fam) {
1061 case 0xf:
1062 if (pvt->ext_model >= K8_REV_F)
1063 goto ddr3;
1064
1065 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1066 return;
1067
1068 case 0x10:
1069 if (pvt->dchr0 & DDR3_MODE)
1070 goto ddr3;
1071
1072 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1073 return;
1074
1075 case 0x15:
1076 if (pvt->model < 0x60)
1077 goto ddr3;
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1089 dcsm = pvt->csels[0].csmasks[0];
1090
1091 if (((dram_ctrl >> 8) & 0x7) == 0x2)
1092 pvt->dram_type = MEM_DDR4;
1093 else if (pvt->dclr0 & BIT(16))
1094 pvt->dram_type = MEM_DDR3;
1095 else if (dcsm & 0x3)
1096 pvt->dram_type = MEM_LRDDR3;
1097 else
1098 pvt->dram_type = MEM_RDDR3;
1099
1100 return;
1101
1102 case 0x16:
1103 goto ddr3;
1104
1105 default:
1106 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1107 pvt->dram_type = MEM_EMPTY;
1108 }
1109 return;
1110
1111ddr3:
1112 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1113}
1114
1115
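/* Get the number of DCT channels the memory controller is using. */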
1116static int k8_early_channel_count(struct amd64_pvt *pvt)
1117{
1118 int flag;
1119
1120 if (pvt->ext_model >= K8_REV_F)
1121
1122 flag = pvt->dclr0 & WIDTH_128;
1123 else
1124
1125 flag = pvt->dclr0 & REVE_WIDTH_128;
1126
1127
1128 pvt->dclr1 = 0;
1129
1130 return (flag) ? 2 : 1;
1131}
1132
1133
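/* On F10h and later ErrAddr is MC4_ADDR[47:1] */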
1134static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1135{
1136 u16 mce_nid = amd_get_nb_id(m->extcpu);
1137 struct mem_ctl_info *mci;
1138 u8 start_bit = 1;
1139 u8 end_bit = 47;
1140 u64 addr;
1141
1142 mci = edac_mc_find(mce_nid);
1143 if (!mci)
1144 return 0;
1145
1146 pvt = mci->pvt_info;
1147
1148 if (pvt->fam == 0xf) {
1149 start_bit = 3;
1150 end_bit = 39;
1151 }
1152
1153 addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1154
1155
1156
1157
1158 if (pvt->fam == 0x15) {
1159 u64 cc6_base, tmp_addr;
1160 u32 tmp;
1161 u8 intlv_en;
1162
1163 if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1164 return addr;
1165
1166
1167 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1168 intlv_en = tmp >> 21 & 0x7;
1169
1170
1171 cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
1172
1173
1174 cc6_base |= intlv_en ^ 0x7;
1175
1176
1177 cc6_base <<= 24;
1178
1179 if (!intlv_en)
1180 return cc6_base | (addr & GENMASK_ULL(23, 0));
1181
1182 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1183
1184
1185 tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1186
1187
1188 tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1189
1190
1191 tmp_addr |= addr & GENMASK_ULL(11, 0);
1192
1193 return cc6_base | tmp_addr;
1194 }
1195
1196 return addr;
1197}
1198
1199static struct pci_dev *pci_get_related_function(unsigned int vendor,
1200 unsigned int device,
1201 struct pci_dev *related)
1202{
1203 struct pci_dev *dev = NULL;
1204
1205 while ((dev = pci_get_device(vendor, device, dev))) {
1206 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1207 (dev->bus->number == related->bus->number) &&
1208 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1209 break;
1210 }
1211
1212 return dev;
1213}
1214
1215static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1216{
1217 struct amd_northbridge *nb;
1218 struct pci_dev *f1 = NULL;
1219 unsigned int pci_func;
1220 int off = range << 3;
1221 u32 llim;
1222
1223 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1224 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1225
1226 if (pvt->fam == 0xf)
1227 return;
1228
1229 if (!dram_rw(pvt, range))
1230 return;
1231
1232 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1233 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1234
1235
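	/* F15h only: factor in the CC6 save area by reading the destination
	 * node's limit register. */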
1236 if (pvt->fam != 0x15)
1237 return;
1238
1239 nb = node_to_amd_nb(dram_dst_node(pvt, range));
1240 if (WARN_ON(!nb))
1241 return;
1242
1243 if (pvt->model == 0x60)
1244 pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1245 else if (pvt->model == 0x30)
1246 pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1247 else
1248 pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1249
1250 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1251 if (WARN_ON(!f1))
1252 return;
1253
1254 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1255
1256 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1257
1258
1259 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1260
1261 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1262
1263
1264 pvt->ranges[range].lim.hi |= llim >> 13;
1265
1266 pci_dev_put(f1);
1267}
1268
1269static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1270 struct err_info *err)
1271{
1272 struct amd64_pvt *pvt = mci->pvt_info;
1273
1274 error_address_to_page_and_offset(sys_addr, err);
1275
1276
1277
1278
1279
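	/*
	 * Find out which node the error address belongs to.  This may be
	 * different from the node that detected the error.
	 */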
1280 err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1281 if (!err->src_mci) {
1282 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1283 (unsigned long)sys_addr);
1284 err->err_code = ERR_NODE;
1285 return;
1286 }
1287
1288
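	/* Now map the sys_addr to a CSROW */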
1289 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1290 if (err->csrow < 0) {
1291 err->err_code = ERR_CSROW;
1292 return;
1293 }
1294
1295
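	/* CHIPKILL enabled */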
1296 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1297 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1298 if (err->channel < 0) {
1299
1300
1301
1302
1303
1304 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1305 "possible error reporting race\n",
1306 err->syndrome);
1307 err->err_code = ERR_CHANNEL;
1308 return;
1309 }
1310 } else {
1311
1312
1313
1314
1315
1316
1317
1318
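		/*
		 * Non-chipkill ECC mode: the channel number is derived from
		 * bit 3 of the system address.
		 */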
1319 err->channel = ((sys_addr & BIT(3)) != 0);
1320 }
1321}
1322
1323static int ddr2_cs_size(unsigned i, bool dct_width)
1324{
1325 unsigned shift = 0;
1326
1327 if (i <= 2)
1328 shift = i;
1329 else if (!(i & 0x1))
1330 shift = i >> 1;
1331 else
1332 shift = (i + 1) >> 1;
1333
1334 return 128 << (shift + !!dct_width);
1335}
1336
1337static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1338 unsigned cs_mode, int cs_mask_nr)
1339{
1340 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1341
1342 if (pvt->ext_model >= K8_REV_F) {
1343 WARN_ON(cs_mode > 11);
1344 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1345 }
1346 else if (pvt->ext_model >= K8_REV_D) {
1347 unsigned diff;
1348 WARN_ON(cs_mode > 10);
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1375
1376 return 32 << (cs_mode - diff);
1377 }
1378 else {
1379 WARN_ON(cs_mode > 6);
1380 return 32 << cs_mode;
1381 }
1382}
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392static int f1x_early_channel_count(struct amd64_pvt *pvt)
1393{
1394 int i, j, channels = 0;
1395
1396
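	/* F10h: 128-bit (ganged) DCT mode means both channels are in use. */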
1397 if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1398 return 2;
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1409
1410
1411
1412
1413
1414
1415 for (i = 0; i < 2; i++) {
1416 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1417
1418 for (j = 0; j < 4; j++) {
1419 if (DBAM_DIMM(j, dbam) > 0) {
1420 channels++;
1421 break;
1422 }
1423 }
1424 }
1425
1426 if (channels > 2)
1427 channels = 2;
1428
1429 amd64_info("MCT channel count: %d\n", channels);
1430
1431 return channels;
1432}
1433
1434static int f17_early_channel_count(struct amd64_pvt *pvt)
1435{
1436 int i, channels = 0;
1437
1438
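	/* The SdpInit bit in SDP Control is clear for unused UMCs. */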
1439 for_each_umc(i)
1440 channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1441
1442 amd64_info("MCT channel count: %d\n", channels);
1443
1444 return channels;
1445}
1446
1447static int ddr3_cs_size(unsigned i, bool dct_width)
1448{
1449 unsigned shift = 0;
1450 int cs_size = 0;
1451
1452 if (i == 0 || i == 3 || i == 4)
1453 cs_size = -1;
1454 else if (i <= 2)
1455 shift = i;
1456 else if (i == 12)
1457 shift = 7;
1458 else if (!(i & 0x1))
1459 shift = i >> 1;
1460 else
1461 shift = (i + 1) >> 1;
1462
1463 if (cs_size != -1)
1464 cs_size = (128 * (1 << !!dct_width)) << shift;
1465
1466 return cs_size;
1467}
1468
1469static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1470{
1471 unsigned shift = 0;
1472 int cs_size = 0;
1473
1474 if (i < 4 || i == 6)
1475 cs_size = -1;
1476 else if (i == 12)
1477 shift = 7;
1478 else if (!(i & 0x1))
1479 shift = i >> 1;
1480 else
1481 shift = (i + 1) >> 1;
1482
1483 if (cs_size != -1)
1484 cs_size = rank_multiply * (128 << shift);
1485
1486 return cs_size;
1487}
1488
1489static int ddr4_cs_size(unsigned i)
1490{
1491 int cs_size = 0;
1492
1493 if (i == 0)
1494 cs_size = -1;
1495 else if (i == 1)
1496 cs_size = 1024;
1497 else
1498
1499 cs_size = 1024 * (1 << (i >> 1));
1500
1501 return cs_size;
1502}
1503
1504static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1505 unsigned cs_mode, int cs_mask_nr)
1506{
1507 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1508
1509 WARN_ON(cs_mode > 11);
1510
1511 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1512 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1513 else
1514 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1515}
1516
1517
1518
1519
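/*
 * F15h supports only 64bit DCT interfaces
 */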
1520static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1521 unsigned cs_mode, int cs_mask_nr)
1522{
1523 WARN_ON(cs_mode > 12);
1524
1525 return ddr3_cs_size(cs_mode, false);
1526}
1527
1528
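/* F15h M60h supports DDR4 mapping as well. */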
1529static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1530 unsigned cs_mode, int cs_mask_nr)
1531{
1532 int cs_size;
1533 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1534
1535 WARN_ON(cs_mode > 12);
1536
1537 if (pvt->dram_type == MEM_DDR4) {
1538 if (cs_mode > 9)
1539 return -1;
1540
1541 cs_size = ddr4_cs_size(cs_mode);
1542 } else if (pvt->dram_type == MEM_LRDDR3) {
1543 unsigned rank_multiply = dcsm & 0xf;
1544
1545 if (rank_multiply == 3)
1546 rank_multiply = 4;
1547 cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1548 } else {
1549
1550 if (cs_mode == 0x1)
1551 return -1;
1552
1553 cs_size = ddr3_cs_size(cs_mode, false);
1554 }
1555
1556 return cs_size;
1557}
1558
1559
1560
1561
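/*
 * F16h and F15h model 30h have only limited cs_modes.
 */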
1562static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1563 unsigned cs_mode, int cs_mask_nr)
1564{
1565 WARN_ON(cs_mode > 12);
1566
1567 if (cs_mode == 6 || cs_mode == 8 ||
1568 cs_mode == 9 || cs_mode == 12)
1569 return -1;
1570 else
1571 return ddr3_cs_size(cs_mode, false);
1572}
1573
1574static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1575 unsigned int cs_mode, int csrow_nr)
1576{
1577 u32 addr_mask_orig, addr_mask_deinterleaved;
1578 u32 msb, weight, num_zero_bits;
1579 int dimm, size = 0;
1580
1581
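	/* No Chip Selects are enabled. */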
1582 if (!cs_mode)
1583 return size;
1584
1585
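	/* Requested size of an even CS but none are enabled. */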
1586 if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1587 return size;
1588
1589
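	/* Requested size of an odd CS but none are enabled. */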
1590 if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1591 return size;
1592
1593
1594
1595
1596
1597
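	/*
	 * There is one mask per DIMM, and two Chip Selects per DIMM.
	 *	CS0 and CS1 -> DIMM0
	 *	CS2 and CS3 -> DIMM1
	 */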
1598 dimm = csrow_nr >> 1;
1599
1600
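	/* Asymmetric dual-rank DIMM support. */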
1601 if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1602 addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
1603 else
1604 addr_mask_orig = pvt->csels[umc].csmasks[dimm];
1605
1606
1607
1608
1609
1610
1611
1612
1613 msb = fls(addr_mask_orig) - 1;
1614 weight = hweight_long(addr_mask_orig);
1615 num_zero_bits = msb - weight;
1616
1617
1618 addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
1619
1620 edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1621 edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
1622 edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1623
1624
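	/* Register [31:1] = Address [39:9]. Size is in kBs here. */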
1625 size = (addr_mask_deinterleaved >> 2) + 1;
1626
1627
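	/* Return size in MBs. */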
1628 return size >> 10;
1629}
1630
1631static void read_dram_ctl_register(struct amd64_pvt *pvt)
1632{
1633
1634 if (pvt->fam == 0xf)
1635 return;
1636
1637 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1638 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1639 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1640
1641 edac_dbg(0, " DCTs operate in %s mode\n",
1642 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1643
1644 if (!dct_ganging_enabled(pvt))
1645 edac_dbg(0, " Address range split per DCT: %s\n",
1646 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1647
1648 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1649 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1650 (dct_memory_cleared(pvt) ? "yes" : "no"));
1651
1652 edac_dbg(0, " channel interleave: %s, "
1653 "interleave bits selector: 0x%x\n",
1654 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1655 dct_sel_interleave_addr(pvt));
1656 }
1657
1658 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1659}
1660
1661
1662
1663
1664
1665static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1666 u8 intlv_en, int num_dcts_intlv,
1667 u32 dct_sel)
1668{
1669 u8 channel = 0;
1670 u8 select;
1671
1672 if (!(intlv_en))
1673 return (u8)(dct_sel);
1674
1675 if (num_dcts_intlv == 2) {
1676 select = (sys_addr >> 8) & 0x3;
1677 channel = select ? 0x3 : 0;
1678 } else if (num_dcts_intlv == 4) {
1679 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1680 switch (intlv_addr) {
1681 case 0x4:
1682 channel = (sys_addr >> 8) & 0x3;
1683 break;
1684 case 0x5:
1685 channel = (sys_addr >> 9) & 0x3;
1686 break;
1687 }
1688 }
1689 return channel;
1690}
1691
1692
1693
1694
1695
1696static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1697 bool hi_range_sel, u8 intlv_en)
1698{
1699 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1700
1701 if (dct_ganging_enabled(pvt))
1702 return 0;
1703
1704 if (hi_range_sel)
1705 return dct_sel_high;
1706
1707
1708
1709
1710 if (dct_interleave_enabled(pvt)) {
1711 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1712
1713
1714 if (!intlv_addr)
1715 return sys_addr >> 6 & 1;
1716
1717 if (intlv_addr & 0x2) {
1718 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1719 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1720
1721 return ((sys_addr >> shift) & 1) ^ temp;
1722 }
1723
1724 if (intlv_addr & 0x4) {
1725 u8 shift = intlv_addr & 0x1 ? 9 : 8;
1726
1727 return (sys_addr >> shift) & 1;
1728 }
1729
1730 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1731 }
1732
1733 if (dct_high_range_enabled(pvt))
1734 return ~dct_sel_high & 1;
1735
1736 return 0;
1737}
1738
1739
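/* Convert the sys_addr to the normalized DCT address. */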
1740static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1741 u64 sys_addr, bool hi_rng,
1742 u32 dct_sel_base_addr)
1743{
1744 u64 chan_off;
1745 u64 dram_base = get_dram_base(pvt, range);
1746 u64 hole_off = f10_dhar_offset(pvt);
1747 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1748
1749 if (hi_rng) {
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761 if ((!(dct_sel_base_addr >> 16) ||
1762 dct_sel_base_addr < dhar_base(pvt)) &&
1763 dhar_valid(pvt) &&
1764 (sys_addr >= BIT_64(32)))
1765 chan_off = hole_off;
1766 else
1767 chan_off = dct_sel_base_off;
1768 } else {
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1779 chan_off = hole_off;
1780 else
1781 chan_off = dram_base;
1782 }
1783
1784 return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
1785}
1786
1787
1788
1789
1790
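/*
 * Check if the csrow passed in is marked as SPARED; if so, return the new
 * spare row instead.
 */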
1791static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1792{
1793 int tmp_cs;
1794
1795 if (online_spare_swap_done(pvt, dct) &&
1796 csrow == online_spare_bad_dramcs(pvt, dct)) {
1797
1798 for_each_chip_select(tmp_cs, dct, pvt) {
1799 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1800 csrow = tmp_cs;
1801 break;
1802 }
1803 }
1804 }
1805 return csrow;
1806}
1807
1808
1809
1810
1811
1812
1813
1814
1815
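/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'.
 *
 * Return: -EINVAL if not found, otherwise the matching chip-select row.
 */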
1816static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1817{
1818 struct mem_ctl_info *mci;
1819 struct amd64_pvt *pvt;
1820 u64 cs_base, cs_mask;
1821 int cs_found = -EINVAL;
1822 int csrow;
1823
1824 mci = edac_mc_find(nid);
1825 if (!mci)
1826 return cs_found;
1827
1828 pvt = mci->pvt_info;
1829
1830 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1831
1832 for_each_chip_select(csrow, dct, pvt) {
1833 if (!csrow_enabled(csrow, dct, pvt))
1834 continue;
1835
1836 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1837
1838 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1839 csrow, cs_base, cs_mask);
1840
1841 cs_mask = ~cs_mask;
1842
1843 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1844 (in_addr & cs_mask), (cs_base & cs_mask));
1845
1846 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1847 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1848 cs_found = csrow;
1849 break;
1850 }
1851 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1852
1853 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1854 break;
1855 }
1856 }
1857 return cs_found;
1858}
1859
1860
1861
1862
1863
1864
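/*
 * See F2x10C: a non-interleaved region (graphics framebuffer memory) may be
 * swapped with a region at the bottom of memory so that the GPU can use the
 * interleaved memory pointed to by the swap region.
 */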
1865static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1866{
1867 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1868
1869 if (pvt->fam == 0x10) {
1870
1871 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1872 return sys_addr;
1873 }
1874
1875 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1876
1877 if (!(swap_reg & 0x1))
1878 return sys_addr;
1879
1880 swap_base = (swap_reg >> 3) & 0x7f;
1881 swap_limit = (swap_reg >> 11) & 0x7f;
1882 rgn_size = (swap_reg >> 20) & 0x7f;
1883 tmp_addr = sys_addr >> 27;
1884
1885 if (!(sys_addr >> 34) &&
1886 (((tmp_addr >= swap_base) &&
1887 (tmp_addr <= swap_limit)) ||
1888 (tmp_addr < rgn_size)))
1889 return sys_addr ^ (u64)swap_base << 27;
1890
1891 return sys_addr;
1892}
1893
1894
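/* For a given @range, check if @sys_addr maps to a chip select on this node. */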
1895static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1896 u64 sys_addr, int *chan_sel)
1897{
1898 int cs_found = -EINVAL;
1899 u64 chan_addr;
1900 u32 dct_sel_base;
1901 u8 channel;
1902 bool high_range = false;
1903
1904 u8 node_id = dram_dst_node(pvt, range);
1905 u8 intlv_en = dram_intlv_en(pvt, range);
1906 u32 intlv_sel = dram_intlv_sel(pvt, range);
1907
1908 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1909 range, sys_addr, get_dram_limit(pvt, range));
1910
1911 if (dhar_valid(pvt) &&
1912 dhar_base(pvt) <= sys_addr &&
1913 sys_addr < BIT_64(32)) {
1914 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1915 sys_addr);
1916 return -EINVAL;
1917 }
1918
1919 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1920 return -EINVAL;
1921
1922 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1923
1924 dct_sel_base = dct_sel_baseaddr(pvt);
1925
1926
1927
1928
1929
1930 if (dct_high_range_enabled(pvt) &&
1931 !dct_ganging_enabled(pvt) &&
1932 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1933 high_range = true;
1934
1935 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1936
1937 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1938 high_range, dct_sel_base);
1939
1940
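	/* Remove the node interleave bits (in case of node interleaving). */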
1941 if (intlv_en)
1942 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1943 (chan_addr & 0xfff);
1944
1945
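	/* remove channel interleave */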
1946 if (dct_interleave_enabled(pvt) &&
1947 !dct_high_range_enabled(pvt) &&
1948 !dct_ganging_enabled(pvt)) {
1949
1950 if (dct_sel_interleave_addr(pvt) != 1) {
1951 if (dct_sel_interleave_addr(pvt) == 0x3)
1952
1953 chan_addr = ((chan_addr >> 10) << 9) |
1954 (chan_addr & 0x1ff);
1955 else
1956
1957 chan_addr = ((chan_addr >> 7) << 6) |
1958 (chan_addr & 0x3f);
1959 } else
1960
1961 chan_addr = ((chan_addr >> 13) << 12) |
1962 (chan_addr & 0xfff);
1963 }
1964
1965 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1966
1967 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1968
1969 if (cs_found >= 0)
1970 *chan_sel = channel;
1971
1972 return cs_found;
1973}
1974
1975static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1976 u64 sys_addr, int *chan_sel)
1977{
1978 int cs_found = -EINVAL;
1979 int num_dcts_intlv = 0;
1980 u64 chan_addr, chan_offset;
1981 u64 dct_base, dct_limit;
1982 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1983 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1984
1985 u64 dhar_offset = f10_dhar_offset(pvt);
1986 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1987 u8 node_id = dram_dst_node(pvt, range);
1988 u8 intlv_en = dram_intlv_en(pvt, range);
1989
1990 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
1991 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
1992
1993 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
1994 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
1995
1996 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1997 range, sys_addr, get_dram_limit(pvt, range));
1998
1999 if (!(get_dram_base(pvt, range) <= sys_addr) &&
2000 !(get_dram_limit(pvt, range) >= sys_addr))
2001 return -EINVAL;
2002
2003 if (dhar_valid(pvt) &&
2004 dhar_base(pvt) <= sys_addr &&
2005 sys_addr < BIT_64(32)) {
2006 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2007 sys_addr);
2008 return -EINVAL;
2009 }
2010
2011
2012 dct_base = (u64) dct_sel_baseaddr(pvt);
2013 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2014
2015 if (!(dct_cont_base_reg & BIT(0)) &&
2016 !(dct_base <= (sys_addr >> 27) &&
2017 dct_limit >= (sys_addr >> 27)))
2018 return -EINVAL;
2019
2020
2021 num_dcts_intlv = (int) hweight8(intlv_en);
2022
2023 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2024 return -EINVAL;
2025
2026 if (pvt->model >= 0x60)
2027 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2028 else
2029 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2030 num_dcts_intlv, dct_sel);
2031
2032
2033 if (channel > 3)
2034 return -EINVAL;
2035
2036 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2037
2038
2039 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2040 chan_offset = dhar_offset;
2041 else
2042 chan_offset = dct_base << 27;
2043
2044 chan_addr = sys_addr - chan_offset;
2045
2046
2047 if (num_dcts_intlv == 2) {
2048 if (intlv_addr == 0x4)
2049 chan_addr = ((chan_addr >> 9) << 8) |
2050 (chan_addr & 0xff);
2051 else if (intlv_addr == 0x5)
2052 chan_addr = ((chan_addr >> 10) << 9) |
2053 (chan_addr & 0x1ff);
2054 else
2055 return -EINVAL;
2056
2057 } else if (num_dcts_intlv == 4) {
2058 if (intlv_addr == 0x4)
2059 chan_addr = ((chan_addr >> 10) << 8) |
2060 (chan_addr & 0xff);
2061 else if (intlv_addr == 0x5)
2062 chan_addr = ((chan_addr >> 11) << 9) |
2063 (chan_addr & 0x1ff);
2064 else
2065 return -EINVAL;
2066 }
2067
2068 if (dct_offset_en) {
2069 amd64_read_pci_cfg(pvt->F1,
2070 DRAM_CONT_HIGH_OFF + (int) channel * 4,
2071 &tmp);
2072 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
2073 }
2074
2075 f15h_select_dct(pvt, channel);
2076
2077 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087 alias_channel = (channel == 3) ? 1 : channel;
2088
2089 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2090
2091 if (cs_found >= 0)
2092 *chan_sel = alias_channel;
2093
2094 return cs_found;
2095}
2096
2097static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2098 u64 sys_addr,
2099 int *chan_sel)
2100{
2101 int cs_found = -EINVAL;
2102 unsigned range;
2103
2104 for (range = 0; range < DRAM_RANGES; range++) {
2105 if (!dram_rw(pvt, range))
2106 continue;
2107
2108 if (pvt->fam == 0x15 && pvt->model >= 0x30)
2109 cs_found = f15_m30h_match_to_this_node(pvt, range,
2110 sys_addr,
2111 chan_sel);
2112
2113 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2114 (get_dram_limit(pvt, range) >= sys_addr)) {
2115 cs_found = f1x_match_to_this_node(pvt, range,
2116 sys_addr, chan_sel);
2117 if (cs_found >= 0)
2118 break;
2119 }
2120 }
2121 return cs_found;
2122}
2123
2124
2125
2126
2127
2128
2129
2130
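/*
 * Map a @sys_addr (usually an error address reported by the hardware) to a
 * NodeID, DCT (channel) and chip select (CSROW).
 */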
2131static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2132 struct err_info *err)
2133{
2134 struct amd64_pvt *pvt = mci->pvt_info;
2135
2136 error_address_to_page_and_offset(sys_addr, err);
2137
2138 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2139 if (err->csrow < 0) {
2140 err->err_code = ERR_CSROW;
2141 return;
2142 }
2143
2144
2145
2146
2147
2148
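	/*
	 * We need the syndromes for channel detection only when we're ganged.
	 * Otherwise @chan_sel already contains the channel at this point.
	 */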
2149 if (dct_ganging_enabled(pvt))
2150 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2151}
2152
2153
2154
2155
2156
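/*
 * Debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs.
 */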
2157static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2158{
2159 int dimm, size0, size1;
2160 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2161 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
2162
2163 if (pvt->fam == 0xf) {
2164
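		/* K8 families < revF not supported yet */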
2165 if (pvt->ext_model < K8_REV_F)
2166 return;
2167 else
2168 WARN_ON(ctrl != 0);
2169 }
2170
2171 if (pvt->fam == 0x10) {
2172 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2173 : pvt->dbam0;
2174 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2175 pvt->csels[1].csbases :
2176 pvt->csels[0].csbases;
2177 } else if (ctrl) {
2178 dbam = pvt->dbam0;
2179 dcsb = pvt->csels[1].csbases;
2180 }
2181 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2182 ctrl, dbam);
2183
2184 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2185
2186
2187 for (dimm = 0; dimm < 4; dimm++) {
2188
2189 size0 = 0;
2190 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2191
2192
2193
2194
2195
2196
2197 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2198 DBAM_DIMM(dimm, dbam),
2199 dimm);
2200
2201 size1 = 0;
2202 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2203 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2204 DBAM_DIMM(dimm, dbam),
2205 dimm);
2206
2207 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2208 dimm * 2, size0,
2209 dimm * 2 + 1, size1);
2210 }
2211}
2212
2213static struct amd64_family_type family_types[] = {
2214 [K8_CPUS] = {
2215 .ctl_name = "K8",
2216 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2217 .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2218 .max_mcs = 2,
2219 .ops = {
2220 .early_channel_count = k8_early_channel_count,
2221 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
2222 .dbam_to_cs = k8_dbam_to_chip_select,
2223 }
2224 },
2225 [F10_CPUS] = {
2226 .ctl_name = "F10h",
2227 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2228 .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2229 .max_mcs = 2,
2230 .ops = {
2231 .early_channel_count = f1x_early_channel_count,
2232 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2233 .dbam_to_cs = f10_dbam_to_chip_select,
2234 }
2235 },
2236 [F15_CPUS] = {
2237 .ctl_name = "F15h",
2238 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2239 .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2240 .max_mcs = 2,
2241 .ops = {
2242 .early_channel_count = f1x_early_channel_count,
2243 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2244 .dbam_to_cs = f15_dbam_to_chip_select,
2245 }
2246 },
2247 [F15_M30H_CPUS] = {
2248 .ctl_name = "F15h_M30h",
2249 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2250 .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2251 .max_mcs = 2,
2252 .ops = {
2253 .early_channel_count = f1x_early_channel_count,
2254 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2255 .dbam_to_cs = f16_dbam_to_chip_select,
2256 }
2257 },
2258 [F15_M60H_CPUS] = {
2259 .ctl_name = "F15h_M60h",
2260 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2261 .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2262 .max_mcs = 2,
2263 .ops = {
2264 .early_channel_count = f1x_early_channel_count,
2265 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2266 .dbam_to_cs = f15_m60h_dbam_to_chip_select,
2267 }
2268 },
2269 [F16_CPUS] = {
2270 .ctl_name = "F16h",
2271 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2272 .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2273 .max_mcs = 2,
2274 .ops = {
2275 .early_channel_count = f1x_early_channel_count,
2276 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2277 .dbam_to_cs = f16_dbam_to_chip_select,
2278 }
2279 },
2280 [F16_M30H_CPUS] = {
2281 .ctl_name = "F16h_M30h",
2282 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2283 .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2284 .max_mcs = 2,
2285 .ops = {
2286 .early_channel_count = f1x_early_channel_count,
2287 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2288 .dbam_to_cs = f16_dbam_to_chip_select,
2289 }
2290 },
2291 [F17_CPUS] = {
2292 .ctl_name = "F17h",
2293 .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2294 .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2295 .max_mcs = 2,
2296 .ops = {
2297 .early_channel_count = f17_early_channel_count,
2298 .dbam_to_cs = f17_addr_mask_to_cs_size,
2299 }
2300 },
2301 [F17_M30H_CPUS] = {
2302 .ctl_name = "F17h_M30h",
2303 .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2304 .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2305 .max_mcs = 8,
2306 .ops = {
2307 .early_channel_count = f17_early_channel_count,
2308 .dbam_to_cs = f17_addr_mask_to_cs_size,
2309 }
2310 },
2311 [F17_M10H_CPUS] = {
2312 .ctl_name = "F17h_M10h",
2313 .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2314 .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2315 .max_mcs = 2,
2316 .ops = {
2317 .early_channel_count = f17_early_channel_count,
2318 .dbam_to_cs = f17_addr_mask_to_cs_size,
2319 }
2320 },
2321 [F19_CPUS] = {
2322 .ctl_name = "F19h",
2323 .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
2324 .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
2325 .max_mcs = 8,
2326 .ops = {
2327 .early_channel_count = f17_early_channel_count,
2328 .dbam_to_cs = f17_addr_mask_to_cs_size,
2329 }
2330 },
2331};
2332
2333
2334
2335
2336
2337
2338
2339
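/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables.  The modified syndrome search
 * algorithm uses them to find the symbol in error and thus the DIMM.
 */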
2340static const u16 x4_vectors[] = {
2341 0x2f57, 0x1afe, 0x66cc, 0xdd88,
2342 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2343 0x0001, 0x0002, 0x0004, 0x0008,
2344 0x1013, 0x3032, 0x4044, 0x8088,
2345 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2346 0x4857, 0xc4fe, 0x13cc, 0x3288,
2347 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2348 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2349 0x15c1, 0x2a42, 0x89ac, 0x4758,
2350 0x2b03, 0x1602, 0x4f0c, 0xca08,
2351 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2352 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2353 0x2b87, 0x164e, 0x642c, 0xdc18,
2354 0x40b9, 0x80de, 0x1094, 0x20e8,
2355 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2356 0x11c1, 0x2242, 0x84ac, 0x4c58,
2357 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2358 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2359 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2360 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2361 0x16b3, 0x3d62, 0x4f34, 0x8518,
2362 0x1e2f, 0x391a, 0x5cac, 0xf858,
2363 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2364 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2365 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2366 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2367 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2368 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2369 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2370 0x185d, 0x2ca6, 0x7914, 0x9e28,
2371 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2372 0x4199, 0x82ee, 0x19f4, 0x2e58,
2373 0x4807, 0xc40e, 0x130c, 0x3208,
2374 0x1905, 0x2e0a, 0x5804, 0xac08,
2375 0x213f, 0x132a, 0xadfc, 0x5ba8,
2376 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2377};
2378
2379static const u16 x8_vectors[] = {
2380 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2381 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2382 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2383 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2384 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2385 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2386 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2387 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2388 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2389 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2390 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2391 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2392 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2393 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2394 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2395 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2396 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2397 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2398 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2399};
2400
2401static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2402 unsigned v_dim)
2403{
2404 unsigned int i, err_sym;
2405
2406 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2407 u16 s = syndrome;
2408 unsigned v_idx = err_sym * v_dim;
2409 unsigned v_end = (err_sym + 1) * v_dim;
2410
		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if the bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and the bit is set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move on to the next symbol */
				break;
2430 }
2431 }
2432
2433 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2434 return -1;
2435}
2436
2437static int map_err_sym_to_channel(int err_sym, int sym_size)
2438{
2439 if (sym_size == 4)
2440 switch (err_sym) {
2441 case 0x20:
2442 case 0x21:
2443 return 0;
2444 break;
2445 case 0x22:
2446 case 0x23:
2447 return 1;
2448 break;
2449 default:
2450 return err_sym >> 4;
2451 break;
2452 }
2453
2454 else
2455 switch (err_sym) {
2456
2457 case 0x10:
2458 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2459 err_sym);
2460 return -1;
2461 break;
2462
2463 case 0x11:
2464 return 0;
2465 break;
2466 case 0x12:
2467 return 1;
2468 break;
2469 default:
2470 return err_sym >> 3;
2471 break;
2472 }
2473 return -1;
2474}
2475
2476static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2477{
2478 struct amd64_pvt *pvt = mci->pvt_info;
2479 int err_sym = -1;
2480
2481 if (pvt->ecc_sym_sz == 8)
2482 err_sym = decode_syndrome(syndrome, x8_vectors,
2483 ARRAY_SIZE(x8_vectors),
2484 pvt->ecc_sym_sz);
2485 else if (pvt->ecc_sym_sz == 4)
2486 err_sym = decode_syndrome(syndrome, x4_vectors,
2487 ARRAY_SIZE(x4_vectors),
2488 pvt->ecc_sym_sz);
2489 else {
2490 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2491 return err_sym;
2492 }
2493
2494 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2495}
2496
2497static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2498 u8 ecc_type)
2499{
2500 enum hw_event_mc_err_type err_type;
2501 const char *string;
2502
2503 if (ecc_type == 2)
2504 err_type = HW_EVENT_ERR_CORRECTED;
2505 else if (ecc_type == 1)
2506 err_type = HW_EVENT_ERR_UNCORRECTED;
2507 else if (ecc_type == 3)
2508 err_type = HW_EVENT_ERR_DEFERRED;
2509 else {
2510 WARN(1, "Something is rotten in the state of Denmark.\n");
2511 return;
2512 }
2513
2514 switch (err->err_code) {
2515 case DECODE_OK:
2516 string = "";
2517 break;
2518 case ERR_NODE:
2519 string = "Failed to map error addr to a node";
2520 break;
2521 case ERR_CSROW:
2522 string = "Failed to map error addr to a csrow";
2523 break;
2524 case ERR_CHANNEL:
2525 string = "Unknown syndrome - possible error reporting race";
2526 break;
2527 case ERR_SYND:
2528 string = "MCA_SYND not valid - unknown syndrome and csrow";
2529 break;
2530 case ERR_NORM_ADDR:
2531 string = "Cannot decode normalized address";
2532 break;
2533 default:
2534 string = "WTF error";
2535 break;
2536 }
2537
2538 edac_mc_handle_error(err_type, mci, 1,
2539 err->page, err->offset, err->syndrome,
2540 err->csrow, err->channel, -1,
2541 string, "");
2542}
2543
2544static inline void decode_bus_error(int node_id, struct mce *m)
2545{
2546 struct mem_ctl_info *mci;
2547 struct amd64_pvt *pvt;
2548 u8 ecc_type = (m->status >> 45) & 0x3;
2549 u8 xec = XEC(m->status, 0x1f);
2550 u16 ec = EC(m->status);
2551 u64 sys_addr;
2552 struct err_info err;
2553
2554 mci = edac_mc_find(node_id);
2555 if (!mci)
2556 return;
2557
2558 pvt = mci->pvt_info;
2559
2560
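	/* Bail out early if this was an 'observed' error. */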
2561 if (PP(ec) == NBSL_PP_OBS)
2562 return;
2563
2564
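	/* Handle ECC extended errors only. */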
2565 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2566 return;
2567
2568 memset(&err, 0, sizeof(err));
2569
2570 sys_addr = get_error_address(pvt, m);
2571
2572 if (ecc_type == 2)
2573 err.syndrome = extract_syndrome(m->status);
2574
2575 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2576
2577 __log_ecc_error(mci, &err, ecc_type);
2578}
2579
2588
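/*
 * The UMC channel a bank maps to is encoded in its IPID: the low 32 bits hold
 * the bank's instance ID, and bits [31:20] of that value give the channel
 * number.
 */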
2589static int find_umc_channel(struct mce *m)
2590{
2591 return (m->ipid & GENMASK(31, 0)) >> 20;
2592}
2593
2594static void decode_umc_error(int node_id, struct mce *m)
2595{
2596 u8 ecc_type = (m->status >> 45) & 0x3;
2597 struct mem_ctl_info *mci;
2598 struct amd64_pvt *pvt;
2599 struct err_info err;
2600 u64 sys_addr;
2601
2602 mci = edac_mc_find(node_id);
2603 if (!mci)
2604 return;
2605
2606 pvt = mci->pvt_info;
2607
2608 memset(&err, 0, sizeof(err));
2609
2610 if (m->status & MCI_STATUS_DEFERRED)
2611 ecc_type = 3;
2612
2613 err.channel = find_umc_channel(m);
2614
2615 if (!(m->status & MCI_STATUS_SYNDV)) {
2616 err.err_code = ERR_SYND;
2617 goto log_error;
2618 }
2619
2620 if (ecc_type == 2) {
2621 u8 length = (m->synd >> 18) & 0x3f;
2622
2623 if (length)
2624 err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2625 else
2626 err.err_code = ERR_CHANNEL;
2627 }
2628
2629 err.csrow = m->synd & 0x7;
2630
2631 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2632 err.err_code = ERR_NORM_ADDR;
2633 goto log_error;
2634 }
2635
2636 error_address_to_page_and_offset(sys_addr, &err);
2637
2638log_error:
2639 __log_ecc_error(mci, &err, ecc_type);
2640}
2641
2642
2643
2644
2645
2646
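/*
 * Use pvt->F3 (the already-reserved function 3 PCI device) to find the two
 * sibling PCI functions of this memory controller: F0/F6 on UMC-based
 * systems, F1/F2 otherwise. Returns 0 on success, -ENODEV on failure.
 */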
2647static int
2648reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2649{
2650 if (pvt->umc) {
2651 pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2652 if (!pvt->F0) {
2653 amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
2654 return -ENODEV;
2655 }
2656
2657 pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2658 if (!pvt->F6) {
2659 pci_dev_put(pvt->F0);
2660 pvt->F0 = NULL;
2661
2662 amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2663 return -ENODEV;
2664 }
2665
2666 edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2667 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2668 edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2669
2670 return 0;
2671 }
2672
2673
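	/* Reserve the DRAM address map device (function 1). */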
2674 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2675 if (!pvt->F1) {
2676 amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
2677 return -ENODEV;
2678 }
2679
2680
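	/* Reserve the DCT configuration device (function 2). */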
2681 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2682 if (!pvt->F2) {
2683 pci_dev_put(pvt->F1);
2684 pvt->F1 = NULL;
2685
2686 amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2687 return -ENODEV;
2688 }
2689
2690 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2691 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2692 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2693
2694 return 0;
2695}
2696
2697static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2698{
2699 if (pvt->umc) {
2700 pci_dev_put(pvt->F0);
2701 pci_dev_put(pvt->F6);
2702 } else {
2703 pci_dev_put(pvt->F1);
2704 pci_dev_put(pvt->F2);
2705 }
2706}
2707
2708static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2709{
2710 pvt->ecc_sym_sz = 4;
2711
2712 if (pvt->umc) {
2713 u8 i;
2714
2715 for_each_umc(i) {
2716
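			/* Check initialized channels only. */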
2717 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
2718 if (pvt->umc[i].ecc_ctrl & BIT(9)) {
2719 pvt->ecc_sym_sz = 16;
2720 return;
2721 } else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
2722 pvt->ecc_sym_sz = 8;
2723 return;
2724 }
2725 }
2726 }
2727 } else if (pvt->fam >= 0x10) {
2728 u32 tmp;
2729
2730 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2731
2732 if (pvt->fam != 0x16)
2733 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2734
2735
2736 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2737 pvt->ecc_sym_sz = 8;
2738 }
2739}
2740
2741
2742
2743
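/* Read the UMC registers of this node over the SMN interface. */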
2744static void __read_mc_regs_df(struct amd64_pvt *pvt)
2745{
2746 u8 nid = pvt->mc_node_id;
2747 struct amd64_umc *umc;
2748 u32 i, umc_base;
2749
2750
2751 for_each_umc(i) {
2752
2753 umc_base = get_umc_base(i);
2754 umc = &pvt->umc[i];
2755
2756 amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
2757 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2758 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2759 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
2760 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
2761 }
2762}
2763
2764
2765
2766
2767
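/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs).
 */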
2768static void read_mc_regs(struct amd64_pvt *pvt)
2769{
2770 unsigned int range;
2771 u64 msr_val;
2772
2773
2774
2775
2776
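	/* Retrieve TOP_MEM and, if enabled, TOP_MEM2. */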
2777 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2778 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2779
2780
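	/* Check first whether TOP_MEM2 is enabled: */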
2781 rdmsrl(MSR_K8_SYSCFG, msr_val);
2782 if (msr_val & BIT(21)) {
2783 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2784 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2785 } else {
2786 edac_dbg(0, " TOP_MEM2 disabled\n");
2787 }
2788
2789 if (pvt->umc) {
2790 __read_mc_regs_df(pvt);
2791 amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2792
2793 goto skip;
2794 }
2795
2796 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2797
2798 read_dram_ctl_register(pvt);
2799
2800 for (range = 0; range < DRAM_RANGES; range++) {
2801 u8 rw;
2802
2803
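		/* Read base/limit settings for this DRAM range. */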
2804 read_dram_base_limit_regs(pvt, range);
2805
2806 rw = dram_rw(pvt, range);
2807 if (!rw)
2808 continue;
2809
2810 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2811 range,
2812 get_dram_base(pvt, range),
2813 get_dram_limit(pvt, range));
2814
2815 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2816 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2817 (rw & 0x1) ? "R" : "-",
2818 (rw & 0x2) ? "W" : "-",
2819 dram_intlv_sel(pvt, range),
2820 dram_dst_node(pvt, range));
2821 }
2822
2823 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2824 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2825
2826 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2827
2828 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2829 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2830
2831 if (!dct_ganging_enabled(pvt)) {
2832 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2833 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2834 }
2835
2836skip:
2837 read_dct_base_mask(pvt);
2838
2839 determine_memory_type(pvt);
2840 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2841
2842 determine_ecc_sym_sz(pvt);
2843
2844 dump_misc_regs(pvt);
2845}
2880
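/*
 * Return the number of pages behind chip-select row @csrow_nr_orig on
 * DCT/channel @dct, derived from the DBAM register (pre-UMC families) or the
 * UMC chip-select mode (family 17h and later).
 */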
2881static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2882{
2883 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2884 int csrow_nr = csrow_nr_orig;
2885 u32 cs_mode, nr_pages;
2886
2887 if (!pvt->umc) {
2888 csrow_nr >>= 1;
2889 cs_mode = DBAM_DIMM(csrow_nr, dbam);
2890 } else {
2891 cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
2892 }
2893
2894 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2895 nr_pages <<= 20 - PAGE_SHIFT;
2896
2897 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2898 csrow_nr_orig, dct, cs_mode);
2899 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2900
2901 return nr_pages;
2902}
2903
2904static int init_csrows_df(struct mem_ctl_info *mci)
2905{
2906 struct amd64_pvt *pvt = mci->pvt_info;
2907 enum edac_type edac_mode = EDAC_NONE;
2908 enum dev_type dev_type = DEV_UNKNOWN;
2909 struct dimm_info *dimm;
2910 int empty = 1;
2911 u8 umc, cs;
2912
2913 if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
2914 edac_mode = EDAC_S16ECD16ED;
2915 dev_type = DEV_X16;
2916 } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
2917 edac_mode = EDAC_S8ECD8ED;
2918 dev_type = DEV_X8;
2919 } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
2920 edac_mode = EDAC_S4ECD4ED;
2921 dev_type = DEV_X4;
2922 } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
2923 edac_mode = EDAC_SECDED;
2924 }
2925
2926 for_each_umc(umc) {
2927 for_each_chip_select(cs, umc, pvt) {
2928 if (!csrow_enabled(cs, umc, pvt))
2929 continue;
2930
2931 empty = 0;
2932 dimm = mci->csrows[cs]->channels[umc]->dimm;
2933
2934 edac_dbg(1, "MC node: %d, csrow: %d\n",
2935 pvt->mc_node_id, cs);
2936
2937 dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
2938 dimm->mtype = pvt->dram_type;
2939 dimm->edac_mode = edac_mode;
2940 dimm->dtype = dev_type;
2941 }
2942 }
2943
2944 return empty;
2945}
2946
2947
2948
2949
2950
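/*
 * Initialize the csrow attribute instances from the chip-select configuration
 * found in pvt->csels[].
 */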
2951static int init_csrows(struct mem_ctl_info *mci)
2952{
2953 struct amd64_pvt *pvt = mci->pvt_info;
2954 enum edac_type edac_mode = EDAC_NONE;
2955 struct csrow_info *csrow;
2956 struct dimm_info *dimm;
2957 int i, j, empty = 1;
2958 int nr_pages = 0;
2959 u32 val;
2960
2961 if (pvt->umc)
2962 return init_csrows_df(mci);
2963
2964 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2965
2966 pvt->nbcfg = val;
2967
2968 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2969 pvt->mc_node_id, val,
2970 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2971
2972
2973
2974
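	/* We iterate over DCT0 here but look at DCT1 in parallel where the family has one. */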
2975 for_each_chip_select(i, 0, pvt) {
2976 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2977 bool row_dct1 = false;
2978
2979 if (pvt->fam != 0xf)
2980 row_dct1 = !!csrow_enabled(i, 1, pvt);
2981
2982 if (!row_dct0 && !row_dct1)
2983 continue;
2984
2985 csrow = mci->csrows[i];
2986 empty = 0;
2987
2988 edac_dbg(1, "MC node: %d, csrow: %d\n",
2989 pvt->mc_node_id, i);
2990
2991 if (row_dct0) {
2992 nr_pages = get_csrow_nr_pages(pvt, 0, i);
2993 csrow->channels[0]->dimm->nr_pages = nr_pages;
2994 }
2995
2996
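		/* K8 has only one DCT. */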
2997 if (pvt->fam != 0xf && row_dct1) {
2998 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2999
3000 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3001 nr_pages += row_dct1_pages;
3002 }
3003
3004 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3005
3006
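		/* Determine whether chipkill, plain ECC or no ECC is operating. */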
3007 if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3008 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3009 ? EDAC_S4ECD4ED
3010 : EDAC_SECDED;
3011 }
3012
3013 for (j = 0; j < pvt->channel_count; j++) {
3014 dimm = csrow->channels[j]->dimm;
3015 dimm->mtype = pvt->dram_type;
3016 dimm->edac_mode = edac_mode;
3017 }
3018 }
3019
3020 return empty;
3021}
3022
3023
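/* Collect all online CPUs that belong to node @nid into @mask. */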
3024static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3025{
3026 int cpu;
3027
3028 for_each_online_cpu(cpu)
3029 if (amd_get_nb_id(cpu) == nid)
3030 cpumask_set_cpu(cpu, mask);
3031}
3032
3033
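/* Check whether the NB MCE bank (MCG_CTL[NBE]) is enabled on all CPUs of node @nid. */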
3034static bool nb_mce_bank_enabled_on_node(u16 nid)
3035{
3036 cpumask_var_t mask;
3037 int cpu, nbe;
3038 bool ret = false;
3039
3040 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3041 amd64_warn("%s: Error allocating mask\n", __func__);
3042 return false;
3043 }
3044
3045 get_cpus_on_this_dct_cpumask(mask, nid);
3046
3047 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3048
3049 for_each_cpu(cpu, mask) {
3050 struct msr *reg = per_cpu_ptr(msrs, cpu);
3051 nbe = reg->l & MSR_MCGCTL_NBE;
3052
3053 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3054 cpu, reg->q,
3055 (nbe ? "enabled" : "disabled"));
3056
3057 if (!nbe)
3058 goto out;
3059 }
3060 ret = true;
3061
3062out:
3063 free_cpumask_var(mask);
3064 return ret;
3065}
3066
3067static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3068{
3069 cpumask_var_t cmask;
3070 int cpu;
3071
3072 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3073 amd64_warn("%s: error allocating mask\n", __func__);
3074 return -ENOMEM;
3075 }
3076
3077 get_cpus_on_this_dct_cpumask(cmask, nid);
3078
3079 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3080
3081 for_each_cpu(cpu, cmask) {
3082
3083 struct msr *reg = per_cpu_ptr(msrs, cpu);
3084
3085 if (on) {
3086 if (reg->l & MSR_MCGCTL_NBE)
3087 s->flags.nb_mce_enable = 1;
3088
3089 reg->l |= MSR_MCGCTL_NBE;
3090 } else {
3091
3092
3093
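			/*
			 * Turn off NB MCE reporting only if it was off before
			 * we enabled it.
			 */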
3094 if (!s->flags.nb_mce_enable)
3095 reg->l &= ~MSR_MCGCTL_NBE;
3096 }
3097 }
3098 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3099
3100 free_cpumask_var(cmask);
3101
3102 return 0;
3103}
3104
3105static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3106 struct pci_dev *F3)
3107{
3108 bool ret = true;
3109 u32 value, mask = 0x3;
3110
3111 if (toggle_ecc_err_reporting(s, nid, ON)) {
3112 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3113 return false;
3114 }
3115
3116 amd64_read_pci_cfg(F3, NBCTL, &value);
3117
3118 s->old_nbctl = value & mask;
3119 s->nbctl_valid = true;
3120
3121 value |= mask;
3122 amd64_write_pci_cfg(F3, NBCTL, value);
3123
3124 amd64_read_pci_cfg(F3, NBCFG, &value);
3125
3126 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3127 nid, value, !!(value & NBCFG_ECC_ENABLE));
3128
3129 if (!(value & NBCFG_ECC_ENABLE)) {
3130 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3131
3132 s->flags.nb_ecc_prev = 0;
3133
3134
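		/* Attempt to turn on DRAM ECC Enable. */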
3135 value |= NBCFG_ECC_ENABLE;
3136 amd64_write_pci_cfg(F3, NBCFG, value);
3137
3138 amd64_read_pci_cfg(F3, NBCFG, &value);
3139
3140 if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
3143 ret = false;
3144 } else {
3145 amd64_info("Hardware accepted DRAM ECC Enable\n");
3146 }
3147 } else {
3148 s->flags.nb_ecc_prev = 1;
3149 }
3150
3151 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3152 nid, value, !!(value & NBCFG_ECC_ENABLE));
3153
3154 return ret;
3155}
3156
3157static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3158 struct pci_dev *F3)
3159{
3160 u32 value, mask = 0x3;
3161
3162 if (!s->nbctl_valid)
3163 return;
3164
3165 amd64_read_pci_cfg(F3, NBCTL, &value);
3166 value &= ~mask;
3167 value |= s->old_nbctl;
3168
3169 amd64_write_pci_cfg(F3, NBCTL, value);
3170
3171
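	/* Clear DRAM ECC Enable again if it was disabled before we forced it on. */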
3172 if (!s->flags.nb_ecc_prev) {
3173 amd64_read_pci_cfg(F3, NBCFG, &value);
3174 value &= ~NBCFG_ECC_ENABLE;
3175 amd64_write_pci_cfg(F3, NBCFG, value);
3176 }
3177
3178
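	/* Restore the NB MCE reporting bit in MCGCTL. */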
3179 if (toggle_ecc_err_reporting(s, nid, OFF))
3180 amd64_warn("Error restoring NB MCGCTL settings!\n");
3181}
3182
3183
3184
3185
3186
3187
3188
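/*
 * The BIOS must have DRAM ECC enabled before EDAC takes over ECC error
 * handling; 'ecc_enable_override' can be used to force-enable it from here.
 */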
3189static const char *ecc_msg =
3190 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
3191 " Either enable ECC checking or force module loading by setting "
3192 "'ecc_enable_override'.\n"
3193 " (Note that use of the override may cause unknown side effects.)\n";
3194
3195static bool ecc_enabled(struct pci_dev *F3, u16 nid)
3196{
3197 bool nb_mce_en = false;
3198 u8 ecc_en = 0, i;
3199 u32 value;
3200
3201 if (boot_cpu_data.x86 >= 0x17) {
3202 u8 umc_en_mask = 0, ecc_en_mask = 0;
3203
3204 for_each_umc(i) {
3205 u32 base = get_umc_base(i);
3206
3207
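			/* Skip UMCs that cannot be read or are not initialized. */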
3208 if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
3209 continue;
3210
3211 if (!(value & UMC_SDP_INIT))
3212 continue;
3213
3214 umc_en_mask |= BIT(i);
3215
3216 if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
3217 continue;
3218
3219 if (value & UMC_ECC_ENABLED)
3220 ecc_en_mask |= BIT(i);
3221 }
3222
3223
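		/* ECC counts as enabled only if it is enabled on every active UMC. */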
3224 if (umc_en_mask)
3225 ecc_en = umc_en_mask == ecc_en_mask;
3226 else
3227 edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3228
3229
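		/* Assume the UMC MCA banks are enabled. */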
3230 nb_mce_en = true;
3231 } else {
3232 amd64_read_pci_cfg(F3, NBCFG, &value);
3233
3234 ecc_en = !!(value & NBCFG_ECC_ENABLE);
3235
3236 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3237 if (!nb_mce_en)
3238 edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3239 MSR_IA32_MCG_CTL, nid);
3240 }
3241
3242 amd64_info("Node %d: DRAM ECC %s.\n",
3243 nid, (ecc_en ? "enabled" : "disabled"));
3244
3245 if (!ecc_en || !nb_mce_en) {
3246 amd64_info("%s", ecc_msg);
3247 return false;
3248 }
3249 return true;
3250}
3251
3252static inline void
3253f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3254{
3255 u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3256
3257 for_each_umc(i) {
3258 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3259 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3260 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3261
3262 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3263 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3264 }
3265 }
3266
3267
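	/* Set chipkill only if ECC is enabled: */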
3268 if (ecc_en) {
3269 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3270
3271 if (!cpk_en)
3272 return;
3273
3274 if (dev_x4)
3275 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3276 else if (dev_x16)
3277 mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3278 else
3279 mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3280 }
3281}
3282
3283static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
3284{
3285 struct amd64_pvt *pvt = mci->pvt_info;
3286
3287 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3288 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3289
3290 if (pvt->umc) {
3291 f17h_determine_edac_ctl_cap(mci, pvt);
3292 } else {
3293 if (pvt->nbcap & NBCAP_SECDED)
3294 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3295
3296 if (pvt->nbcap & NBCAP_CHIPKILL)
3297 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3298 }
3299
3300 mci->edac_cap = determine_edac_cap(pvt);
3301 mci->mod_name = EDAC_MOD_STR;
3302 mci->ctl_name = fam_type->ctl_name;
3303 mci->dev_name = pci_name(pvt->F3);
3304 mci->ctl_page_to_phys = NULL;
3305
3306
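	/* memory scrubber interface */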
3307 mci->set_sdram_scrub_rate = set_scrub_rate;
3308 mci->get_sdram_scrub_rate = get_scrub_rate;
3309}
3310
3311
3312
3313
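/*
 * Pick the family descriptor and low-level ops matching this CPU; returns
 * NULL for unsupported families.
 */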
3314static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3315{
3316 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3317 pvt->stepping = boot_cpu_data.x86_stepping;
3318 pvt->model = boot_cpu_data.x86_model;
3319 pvt->fam = boot_cpu_data.x86;
3320
3321 switch (pvt->fam) {
3322 case 0xf:
3323 fam_type = &family_types[K8_CPUS];
3324 pvt->ops = &family_types[K8_CPUS].ops;
3325 break;
3326
3327 case 0x10:
3328 fam_type = &family_types[F10_CPUS];
3329 pvt->ops = &family_types[F10_CPUS].ops;
3330 break;
3331
3332 case 0x15:
3333 if (pvt->model == 0x30) {
3334 fam_type = &family_types[F15_M30H_CPUS];
3335 pvt->ops = &family_types[F15_M30H_CPUS].ops;
3336 break;
3337 } else if (pvt->model == 0x60) {
3338 fam_type = &family_types[F15_M60H_CPUS];
3339 pvt->ops = &family_types[F15_M60H_CPUS].ops;
3340 break;
3341 }
3342
3343 fam_type = &family_types[F15_CPUS];
3344 pvt->ops = &family_types[F15_CPUS].ops;
3345 break;
3346
3347 case 0x16:
3348 if (pvt->model == 0x30) {
3349 fam_type = &family_types[F16_M30H_CPUS];
3350 pvt->ops = &family_types[F16_M30H_CPUS].ops;
3351 break;
3352 }
3353 fam_type = &family_types[F16_CPUS];
3354 pvt->ops = &family_types[F16_CPUS].ops;
3355 break;
3356
3357 case 0x17:
3358 if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3359 fam_type = &family_types[F17_M10H_CPUS];
3360 pvt->ops = &family_types[F17_M10H_CPUS].ops;
3361 break;
3362 } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3363 fam_type = &family_types[F17_M30H_CPUS];
3364 pvt->ops = &family_types[F17_M30H_CPUS].ops;
3365 break;
3366 }
3367 fam_type = &family_types[F17_CPUS];
3368 pvt->ops = &family_types[F17_CPUS].ops;
3369 break;
3370
3371 case 0x19:
3372 fam_type = &family_types[F19_CPUS];
3373 pvt->ops = &family_types[F19_CPUS].ops;
3374 family_types[F19_CPUS].ctl_name = "F19h";
3375 break;
3376
3377 default:
3378 amd64_err("Unsupported family!\n");
3379 return NULL;
3380 }
3381
3382 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
3383 (pvt->fam == 0xf ?
3384 (pvt->ext_model >= K8_REV_F ? "revF or later "
3385 : "revE or earlier ")
3386 : ""), pvt->mc_node_id);
3387 return fam_type;
3388}
3389
3390static const struct attribute_group *amd64_edac_attr_groups[] = {
3391#ifdef CONFIG_EDAC_DEBUG
3392 &amd64_edac_dbg_group,
3393#endif
3394#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
3395 &amd64_edac_inj_group,
3396#endif
3397 NULL
3398};
3399
3400static int hw_info_get(struct amd64_pvt *pvt)
3401{
3402 u16 pci_id1, pci_id2;
3403 int ret = -EINVAL;
3404
3405 if (pvt->fam >= 0x17) {
3406 pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
3407 if (!pvt->umc)
3408 return -ENOMEM;
3409
3410 pci_id1 = fam_type->f0_id;
3411 pci_id2 = fam_type->f6_id;
3412 } else {
3413 pci_id1 = fam_type->f1_id;
3414 pci_id2 = fam_type->f2_id;
3415 }
3416
3417 ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3418 if (ret)
3419 return ret;
3420
3421 read_mc_regs(pvt);
3422
3423 return 0;
3424}
3425
3426static void hw_info_put(struct amd64_pvt *pvt)
3427{
3428 if (pvt->F0 || pvt->F1)
3429 free_mc_sibling_devs(pvt);
3430
3431 kfree(pvt->umc);
3432}
3433
3434static int init_one_instance(struct amd64_pvt *pvt)
3435{
3436 struct mem_ctl_info *mci = NULL;
3437 struct edac_mc_layer layers[2];
3438 int ret = -EINVAL;
3439
3440
3441
3442
3443
3444
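	/*
	 * Determine how many memory channels there are; this is used to size
	 * the dynamic tables in the 'mci' structure.
	 */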
3445 pvt->channel_count = pvt->ops->early_channel_count(pvt);
3446 if (pvt->channel_count < 0)
3447 return ret;
3448
3449 ret = -ENOMEM;
3450 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3451 layers[0].size = pvt->csels[0].b_cnt;
3452 layers[0].is_virt_csrow = true;
3453 layers[1].type = EDAC_MC_LAYER_CHANNEL;
3454
3455
3456
3457
3458
3459
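	/*
	 * Always allocate the maximum number of channels the family supports,
	 * which also covers setups with DIMMs on only one channel.
	 */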
3460 layers[1].size = fam_type->max_mcs;
3461 layers[1].is_virt_csrow = false;
3462
3463 mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
3464 if (!mci)
3465 return ret;
3466
3467 mci->pvt_info = pvt;
3468 mci->pdev = &pvt->F3->dev;
3469
3470 setup_mci_misc_attrs(mci);
3471
3472 if (init_csrows(mci))
3473 mci->edac_cap = EDAC_FLAG_NONE;
3474
3475 ret = -ENODEV;
3476 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3477 edac_dbg(1, "failed edac_mc_add_mc()\n");
3478 edac_mc_free(mci);
3479 return ret;
3480 }
3481
3482 return 0;
3483}
3484
3485static int probe_one_instance(unsigned int nid)
3486{
3487 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3488 struct amd64_pvt *pvt = NULL;
3489 struct ecc_settings *s;
3490 int ret;
3491
3492 ret = -ENOMEM;
3493 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3494 if (!s)
3495 goto err_out;
3496
3497 ecc_stngs[nid] = s;
3498
3499 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3500 if (!pvt)
3501 goto err_settings;
3502
3503 pvt->mc_node_id = nid;
3504 pvt->F3 = F3;
3505
3506 fam_type = per_family_init(pvt);
3507 if (!fam_type)
3508 goto err_enable;
3509
3510 ret = hw_info_get(pvt);
3511 if (ret < 0)
3512 goto err_enable;
3513
3514 if (!ecc_enabled(F3, nid)) {
3515 ret = 0;
3516
3517 if (!ecc_enable_override)
3518 goto err_enable;
3519
		if (boot_cpu_data.x86 >= 0x17) {
			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.\n");
			goto err_enable;
		} else {
			amd64_warn("Forcing ECC on!\n");
		}
3525
3526 if (!enable_ecc_error_reporting(s, nid, F3))
3527 goto err_enable;
3528 }
3529
3530 ret = init_one_instance(pvt);
3531 if (ret < 0) {
3532 amd64_err("Error probing instance: %d\n", nid);
3533
3534 if (boot_cpu_data.x86 < 0x17)
3535 restore_ecc_error_reporting(s, nid, F3);
3536
3537 goto err_enable;
3538 }
3539
3540 return ret;
3541
3542err_enable:
3543 hw_info_put(pvt);
3544 kfree(pvt);
3545
3546err_settings:
3547 kfree(s);
3548 ecc_stngs[nid] = NULL;
3549
3550err_out:
3551 return ret;
3552}
3553
3554static void remove_one_instance(unsigned int nid)
3555{
3556 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3557 struct ecc_settings *s = ecc_stngs[nid];
3558 struct mem_ctl_info *mci;
3559 struct amd64_pvt *pvt;
3560
3561 mci = find_mci_by_dev(&F3->dev);
3562 WARN_ON(!mci);
3563
3564
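	/* Remove from EDAC core tracking list. */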
3565 mci = edac_mc_del_mc(&F3->dev);
3566 if (!mci)
3567 return;
3568
3569 pvt = mci->pvt_info;
3570
3571 restore_ecc_error_reporting(s, nid, F3);
3572
3573 kfree(ecc_stngs[nid]);
3574 ecc_stngs[nid] = NULL;
3575
3576
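	/* Free the EDAC core resources. */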
3577 mci->pvt_info = NULL;
3578
3579 hw_info_put(pvt);
3580 kfree(pvt);
3581 edac_mc_free(mci);
3582}
3583
3584static void setup_pci_device(void)
3585{
3586 struct mem_ctl_info *mci;
3587 struct amd64_pvt *pvt;
3588
3589 if (pci_ctl)
3590 return;
3591
3592 mci = edac_mc_find(0);
3593 if (!mci)
3594 return;
3595
3596 pvt = mci->pvt_info;
3597 if (pvt->umc)
3598 pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
3599 else
3600 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
3601 if (!pci_ctl) {
3602 pr_warn("%s(): Unable to create PCI control\n", __func__);
3603 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
3604 }
3605}
3606
3607static const struct x86_cpu_id amd64_cpuids[] = {
3608 X86_MATCH_VENDOR_FAM(AMD, 0x0F, NULL),
3609 X86_MATCH_VENDOR_FAM(AMD, 0x10, NULL),
3610 X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
3611 X86_MATCH_VENDOR_FAM(AMD, 0x16, NULL),
3612 X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
3613 X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
3614 { }
3615};
3616MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
3617
3618static int __init amd64_edac_init(void)
3619{
3620 const char *owner;
3621 int err = -ENODEV;
3622 int i;
3623
3624 owner = edac_get_owner();
3625 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3626 return -EBUSY;
3627
3628 if (!x86_match_cpu(amd64_cpuids))
3629 return -ENODEV;
3630
3631 if (amd_cache_northbridges() < 0)
3632 return -ENODEV;
3633
3634 opstate_init();
3635
3636 err = -ENOMEM;
3637 ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
3638 if (!ecc_stngs)
3639 goto err_free;
3640
3641 msrs = msrs_alloc();
3642 if (!msrs)
3643 goto err_free;
3644
3645 for (i = 0; i < amd_nb_num(); i++) {
3646 err = probe_one_instance(i);
3647 if (err) {
3648
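			/* unwind the instances already probed */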
3649 while (--i >= 0)
3650 remove_one_instance(i);
3651
3652 goto err_pci;
3653 }
3654 }
3655
3656 if (!edac_has_mcs()) {
3657 err = -ENODEV;
3658 goto err_pci;
3659 }
3660
3661
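	/* Hook the platform-specific error decoders into the MCE handling core. */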
3662 if (report_gart_errors)
3663 amd_report_gart_errors(true);
3664
3665 if (boot_cpu_data.x86 >= 0x17)
3666 amd_register_ecc_decoder(decode_umc_error);
3667 else
3668 amd_register_ecc_decoder(decode_bus_error);
3669
3670 setup_pci_device();
3671
3672#ifdef CONFIG_X86_32
3673 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3674#endif
3675
3676 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3677
3678 return 0;
3679
3680err_pci:
3681 msrs_free(msrs);
3682 msrs = NULL;
3683
3684err_free:
3685 kfree(ecc_stngs);
3686 ecc_stngs = NULL;
3687
3688 return err;
3689}
3690
3691static void __exit amd64_edac_exit(void)
3692{
3693 int i;
3694
3695 if (pci_ctl)
3696 edac_pci_release_generic_ctl(pci_ctl);
3697
3698
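	/* Unhook the error decoders from the MCE handling core. */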
3699 amd_report_gart_errors(false);
3700
3701 if (boot_cpu_data.x86 >= 0x17)
3702 amd_unregister_ecc_decoder(decode_umc_error);
3703 else
3704 amd_unregister_ecc_decoder(decode_bus_error);
3705
3706 for (i = 0; i < amd_nb_num(); i++)
3707 remove_one_instance(i);
3708
3709 kfree(ecc_stngs);
3710 ecc_stngs = NULL;
3711
3712 msrs_free(msrs);
3713 msrs = NULL;
3714}
3715
3716module_init(amd64_edac_init);
3717module_exit(amd64_edac_exit);
3718
3719MODULE_LICENSE("GPL");
3720MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3721 "Dave Peterson, Thayne Harbaugh");
3722MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3723 EDAC_AMD64_VERSION);
3724
3725module_param(edac_op_state, int, 0444);
3726MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3727