/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintainer: Borislav Petkov <bp@suse.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#include "internal.h"

#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR0xC0000410[15:11] */
#define SMCA_THR_LVT_OFF	0xF000

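/* Set once the thresholding APIC LVT entry has been programmed successfully. */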
static bool thresholding_irq_en;

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"decode_unit",
	"northbridge",
	"execution_unit",
};

static const char * const smca_umc_block_names[] = {
	"dram_ecc",
	"misc_umc"
};

struct smca_bank_name {
	const char *name;	/* Short name for sysfs */
	const char *long_name;	/* Long name for pretty-printing */
};

static struct smca_bank_name smca_names[] = {
	[SMCA_LS]	= { "load_store",	"Load Store Unit" },
	[SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
	[SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
	[SMCA_DE]	= { "decode_unit",	"Decode Unit" },
	[SMCA_RESERVED]	= { "reserved",		"Reserved" },
	[SMCA_EX]	= { "execution_unit",	"Execution Unit" },
	[SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
	[SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
	[SMCA_CS]	= { "coherent_slave",	"Coherent Slave" },
	[SMCA_PIE]	= { "pie",		"Power, Interrupts, etc." },
	[SMCA_UMC]	= { "umc",		"Unified Memory Controller" },
	[SMCA_PB]	= { "param_block",	"Parameter Block" },
	[SMCA_PSP]	= { "psp",		"Platform Security Processor" },
	[SMCA_SMU]	= { "smu",		"System Management Unit" },
};

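/*
 * Cache of resolved block addresses, per bank; -1 means "not yet looked up".
 * Filled lazily by smca_get_block_address().
 */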
static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
{
	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
};

static const char *smca_get_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].name;
}

const char *smca_get_long_name(enum smca_bank_types t)
{
	if (t >= N_SMCA_BANK_TYPES)
		return NULL;

	return smca_names[t].long_name;
}
EXPORT_SYMBOL_GPL(smca_get_long_name);

static enum smca_bank_types smca_get_bank_type(unsigned int bank)
{
	struct smca_bank *b;

	if (bank >= MAX_NR_BANKS)
		return N_SMCA_BANK_TYPES;

	b = &smca_banks[bank];
	if (!b->hwid)
		return N_SMCA_BANK_TYPES;

	return b->hwid->bank_type;
}

static struct smca_hwid smca_hwid_mcatypes[] = {
	/* { bank_type, hwid_mcatype, xec_bitmap } */

	/* Reserved type */
	{ SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 },

	/* ZN Core (HWID=0xB0) MCA types */
	{ SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
	{ SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
	{ SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
	{ SMCA_DE,	 HWID_MCATYPE(0xB0, 0x3), 0x1FF },
	/* HWID 0xB0 MCATYPE 0x4 is Reserved */
	{ SMCA_EX,	 HWID_MCATYPE(0xB0, 0x5), 0x7FF },
	{ SMCA_FP,	 HWID_MCATYPE(0xB0, 0x6), 0x7F },
	{ SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

	/* Data Fabric MCA types */
	{ SMCA_CS,	 HWID_MCATYPE(0x2E, 0x0), 0x1FF },
	{ SMCA_PIE,	 HWID_MCATYPE(0x2E, 0x1), 0xF },

	/* Unified Memory Controller MCA type */
	{ SMCA_UMC,	 HWID_MCATYPE(0x96, 0x0), 0x3F },

	/* Parameter Block MCA type */
	{ SMCA_PB,	 HWID_MCATYPE(0x05, 0x0), 0x1 },

	/* Platform Security Processor MCA type */
	{ SMCA_PSP,	 HWID_MCATYPE(0xFF, 0x0), 0x1 },

	/* System Management Unit MCA type */
	{ SMCA_SMU,	 HWID_MCATYPE(0x01, 0x0), 0x1 },
};

struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * In SMCA enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to type's name in get_name().
 *
 * InstanceId is 32 bits which is 8 characters. Make sure MAX_MCATYPE_NAME_LEN
 * is greater than 8 plus 1 (for underscore) plus length of longest type name.
 */
#define MAX_MCATYPE_NAME_LEN	30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

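/*
 * Enable this bank's MCAX extensions and, where supported, the APIC-based
 * deferred error interrupt type, then cache which SMCA hardware ID and
 * MCA type the bank reports.
 */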
static void smca_configure(unsigned int bank, unsigned int cpu)
{
	unsigned int i, hwid_mcatype;
	struct smca_hwid *s_hwid;
	u32 high, low;
	u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank);

	/* Set appropriate bits in MCA_CONFIG */
	if (!rdmsr_safe(smca_config, &low, &high)) {
		/*
		 * OS is required to set the MCAX bit to acknowledge that it is
		 * now using the new MSR ranges and new registers under each
		 * bank. It also means that the OS will configure deferred
		 * errors in the new MCA_CONFIG register. If the bit is not set,
		 * uncorrectable errors will cause a system panic.
		 *
		 * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.)
		 */
		high |= BIT(0);

		/*
		 * SMCA sets the Deferred Error Interrupt type per bank.
		 *
		 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
		 * if the DeferredIntType bit field is available.
		 *
		 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
		 * high portion of the MSR). OS should set this to 0x1 to enable
		 * APIC based interrupt. First, check that no interrupt has been
		 * set.
		 */
		if ((low & BIT(5)) && !((high >> 5) & 0x3))
			high |= BIT(5);

		wrmsr(smca_config, low, high);
	}

	/* Return early if this bank was already initialized. */
	if (smca_banks[bank].hwid)
		return;

	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
		pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
		return;
	}

	hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
				    (high & MCI_IPID_MCATYPE) >> 16);

	for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
		s_hwid = &smca_hwid_mcatypes[i];
		if (hwid_mcatype == s_hwid->hwid_mcatype) {
			smca_banks[bank].hwid = s_hwid;
			smca_banks[bank].id = low;
			smca_banks[bank].sysfs_id = s_hwid->count++;
			break;
		}
	}
}

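/* Arguments passed to threshold_restart_bank() via smp_call_function_single(). */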
struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs
	 * of a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
};


static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
};

/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

 done:
	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b		= b,
		.set_lvt_off	= 1,
		.lvt_off	= offset,
	};

	b->threshold_limit	= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
};

static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	if (!mce_flags.smca)
		low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;

	wrmsr(MSR_CU_DEF_ERR, low, high);
}

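/* Resolve the MSR address of a given (bank, block) pair on an SMCA system. */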
static u32 smca_get_block_address(unsigned int bank, unsigned int block)
{
	u32 low, high;
	u32 addr = 0;

	if (smca_get_bank_type(bank) == SMCA_RESERVED)
		return addr;

	if (!block)
		return MSR_AMD64_SMCA_MCx_MISC(bank);

	/* Check our cache first: */
	if (smca_bank_addrs[bank][block] != -1)
		return smca_bank_addrs[bank][block];

	/*
	 * For SMCA enabled processors, BLKPTR field of the first MISC register
	 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
	 */
	if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
		goto out;

	if (!(low & MCI_CONFIG_MCAX))
		goto out;

	if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
	    (low & MASK_BLKPTR_LO))
		addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);

out:
	smca_bank_addrs[bank][block] = addr;
	return addr;
}

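/*
 * Return the MSR address of the next MISC thresholding block of this bank,
 * or 0 if there is none. Block 0 is MCx_MISC itself, block 1 is found via
 * the BLKPTR field, and any further blocks follow contiguously.
 */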
static u32 get_block_address(u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block)
{
	u32 addr = 0, offset = 0;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return addr;

	if (mce_flags.smca)
		return smca_get_block_address(bank, block);

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = msr_ops.misc(bank);
		break;
	case 1:
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}
	return addr;
}

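/*
 * Program the LVT offset for this threshold block and enable the threshold
 * interrupt if the block supports it. Returns the (possibly updated) LVT
 * offset so it can be reused for the remaining blocks.
 */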
static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	u32 smca_low, smca_high;
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= (1 << bank);

	memset(&b, 0, sizeof(b));
	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (!mce_flags.smca) {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
		goto set_offset;
	}

	/* Gather LVT offset for thresholding: */
	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
		goto out;

	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
	offset = setup_APIC_mce_threshold(offset, new);
	if (offset == new)
		thresholding_irq_en = true;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block, cpu = smp_processor_id();
	int offset = -1;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (mce_flags.smca)
			smca_configure(bank, cpu);

		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}

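/*
 * Translate a UMC-normalized DRAM address back to a system physical address:
 * undo the DRAM offset, de-interleave across channels/dies/sockets, add the
 * DRAM base, account for the legacy MMIO hole and re-apply channel hashing.
 */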
int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
	u64 dram_base_addr, dram_limit_addr, dram_hole_base;
	/* We start from the normalized address */
	u64 ret_addr = norm_addr;

	u32 tmp;

	u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
	u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
	u8 intlv_addr_sel, intlv_addr_bit;
	u8 num_intlv_bits, hashed_bit;
	u8 lgcy_mmio_hole_en, base = 0;
	u8 cs_mask, cs_id = 0;
	bool hash_enabled = false;

	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
	if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
		goto out_err;

	/* Remove HiAddrOffset from normalized address, if enabled: */
	if (tmp & BIT(0)) {
		u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;

		if (norm_addr >= hi_addr_offset) {
			ret_addr -= hi_addr_offset;
			base = 1;
		}
	}

	/* Read D18F0x110 (DramBaseAddress). */
	if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
		goto out_err;

	/* Check if address range is valid. */
	if (!(tmp & BIT(0))) {
		pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
			__func__, tmp);
		goto out_err;
	}

	lgcy_mmio_hole_en = tmp & BIT(1);
	intlv_num_chan	  = (tmp >> 4) & 0xF;
	intlv_addr_sel	  = (tmp >> 8) & 0x7;
	dram_base_addr	  = (tmp & GENMASK_ULL(31, 12)) << 16;

	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
	if (intlv_addr_sel > 3) {
		pr_err("%s: Invalid interleave address select %d.\n",
			__func__, intlv_addr_sel);
		goto out_err;
	}

	/* Read D18F0x114 (DramLimitAddress). */
	if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
		goto out_err;

	intlv_num_sockets = (tmp >> 8) & 0x1;
	intlv_num_dies	  = (tmp >> 10) & 0x3;
	dram_limit_addr	  = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

	intlv_addr_bit = intlv_addr_sel + 8;

	/* Re-use intlv_num_chan by setting number of bits to shift. */
	switch (intlv_num_chan) {
	case 0:	intlv_num_chan = 0; break;
	case 1: intlv_num_chan = 1; break;
	case 3: intlv_num_chan = 2; break;
	case 5:	intlv_num_chan = 3; break;
	case 7:	intlv_num_chan = 4; break;

	case 8: intlv_num_chan = 1;
		hash_enabled = true;
		break;
	default:
		pr_err("%s: Invalid number of interleaved channels %d.\n",
			__func__, intlv_num_chan);
		goto out_err;
	}

	num_intlv_bits = intlv_num_chan;

	if (intlv_num_dies > 2) {
		pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
			__func__, intlv_num_dies);
		goto out_err;
	}

	num_intlv_bits += intlv_num_dies;

	/* Add a bit if sockets are interleaved. */
	num_intlv_bits += intlv_num_sockets;

	/* Assert num_intlv_bits <= 4 */
	if (num_intlv_bits > 4) {
		pr_err("%s: Invalid interleave bits %d.\n",
			__func__, num_intlv_bits);
		goto out_err;
	}

	if (num_intlv_bits > 0) {
		u64 temp_addr_x, temp_addr_i, temp_addr_y;
		u8 die_id_bit, sock_id_bit, cs_fabric_id;

		/*
		 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
		 * This is the fabric id for this coherent slave. Use
		 * umc/channel# as instance id of the coherent slave
		 * for FICAA.
		 */
		if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
			goto out_err;

		cs_fabric_id = (tmp >> 8) & 0xFF;
		die_id_bit   = 0;

		/* If interleaved over more than 1 channel: */
		if (intlv_num_chan) {
			die_id_bit = intlv_num_chan;
			cs_mask	   = (1 << die_id_bit) - 1;
			cs_id	   = cs_fabric_id & cs_mask;
		}

		sock_id_bit = die_id_bit;

		/* Read D18F1x208 (SystemFabricIdMask). */
		if (intlv_num_dies || intlv_num_sockets)
			if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
				goto out_err;

		/* If interleaved over more than 1 die. */
		if (intlv_num_dies) {
			sock_id_bit  = die_id_bit + intlv_num_dies;
			die_id_shift = (tmp >> 24) & 0xF;
			die_id_mask  = (tmp >> 8) & 0xFF;

			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
		}

		/* If interleaved over more than 1 socket. */
		if (intlv_num_sockets) {
			socket_id_shift	= (tmp >> 28) & 0xF;
			socket_id_mask	= (tmp >> 16) & 0xFF;

			cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
		}

		/*
		 * The pre-interleaved address consists of XXXXXXIIIYYYYY
		 * where III is the ID for this CS, and XXXXXXYYYYY are the
		 * address bits from the post-interleaved address.
		 * "num_intlv_bits" has been calculated to tell us how many "I"
		 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
		 * there are (where "I" starts).
		 */
		temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
		temp_addr_i = (cs_id << intlv_addr_bit);
		temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
		ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
	}

	/* Add dram base address */
	ret_addr += dram_base_addr;

	/* If legacy MMIO hole enabled */
	if (lgcy_mmio_hole_en) {
		if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
			goto out_err;

		dram_hole_base = tmp & GENMASK(31, 24);
		if (ret_addr >= dram_hole_base)
			ret_addr += (BIT_ULL(32) - dram_hole_base);
	}

	if (hash_enabled) {
		/* Save some parentheses and grab ls-bit at the end. */
		hashed_bit =	(ret_addr >> 12) ^
				(ret_addr >> 18) ^
				(ret_addr >> 21) ^
				(ret_addr >> 30) ^
				cs_id;

		hashed_bit &= BIT(0);

		if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
			ret_addr ^= BIT(intlv_addr_bit);
	}

	/* Is the calculated system address above the DRAM limit address? */
	if (ret_addr > dram_limit_addr)
		goto out_err;

	*sys_addr = ret_addr;
	return 0;

out_err:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);

bool amd_mce_is_memory_error(struct mce *m)
{
	/* ErrCodeExt[20:16] */
	u8 xec = (m->status >> 16) & 0x1f;

	if (mce_flags.smca)
		return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0;

	return m->bank == 4 && xec == 0x8;
}

static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
	struct mce m;

	mce_setup(&m);

	m.status = status;
	m.misc   = misc;
	m.bank   = bank;
	m.tsc	 = rdtsc();

	if (m.status & MCI_STATUS_ADDRV) {
		m.addr = addr;

		/*
		 * Extract [55:<lsb>] where lsb is the least significant bit
		 * of the address and discard [63:56].
		 * [55:<lsb>] needs to be consistent with mce_usable_address().
		 */
		if (mce_flags.smca) {
			u8 lsb = (m.addr >> 56) & 0x3f;

			m.addr &= GENMASK_ULL(55, lsb);
		}
	}

	if (mce_flags.smca) {
		rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

		if (m.status & MCI_STATUS_SYNDV)
			rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
	}

	mce_log(&m);
}

asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}

/*
 * Returns true if the logged error is deferred. False, otherwise.
 */
static inline bool
_log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
{
	u64 status, addr = 0;

	rdmsrl(msr_stat, status);
	if (!(status & MCI_STATUS_VAL))
		return false;

	if (status & MCI_STATUS_ADDRV)
		rdmsrl(msr_addr, addr);

	__log_error(bank, status, addr, misc);

	wrmsrl(msr_stat, 0);

	return status & MCI_STATUS_DEFERRED;
}

/*
 * We have three scenarios for checking for Deferred errors:
 *
 * 1) Non-SMCA systems check MCA_STATUS and log error if found.
 * 2) SMCA systems check MCA_STATUS. If error is found then log it and also
 *    clear MCA_DESTAT.
 * 3) SMCA systems check MCA_DESTAT, if error was not found in MCA_STATUS, and
 *    clear MCA_DESTAT.
 */
static void log_error_deferred(unsigned int bank)
{
	bool defrd;

	defrd = _log_error_bank(bank, msr_ops.status(bank),
					msr_ops.addr(bank), 0);

	if (!mce_flags.smca)
		return;

	/* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
	if (defrd) {
		wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
		return;
	}

	/*
	 * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
	 * for a valid error.
	 */
	_log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
			      MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank)
		log_error_deferred(bank);
}

static void log_error_thresholding(unsigned int bank, u64 misc)
{
	_log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
}

static void log_and_reset_block(struct threshold_block *block)
{
	struct thresh_restart tr;
	u32 low = 0, high = 0;

	if (!block)
		return;

	if (rdmsr_safe(block->address, &low, &high))
		return;

	if (!(high & MASK_OVERFLOW_HI))
		return;

	/* Log the MCE which caused the threshold event. */
	log_error_thresholding(block->bank, ((u64)high << 32) | low);

	/* Reset threshold block after logging error. */
	memset(&tr, 0, sizeof(tr));
	tr.b = block;
	threshold_restart_bank(&tr);
}

/*
 * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt
 * goes off when error_count reaches threshold_limit.
 */
static void amd_threshold_interrupt(void)
{
	struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
	unsigned int bank, cpu = smp_processor_id();

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;

		first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
		if (!first_block)
			continue;

		/*
		 * The first block is also the head of the list. Check it first
		 * before iterating over the rest.
		 */
		log_and_reset_block(first_block);
		list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
			log_and_reset_block(block);
	}
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

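/*
 * The hardware counter is preloaded with THRESHOLD_MAX - threshold_limit by
 * threshold_restart_bank() so that it overflows after exactly threshold_limit
 * events; subtract that bias to report the number of errors seen so far.
 */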
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops	= &threshold_ops,
	.default_attrs	= default_attrs,
};

static const char *get_name(unsigned int bank, struct threshold_block *b)
{
	enum smca_bank_types bank_type;

	if (!mce_flags.smca) {
		if (b && bank == 4)
			return bank4_names(b);

		return th_names[bank];
	}

	bank_type = smca_get_bank_type(bank);
	if (bank_type >= N_SMCA_BANK_TYPES)
		return NULL;

	if (b && bank_type == SMCA_UMC) {
		if (b->block < ARRAY_SIZE(smca_umc_block_names))
			return smca_umc_block_names[b->block];
		return NULL;
	}

	if (smca_banks[bank].hwid->count == 1)
		return smca_get_name(bank_type);

	snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
		 "%s_%x", smca_get_name(bank_type),
			  smca_banks[bank].sysfs_id);
	return buf_mcatype;
}

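/*
 * Allocate and register a threshold_block kobject for each valid block of
 * this bank, recursing through the block chain discovered via
 * get_block_address().
 */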
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   get_name(bank, b));
	if (err)
		goto out_free;
recurse:
	address = get_block_address(address, low, high, bank, ++block);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}

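/*
 * Create the sysfs directory and blocks for one (cpu, bank) pair. Shared
 * banks (non-SMCA bank 4) reuse the threshold_bank already set up by the
 * first CPU on the node.
 */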
static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = get_name(bank, NULL);
	int err = 0;

	if (!dev)
		return -ENODEV;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			refcount_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		refcount_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
	if (!err)
		goto out;

 out_free:
	kfree(b);

 out:
	return err;
}

static void deallocate_threshold_block(unsigned int cpu,
						 unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!refcount_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

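/* Tear down all threshold bank sysfs state for a CPU going offline. */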
int mce_threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
	per_cpu(threshold_banks, cpu) = NULL;
	return 0;
}

/* create dir/files for all valid threshold banks */
int mce_threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	bp = per_cpu(threshold_banks, cpu);
	if (bp)
		return 0;

	bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *),
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			goto err;
	}
	return err;
err:
	mce_threshold_remove_device(cpu);
	return err;
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = mce_threshold_create_device(lcpu);

		if (err)
			return err;
	}

	if (thresholding_irq_en)
		mce_threshold_vector = amd_threshold_interrupt;

	return 0;
}

/*
 * there are 3 funcs which need to be _initcalled in a logic sequence:
 * 1. xxx_init() <-- device_initcall
 * 2. mcheck_init_device() <-- device_initcall_sync
 * 3. threshold_init_device() <-- late_initcall
 *
 * 2 must run after 1 because mcheck_init_device() creates the per-CPU MCE
 * sysfs devices which threshold_create_bank() hangs the per-bank directories
 * off of, and 3 must run last so that all online CPUs already have their MCE
 * devices set up when the threshold banks are created.
 */
late_initcall(threshold_init_device);