/*
 * AMD K8 family machine check error-threshold support.
 *
 * Each MCi_MISCj register carries an error counter that raises an
 * APIC interrupt (THRESHOLD_APIC_VECTOR) once it crosses a
 * software-programmed threshold; this file programs those counters
 * and exposes them through sysfs.
 *
 * All MC4_MISCi registers are shared between the cores of a node.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/sysdev.h>
#include <linux/sysfs.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/percpu.h>
#include <asm/idle.h>

#define PFX               "mce_threshold: "
#define VERSION           "version 1.1.1"

#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400
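
/*
 * One threshold_block per MCi_MISCj error counter: its MSR address,
 * owning bank and CPU, the current interrupt/limit settings, and the
 * kobject that exposes it in sysfs.
 */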
struct threshold_block {
	unsigned int block;
	unsigned int bank;
	unsigned int cpu;
	u32 address;
	u16 interrupt_enable;
	u16 threshold_limit;
	struct kobject kobj;
	struct list_head miscj;
};

/* defaults used early on boot */
static struct threshold_block threshold_defaults = {
	.interrupt_enable = 0,
	.threshold_limit = THRESHOLD_MAX,
};

struct threshold_bank {
	struct kobject kobj;
	struct threshold_block *blocks;
	cpumask_t cpus;
};
static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);

#ifdef CONFIG_SMP
/* bank 4 (MC4, the northbridge) is shared by all cores on a node */
static unsigned char shared_bank[NR_BANKS] = {
	0, 0, 0, 0, 1
};
#endif

static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */

/*
 * CPU Initialization
 */

/* must be called with correct CPU affinity */
static void threshold_restart_bank(struct threshold_block *b,
				   int reset, u16 old_limit)
{
	u32 mci_misc_hi, mci_misc_lo;

	rdmsr(b->address, mci_misc_lo, mci_misc_hi);

	if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
		reset = 1;	/* limit cannot be lower than err count */

	if (reset) {		/* reset err count and overflow bit */
		mci_misc_hi =
		    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - b->threshold_limit);
	} else if (old_limit) {	/* change limit w/o reset */
		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
		    (old_limit - b->threshold_limit);
		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	if (b->interrupt_enable)
		mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC;
	else
		mci_misc_hi &= ~MASK_INT_TYPE_HI;

	mci_misc_hi |= MASK_COUNT_EN_HI;
	wrmsr(b->address, mci_misc_lo, mci_misc_hi);
}
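
/*
 * Worked example (numbers chosen for illustration): with
 * threshold_limit = 10, the counter field is programmed to
 * THRESHOLD_MAX - 10 = 0xFF5.  The hardware counts up once per
 * corrected error and raises the threshold interrupt when the counter
 * overflows past THRESHOLD_MAX, i.e. after 10 errors.
 */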

/* CPU init entry point, called from mce.c with preempt off */
void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	unsigned int bank, block;
	unsigned int cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			/* skip counters that are absent or BIOS-locked */
			if (!(high & MASK_CNTP_HI) ||
			    (high & MASK_LOCKED_HI))
				continue;

			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
			if (shared_bank[bank] && c->cpu_core_id)
				break;
#endif
			high &= ~MASK_LVTOFF_HI;
			high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
			wrmsr(address, low, high);

			setup_APIC_extended_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
						THRESHOLD_APIC_VECTOR,
						K8_APIC_EXT_INT_MSG_FIX, 0);

			threshold_defaults.address = address;
			threshold_restart_bank(&threshold_defaults, 0, 0);
		}
	}
}
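
/*
 * Note on block addressing, relied on by the discovery loops above and
 * below: block 0 of each bank is MCi_MISC itself; if its BLKPTR field
 * (MASK_BLKPTR_LO) is non-zero, the remaining blocks of that bank sit
 * contiguously starting at MCG_XBLK_ADDR plus a scaled offset.
 */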

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR.
 * The interrupt goes off when error_count reaches threshold_limit,
 * and the handler simply logs to mcelog with a software-defined bank
 * number.
 */
asmlinkage void mce_threshold_interrupt(void)
{
	unsigned int bank, block;
	struct mce m;
	u32 low = 0, high = 0, address = 0;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	memset(&m, 0, sizeof(m));
	rdtscll(m.tsc);
	m.cpu = smp_processor_id();

	/* assume the first overflowed bank caused it */
	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
				address = MSR_IA32_MC0_MISC + bank * 4;
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI) ||
			    (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			do_machine_check(NULL, 0);

			if (high & MASK_OVERFLOW_HI) {
				rdmsrl(address, m.misc);
				rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
				       m.status);
				m.bank = K8_MCE_THRESHOLD_BASE
				       + bank * NR_BLOCKS
				       + block;
				mce_log(&m);
				goto out;
			}
		}
	}
out:
	add_pda(irq_threshold_count, 1);
	irq_exit();
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show)(struct threshold_block *, char *);
	ssize_t (*store)(struct threshold_block *, const char *, size_t count);
};

/*
 * The threshold MSRs are per-CPU, so reads and writes must execute on
 * the CPU that owns the block; pin the calling task there temporarily.
 */
static cpumask_t affinity_set(unsigned int cpu)
{
	cpumask_t oldmask = current->cpus_allowed;
	cpumask_t newmask = CPU_MASK_NONE;

	cpu_set(cpu, newmask);
	set_cpus_allowed(current, newmask);
	return oldmask;
}

static void affinity_restore(cpumask_t oldmask)
{
	set_cpus_allowed(current, oldmask);
}

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lx\n", (unsigned long)b->name);		\
}
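
/*
 * Each SHOW_FIELDS(x) invocation below expands to a show_x() helper
 * that prints the corresponding threshold_block field in hex.
 */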
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t store_interrupt_enable(struct threshold_block *b,
				      const char *buf, size_t count)
{
	char *end;
	cpumask_t oldmask;
	unsigned long new = simple_strtoul(buf, &end, 0);

	if (end == buf)
		return -EINVAL;

	b->interrupt_enable = !!new;

	oldmask = affinity_set(b->cpu);
	threshold_restart_bank(b, 0, 0);
	affinity_restore(oldmask);

	return end - buf;
}

static ssize_t store_threshold_limit(struct threshold_block *b,
				     const char *buf, size_t count)
{
	char *end;
	cpumask_t oldmask;
	u16 old;
	unsigned long new = simple_strtoul(buf, &end, 0);

	if (end == buf)
		return -EINVAL;

	/* clamp the new limit to [1, THRESHOLD_MAX] */
	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	old = b->threshold_limit;
	b->threshold_limit = new;

	oldmask = affinity_set(b->cpu);
	threshold_restart_bank(b, 0, old);
	affinity_restore(oldmask);

	return end - buf;
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 high, low;
	cpumask_t oldmask;

	oldmask = affinity_set(b->cpu);
	rdmsr(b->address, low, high);	/* only the high half is used */
	affinity_restore(oldmask);

	return sprintf(buf, "%x\n", (high & THRESHOLD_MAX) -
		       (THRESHOLD_MAX - b->threshold_limit));
}

static ssize_t store_error_count(struct threshold_block *b,
				 const char *buf, size_t count)
{
	cpumask_t oldmask;

	oldmask = affinity_set(b->cpu);
	threshold_restart_bank(b, 1, 0);	/* any write resets the count */
	affinity_restore(oldmask);

	return 1;
}
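
/*
 * error_count reads back as errors seen since the last reset: the
 * hardware counter starts at THRESHOLD_MAX - threshold_limit, so the
 * subtraction in show_error_count() recovers the raw error count.
 * Writes ignore the value and simply reset the counter.
 */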

#define THRESHOLD_ATTR(_name, _mode, _show, _store) {			\
	.attr = {.name = __stringify(_name), .mode = _mode },		\
	.show = _show,							\
	.store = _store,						\
};

#define RW_ATTR(name)							\
static struct threshold_attr name =					\
	THRESHOLD_ATTR(name, 0644, show_ ## name, store_ ## name)

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

static struct attribute *default_attrs[] = {
	&interrupt_enable.attr,
	&threshold_limit.attr,
	&error_count.attr,
	NULL
};
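
/*
 * Example usage from userspace (paths illustrative; assumes sysfs at
 * /sys, CPU 0, shared bank 4, block 0):
 *
 *   cat /sys/devices/system/machinecheck/machinecheck0/threshold_bank4/misc0/error_count
 *   echo 100 > /sys/devices/system/machinecheck/machinecheck0/threshold_bank4/misc0/threshold_limit
 *   echo 1 > /sys/devices/system/machinecheck/machinecheck0/threshold_bank4/misc0/interrupt_enable
 */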

#define to_block(k) container_of(k, struct threshold_block, kobj)
#define to_attr(a) container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;
	return ret;
}

static struct sysfs_ops threshold_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops = &threshold_ops,
	.default_attrs = default_attrs,
};
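
/*
 * threshold_ktype routes every sysfs read/write on a misc<block>
 * directory through show()/store() above, which dispatch to the
 * per-attribute handlers via struct threshold_attr.
 */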

static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
					       unsigned int bank,
					       unsigned int block,
					       u32 address)
{
	int err;
	u32 low, high;
	struct threshold_block *b = NULL;

	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe(address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI) ||
	    (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block = block;
	b->bank = bank;
	b->cpu = cpu;
	b->address = address;
	b->interrupt_enable = 0;
	b->threshold_limit = THRESHOLD_MAX;

	INIT_LIST_HEAD(&b->miscj);

	/* the first block anchors the bank's list; later ones chain onto it */
	if (per_cpu(threshold_banks, cpu)[bank]->blocks)
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	else
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;

	kobject_set_name(&b->kobj, "misc%i", block);
	b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
	b->kobj.ktype = &threshold_ktype;
	err = kobject_register(&b->kobj);
	if (err)
		goto out_free;
recurse:
	/* recurse to the next block in this bank's chain */
	if (!block) {
		address = (low & MASK_BLKPTR_LO) >> 21;
		if (!address)
			return 0;
		address += MCG_XBLK_ADDR;
	} else
		++address;

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
		goto out_free;

	return err;

out_free:
	if (b) {
		kobject_unregister(&b->kobj);
		kfree(b);
	}
	return err;
}
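
/*
 * Sketch of the sysfs topology this builds (names illustrative):
 *
 *   /sys/devices/system/machinecheck/machinecheck<cpu>/
 *       threshold_bank<bank>/misc<block>/
 *           {error_count,interrupt_enable,threshold_limit}
 *
 * For a shared bank, only the first core of a node owns the real
 * directory; sibling cores get symlinks to it.
 */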

static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	int i, err = 0;
	struct threshold_bank *b = NULL;
	cpumask_t oldmask = CPU_MASK_NONE;
	char name[32];

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
		i = first_cpu(per_cpu(cpu_core_map, cpu));

		/* first core not up yet */
		if (cpu_data(i).cpu_core_id)
			goto out;

		/* already linked */
		if (per_cpu(threshold_banks, cpu)[bank])
			goto out;

		b = per_cpu(threshold_banks, i)[bank];
		if (!b)
			goto out;

		err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
					&b->kobj, name);
		if (err)
			goto out;

		b->cpus = per_cpu(cpu_core_map, cpu);
		per_cpu(threshold_banks, cpu)[bank] = b;
		goto out;
	}
#endif

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	kobject_set_name(&b->kobj, "threshold_bank%i", bank);
	b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
#ifndef CONFIG_SMP
	b->cpus = CPU_MASK_ALL;
#else
	b->cpus = per_cpu(cpu_core_map, cpu);
#endif
	err = kobject_register(&b->kobj);
	if (err)
		goto out_free;

	per_cpu(threshold_banks, cpu)[bank] = b;

	oldmask = affinity_set(cpu);
	err = allocate_threshold_blocks(cpu, bank, 0,
					MSR_IA32_MC0_MISC + bank * 4);
	affinity_restore(oldmask);

	if (err)
		goto out_free;

	/* link every sibling core to the new directory */
	for_each_cpu_mask(i, b->cpus) {
		if (i == cpu)
			continue;

		err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
					&b->kobj, name);
		if (err)
			goto out;

		per_cpu(threshold_banks, i)[bank] = b;
	}

	goto out;

out_free:
	per_cpu(threshold_banks, cpu)[bank] = NULL;
	kfree(b);
out:
	return err;
}
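
/*
 * Design note: for a shared bank the first core of a node owns the
 * real kobject; b->cpus records every CPU that links to it so that
 * teardown can find and remove all sibling symlinks.
 */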

/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	int err = 0;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			goto out;
	}
out:
	return err;
}

/*
 * Let's be hotplug friendly: on multi-core processors the first core
 * always takes ownership of the shared sysfs dir/files, and the rest
 * of the cores are symlinked to it.
 */

static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_unregister(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	/* the anchoring first block is not on the list; free it last */
	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
	int i = 0;
	struct threshold_bank *b;
	char name[32];

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
	/* sibling symlink */
	if (shared_bank[bank] && b->blocks->cpu != cpu) {
		sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
		per_cpu(threshold_banks, cpu)[bank] = NULL;
		return;
	}
#endif

	/* remove all sibling symlinks before unregistering the bank */
	for_each_cpu_mask(i, b->cpus) {
		if (i == cpu)
			continue;

		sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
		per_cpu(threshold_banks, i)[bank] = NULL;
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_unregister(&b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < NR_BANKS; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
}

/* get notified when a CPU comes on/off */
static int threshold_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	/* cpu was unsigned int to begin with */
	unsigned int cpu = (unsigned long)hcpu;

	if (cpu >= NR_CPUS)
		goto out;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		threshold_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		threshold_remove_device(cpu);
		break;
	default:
		break;
	}
out:
	return NOTIFY_OK;
}

static struct notifier_block threshold_cpu_notifier = {
	.notifier_call = threshold_cpu_callback,
};

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up yet */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&threshold_cpu_notifier);
	return 0;
}

device_initcall(threshold_init_device);