#include <linux/perf_event.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/apicdef.h>
#include <asm/nmi.h>

#include "../perf_event.h"

static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses         */
		[ C(RESULT_MISS)   ] = 0x0141, /* Data Cache Misses           */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts   */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled  */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches   */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses         */
		[ C(RESULT_MISS)   ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches         */
		[ C(RESULT_MISS)   ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.       */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI     */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r  */
		[ C(RESULT_MISS)   ] = 0x98e9, /* CPU Request to Memory, r    */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
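
/*
 * Table entries are PERFCTL-style encodings rather than bare event numbers:
 * bits [7:0] select the event code and bits [15:8] the unit mask. As a
 * worked example (an illustrative reading of the table above), the L1D
 * read-miss entry 0x0141 selects event 0x41 with unit mask 0x01, and the
 * NODE read entry 0xb8e9 selects event 0xe9 with unit mask 0xb8. A value of
 * 0 marks an op/result combination that is not supported (event init fails
 * with -ENOENT); -1 marks one that makes no sense for that cache (-EINVAL).
 */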

static __initconst const u64 amd_hw_cache_event_ids_f17h
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
		[C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
		[C(RESULT_MISS)]   = 0,
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0080, /* IC Fetches */
		[C(RESULT_MISS)]   = 0x0081, /* IC Misses */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
		[C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
		[C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
		[C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0,
		[C(RESULT_MISS)]   = 0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)]   = -1,
	},
},
};

/*
 * AMD Performance Monitor K7 and later, up to and including Family 16h:
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x077d,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x077e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x00d0, /* "Decoder empty" event */
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x00d1, /* "Dispatch stalls" event */
};

/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0xff60,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c2,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c3,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= 0x0287,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= 0x0187,
};

static u64 amd_pmu_event_map(int hw_event)
{
	if (boot_cpu_data.x86 >= 0x17)
		return amd_f17h_perfmon_event_map[hw_event];

	return amd_perfmon_event_map[hw_event];
}
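
/*
 * boot_cpu_data.x86 is the CPU family, so Family 17h (Zen) and newer,
 * including the Zen-derived Family 18h (Hygon Dhyana), take the f17h map.
 * A generic event with no entry in the selected map reads back as 0 and is
 * rejected at event init time; e.g. PERF_COUNT_HW_CACHE_MISSES is absent
 * from amd_f17h_perfmon_event_map above and is therefore unavailable as a
 * generic event on those parts.
 */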

/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset;

	if (!index)
		return index;

	if (eventsel)
		offset = event_offsets[index];
	else
		offset = count_offsets[index];

	if (offset)
		return offset;

	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		offset = index;
	else
		offset = index << 1;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}
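
/*
 * Worked example of the resulting MSR layout (addresses are the
 * architectural MSR_K7_* and MSR_F15H_* bases): without PERFCTR_CORE,
 * counter 2 uses event select 0xc0010000 + 2 = 0xc0010002 and counter
 * register 0xc0010004 + 2 = 0xc0010006. With PERFCTR_CORE, event selects
 * and counters interleave from base 0xc0010200, so counter 2 gets offset
 * 2 << 1 = 4: PERF_CTL2 = 0xc0010204 and PERF_CTR2 = 0xc0010205.
 */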

static int amd_core_hw_config(struct perf_event *event)
{
	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

	return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}
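
/*
 * The 12-bit AMD64 event code is split across the event-select register:
 * code bits [7:0] sit in config bits [7:0] and code bits [11:8] in config
 * bits [35:32], which is what the shift-by-24 above reassembles. As an
 * illustration, a raw config of 0x01000000e9ULL decodes to event code
 * 0x1e9, and because (0xe9 & 0xe0) == 0xe0 it is classified as a
 * NorthBridge event.
 */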

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret;

	/* pass precise event sampling to ibs: */
	if (event->attr.precise_ip && get_ibs_caps())
		return -ENOENT;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	ret = x86_pmu_hw_config(event);
	if (ret)
		return ret;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return amd_core_hw_config(event);
}

static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
					   struct perf_event *event)
{
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (cmpxchg(nb->owners + i, event, NULL) == event)
			break;
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache and HyperTransport
 * traffic. They are identified by an event code >= 0xe0.
 * They measure events on the NorthBridge, which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When an NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the
 * same counters to host NB events, which is why we use atomic
 * ops. Some multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * deallocating the NB events and then decrementing the refcount of
 * the NB entity. The NB entity is also released (i.e., made
 * available to other cores) when all events are deallocated and
 * we are no longer measuring an NB event.
 */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			       struct event_constraint *c)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old;
	int idx, new = -1;

	if (!c)
		c = &unconstrained;

	if (cpuc->is_fake)
		return c;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
		if (new == -1 || hwc->idx == idx)
			/* assign free slot, prefer hwc->idx */
			old = cmpxchg(nb->owners + idx, NULL, event);
		else if (nb->owners[idx] == event)
			/* event already present */
			old = event;
		else
			continue;

		if (old && old != event)
			continue;

		/* reassign to this slot */
		if (new != -1)
			cmpxchg(nb->owners + new, event, NULL);
		new = idx;

		/* already present, reuse */
		if (old == event)
			break;
	}

	if (new == -1)
		return &emptyconstraint;

	return &nb->event_constraints[new];
}

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (!x86_pmu.amd_nb_constraints)
		return 0;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return -ENOMEM;

	return 0;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
	struct amd_nb *nb;
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	if (!x86_pmu.amd_nb_constraints)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			*onln = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (!x86_pmu.amd_nb_constraints)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. If the counter is disabled between the overflow and
 * the NMI, the NMI may appear spurious or the final count may be lost.
 * Before fully disabling a counter, wait for any in-flight overflow to be
 * handled: the NMI handler reloads the counter, which sets its sign bit
 * again.
 */
#define OVERFLOW_WAIT_COUNT	50

static void amd_pmu_wait_on_overflow(int idx)
{
	unsigned int i;
	u64 counter;

	/*
	 * Wait for the counter to be reset if it has overflowed. This loop
	 * should exit very, very quickly, but just in case, don't wait
	 * forever...
	 */
	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
		rdmsrl(x86_pmu_event_addr(idx), counter);
		if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
			break;

		/* Might be in IRQ context, so can't sleep */
		udelay(1);
	}
}
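
/*
 * The sign-bit check above relies on how perf programs sampling counters:
 * a counter is loaded with (-period) & cntval_mask, so bit 47 starts out
 * set (an illustrative period of 100000 loads 0xfffffffe7960). The bit
 * only reads as 0 in the window between the counter wrapping and the
 * overflow NMI reloading it, which is precisely the window this helper
 * waits out.
 */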

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	x86_pmu_disable_all();

	/*
	 * This shouldn't be called from NMI context, but add a safeguard here
	 * to return, since if we're in NMI context we can't wait for an NMI
	 * to reset an overflowed counter value.
	 */
	if (in_nmi())
		return;

	/*
	 * Check each counter for overflow and wait for it to be reset by the
	 * NMI if it has overflowed. This relies on the fact that all active
	 * counters are always enabled when this function is called and
	 * ARCH_PERFMON_EVENTSEL_INT is always set.
	 */
	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		amd_pmu_wait_on_overflow(idx);
	}
}

static void amd_pmu_disable_event(struct perf_event *event)
{
	x86_pmu_disable_event(event);

	/*
	 * This can be called from NMI context (via x86_pmu_stop). The counter
	 * may have overflowed, but either way, we'll never see it get reset
	 * by the NMI if we're already in the NMI. And the NMI latency support
	 * below will take care of any pending NMI that might have been
	 * generated by the overflow.
	 */
	if (in_nmi())
		return;

	amd_pmu_wait_on_overflow(event->hw.idx);
}

/*
 * Because of NMI latency, if multiple PMC counters are active or other sources
 * of NMIs are received, the perf NMI handler can handle one or more overflowed
 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
 * back-to-back NMI support won't be active. This PMC handler needs to take into
 * account that this can occur, otherwise this could result in unknown NMI
 * messages being issued. Examples of this are PMC overflow while in the NMI
 * handler when multiple PMCs are active, or PMC overflow while handling some
 * other source of an NMI.
 *
 * Attempt to mitigate this by using the number of active PMCs to determine
 * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
 * any PMCs. The per-CPU perf_nmi_counter variable is set to the minimum of the
 * number of active PMCs and 2. The value of 2 is used in case an NMI does not
 * arrive at the LAPIC in time to be collapsed into an already pending NMI.
 */
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int active, handled;

	/*
	 * Obtain the active count before calling x86_pmu_handle_irq() since
	 * it is possible that x86_pmu_handle_irq() may make a counter
	 * inactive (through x86_pmu_stop).
	 */
	active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);

	/* Process any counter overflows */
	handled = x86_pmu_handle_irq(regs);

	/*
	 * If a counter was handled, record the number of possible remaining
	 * NMIs that can occur.
	 */
	if (handled) {
		this_cpu_write(perf_nmi_counter,
			       min_t(unsigned int, 2, active));

		return handled;
	}

	if (!this_cpu_read(perf_nmi_counter))
		return NMI_DONE;

	this_cpu_dec(perf_nmi_counter);

	return NMI_HANDLED;
}
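
/*
 * Worked scenario for the mitigation above: with three active counters,
 * two of which overflow almost together, one NMI may arrive after
 * x86_pmu_handle_irq() has already consumed both overflows. That first
 * (handled) NMI arms perf_nmi_counter = min(2, 3) = 2, so up to two
 * subsequent otherwise-unclaimed NMIs are swallowed as NMI_HANDLED rather
 * than triggering "unknown NMI" messages; any further NMI is passed on as
 * NMI_DONE.
 */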

static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event)
{
	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
		return &unconstrained;

	return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
		__amd_put_nb_event_constraints(cpuc, event);
}

PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
PMU_FORMAT_ATTR(umask,	"config:8-15"	);
PMU_FORMAT_ATTR(edge,	"config:18"	);
PMU_FORMAT_ATTR(inv,	"config:23"	);
PMU_FORMAT_ATTR(cmask,	"config:24-31"	);

static struct attribute *amd_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
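
/*
 * These format attributes are exported under
 * /sys/bus/event_source/devices/cpu/format/ and let tooling encode raw
 * hardware events symbolically. An illustrative invocation, using the
 * CPU-cycles event code from the maps above:
 *
 *   perf stat -e cpu/event=0x076,umask=0x00/ -- sleep 1
 */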

/* AMD Family 15h */
#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
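
/*
 * Sketch of how these ranges are consumed below: an event is first bucketed
 * by (event_code & AMD_EVENT_TYPE_MASK) and then special-cased by its full
 * event code. E.g. code 0x1D6 has type 0x0D0 (AMD_EVENT_DE) but appears in
 * the exception list of amd_get_event_constraints_f15h(), so it is allowed
 * on counters 0-5 (amd_f15_PMC50) instead of the 0-2 default for DE events.
 */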

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0A0	---
 * 0x0B0	---
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x031	LS	PERF_CTL[2:0] (**)
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	LS	PERF_CTL[2:0]
 * 0x046	LS	PERF_CTL[2:0]
 * 0x054	LS	PERF_CTL[2:0]
 * 0x055	LS	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1C0	EX	PERF_CTL[5:3]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
			       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		case 0x031:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				return &amd_f15_PMC20;
			return &emptyconstraint;
		case 0x1C0:
			return &amd_f15_PMC53;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* moved to uncore.c */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
		    (config & AMD64_EVENTSEL_EVENT) >> 24;

	return x86_event_sysfs_show(page, config, event);
}
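
/*
 * Illustration of the reassembly above: for config 0x01000000e9ULL the low
 * byte contributes 0xe9 and the AMD64 extension bits [35:32] shift down to
 * bits [11:8], so sysfs reports "event=0x1e9" instead of truncating the
 * 12-bit AMD event code to its low 8 bits.
 */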

static __initconst const struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.disable_all		= amd_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= amd_pmu_disable_event,
	.hw_config		= amd_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.addr_offset		= amd_pmu_addr_offset,
	.event_map		= amd_pmu_event_map,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= AMD64_NUM_COUNTERS,
	.cntval_bits		= 48,
	.cntval_mask		= (1ULL << 48) - 1,
	.apic			= 1,
	/* use highest bit to detect overflow */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= amd_get_event_constraints,
	.put_event_constraints	= amd_put_event_constraints,

	.format_attrs		= amd_format_attr,
	.events_sysfs_show	= amd_event_sysfs_show,

	.cpu_prepare		= amd_pmu_cpu_prepare,
	.cpu_starting		= amd_pmu_cpu_starting,
	.cpu_dead		= amd_pmu_cpu_dead,

	.amd_nb_constraints	= 1,
};

static int __init amd_core_pmu_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		return 0;

	switch (boot_cpu_data.x86) {
	case 0x15:
		pr_cont("Fam15h ");
		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
		break;
	case 0x17:
		pr_cont("Fam17h ");
		/*
		 * In family 17h, there are no event constraints in the PMC hardware.
		 * We fall back to using the default amd_get_event_constraints.
		 */
		break;
	case 0x18:
		pr_cont("Fam18h ");
		/* Using default amd_get_event_constraints. */
		break;
	default:
		pr_err("core perfctr but no constraints; unknown hardware!\n");
		return -ENODEV;
	}

	/*
	 * If core performance counter extensions exist, we must use
	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
	 * amd_pmu_addr_offset().
	 */
	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
	/*
	 * AMD Core perfctr has separate MSRs for the NB events, see
	 * the amd/uncore.c driver.
	 */
	x86_pmu.amd_nb_constraints = 0;

	pr_cont("core perfctr, ");
	return 0;
}

__init int amd_pmu_init(void)
{
	int ret;

	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	x86_pmu = amd_pmu;

	ret = amd_core_pmu_init();
	if (ret)
		return ret;

	if (num_possible_cpus() == 1) {
		/*
		 * No point in allocating data structures to serialize
		 * against other CPUs, when there is only the one CPU.
		 */
		x86_pmu.amd_nb_constraints = 0;
	}

	if (boot_cpu_data.x86 >= 0x17)
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
	else
		memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));

	return 0;
}

void amd_pmu_enable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	cpuc->perf_ctr_virt_mask = 0;

	/* Reload all events */
	amd_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * We only mask out the Host-only bit so that host-only counting works
	 * when SVM is disabled. If someone sets up a guest-only counter when
	 * SVM is disabled the Guest-only bit still gets set and the counter
	 * will not count anything.
	 */
	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

	/* Reload all events */
	amd_pmu_disable_all();
	x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);