// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

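/*
 * The hardware counters are 48 bits wide; shifting a raw value left by
 * COUNTER_SHIFT (64 - 48) and arithmetic-shifting it back sign-extends
 * it, so the delta computed in amd_uncore_read() stays correct across
 * counter wraparound.
 */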
#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

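/*
 * One amd_uncore instance is shared by every cpu behind the same
 * northbridge / last level cache: the per-cpu pointers of all sibling
 * cpus reference a single refcounted object, and only the designated
 * ->cpu reads and writes the counter MSRs.
 */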
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

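/*
 * Starting a counter rewrites the saved count (on PERF_EF_RELOAD) and sets
 * the enable bit in the control MSR; stopping writes the config back with
 * the enable bit clear and folds the final count into the event.
 */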
static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core, however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	/*
	 * SliceMask and ThreadMask need to be set for certain L3 events in
	 * Family 17h. For other events, the two fields do not affect the count.
	 */
	if (l3_mask)
		hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				     \
static ssize_t								     \
_dev##_show##_name(struct device *dev,					     \
		   struct device_attribute *attr,			     \
		   char *page)						     \
{									     \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			     \
	return sprintf(page, _format "\n");				     \
}									     \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						     \
static struct attribute *amd_uncore_format_attr_##_name[] = {		     \
	&format_attr_event_##_name.attr,				     \
	&format_attr_umask.attr,					     \
	NULL,								     \
};									     \
static struct attribute_group amd_uncore_format_group_##_name = {	     \
	.name = "format",						     \
	.attrs = amd_uncore_format_attr_##_name,			     \
};									     \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {   \
	&amd_uncore_attr_group,						     \
	&amd_uncore_format_group_##_name,				     \
	NULL,								     \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);

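/*
 * Both PMUs are counting-only: sampling and per-task use are rejected in
 * amd_uncore_event_init(), and privilege filtering is declined up front
 * via PERF_PMU_CAP_NO_EXCLUDE.
 */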
static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

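/* Allocate uncore state on the memory node that the cpu belongs to. */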
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

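/*
 * If another online cpu already carries an uncore with the same id, park
 * this cpu's freshly allocated copy on uncore_unused_list (it is freed
 * later by uncore_clean_online()) and share the sibling's instance.
 */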
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

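/* Reap parked duplicates, then mark the owning cpu active for its domain. */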
static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

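/*
 * The last cpu in a sharing domain to go offline drops the final
 * reference and frees the shared uncore instance.
 */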
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
		/*
		 * For F17h or F18h, the Northbridge counters are
		 * repurposed as Data Fabric counters. Also, L3
		 * counters are supported too. The PMUs are exported
		 * based on family as either L2 or L3 and NB or DF.
		 */
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L3;
		amd_nb_pmu.name		  = "amd_df";
		amd_llc_pmu.name	  = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
		l3_mask			  = true;
	} else {
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L2;
		amd_nb_pmu.name		  = "amd_nb";
		amd_llc_pmu.name	  = "amd_l2";
		format_attr_event_df	  = format_attr_event;
		format_attr_event_l3	  = format_attr_event;
		l3_mask			  = false;
	}

	amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("%s NB counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("%s LLC counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);
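
/*
 * Example usage from userspace, assuming a family 17h system where the
 * PMUs register as amd_df and amd_l3 (the event/umask values below are
 * placeholders; real encodings come from the family's PPR):
 *
 *   perf stat -a -e amd_df/event=0x07,umask=0x00/ -- sleep 1
 *   perf stat -a -e amd_l3/event=0x01,umask=0x80/ -- sleep 1
 */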