// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * In-Memory Collection (IMC) Performance Monitor counter support.
 *
 * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
 *           (C) 2017 Anju T Sudhakar, IBM Corporation.
 *           (C) 2017 Hemant K Shaw, IBM Corporation.
 */
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/opal.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
#include <linux/string.h>

/* Nest IMC data structures and variables */

/*
 * Used to avoid races in counting the nest-pmu units during hotplug
 * register and unregister.
 */
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
static struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;

/* Core IMC data structures and variables */

static cpumask_t core_imc_cpumask;
static struct imc_pmu_ref *core_imc_refc;
static struct imc_pmu *core_imc_pmu;

/* Thread IMC data structures and variables */

static DEFINE_PER_CPU(u64 *, thread_imc_mem);
static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;

/* Trace IMC data structures and variables */

static DEFINE_PER_CPU(u64 *, trace_imc_mem);
static struct imc_pmu_ref *trace_imc_refc;
static int trace_imc_mem_size;
static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct imc_pmu, pmu);
}

PMU_FORMAT_ATTR(event, "config:0-61");
PMU_FORMAT_ATTR(offset, "config:0-31");
PMU_FORMAT_ATTR(rvalue, "config:32");
PMU_FORMAT_ATTR(mode, "config:33-40");
static struct attribute *imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_offset.attr,
	&format_attr_rvalue.attr,
	&format_attr_mode.attr,
	NULL,
};

static struct attribute_group imc_format_group = {
	.name = "format",
	.attrs = imc_format_attrs,
};
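
/*
 * With the format strings above, perf tools encode an event entirely in
 * attr.config; e.g. a raw nest event could be requested as below (the PMU
 * name and offset are illustrative only, the real ones come from the
 * firmware device tree):
 *
 *	# perf stat -e 'nest_mcs01_imc/event=0x118/' -a sleep 1
 */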

/* Format attributes for imc trace-mode */
PMU_FORMAT_ATTR(cpmc_reserved, "config:0-19");
PMU_FORMAT_ATTR(cpmc_event, "config:20-27");
PMU_FORMAT_ATTR(cpmc_samplesel, "config:28-29");
PMU_FORMAT_ATTR(cpmc_load, "config:30-61");
static struct attribute *trace_imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_cpmc_reserved.attr,
	&format_attr_cpmc_event.attr,
	&format_attr_cpmc_samplesel.attr,
	&format_attr_cpmc_load.attr,
	NULL,
};

static struct attribute_group trace_imc_format_group = {
	.name = "format",
	.attrs = trace_imc_format_attrs,
};

/* Get the cpumask printed to a buffer "buf" */
static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
	cpumask_t *active_mask;

	switch (imc_pmu->domain) {
	case IMC_DOMAIN_NEST:
		active_mask = &nest_imc_cpumask;
		break;
	case IMC_DOMAIN_CORE:
		active_mask = &core_imc_cpumask;
		break;
	default:
		return 0;
	}

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL);

static struct attribute *imc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group imc_pmu_cpumask_attr_group = {
	.attrs = imc_pmu_cpumask_attrs,
};
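
/*
 * Userspace can read the designated collector CPUs from sysfs, e.g.
 * (path and output are illustrative; the PMU directory name depends on
 * the device tree):
 *
 *	# cat /sys/devices/nest_mcs01_imc/cpumask
 *	0
 */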

/* device_str_attr_create : Populate event "name" and string "str" in attribute */
static struct attribute *device_str_attr_create(const char *name, const char *str)
{
	struct perf_pmu_events_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;
	sysfs_attr_init(&attr->attr.attr);

	attr->event_str = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;

	return &attr->attr.attr;
}

static int imc_parse_event(struct device_node *np, const char *scale,
			   const char *unit, const char *prefix,
			   u32 base, struct imc_events *event)
{
	const char *s;
	u32 reg;

	if (of_property_read_u32(np, "reg", &reg))
		goto error;

	event->value = base + reg;

	if (of_property_read_string(np, "event-name", &s))
		goto error;

	event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s);
	if (!event->name)
		goto error;

	if (of_property_read_string(np, "scale", &s))
		s = scale;

	if (s) {
		event->scale = kstrdup(s, GFP_KERNEL);
		if (!event->scale)
			goto error;
	}

	if (of_property_read_string(np, "unit", &s))
		s = unit;

	if (s) {
		event->unit = kstrdup(s, GFP_KERNEL);
		if (!event->unit)
			goto error;
	}

	return 0;
error:
	kfree(event->unit);
	kfree(event->scale);
	kfree(event->name);
	return -EINVAL;
}
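
/*
 * For reference, imc_parse_event() consumes per-event child nodes of
 * roughly this shape (an illustrative device-tree fragment, not taken
 * from real firmware):
 *
 *	event@118 {
 *		event-name = "PM_EXAMPLE_RD_DISP";
 *		reg = <0x118>;
 *		scale = "1.2207e-4";
 *		unit = "MiB";
 *	};
 *
 * "reg" is added to the parent node's base offset to form event->value,
 * and missing "scale"/"unit" strings fall back to the parent's values.
 */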

/*
 * imc_free_events: Function to cleanup the events list, having
 *		    allocated memory using kfree.
 */
static void imc_free_events(struct imc_events *events, int nr_entries)
{
	int i;

	/* Nothing to clean, return */
	if (!events)
		return;
	for (i = 0; i < nr_entries; i++) {
		kfree(events[i].unit);
		kfree(events[i].scale);
		kfree(events[i].name);
	}

	kfree(events);
}

/*
 * update_events_in_group: Update the "events" information in an attr_group
 *                         and assign the attr_group to the pmu "pmu".
 */
static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
{
	struct attribute_group *attr_group;
	struct attribute **attrs, *dev_str;
	struct device_node *np, *pmu_events;
	u32 handle, base_reg;
	int i = 0, j = 0, ct, ret;
	const char *prefix, *g_scale, *g_unit;
	const char *ev_val_str, *ev_scale_str, *ev_unit_str;

	if (!of_property_read_u32(node, "events", &handle))
		pmu_events = of_find_node_by_phandle(handle);
	else
		return 0;

	/* Did not find any node with a given phandle */
	if (!pmu_events)
		return 0;

	/* Get a count of number of child nodes */
	ct = of_get_child_count(pmu_events);

	/* Get the event prefix */
	if (of_property_read_string(node, "events-prefix", &prefix))
		return 0;

	/* Get a global "scale" and "unit", if available */
	if (of_property_read_string(node, "scale", &g_scale))
		g_scale = NULL;

	if (of_property_read_string(node, "unit", &g_unit))
		g_unit = NULL;

	/* "reg" property gives out the base offset of the counters data */
	of_property_read_u32(node, "reg", &base_reg);

	/* Allocate memory for the events */
	pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
	if (!pmu->events)
		return -ENOMEM;

	ct = 0;
	/* Parse the events and update the struct */
	for_each_child_of_node(pmu_events, np) {
		ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]);
		if (!ret)
			ct++;
	}

	/* Allocate memory for attribute group */
	attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
	if (!attr_group) {
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	}

	/*
	 * Allocate memory for attributes.
	 * Since we have a count of events for this pmu, we also allocate
	 * memory for the attribute array for the pmu.
	 *
	 * Each event can carry three attributes ("<name>", "<name>.scale"
	 * and "<name>.unit"), so allocate (ct * 3) pointers plus one for
	 * the terminating NULL.
	 */
	attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL);
	if (!attrs) {
		kfree(attr_group);
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	}

	attr_group->name = "events";
	attr_group->attrs = attrs;
	do {
		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
		dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
		if (!dev_str)
			continue;

		attrs[j++] = dev_str;
		if (pmu->events[i].scale) {
			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}

		if (pmu->events[i].unit) {
			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}
	} while (++i < ct);

	/* Save the event attribute */
	pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;

	return 0;
}
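
/*
 * The group built above appears under /sys/devices/<pmu>/events/: each
 * event gets a "<name>" file whose contents ("event=0x<offset>") perf
 * tools parse back into attr.config, plus optional "<name>.scale" and
 * "<name>.unit" files used to post-process the raw count.
 */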

/* get_nest_pmu_ref: Return the imc_pmu_ref struct for the given CPU */
static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
{
	return per_cpu(local_nest_imc_refc, cpu);
}

static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
	struct imc_pmu **pn = per_nest_pmu_arr;

	if (old_cpu < 0 || new_cpu < 0)
		return;

	while (*pn) {
		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
		pn++;
	}
}

static int ppc_nest_imc_cpu_offline(unsigned int cpu)
{
	int nid, target = -1;
	const struct cpumask *l_cpumask;
	struct imc_pmu_ref *ref;

	/*
	 * Check in the designated list for this cpu. Don't bother
	 * if it is not one of them.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
		return 0;

	/*
	 * Check whether nest_imc is registered. We could end up here if the
	 * cpuhotplug callback registration fails, i.e. the callback invokes
	 * the offline path for all successfully registered cpus. At this
	 * stage, the nest_imc pmu will not be registered and we should
	 * return here.
	 *
	 * We return with a zero since this is not an offline failure. And
	 * cpuhp_setup_state() returns the actual failure reason to the
	 * caller, which in turn will call the cleanup routine.
	 */
	if (!nest_pmus)
		return 0;

	/*
	 * Now that this cpu is one of the designated,
	 * find a next cpu a) which is online and b) in the same chip.
	 */
	nid = cpu_to_node(cpu);
	l_cpumask = cpumask_of_node(nid);
	target = cpumask_last(l_cpumask);

	/*
	 * If this (target) is the last cpu in the cpumask for this chip,
	 * check for any possible online cpu in the chip.
	 */
	if (unlikely(target == cpu))
		target = cpumask_any_but(l_cpumask, cpu);

	/*
	 * Update the cpumask with the target cpu and
	 * migrate the context if needed.
	 */
	if (target >= 0 && target < nr_cpu_ids) {
		cpumask_set_cpu(target, &nest_imc_cpumask);
		nest_change_cpu_context(cpu, target);
	} else {
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
		/*
		 * If this is the last cpu in this chip then, skip the
		 * reference count mutex lock and make the reference
		 * count on this chip zero.
		 */
		ref = get_nest_pmu_ref(cpu);
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int ppc_nest_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int res;

	/* Get the cpumask of this node */
	l_cpumask = cpumask_of_node(cpu_to_node(cpu));

	/*
	 * If this is not the first online CPU on this node, then
	 * just return.
	 */
	if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask))
		return 0;

	/*
	 * If this is the first online cpu on this node,
	 * disable the nest counters by making an OPAL call.
	 */
	res = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				     get_hard_smp_processor_id(cpu));
	if (res)
		return res;

	/* Make this CPU the designated target for counter collection */
	cpumask_set_cpu(cpu, &nest_imc_cpumask);
	return 0;
}

static int nest_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
				 "perf/powerpc/imc:online",
				 ppc_nest_imc_cpu_online,
				 ppc_nest_imc_cpu_offline);
}

static void nest_imc_counters_release(struct perf_event *event)
{
	int rc, node_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;

	node_id = cpu_to_node(event->cpu);

	/*
	 * See if we need to disable the nest PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * enable or disable the nest counters.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return;

	/* Take the mutex lock for this node and then decrement the reference count */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is, when a perf session is
		 * started, followed by offlining of all cpus in a given node.
		 *
		 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
		 * sets ref->refc to zero if the cpu which is about to go
		 * offline is the last cpu in a given node, and makes an OPAL
		 * call to disable the engine in that node.
		 */
		mutex_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to stop the counters for node %d\n", node_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "nest-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

static int nest_imc_event_init(struct perf_event *event)
{
	int chip_id, rc, node_id;
	u32 l_config, config = event->attr.config;
	struct imc_mem_info *pcni;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;
	bool flag = false;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	/*
	 * Nest HW counter memory resides in a per-chip reserved memory.
	 * Get the base memory address for this cpu's chip.
	 */
	chip_id = cpu_to_chip_id(event->cpu);

	/* Return, if chip_id is not valid */
	if (chip_id < 0)
		return -ENODEV;

	pcni = pmu->mem_info;
	do {
		if (pcni->id == chip_id) {
			flag = true;
			break;
		}
		pcni++;
	} while (pcni->vbase != 0);

	if (!flag)
		return -ENODEV;

	/*
	 * Add the event offset to the base address.
	 */
	l_config = config & IMC_EVENT_OFFSET_MASK;
	event->hw.event_base = (u64)pcni->vbase + l_config;
	node_id = cpu_to_node(event->cpu);

	/*
	 * Get the imc_pmu_ref struct for this node.
	 * Take the mutex lock and then increment the count of nest pmu events
	 * inited.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to start the counters for node %d\n",
			       node_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->destroy = nest_imc_counters_release;
	return 0;
}
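
/*
 * Example of the event_base computation above: if a chip's nest counter
 * region is mapped at pcni->vbase and attr.config carries offset 0x118,
 * then event->hw.event_base = vbase + 0x118 and imc_read_counter() simply
 * dereferences that address (the offset value is illustrative).
 */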

/*
 * core_imc_mem_init : Initializes memory for the current core.
 *
 * Uses alloc_pages_node() and uses the returned address as an argument to
 * an opal call to configure the pdbar. The address sent as an argument is
 * converted to a physical address.
 */
static int core_imc_mem_init(int cpu, int size)
{
	int nid, rc = 0, core_id = (cpu / threads_per_core);
	struct imc_mem_info *mem_info;

	/*
	 * alloc_pages_node() will allocate memory for this core in the
	 * local node only.
	 */
	nid = cpu_to_node(cpu);
	mem_info = &core_imc_pmu->mem_info[core_id];
	mem_info->id = core_id;

	/* We need only vbase for core counters */
	mem_info->vbase = page_address(alloc_pages_node(nid,
					  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
					  __GFP_NOWARN, get_order(size)));
	if (!mem_info->vbase)
		return -ENOMEM;

	/* Init the mutex */
	core_imc_refc[core_id].id = core_id;
	mutex_init(&core_imc_refc[core_id].lock);

	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
				    __pa((void *)mem_info->vbase),
				    get_hard_smp_processor_id(cpu));
	if (rc) {
		free_pages((u64)mem_info->vbase, get_order(size));
		mem_info->vbase = NULL;
	}

	return rc;
}

static bool is_core_imc_mem_inited(int cpu)
{
	struct imc_mem_info *mem_info;
	int core_id = (cpu / threads_per_core);

	mem_info = &core_imc_pmu->mem_info[core_id];
	if (!mem_info->vbase)
		return false;

	return true;
}

static int ppc_core_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int ret = 0;

	/* Get the cpumask for this core */
	l_cpumask = cpu_sibling_mask(cpu);

	/* If a cpu for this core is already set, then don't do anything */
	if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))
		return 0;

	if (!is_core_imc_mem_inited(cpu)) {
		ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
		if (ret) {
			pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
			return ret;
		}
	}

	/* Make this cpu the designated collector for this core */
	cpumask_set_cpu(cpu, &core_imc_cpumask);
	return 0;
}

static int ppc_core_imc_cpu_offline(unsigned int cpu)
{
	unsigned int core_id;
	int ncpu;
	struct imc_pmu_ref *ref;

	/*
	 * Clear this cpu out of the mask; if it is not present in the mask,
	 * don't bother doing anything.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
		return 0;

	/*
	 * Check whether core_imc is registered. We could end up here
	 * if the cpuhotplug callback registration fails, i.e. the callback
	 * invokes the offline path for all successfully registered cpus.
	 * At this stage, the core_imc pmu will not be registered and we
	 * should return here.
	 *
	 * We return with a zero since this is not an offline failure.
	 * And cpuhp_setup_state() returns the actual failure reason
	 * to the caller, which in turn will call the cleanup routine.
	 */
	if (!core_imc_pmu->pmu.event_init)
		return 0;

	/* Find any online cpu in that core except the current "cpu" */
	ncpu = cpumask_last(cpu_sibling_mask(cpu));

	if (unlikely(ncpu == cpu))
		ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);

	if (ncpu >= 0 && ncpu < nr_cpu_ids) {
		cpumask_set_cpu(ncpu, &core_imc_cpumask);
		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
	} else {
		/*
		 * If this is the last cpu in this core then, skip taking the
		 * reference count mutex lock for this core and directly zero
		 * "refc" for this core.
		 */
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
				       get_hard_smp_processor_id(cpu));
		core_id = cpu / threads_per_core;
		ref = &core_imc_refc[core_id];
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int core_imc_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
				 "perf/powerpc/imc_core:online",
				 ppc_core_imc_cpu_online,
				 ppc_core_imc_cpu_offline);
}

static void core_imc_counters_release(struct perf_event *event)
{
	int rc, core_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;

	/*
	 * See if we need to disable the IMC PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * enable or disable the core counters.
	 */
	core_id = event->cpu / threads_per_core;

	/* Take the mutex lock and decrement the reference count for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is, when a perf session is
		 * started, followed by offlining of all cpus in a given core.
		 *
		 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
		 * sets ref->refc to zero if the cpu which is about to go
		 * offline is the last cpu in a given core, and makes an OPAL
		 * call to disable the engine in that core.
		 */
		mutex_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "core-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

static int core_imc_event_init(struct perf_event *event)
{
	int core_id, rc;
	u64 config = event->attr.config;
	struct imc_mem_info *pcmi;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	if (!is_core_imc_mem_inited(event->cpu))
		return -ENODEV;

	core_id = event->cpu / threads_per_core;
	pcmi = &core_imc_pmu->mem_info[core_id];
	if (!pcmi->vbase)
		return -ENODEV;

	/* Get the core_imc mutex for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	/*
	 * Core pmu units are enabled only when they are used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the core counters.
	 * If not, just increment the count in core_imc_refc struct.
	 */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("core-imc: Unable to start the counters for core %d\n",
			       core_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
	event->destroy = core_imc_counters_release;
	return 0;
}

/*
 * Allocate a page of memory for each of the online cpus, and load
 * LDBAR with 0.
 *
 * The Accumulation Mode of Counting (AMC) bits in LDBAR are kept at 0 for
 * thread imc; beyond that, LDBAR carries an enable/disable bit, mode and
 * scope bits, and the address of the per-thread counter page.
 */
static int thread_imc_mem_alloc(int cpu_id, int size)
{
	u64 *local_mem = per_cpu(thread_imc_mem, cpu_id);
	int nid = cpu_to_node(cpu_id);

	if (!local_mem) {
		/*
		 * This case could happen only once at start, since we don't
		 * free the memory in the cpu offline path.
		 */
		local_mem = page_address(alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				  __GFP_NOWARN, get_order(size)));
		if (!local_mem)
			return -ENOMEM;

		per_cpu(thread_imc_mem, cpu_id) = local_mem;
	}

	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int ppc_thread_imc_cpu_online(unsigned int cpu)
{
	return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
}

static int ppc_thread_imc_cpu_offline(unsigned int cpu)
{
	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int thread_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
				 "perf/powerpc/imc_thread:online",
				 ppc_thread_imc_cpu_online,
				 ppc_thread_imc_cpu_offline);
}

static int thread_imc_event_init(struct perf_event *event)
{
	u32 config = event->attr.config;
	struct task_struct *target;
	struct imc_pmu *pmu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config offset */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	target = event->hw.target;
	if (!target)
		return -EINVAL;

	event->pmu->task_ctx_nr = perf_sw_context;
	return 0;
}

static bool is_thread_imc_pmu(struct perf_event *event)
{
	if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
		return true;

	return false;
}

static u64 *get_event_base_addr(struct perf_event *event)
{
	u64 addr;

	if (is_thread_imc_pmu(event)) {
		addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
		return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
	}

	return (u64 *)event->hw.event_base;
}

static void thread_imc_pmu_start_txn(struct pmu *pmu,
				     unsigned int txn_flags)
{
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;
	perf_pmu_disable(pmu);
}

static void thread_imc_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

static int thread_imc_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

static u64 imc_read_counter(struct perf_event *event)
{
	u64 *addr, data;

	/*
	 * In-Memory Collection (IMC) counters are free flowing counters.
	 * So we take a snapshot of the counter value on enable and save it
	 * to calculate the delta at a later stage to present the event
	 * counter value.
	 */
	addr = get_event_base_addr(event);
	data = be64_to_cpu(READ_ONCE(*addr));
	local64_set(&event->hw.prev_count, data);

	return data;
}

static void imc_event_update(struct perf_event *event)
{
	u64 counter_prev, counter_new, final_count;

	counter_prev = local64_read(&event->hw.prev_count);
	counter_new = imc_read_counter(event);
	final_count = counter_new - counter_prev;

	/* Update the delta to the event count */
	local64_add(final_count, &event->count);
}
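
/*
 * Worked example of the delta logic above: if prev_count held 0x1000 and
 * the microcode has since advanced the in-memory counter to 0x1640, the
 * update adds 0x640 to event->count, and prev_count becomes 0x1640 for
 * the next read (values illustrative).
 */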

static void imc_event_start(struct perf_event *event, int flags)
{
	/*
	 * In-Memory Collection (IMC) counters are free flowing counters.
	 * So we take a snapshot of the counter value on enable and save it
	 * to calculate the delta at a later stage to present the event
	 * counter value.
	 */
	imc_read_counter(event);
}

static void imc_event_stop(struct perf_event *event, int flags)
{
	/*
	 * Take a snapshot, calculate the delta and update
	 * the event counter value.
	 */
	imc_event_update(event);
}

static int imc_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	return 0;
}

static int thread_imc_event_add(struct perf_event *event, int flags)
{
	int core_id;
	struct imc_pmu_ref *ref;
	u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, smp_processor_id());

	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	if (!is_core_imc_mem_inited(smp_processor_id()))
		return -EINVAL;

	core_id = smp_processor_id() / threads_per_core;
	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;
	mtspr(SPRN_LDBAR, ldbar_value);

	/*
	 * imc pmus are enabled only when they are used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the counters.
	 * If not, just increment the count in the ref count struct.
	 */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("thread-imc: Unable to start the counter for core %d\n",
			       core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);
	return 0;
}
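
/*
 * Example of the LDBAR value programmed above: with the per-thread page at
 * 0x0000200010e40000 (illustrative), ldbar_value is that address masked by
 * THREAD_IMC_LDBAR_MASK with THREAD_IMC_ENABLE or'ed in, i.e. the counter
 * address field plus the enable bit, accumulation mode left at 0.
 */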

static void thread_imc_event_del(struct perf_event *event, int flags)
{
	int core_id;
	struct imc_pmu_ref *ref;

	mtspr(SPRN_LDBAR, 0);

	core_id = smp_processor_id() / threads_per_core;
	ref = &core_imc_refc[core_id];

	mutex_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("thread-imc: Unable to stop the counters for core %d\n",
			       core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);

	/*
	 * Take a snapshot, calculate the delta and update
	 * the event counter value.
	 */
	imc_event_update(event);
}

/*
 * Allocate a page of memory for each cpu, and load LDBAR with 0.
 */
static int trace_imc_mem_alloc(int cpu_id, int size)
{
	u64 *local_mem = per_cpu(trace_imc_mem, cpu_id);
	int phys_id = cpu_to_node(cpu_id), rc = 0;
	int core_id = (cpu_id / threads_per_core);

	if (!local_mem) {
		local_mem = page_address(alloc_pages_node(phys_id,
					GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
					__GFP_NOWARN, get_order(size)));
		if (!local_mem)
			return -ENOMEM;
		per_cpu(trace_imc_mem, cpu_id) = local_mem;

		/* Initialise the counters for trace mode */
		rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_TRACE, __pa((void *)local_mem),
					    get_hard_smp_processor_id(cpu_id));
		if (rc) {
			pr_info("IMC: opal init failed for trace imc\n");
			return rc;
		}
	}

	/* Init the mutex, if not already */
	trace_imc_refc[core_id].id = core_id;
	mutex_init(&trace_imc_refc[core_id].lock);

	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int ppc_trace_imc_cpu_online(unsigned int cpu)
{
	return trace_imc_mem_alloc(cpu, trace_imc_mem_size);
}

static int ppc_trace_imc_cpu_offline(unsigned int cpu)
{
	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int trace_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
				 "perf/powerpc/imc_trace:online",
				 ppc_trace_imc_cpu_online,
				 ppc_trace_imc_cpu_offline);
}

static u64 get_trace_imc_event_base_addr(void)
{
	return (u64)per_cpu(trace_imc_mem, smp_processor_id());
}

/*
 * Function to parse the trace-imc data obtained
 * and to prepare the perf sample.
 */
static int trace_imc_prepare_sample(struct trace_imc_data *mem,
				    struct perf_sample_data *data,
				    u64 *prev_tb,
				    struct perf_event_header *header,
				    struct perf_event *event)
{
	/* Sanity checks for a valid record */
	if (be64_to_cpu(READ_ONCE(mem->tb1)) > *prev_tb)
		*prev_tb = be64_to_cpu(READ_ONCE(mem->tb1));
	else
		return -EINVAL;

	if ((be64_to_cpu(READ_ONCE(mem->tb1)) & IMC_TRACE_RECORD_TB1_MASK) !=
	    be64_to_cpu(READ_ONCE(mem->tb2)))
		return -EINVAL;

	/* Prepare perf sample */
	data->ip = be64_to_cpu(READ_ONCE(mem->ip));
	data->period = event->hw.last_period;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;
	header->misc = 0;

	if (is_kernel_addr(data->ip))
		header->misc |= PERF_RECORD_MISC_KERNEL;
	else
		header->misc |= PERF_RECORD_MISC_USER;

	perf_event_header__init_id(header, data, event);

	return 0;
}
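
/*
 * A record is treated as valid only when its tb1 timestamp moves forward
 * relative to the previous record and the redundant tb2 copy matches tb1
 * under IMC_TRACE_RECORD_TB1_MASK; the first record failing these checks
 * terminates the buffer scan in dump_trace_imc_data() below.
 */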

static void dump_trace_imc_data(struct perf_event *event)
{
	struct trace_imc_data *mem;
	int i, ret;
	u64 prev_tb = 0;

	mem = (struct trace_imc_data *)get_trace_imc_event_base_addr();
	for (i = 0; i < (trace_imc_mem_size / sizeof(struct trace_imc_data));
		i++, mem++) {
		struct perf_sample_data data;
		struct perf_event_header header;

		ret = trace_imc_prepare_sample(mem, &data, &prev_tb, &header, event);
		if (ret) /* Exit, if not a valid record */
			break;
		else {
			/* If this is a valid record, create the sample */
			struct perf_output_handle handle;

			if (perf_output_begin(&handle, event, header.size))
				return;

			perf_output_sample(&handle, &header, &data, event);
			perf_output_end(&handle);
		}
	}
}

static int trace_imc_event_add(struct perf_event *event, int flags)
{
	int core_id = smp_processor_id() / threads_per_core;
	struct imc_pmu_ref *ref = NULL;
	u64 local_mem, ldbar_value;

	/* Set trace-imc bit in ldbar and load ldbar with per-thread memory address */
	local_mem = get_trace_imc_event_base_addr();
	ldbar_value = (local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;

	if (core_imc_refc)
		ref = &core_imc_refc[core_id];
	if (!ref) {
		/* If core-imc is not enabled, use trace-imc reference count */
		if (trace_imc_refc)
			ref = &trace_imc_refc[core_id];
		if (!ref)
			return -EINVAL;
	}
	mtspr(SPRN_LDBAR, ldbar_value);
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
			mtspr(SPRN_LDBAR, 0);
			return -EINVAL;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	return 0;
}

static void trace_imc_event_read(struct perf_event *event)
{
}

static void trace_imc_event_stop(struct perf_event *event, int flags)
{
	u64 local_mem = get_trace_imc_event_base_addr();

	dump_trace_imc_data(event);
	memset((void *)local_mem, 0, sizeof(u64));
}

static void trace_imc_event_start(struct perf_event *event, int flags)
{
}

static void trace_imc_event_del(struct perf_event *event, int flags)
{
	int core_id = smp_processor_id() / threads_per_core;
	struct imc_pmu_ref *ref = NULL;

	if (core_imc_refc)
		ref = &core_imc_refc[core_id];
	if (!ref) {
		/* If core-imc is not enabled, use trace-imc reference count */
		if (trace_imc_refc)
			ref = &trace_imc_refc[core_id];
		if (!ref)
			return;
	}
	mtspr(SPRN_LDBAR, 0);
	mutex_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);

	trace_imc_event_stop(event, flags);
}

static int trace_imc_event_init(struct perf_event *event)
{
	struct task_struct *target;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Return if this is counting mode; trace-imc does not support it */
	if (event->attr.sample_period == 0)
		return -ENOENT;

	event->hw.idx = -1;
	target = event->hw.target;

	event->pmu->task_ctx_nr = perf_hw_context;
	return 0;
}

/* update_pmu_ops : Populate the appropriate operations for "pmu" */
static int update_pmu_ops(struct imc_pmu *pmu)
{
	pmu->pmu.task_ctx_nr = perf_invalid_context;
	pmu->pmu.add = imc_event_add;
	pmu->pmu.del = imc_event_stop;
	pmu->pmu.start = imc_event_start;
	pmu->pmu.stop = imc_event_stop;
	pmu->pmu.read = imc_event_update;
	pmu->pmu.attr_groups = pmu->attr_groups;
	pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;

	switch (pmu->domain) {
	case IMC_DOMAIN_NEST:
		pmu->pmu.event_init = nest_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_CORE:
		pmu->pmu.event_init = core_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_THREAD:
		pmu->pmu.event_init = thread_imc_event_init;
		pmu->pmu.add = thread_imc_event_add;
		pmu->pmu.del = thread_imc_event_del;
		pmu->pmu.start_txn = thread_imc_pmu_start_txn;
		pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
		pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
		break;
	case IMC_DOMAIN_TRACE:
		pmu->pmu.event_init = trace_imc_event_init;
		pmu->pmu.add = trace_imc_event_add;
		pmu->pmu.del = trace_imc_event_del;
		pmu->pmu.start = trace_imc_event_start;
		pmu->pmu.stop = trace_imc_event_stop;
		pmu->pmu.read = trace_imc_event_read;
		pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group;
		break;
	default:
		break;
	}

	return 0;
}
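
/*
 * Note on scheduling contexts: the nest and core domains keep
 * perf_invalid_context (system-wide counting only), while the thread and
 * trace domains override task_ctx_nr at event_init time so their events
 * can follow a task.
 */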

/* init_nest_pmu_ref: Initialize the imc_pmu_ref struct for all the nodes */
static int init_nest_pmu_ref(void)
{
	int nid, i, cpu;

	nest_imc_refc = kcalloc(num_possible_nodes(), sizeof(*nest_imc_refc),
				GFP_KERNEL);

	if (!nest_imc_refc)
		return -ENOMEM;

	i = 0;
	for_each_node(nid) {
		/*
		 * Mutex lock to avoid races while tracking the number of
		 * sessions using the chip's nest pmu units.
		 */
		mutex_init(&nest_imc_refc[i].lock);

		/*
		 * Loop to init the "id" with the node_id. Variable "i" is
		 * initialized to 0 and used as an index to the array. "i"
		 * will not go off the end of the array, since
		 * "for_each_node" loops over possible nodes only.
		 */
		nest_imc_refc[i++].id = nid;
	}

	/*
	 * Loop to init the per_cpu "local_nest_imc_refc" with the proper
	 * "nest_imc_refc" index.
	 */
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		for (i = 0; i < num_possible_nodes(); i++) {
			if (nest_imc_refc[i].id == nid) {
				per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
				break;
			}
		}
	}
	return 0;
}

static void cleanup_all_core_imc_memory(void)
{
	int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
	struct imc_mem_info *ptr = core_imc_pmu->mem_info;
	int size = core_imc_pmu->counter_mem_size;

	/* mem_info will never be NULL */
	for (i = 0; i < nr_cores; i++) {
		if (ptr[i].vbase)
			free_pages((u64)ptr[i].vbase, get_order(size));
	}

	kfree(ptr);
	kfree(core_imc_refc);
}

static void thread_imc_ldbar_disable(void *dummy)
{
	/*
	 * By writing 0 to LDBAR, we disable thread-imc
	 * updates to the memory.
	 */
	mtspr(SPRN_LDBAR, 0);
}

void thread_imc_disable(void)
{
	on_each_cpu(thread_imc_ldbar_disable, NULL, 1);
}

static void cleanup_all_thread_imc_memory(void)
{
	int i, order = get_order(thread_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(thread_imc_mem, i))
			free_pages((u64)per_cpu(thread_imc_mem, i), order);
	}
}

static void cleanup_all_trace_imc_memory(void)
{
	int i, order = get_order(trace_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(trace_imc_mem, i))
			free_pages((u64)per_cpu(trace_imc_mem, i), order);
	}
	kfree(trace_imc_refc);
}

/* Function to free the attr_groups which are dynamically allocated */
static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
}

/*
 * Common function to unregister the cpu hotplug callback and
 * free the memory.
 * TODO: Need to handle pmu unregistering, which will be
 * done in a followup series.
 */
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 1) {
			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
			kfree(nest_imc_refc);
			kfree(per_nest_pmu_arr);
			per_nest_pmu_arr = NULL;
		}

		if (nest_pmus > 0)
			nest_pmus--;
		mutex_unlock(&nest_init_lock);
	}

	/* Free core_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);
		cleanup_all_core_imc_memory();
	}

	/* Free thread_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
		cleanup_all_thread_imc_memory();
	}

	/* Free trace_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_TRACE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE);
		cleanup_all_trace_imc_memory();
	}
}

/*
 * Function to unregister thread-imc if core-imc
 * is not registered.
 */
void unregister_thread_imc(void)
{
	imc_common_cpuhp_mem_free(thread_imc_pmu);
	imc_common_mem_free(thread_imc_pmu);
	perf_pmu_unregister(&thread_imc_pmu->pmu);
}

/*
 * imc_mem_init : Function to support memory allocation for core imc.
 */
static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
			int pmu_index)
{
	const char *s;
	int nr_cores, cpu, res = -ENOMEM;

	if (of_property_read_string(parent, "name", &s))
		return -ENODEV;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
		if (!pmu_ptr->pmu.name)
			goto err;

		/* Needed for hotplug/migration */
		if (!per_nest_pmu_arr) {
			per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
						   sizeof(struct imc_pmu *),
						   GFP_KERNEL);
			if (!per_nest_pmu_arr)
				goto err;
		}
		per_nest_pmu_arr[pmu_index] = pmu_ptr;
		break;
	case IMC_DOMAIN_CORE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
		pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
					    GFP_KERNEL);

		if (!pmu_ptr->mem_info)
			goto err;

		core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
					GFP_KERNEL);

		if (!core_imc_refc) {
			kfree(pmu_ptr->mem_info);
			goto err;
		}

		core_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_THREAD:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		thread_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
			if (res) {
				cleanup_all_thread_imc_memory();
				goto err;
			}
		}

		thread_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_TRACE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;

		nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
		trace_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
					 GFP_KERNEL);
		if (!trace_imc_refc)
			return -ENOMEM;

		trace_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = trace_imc_mem_alloc(cpu, trace_imc_mem_size);
			if (res) {
				cleanup_all_trace_imc_memory();
				goto err;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
err:
	return res;
}

/*
 * init_imc_pmu : Setup and register the IMC pmu device.
 *
 * @parent:	Device tree unit node
 * @pmu_ptr:	memory allocated for this pmu
 * @pmu_idx:	Counter index
 *
 * init_imc_pmu() sets up the pmu cpumask and registers for a cpu hotplug
 * callback. Function to set up instrumentation and register the pmu events
 * with the perf interface.
 */
int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_idx)
{
	int ret;

	ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
	if (ret)
		goto err_free_mem;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/*
		 * Nest imc pmus need only one cpu per chip; we initialize
		 * the cpumask for the first nest imc pmu and use the same
		 * for the rest. To handle the cpuhotplug callback
		 * unregister, we track the number of nest pmus in
		 * "nest_pmus".
		 */
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 0) {
			ret = init_nest_pmu_ref();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				kfree(per_nest_pmu_arr);
				per_nest_pmu_arr = NULL;
				goto err_free_mem;
			}

			/* Register for cpu hotplug notification. */
			ret = nest_pmu_cpumask_init();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				kfree(nest_imc_refc);
				kfree(per_nest_pmu_arr);
				per_nest_pmu_arr = NULL;
				goto err_free_mem;
			}
		}
		nest_pmus++;
		mutex_unlock(&nest_init_lock);
		break;
	case IMC_DOMAIN_CORE:
		ret = core_imc_pmu_cpumask_init();
		if (ret) {
			cleanup_all_core_imc_memory();
			goto err_free_mem;
		}

		break;
	case IMC_DOMAIN_THREAD:
		ret = thread_imc_cpu_init();
		if (ret) {
			cleanup_all_thread_imc_memory();
			goto err_free_mem;
		}

		break;
	case IMC_DOMAIN_TRACE:
		ret = trace_imc_cpu_init();
		if (ret) {
			cleanup_all_trace_imc_memory();
			goto err_free_mem;
		}

		break;
	default:
		return -EINVAL;	/* Unknown domain */
	}

	ret = update_events_in_group(parent, pmu_ptr);
	if (ret)
		goto err_free_cpuhp_mem;

	ret = update_pmu_ops(pmu_ptr);
	if (ret)
		goto err_free_cpuhp_mem;

	ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
	if (ret)
		goto err_free_cpuhp_mem;

	pr_debug("%s performance monitor hardware support registered\n",
		 pmu_ptr->pmu.name);

	return 0;

err_free_cpuhp_mem:
	imc_common_cpuhp_mem_free(pmu_ptr);
err_free_mem:
	imc_common_mem_free(pmu_ptr);
	return ret;
}
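
/*
 * init_imc_pmu() is called by the platform probe code once per IMC device
 * node found in the firmware device tree; each successful call surfaces a
 * perf PMU such as "nest_<unit>_imc", "core_imc", "thread_imc" or
 * "trace_imc" that can be listed with "perf list" (names shown to
 * illustrate the naming scheme built in imc_mem_init()).
 */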