/*
 * In-Memory Collection (IMC) Performance Monitor counter support.
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/opal.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
#include <linux/string.h>
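
/*
 * IMC counters are free-running counters that the hardware/microcode
 * updates in reserved memory. This driver exposes them through four PMU
 * domains: nest (per-chip), core (per-core), thread (per-cpu accumulation)
 * and trace (per-cpu sampling).
 */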

/* Nest IMC data structures and variables */

/*
 * Used to avoid races in counting the nest-pmu units during hotplug
 * register and unregister.
 */
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
static struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;

/* Core IMC data structures and variables */

static cpumask_t core_imc_cpumask;
static struct imc_pmu_ref *core_imc_refc;
static struct imc_pmu *core_imc_pmu;

/* Thread IMC data structures and variables */

static DEFINE_PER_CPU(u64 *, thread_imc_mem);
static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;

/* Trace IMC data structures and variables */

static DEFINE_PER_CPU(u64 *, trace_imc_mem);
static struct imc_pmu_ref *trace_imc_refc;
static int trace_imc_mem_size;

static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct imc_pmu, pmu);
}

PMU_FORMAT_ATTR(event, "config:0-61");
PMU_FORMAT_ATTR(offset, "config:0-31");
PMU_FORMAT_ATTR(rvalue, "config:32");
PMU_FORMAT_ATTR(mode, "config:33-40");
static struct attribute *imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_offset.attr,
	&format_attr_rvalue.attr,
	&format_attr_mode.attr,
	NULL,
};

static struct attribute_group imc_format_group = {
	.name = "format",
	.attrs = imc_format_attrs,
};
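
/* Format attributes for the imc trace-mode PMU */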
PMU_FORMAT_ATTR(cpmc_reserved, "config:0-19");
PMU_FORMAT_ATTR(cpmc_event, "config:20-27");
PMU_FORMAT_ATTR(cpmc_samplesel, "config:28-29");
PMU_FORMAT_ATTR(cpmc_load, "config:30-61");
static struct attribute *trace_imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_cpmc_reserved.attr,
	&format_attr_cpmc_event.attr,
	&format_attr_cpmc_samplesel.attr,
	&format_attr_cpmc_load.attr,
	NULL,
};

static struct attribute_group trace_imc_format_group = {
	.name = "format",
	.attrs = trace_imc_format_attrs,
};
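
/* Get the cpumask printed to a buffer "buf" */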
static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
	cpumask_t *active_mask;

	switch (imc_pmu->domain) {
	case IMC_DOMAIN_NEST:
		active_mask = &nest_imc_cpumask;
		break;
	case IMC_DOMAIN_CORE:
		active_mask = &core_imc_cpumask;
		break;
	default:
		return 0;
	}

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL);

static struct attribute *imc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group imc_pmu_cpumask_attr_group = {
	.attrs = imc_pmu_cpumask_attrs,
};
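
/* device_str_attr_create : Populate event "name" and string "str" in attribute */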
static struct attribute *device_str_attr_create(const char *name, const char *str)
{
	struct perf_pmu_events_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;
	sysfs_attr_init(&attr->attr.attr);

	attr->event_str = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;

	return &attr->attr.attr;
}
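
/*
 * imc_parse_event : Parse one "event" child node: read its "reg" offset and
 * "event-name", and pick up "scale"/"unit" from the node itself or fall back
 * to the PMU-wide defaults passed in by the caller.
 */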
static int imc_parse_event(struct device_node *np, const char *scale,
			   const char *unit, const char *prefix,
			   u32 base, struct imc_events *event)
{
	const char *s;
	u32 reg;

	if (of_property_read_u32(np, "reg", &reg))
		goto error;
	/* Add the base_reg value to the "reg" */
	event->value = base + reg;

	if (of_property_read_string(np, "event-name", &s))
		goto error;

	event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s);
	if (!event->name)
		goto error;

	if (of_property_read_string(np, "scale", &s))
		s = scale;

	if (s) {
		event->scale = kstrdup(s, GFP_KERNEL);
		if (!event->scale)
			goto error;
	}

	if (of_property_read_string(np, "unit", &s))
		s = unit;

	if (s) {
		event->unit = kstrdup(s, GFP_KERNEL);
		if (!event->unit)
			goto error;
	}

	return 0;
error:
	kfree(event->unit);
	kfree(event->scale);
	kfree(event->name);
	return -EINVAL;
}
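
/*
 * imc_free_events : Cleanup the events list, freeing the name/scale/unit
 * strings allocated during parsing.
 */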
static void imc_free_events(struct imc_events *events, int nr_entries)
{
	int i;

	/* Nothing to clean, return */
	if (!events)
		return;
	for (i = 0; i < nr_entries; i++) {
		kfree(events[i].unit);
		kfree(events[i].scale);
		kfree(events[i].name);
	}

	kfree(events);
}
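
/*
 * update_events_in_group : Update the "events" information in an
 * attr_group and assign the attr_group to the pmu "pmu".
 */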
static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
{
	struct attribute_group *attr_group;
	struct attribute **attrs, *dev_str;
	struct device_node *np, *pmu_events;
	u32 handle, base_reg;
	int i = 0, j = 0, ct, ret;
	const char *prefix, *g_scale, *g_unit;
	const char *ev_val_str, *ev_scale_str, *ev_unit_str;

	if (!of_property_read_u32(node, "events", &handle))
		pmu_events = of_find_node_by_phandle(handle);
	else
		return 0;

	/* Did not find any node with a given phandle */
	if (!pmu_events)
		return 0;

	/* Get a count of number of child nodes */
	ct = of_get_child_count(pmu_events);

	/* Get the event prefix */
	if (of_property_read_string(node, "events-prefix", &prefix))
		return 0;

	/* Get a global scale and unit, if available */
	if (of_property_read_string(node, "scale", &g_scale))
		g_scale = NULL;

	if (of_property_read_string(node, "unit", &g_unit))
		g_unit = NULL;

	/* "reg" property gives out the base offset of the counters data */
	of_property_read_u32(node, "reg", &base_reg);

	/* Allocate memory for the events */
	pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
	if (!pmu->events)
		return -ENOMEM;

	ct = 0;
	/* Parse the events and update the struct */
	for_each_child_of_node(pmu_events, np) {
		ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]);
		if (!ret)
			ct++;
	}

	/* Allocate memory for attribute group */
	attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
	if (!attr_group) {
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	}

	/*
	 * Allocate memory for attributes.
	 * Each event may carry a scale and a unit attribute besides its
	 * name, so reserve three attribute slots per event, plus one
	 * slot for the NULL terminator.
	 */
	attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL);
	if (!attrs) {
		kfree(attr_group);
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	}

	attr_group->name = "events";
	attr_group->attrs = attrs;
	do {
		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
		dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
		if (!dev_str)
			continue;

		attrs[j++] = dev_str;
		if (pmu->events[i].scale) {
			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}

		if (pmu->events[i].unit) {
			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}
	} while (++i < ct);

	/* Save the event attribute */
	pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;

	return 0;
}

static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
{
	return per_cpu(local_nest_imc_refc, cpu);
}

static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
	struct imc_pmu **pn = per_nest_pmu_arr;

	if (old_cpu < 0 || new_cpu < 0)
		return;

	while (*pn) {
		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
		pn++;
	}
}
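
/*
 * Nest counters are chip-scoped: one designated cpu per chip (tracked in
 * nest_imc_cpumask) collects them, so the hotplug callbacks below only
 * migrate or retire that designated cpu.
 */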
static int ppc_nest_imc_cpu_offline(unsigned int cpu)
{
	int nid, target = -1;
	const struct cpumask *l_cpumask;
	struct imc_pmu_ref *ref;

	/*
	 * Check in the designated list for this cpu. Don't bother
	 * if not one of them.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
		return 0;

	/*
	 * Check whether nest_imc is registered. We could end up here if the
	 * cpuhotplug callback registration fails. i.e, callback invokes the
	 * offline path for all successfully registered nodes. At this stage,
	 * nest_imc pmu will not be registered and we should return here.
	 *
	 * We return with a zero since this is not an offline failure. And
	 * cpuhp_setup_state() returns the actual failure reason to the caller,
	 * which in turn will call the cleanup routine.
	 */
	if (!nest_pmus)
		return 0;

	/*
	 * Now that this cpu is one of the designated,
	 * find a next cpu a) which is online and b) in same chip.
	 */
	nid = cpu_to_node(cpu);
	l_cpumask = cpumask_of_node(nid);
	target = cpumask_any_but(l_cpumask, cpu);

	/*
	 * Update the cpumask with the target cpu and
	 * migrate the context if needed.
	 */
	if (target >= 0 && target < nr_cpu_ids) {
		cpumask_set_cpu(target, &nest_imc_cpumask);
		nest_change_cpu_context(cpu, target);
	} else {
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
		/*
		 * If this is the last cpu in this chip then, skip the
		 * reference count lock and make the reference count on
		 * this chip zero.
		 */
		ref = get_nest_pmu_ref(cpu);
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int ppc_nest_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int res;

	/* Get the cpumask of this node */
	l_cpumask = cpumask_of_node(cpu_to_node(cpu));

	/*
	 * If this is not the first online CPU on this node, then
	 * just return.
	 */
	if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask))
		return 0;

	/*
	 * If this is the first online cpu on this node,
	 * disable the nest counters by making an OPAL call.
	 */
	res = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				     get_hard_smp_processor_id(cpu));
	if (res)
		return res;

	/* Make this CPU the designated target for counter collection */
	cpumask_set_cpu(cpu, &nest_imc_cpumask);
	return 0;
}

static int nest_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
				 "perf/powerpc/imc:online",
				 ppc_nest_imc_cpu_online,
				 ppc_nest_imc_cpu_offline);
}
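
/*
 * event->destroy() callback for nest events: drop this event's reference
 * on the chip's nest engine and stop the counters via OPAL when the last
 * user goes away.
 */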
static void nest_imc_counters_release(struct perf_event *event)
{
	int rc, node_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;

	node_id = cpu_to_node(event->cpu);

	/*
	 * See if we need to disable the nest PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * enable or disable the nest counters.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return;

	/* Take the mutex lock for this node and then decrement the reference count */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is, when perf session is
		 * started, followed by offlining of all cpus in a given node.
		 *
		 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
		 * sets ref->refc to zero, if the cpu which is about to go
		 * offline is the last cpu in a given node, and makes an OPAL
		 * call to disable the engine in that node.
		 */
		mutex_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to stop the counters for node %d\n", node_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "nest-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

static int nest_imc_event_init(struct perf_event *event)
{
	int chip_id, rc, node_id;
	u32 l_config, config = event->attr.config;
	struct imc_mem_info *pcni;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;
	bool flag = false;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	/*
	 * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
	 * Get the base memory address for this cpu.
	 */
	chip_id = cpu_to_chip_id(event->cpu);

	/* Return, if chip_id is not valid */
	if (chip_id < 0)
		return -ENODEV;

	pcni = pmu->mem_info;
	do {
		if (pcni->id == chip_id) {
			flag = true;
			break;
		}
		pcni++;
	} while (pcni->vbase != 0);

	if (!flag)
		return -ENODEV;

	/*
	 * Add the event offset to the base address.
	 */
	l_config = config & IMC_EVENT_OFFSET_MASK;
	event->hw.event_base = (u64)pcni->vbase + l_config;
	node_id = cpu_to_node(event->cpu);

	/*
	 * Get the imc_pmu_ref struct for this node.
	 * Take the mutex lock and then increment the count of nest pmu events
	 * inited.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to start the counters for node %d\n",
			       node_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->destroy = nest_imc_counters_release;
	return 0;
}
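
/*
 * core_imc_mem_init : Initializes memory for the current core.
 *
 * Uses alloc_pages_node() and uses the returned address as an argument to
 * an OPAL call to configure the pdbar. The address sent as an argument is
 * converted to physical address before the OPAL call is made. This is the
 * base address at which the core imc counters are populated.
 */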
static int core_imc_mem_init(int cpu, int size)
{
	int nid, rc = 0, core_id = (cpu / threads_per_core);
	struct imc_mem_info *mem_info;

	/*
	 * alloc_pages_node() will allocate memory for core in the
	 * local node only.
	 */
	nid = cpu_to_node(cpu);
	mem_info = &core_imc_pmu->mem_info[core_id];
	mem_info->id = core_id;

	/* We need only vbase for core counters */
	mem_info->vbase = page_address(alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				__GFP_NOWARN, get_order(size)));
	if (!mem_info->vbase)
		return -ENOMEM;

	/* Init the mutex */
	core_imc_refc[core_id].id = core_id;
	mutex_init(&core_imc_refc[core_id].lock);

	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
				    __pa((void *)mem_info->vbase),
				    get_hard_smp_processor_id(cpu));
	if (rc) {
		free_pages((u64)mem_info->vbase, get_order(size));
		mem_info->vbase = NULL;
	}

	return rc;
}

static bool is_core_imc_mem_inited(int cpu)
{
	struct imc_mem_info *mem_info;
	int core_id = (cpu / threads_per_core);

	mem_info = &core_imc_pmu->mem_info[core_id];
	if (!mem_info->vbase)
		return false;

	return true;
}

static int ppc_core_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int ret = 0;

	/* Get the cpumask for this core */
	l_cpumask = cpu_sibling_mask(cpu);

	/* If a cpu for this core is already set, then, don't do anything */
	if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))
		return 0;

	if (!is_core_imc_mem_inited(cpu)) {
		ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
		if (ret) {
			pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
			return ret;
		}
	}

	/* Set the cpu in the mask */
	cpumask_set_cpu(cpu, &core_imc_cpumask);
	return 0;
}

static int ppc_core_imc_cpu_offline(unsigned int cpu)
{
	unsigned int core_id;
	int ncpu;
	struct imc_pmu_ref *ref;

	/*
	 * Clear this cpu out of the mask; if not present in the mask,
	 * don't bother doing anything.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
		return 0;

	/*
	 * Check whether core_imc is registered. We could end up here
	 * if the cpuhotplug callback registration fails. i.e, callback
	 * invokes the offline path for all successfully registered cpus.
	 * At this stage, core_imc pmu will not be registered and we
	 * should return here.
	 *
	 * We return with a zero since this is not an offline failure.
	 * And cpuhp_setup_state() returns the actual failure reason
	 * to the caller, which in turn will call the cleanup routine.
	 */
	if (!core_imc_pmu->pmu.event_init)
		return 0;

	/* Find any online cpu in that core except the current "cpu" */
	ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);

	if (ncpu >= 0 && ncpu < nr_cpu_ids) {
		cpumask_set_cpu(ncpu, &core_imc_cpumask);
		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
	} else {
		/*
		 * If this is the last cpu in this core then, skip taking
		 * the reference count lock for this core and directly zero
		 * "refc" for this core.
		 */
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
				       get_hard_smp_processor_id(cpu));
		core_id = cpu / threads_per_core;
		ref = &core_imc_refc[core_id];
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int core_imc_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
				 "perf/powerpc/imc_core:online",
				 ppc_core_imc_cpu_online,
				 ppc_core_imc_cpu_offline);
}
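
/*
 * event->destroy() callback for core events: decrement the per-core
 * reference count under ref->lock and stop the core engine via OPAL when
 * the count drops to zero.
 */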
static void core_imc_counters_release(struct perf_event *event)
{
	int rc, core_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;

	/*
	 * See if we need to disable the IMC PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * enable or disable the core counters.
	 */
	core_id = event->cpu / threads_per_core;

	/* Take the mutex lock and decrement the reference count for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is, when perf session is
		 * started, followed by offlining of all cpus in a given core.
		 *
		 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
		 * sets ref->refc to zero, if the cpu which is about to go
		 * offline is the last cpu in a given core, and makes an OPAL
		 * call to disable the engine in that core.
		 */
		mutex_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "core-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

static int core_imc_event_init(struct perf_event *event)
{
	int core_id, rc;
	u64 config = event->attr.config;
	struct imc_mem_info *pcmi;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	if (!is_core_imc_mem_inited(event->cpu))
		return -ENODEV;

	core_id = event->cpu / threads_per_core;
	pcmi = &core_imc_pmu->mem_info[core_id];
	if (!pcmi->vbase)
		return -ENODEV;

	/* Get the core_imc mutex for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	/*
	 * Core pmu units are enabled only when it is used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the core counters.
	 * If not, just increment the count in core_imc_refc struct.
	 */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("core-imc: Unable to start the counters for core %d\n",
			       core_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
	event->destroy = core_imc_counters_release;
	return 0;
}
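
/*
 * Thread IMC counters are gathered per cpu: a page is allocated for each
 * online cpu and its physical address is loaded into that thread's LDBAR
 * SPR. In this driver's usage, THREAD_IMC_LDBAR_MASK keeps the
 * counter-memory address field of LDBAR, while THREAD_IMC_ENABLE (or
 * TRACE_IMC_ENABLE for trace mode) sets the enable/mode bits.
 */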
static int thread_imc_mem_alloc(int cpu_id, int size)
{
	u64 *local_mem = per_cpu(thread_imc_mem, cpu_id);
	int nid = cpu_to_node(cpu_id);

	if (!local_mem) {
		/*
		 * This case could happen only once at start, since we don't
		 * free the memory in cpu offline path.
		 */
		local_mem = page_address(alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				__GFP_NOWARN, get_order(size)));
		if (!local_mem)
			return -ENOMEM;

		per_cpu(thread_imc_mem, cpu_id) = local_mem;
	}

	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int ppc_thread_imc_cpu_online(unsigned int cpu)
{
	return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
}

static int ppc_thread_imc_cpu_offline(unsigned int cpu)
{
	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int thread_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
				 "perf/powerpc/imc_thread:online",
				 ppc_thread_imc_cpu_online,
				 ppc_thread_imc_cpu_offline);
}
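
/*
 * Thread imc events count only for a monitored task: event_init requires
 * a task target and CAP_SYS_ADMIN, and rejects sampling requests.
 */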
static int thread_imc_event_init(struct perf_event *event)
{
	u32 config = event->attr.config;
	struct task_struct *target;
	struct imc_pmu *pmu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config offset */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	target = event->hw.target;
	if (!target)
		return -EINVAL;

	event->pmu->task_ctx_nr = perf_sw_context;
	return 0;
}

static bool is_thread_imc_pmu(struct perf_event *event)
{
	if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
		return true;

	return false;
}

static u64 *get_event_base_addr(struct perf_event *event)
{
	u64 addr;

	if (is_thread_imc_pmu(event)) {
		addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
		return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
	}

	return (u64 *)event->hw.event_base;
}

static void thread_imc_pmu_start_txn(struct pmu *pmu,
				     unsigned int txn_flags)
{
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;
	perf_pmu_disable(pmu);
}

static void thread_imc_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

static int thread_imc_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}
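
/*
 * IMC counters are free-flowing: the microcode keeps incrementing the
 * in-memory (big-endian) value while the engine is enabled. The driver
 * snapshots the value on start and reports deltas on read/stop.
 */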
static u64 imc_read_counter(struct perf_event *event)
{
	u64 *addr, data;

	/*
	 * Snapshot the counter value, so that a later read can
	 * compute the delta against event->hw.prev_count.
	 */
	addr = get_event_base_addr(event);
	data = be64_to_cpu(READ_ONCE(*addr));
	local64_set(&event->hw.prev_count, data);

	return data;
}

static void imc_event_update(struct perf_event *event)
{
	u64 counter_prev, counter_new, final_count;

	counter_prev = local64_read(&event->hw.prev_count);
	counter_new = imc_read_counter(event);
	final_count = counter_new - counter_prev;

	/* Update the delta to the event count */
	local64_add(final_count, &event->count);
}

static void imc_event_start(struct perf_event *event, int flags)
{
	/*
	 * In Memory Counters are free flowing counters. HW or the microcode
	 * keeps adding to the counter whenever the HW event occurs. To give a
	 * consistent start, snapshot the value here and subtract it at a later
	 * point to give the delta as event count.
	 */
	imc_read_counter(event);
}

static void imc_event_stop(struct perf_event *event, int flags)
{
	/*
	 * Take a snapshot, calculate the delta and update
	 * the event counter value.
	 */
	imc_event_update(event);
}

static int imc_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	return 0;
}

static int thread_imc_event_add(struct perf_event *event, int flags)
{
	int core_id;
	struct imc_pmu_ref *ref;
	u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, smp_processor_id());

	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	if (!is_core_imc_mem_inited(smp_processor_id()))
		return -EINVAL;

	core_id = smp_processor_id() / threads_per_core;
	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;
	mtspr(SPRN_LDBAR, ldbar_value);

	/*
	 * imc pmus are enabled only when it is used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the counters.
	 * If not, just increment the count in ref count struct.
	 */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("thread-imc: Unable to start the counter for core %d\n",
			       core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);
	return 0;
}

static void thread_imc_event_del(struct perf_event *event, int flags)
{
	int core_id;
	struct imc_pmu_ref *ref;

	mtspr(SPRN_LDBAR, 0);

	core_id = smp_processor_id() / threads_per_core;
	ref = &core_imc_refc[core_id];

	mutex_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("thread-imc: Unable to stop the counters for core %d\n",
			       core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);

	/*
	 * Take a snapshot, calculate the delta and update
	 * the event counter value.
	 */
	imc_event_update(event);
}
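
/*
 * Allocate a page of memory for each cpu, and load LDBAR with 0.
 */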
static int trace_imc_mem_alloc(int cpu_id, int size)
{
	u64 *local_mem = per_cpu(trace_imc_mem, cpu_id);
	int phys_id = cpu_to_node(cpu_id), rc = 0;
	int core_id = (cpu_id / threads_per_core);

	if (!local_mem) {
		local_mem = page_address(alloc_pages_node(phys_id,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				__GFP_NOWARN, get_order(size)));
		if (!local_mem)
			return -ENOMEM;
		per_cpu(trace_imc_mem, cpu_id) = local_mem;

		/* Initialise the counters for trace mode */
		rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_TRACE, __pa((void *)local_mem),
					    get_hard_smp_processor_id(cpu_id));
		if (rc) {
			pr_info("IMC: opal init failed for trace imc\n");
			return rc;
		}
	}

	/* Init the mutex, if not already */
	trace_imc_refc[core_id].id = core_id;
	mutex_init(&trace_imc_refc[core_id].lock);

	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int ppc_trace_imc_cpu_online(unsigned int cpu)
{
	return trace_imc_mem_alloc(cpu, trace_imc_mem_size);
}

static int ppc_trace_imc_cpu_offline(unsigned int cpu)
{
	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int trace_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
				 "perf/powerpc/imc_trace:online",
				 ppc_trace_imc_cpu_online,
				 ppc_trace_imc_cpu_offline);
}

static u64 get_trace_imc_event_base_addr(void)
{
	return (u64)per_cpu(trace_imc_mem, smp_processor_id());
}
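
/*
 * Function to parse trace-imc data obtained
 * and to prepare the perf sample.
 */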
static int trace_imc_prepare_sample(struct trace_imc_data *mem,
				    struct perf_sample_data *data,
				    u64 *prev_tb,
				    struct perf_event_header *header,
				    struct perf_event *event)
{
	/* Sanity checks for a valid record */
	if (be64_to_cpu(READ_ONCE(mem->tb1)) > *prev_tb)
		*prev_tb = be64_to_cpu(READ_ONCE(mem->tb1));
	else
		return -EINVAL;

	if ((be64_to_cpu(READ_ONCE(mem->tb1)) & IMC_TRACE_RECORD_TB1_MASK) !=
	    be64_to_cpu(READ_ONCE(mem->tb2)))
		return -EINVAL;

	/* Prepare perf sample */
	data->ip = be64_to_cpu(READ_ONCE(mem->ip));
	data->period = event->hw.last_period;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;
	header->misc = 0;

	if (is_kernel_addr(data->ip))
		header->misc |= PERF_RECORD_MISC_KERNEL;
	else
		header->misc |= PERF_RECORD_MISC_USER;

	perf_event_header__init_id(header, data, event);

	return 0;
}

static void dump_trace_imc_data(struct perf_event *event)
{
	struct trace_imc_data *mem;
	int i, ret;
	u64 prev_tb = 0;

	mem = (struct trace_imc_data *)get_trace_imc_event_base_addr();
	for (i = 0; i < (trace_imc_mem_size / sizeof(struct trace_imc_data));
		i++, mem++) {
		struct perf_sample_data data;
		struct perf_event_header header;

		ret = trace_imc_prepare_sample(mem, &data, &prev_tb, &header, event);
		if (ret) /* Exit, if not a valid record */
			break;
		else {
			/* If this is a valid record, create the sample */
			struct perf_output_handle handle;

			if (perf_output_begin(&handle, event, header.size))
				return;

			perf_output_sample(&handle, &header, &data, event);
			perf_output_end(&handle);
		}
	}
}

static int trace_imc_event_add(struct perf_event *event, int flags)
{
	int core_id = smp_processor_id() / threads_per_core;
	struct imc_pmu_ref *ref = NULL;
	u64 local_mem, ldbar_value;

	/* Set trace-imc bit in ldbar and load ldbar with per-thread memory address */
	local_mem = get_trace_imc_event_base_addr();
	ldbar_value = (local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;

	if (core_imc_refc)
		ref = &core_imc_refc[core_id];
	if (!ref) {
		/* If core-imc is not enabled, use trace-imc reference count */
		if (trace_imc_refc)
			ref = &trace_imc_refc[core_id];
		if (!ref)
			return -EINVAL;
	}
	mtspr(SPRN_LDBAR, ldbar_value);
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
			mtspr(SPRN_LDBAR, 0);
			return -EINVAL;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	return 0;
}

static void trace_imc_event_read(struct perf_event *event)
{
	return;
}

static void trace_imc_event_stop(struct perf_event *event, int flags)
{
	u64 local_mem = get_trace_imc_event_base_addr();

	dump_trace_imc_data(event);
	memset((void *)local_mem, 0, sizeof(u64));
}

static void trace_imc_event_start(struct perf_event *event, int flags)
{
	return;
}

static void trace_imc_event_del(struct perf_event *event, int flags)
{
	int core_id = smp_processor_id() / threads_per_core;
	struct imc_pmu_ref *ref = NULL;

	if (core_imc_refc)
		ref = &core_imc_refc[core_id];
	if (!ref) {
		/* If core-imc is not enabled, use trace-imc reference count */
		if (trace_imc_refc)
			ref = &trace_imc_refc[core_id];
		if (!ref)
			return;
	}
	mtspr(SPRN_LDBAR, 0);
	mutex_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);

	trace_imc_event_stop(event, flags);
}

static int trace_imc_event_init(struct perf_event *event)
{
	struct task_struct *target;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Return if this is a counting event */
	if (event->attr.sample_period == 0)
		return -ENOENT;

	event->hw.idx = -1;
	target = event->hw.target;

	event->pmu->task_ctx_nr = perf_hw_context;
	return 0;
}
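
/* update_pmu_ops : Populate the appropriate operations for "pmu" */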
static int update_pmu_ops(struct imc_pmu *pmu)
{
	pmu->pmu.task_ctx_nr = perf_invalid_context;
	pmu->pmu.add = imc_event_add;
	pmu->pmu.del = imc_event_stop;
	pmu->pmu.start = imc_event_start;
	pmu->pmu.stop = imc_event_stop;
	pmu->pmu.read = imc_event_update;
	pmu->pmu.attr_groups = pmu->attr_groups;
	pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;

	switch (pmu->domain) {
	case IMC_DOMAIN_NEST:
		pmu->pmu.event_init = nest_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_CORE:
		pmu->pmu.event_init = core_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_THREAD:
		pmu->pmu.event_init = thread_imc_event_init;
		pmu->pmu.add = thread_imc_event_add;
		pmu->pmu.del = thread_imc_event_del;
		pmu->pmu.start_txn = thread_imc_pmu_start_txn;
		pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
		pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
		break;
	case IMC_DOMAIN_TRACE:
		pmu->pmu.event_init = trace_imc_event_init;
		pmu->pmu.add = trace_imc_event_add;
		pmu->pmu.del = trace_imc_event_del;
		pmu->pmu.start = trace_imc_event_start;
		pmu->pmu.stop = trace_imc_event_stop;
		pmu->pmu.read = trace_imc_event_read;
		pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group;
		break;
	default:
		break;
	}

	return 0;
}
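
/* init_nest_pmu_ref : Initialize the imc_pmu_ref struct for all the nodes */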
static int init_nest_pmu_ref(void)
{
	int nid, i, cpu;

	nest_imc_refc = kcalloc(num_possible_nodes(), sizeof(*nest_imc_refc),
				GFP_KERNEL);
	if (!nest_imc_refc)
		return -ENOMEM;

	i = 0;
	for_each_node(nid) {
		/*
		 * Mutex lock to avoid races while tracking the number of
		 * sessions using the chip's nest pmu units.
		 */
		mutex_init(&nest_imc_refc[i].lock);

		/*
		 * Loop to init the "id" with the node_id. Variable "i" initialized to
		 * 0 and will be used as index to the array. "i" will not go off the
		 * end of the array since the "for_each_node" loops for "N_POSSIBLE"
		 * nodes only.
		 */
		nest_imc_refc[i++].id = nid;
	}

	/*
	 * Loop to init the per_cpu "local_nest_imc_refc" with the proper
	 * "nest_imc_refc" index. This makes get_nest_pmu_ref() a lot simpler.
	 */
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		for (i = 0; i < num_possible_nodes(); i++) {
			if (nest_imc_refc[i].id == nid) {
				per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
				break;
			}
		}
	}
	return 0;
}

static void cleanup_all_core_imc_memory(void)
{
	int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
	struct imc_mem_info *ptr = core_imc_pmu->mem_info;
	int size = core_imc_pmu->counter_mem_size;

	/* mem_info will never be NULL */
	for (i = 0; i < nr_cores; i++) {
		if (ptr[i].vbase)
			free_pages((u64)ptr[i].vbase, get_order(size));
	}

	kfree(ptr);
	kfree(core_imc_refc);
}

static void thread_imc_ldbar_disable(void *dummy)
{
	/*
	 * By setting LDBAR to zero, we disable thread-imc
	 * updates.
	 */
	mtspr(SPRN_LDBAR, 0);
}

void thread_imc_disable(void)
{
	on_each_cpu(thread_imc_ldbar_disable, NULL, 1);
}

static void cleanup_all_thread_imc_memory(void)
{
	int i, order = get_order(thread_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(thread_imc_mem, i))
			free_pages((u64)per_cpu(thread_imc_mem, i), order);
	}
}

static void cleanup_all_trace_imc_memory(void)
{
	int i, order = get_order(trace_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(trace_imc_mem, i))
			free_pages((u64)per_cpu(trace_imc_mem, i), order);
	}
	kfree(trace_imc_refc);
}
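
/* Free the event attributes allocated in update_events_in_group() */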
static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
}
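
/*
 * Common function to unregister the cpu hotplug callback and
 * free the per-domain memory.
 */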
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 1) {
			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
			kfree(nest_imc_refc);
			kfree(per_nest_pmu_arr);
			per_nest_pmu_arr = NULL;
		}

		if (nest_pmus > 0)
			nest_pmus--;
		mutex_unlock(&nest_init_lock);
	}

	/* Free core_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);
		cleanup_all_core_imc_memory();
	}

	/* Free thread_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
		cleanup_all_thread_imc_memory();
	}

	/* Free trace_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_TRACE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE);
		cleanup_all_trace_imc_memory();
	}
}
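
/*
 * Function to unregister thread-imc if core-imc
 * is not registered.
 */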
void unregister_thread_imc(void)
{
	imc_common_cpuhp_mem_free(thread_imc_pmu);
	imc_common_mem_free(thread_imc_pmu);
	perf_pmu_unregister(&thread_imc_pmu->pmu);
}
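
/*
 * imc_mem_init : Allocate per-domain resources and set the PMU name
 * ("nest_<name>_imc" for nest, "<name>_imc" otherwise).
 */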
static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
			int pmu_index)
{
	const char *s;
	int nr_cores, cpu, res = -ENOMEM;

	if (of_property_read_string(parent, "name", &s))
		return -ENODEV;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
		if (!pmu_ptr->pmu.name)
			goto err;

		/* Needed for hotplug/migration */
		if (!per_nest_pmu_arr) {
			per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
						   sizeof(struct imc_pmu *),
						   GFP_KERNEL);
			if (!per_nest_pmu_arr)
				goto err;
		}
		per_nest_pmu_arr[pmu_index] = pmu_ptr;
		break;
	case IMC_DOMAIN_CORE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
		pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
					    GFP_KERNEL);
		if (!pmu_ptr->mem_info)
			goto err;

		core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
					GFP_KERNEL);
		if (!core_imc_refc) {
			kfree(pmu_ptr->mem_info);
			goto err;
		}

		core_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_THREAD:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		thread_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
			if (res) {
				cleanup_all_thread_imc_memory();
				goto err;
			}
		}

		thread_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_TRACE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;

		nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
		trace_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
					 GFP_KERNEL);
		if (!trace_imc_refc)
			return -ENOMEM;

		trace_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = trace_imc_mem_alloc(cpu, trace_imc_mem_size);
			if (res) {
				cleanup_all_trace_imc_memory();
				goto err;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
err:
	return res;
}
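
/*
 * init_imc_pmu : Setup and register the IMC pmu device.
 *
 * @parent:  device tree unit node for this pmu
 * @pmu_ptr: memory allocated for this pmu
 * @pmu_idx: count of nest pmus registered so far
 *
 * Allocates the per-domain memory, sets up the cpumask and cpu hotplug
 * callbacks for the domain, and registers the pmu with perf.
 */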
int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_idx)
{
	int ret;

	ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
	if (ret)
		goto err_free_mem;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/*
		 * Nest imc pmu need only one cpu per chip, we initialize the
		 * cpumask for the first nest imc pmu and use the same for the
		 * rest. To handle the cpuhotplug callback unregister, we track
		 * the number of nest pmus in "nest_pmus".
		 */
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 0) {
			ret = init_nest_pmu_ref();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				kfree(per_nest_pmu_arr);
				per_nest_pmu_arr = NULL;
				goto err_free_mem;
			}
			/* Register for cpu hotplug notification. */
			ret = nest_pmu_cpumask_init();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				kfree(nest_imc_refc);
				kfree(per_nest_pmu_arr);
				per_nest_pmu_arr = NULL;
				goto err_free_mem;
			}
		}
		nest_pmus++;
		mutex_unlock(&nest_init_lock);
		break;
	case IMC_DOMAIN_CORE:
		ret = core_imc_pmu_cpumask_init();
		if (ret) {
			cleanup_all_core_imc_memory();
			goto err_free_mem;
		}

		break;
	case IMC_DOMAIN_THREAD:
		ret = thread_imc_cpu_init();
		if (ret) {
			cleanup_all_thread_imc_memory();
			goto err_free_mem;
		}

		break;
	case IMC_DOMAIN_TRACE:
		ret = trace_imc_cpu_init();
		if (ret) {
			cleanup_all_trace_imc_memory();
			goto err_free_mem;
		}

		break;
	default:
		return -EINVAL;	/* Unknown domain */
	}

	ret = update_events_in_group(parent, pmu_ptr);
	if (ret)
		goto err_free_cpuhp_mem;

	ret = update_pmu_ops(pmu_ptr);
	if (ret)
		goto err_free_cpuhp_mem;

	ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
	if (ret)
		goto err_free_cpuhp_mem;

	pr_debug("%s performance monitor hardware support registered\n",
		 pmu_ptr->pmu.name);

	return 0;

err_free_cpuhp_mem:
	imc_common_cpuhp_mem_free(pmu_ptr);
err_free_mem:
	imc_common_mem_free(pmu_ptr);
	return ret;
}