#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

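/*
 * Decode a PERF_TYPE_HW_CACHE config into its (type, op, result) tuple and
 * look up the PMU-specific event number in the driver's cache map.
 */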
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

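/* Map a PERF_TYPE_HARDWARE generic event onto this PMU's event encoding. */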
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

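/*
 * Translate a generic perf event into this PMU's event encoding, dispatching
 * on the event type (dynamic/raw, hardware, or hw-cache).
 */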
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

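/*
 * Program the counter so that it overflows (and raises an interrupt) once
 * the remaining sample period has elapsed. Returns 1 if a new period was
 * started, 0 otherwise.
 */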
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (armpmu->max_period >> 1))
		left = armpmu->max_period >> 1;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

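/*
 * Fold the delta since the last read into the event count. The cmpxchg loop
 * guards against a racing update of prev_count from an overflow interrupt.
 */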
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't modify the
	 * counter being disabled. Sad but true...
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

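/* Remove an event from the PMU and release its counter slot. */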
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

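/* Allocate a counter for the event and (optionally) start it counting. */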
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped. */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

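/*
 * Check that a single event can be scheduled on this PMU alongside the
 * events already accounted for in @hw_events. Software events always fit.
 */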
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
{
	struct platform_device *pdev = armpmu->plat_device;

	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;

	plat = armpmu_get_platdata(armpmu);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

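/* True if the event asks to exclude any privilege mode or idle time. */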
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

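/* pmu::event_init callback: validate and set up a new event for this PMU. */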
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-bound events for CPUs that are of a different class to
	 * that which this PMU is able to service.
	 *
	 * This allows heterogeneous systems (e.g. big.LITTLE) to expose one
	 * PMU per CPU microarchitecture.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

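/* pmu::pmu_enable callback: start the PMU when at least one event is in use. */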
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t armpmu_cpumask_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!__oprofile_cpu_pmu)
		return NULL;

	return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (__oprofile_cpu_pmu != NULL)
		max_events = __oprofile_cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

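/* Release the interrupt line (PPI or SPI) that was requested for @cpu. */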
void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
		return;

	if (irq_is_percpu(irq)) {
		/* A single PPI is shared by all supported CPUs. */
		free_percpu_irq(irq, &hw_events->percpu_pmu);
		cpumask_clear(&armpmu->active_irqs);
		return;
	}

	free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}

void armpmu_free_irqs(struct arm_pmu *armpmu)
{
	int cpu;

	for_each_cpu(cpu, &armpmu->supported_cpus)
		armpmu_free_irq(armpmu, cpu);
}

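/*
 * Request the interrupt line for @cpu. A per-CPU interrupt (PPI) is
 * requested once for all supported CPUs; a regular interrupt (SPI) is
 * requested per CPU and pinned to that CPU.
 */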
int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
{
	int err = 0;
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	const irq_handler_t handler = armpmu_dispatch_irq;
	int irq = per_cpu(hw_events->irq, cpu);

	if (!irq)
		return 0;

	if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
	} else if (irq_is_percpu(irq)) {
		int other_cpu = cpumask_first(&armpmu->active_irqs);
		int other_irq = per_cpu(hw_events->irq, other_cpu);

		if (irq != other_irq) {
			pr_warn("mismatched PPIs detected.\n");
			err = -EINVAL;
			goto err_out;
		}
	} else {
		struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		if (platdata && platdata->irq_flags) {
			irq_flags = platdata->irq_flags;
		} else {
			irq_flags = IRQF_PERCPU |
				    IRQF_NOBALANCING |
				    IRQF_NO_THREAD;
		}

		err = request_irq(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
	}

	if (err)
		goto err_out;

	cpumask_set_cpu(cpu, &armpmu->active_irqs);
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}

int armpmu_request_irqs(struct arm_pmu *armpmu)
{
	/* Initialise err so that an empty supported_cpus mask succeeds. */
	int cpu, err = 0;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		err = armpmu_request_irq(armpmu, cpu);
		if (err)
			break;
	}

	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * unknown values in the registers.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq && irq_is_percpu(irq))
		enable_percpu_irq(irq, IRQ_TYPE_NONE);

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq && irq_is_percpu(irq))
		disable_percpu_irq(irq);

	return 0;
}

#ifdef CONFIG_CPU_PM
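/* Stop (on PM entry) or restart (on PM exit) every in-use counter. */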
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		/*
		 * If the counter is not used skip it, there is no
		 * need of stopping/restarting it.
		 */
		if (!test_bit(idx, hw_events->used_mask))
			continue;

		event = hw_events->events[idx];

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-failure so that
	 * we are not left with stale state from before the
	 * low-power transition.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
		cpu_pm_pmu_setup(armpmu, cmd);
		/* fallthrough: the PMU is restarted in both cases */
	case CPU_PM_ENTER_FAILED:
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

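/*
 * Allocate and initialise a struct arm_pmu along with its per-cpu
 * accounting data. Returns NULL on failure.
 */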
struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		goto out;
	}

	pmu->hw_events = alloc_percpu(struct pmu_hw_events);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
		.filter_match	= armpmu_filter_match,
		.attr_groups	= pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities	= PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

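/*
 * Register the PMU with the perf core and hook it into CPU hotplug and
 * CPU PM notifications.
 */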
int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	if (!__oprofile_cpu_pmu)
		__oprofile_cpu_pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
		pmu->name, pmu->num_events);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);