/*
 * Meta performance counter support.
 *  Copyright (C) 2012 Imagination Technologies Ltd
 *
 * This code is based on the sh pmu code:
 *  Copyright (C) 2009 Paul Mundt
 *
 * and on the arm pmu code:
 *  Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *  Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqchip/metag.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include <asm/core_reg.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>

#include "perf_event.h"
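
/*
 * Layout of each PERF_COUNT(idx) register, as inferred from the masks
 * used in this file (a sketch; the hardware manual is authoritative):
 *
 *   [31:28]  performance event select nibble
 *   [27:24]  per-thread enable bits, (1 << 24) << thread id
 *   [23:0]   24-bit event count, i.e. the MAX_PERIOD width
 */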

static int _hw_perf_event_init(struct perf_event *);
static void _hw_perf_event_destroy(struct perf_event *);

/* The PMU in use, selected at init time according to the core type */
static struct metag_pmu *metag_pmu __read_mostly;

/* Per-processor counter state */
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

/* PMU admin */
const char *perf_pmu_name(void)
{
	if (!metag_pmu)
		return NULL;

	return metag_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	if (metag_pmu)
		return metag_pmu->max_events;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static inline int metag_pmu_initialised(void)
{
	return !!metag_pmu;
}

static void release_pmu_hardware(void)
{
	int irq;
	unsigned int version = (metag_pmu->version &
			(METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
			METAC_ID_REV_S;

	/* The counter overflow interrupts only exist on rev 1.4+ cores */
	if (version < 0x0104)
		return;

	irq = internal_irq_map(17);
	if (irq >= 0)
		free_irq(irq, (void *)1);

	irq = internal_irq_map(16);
	if (irq >= 0)
		free_irq(irq, (void *)0);
}

static int reserve_pmu_hardware(void)
{
	int err = 0, irq[2];
	unsigned int version = (metag_pmu->version &
			(METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
			METAC_ID_REV_S;

	/* The counter overflow interrupts only exist on rev 1.4+ cores */
	if (version < 0x0104)
		goto out;

	/*
	 * Map the two counter overflow triggers (internal IRQs 16 and 17,
	 * one per hardware counter) to Linux IRQ numbers and request them.
	 * The counter index is passed as the dev_id cookie so the handler
	 * knows which counter overflowed.
	 */
	irq[0] = internal_irq_map(16);
	if (irq[0] < 0) {
		pr_err("unable to map internal IRQ %d\n", 16);
		err = irq[0];
		goto out;
	}
	err = request_irq(irq[0], metag_pmu->handle_irq, IRQF_NOBALANCING,
			"metagpmu0", (void *)0);
	if (err) {
		pr_err("unable to request IRQ%d for metag PMU counters\n",
				irq[0]);
		goto out;
	}

	irq[1] = internal_irq_map(17);
	if (irq[1] < 0) {
		pr_err("unable to map internal IRQ %d\n", 17);
		err = irq[1];
		goto out_irq1;
	}
	err = request_irq(irq[1], metag_pmu->handle_irq, IRQF_NOBALANCING,
			"metagpmu1", (void *)1);
	if (err) {
		pr_err("unable to request IRQ%d for metag PMU counters\n",
				irq[1]);
		goto out_irq1;
	}

	return 0;

out_irq1:
	free_irq(irq[0], (void *)0);
out:
	return err;
}

/*
 * The Meta PMU has no global enable/disable: counters are started and
 * stopped individually, so these pmu-wide callbacks are empty.
 */
static void metag_pmu_enable(struct pmu *pmu)
{
}

static void metag_pmu_disable(struct pmu *pmu)
{
}

static int metag_pmu_event_init(struct perf_event *event)
{
	int err = 0;
	atomic_t *active_events = &metag_pmu->active_events;

	if (!metag_pmu_initialised()) {
		err = -ENODEV;
		goto out;
	}

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	event->destroy = _hw_perf_event_destroy;

	/*
	 * The first active event reserves the PMU hardware (the overflow
	 * IRQs); subsequent events just bump the reference count.
	 */
	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&metag_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = reserve_pmu_hardware();

		if (!err)
			atomic_inc(active_events);

		mutex_unlock(&metag_pmu->reserve_mutex);
	}

	/* Hardware and cache counters */
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_RAW:
		err = _hw_perf_event_init(event);
		break;

	default:
		return -ENOENT;
	}

	if (err)
		event->destroy(event);

out:
	return err;
}
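
/*
 * Worked example of the wraparound handling in metag_pmu_event_update()
 * below, assuming the 24-bit counter width implied by the masks in this
 * file: with prev_count == 0xfffffe and a new raw count of 0x000001,
 * (new - prev) & MAX_PERIOD == 3, i.e. the three events that occurred
 * across the counter wrap are accounted, rather than a huge bogus delta.
 */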
void metag_pmu_event_update(struct perf_event *event,
		struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * The counter can be updated from other contexts (e.g. the overflow
	 * interrupt), so read the hardware value and publish it to
	 * prev_count with a cmpxchg loop: only the context that wins the
	 * exchange accounts the delta for this interval.
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = metag_pmu->read(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Calculate the delta, masking to the hardware counter width so
	 * that a wrap between reads is accounted correctly.
	 */
	delta = (new_raw_count - prev_raw_count) & MAX_PERIOD;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

int metag_pmu_event_set_period(struct perf_event *event,
		struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	/* The period may have been changed since the last update */
	if (unlikely(period != hwc->last_period))
		left += period - hwc->last_period;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)metag_pmu->max_period)
		left = metag_pmu->max_period;

	/*
	 * Program the counter with the two's complement of the remaining
	 * period, so it overflows after "left" more events.
	 */
	if (metag_pmu->write) {
		local64_set(&hwc->prev_count, -(s32)left);
		metag_pmu->write(idx, -left & MAX_PERIOD);
	}

	perf_event_update_userpage(event);

	return ret;
}
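
/*
 * For instance (illustrative numbers only): with left == 0x1000 the code
 * above sets prev_count to -0x1000 and, via metag_pmu->write(), programs
 * the 24-bit counter to (-0x1000 & MAX_PERIOD) == 0xfff000, so the
 * counter overflows (and interrupts) after exactly 0x1000 more events.
 */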

static void metag_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	/*
	 * We always have to reprogram the period, so ignore PERF_EF_RELOAD
	 * beyond sanity checking the event state.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/*
	 * Reset the period before re-enabling. If the counter supports
	 * overflow interrupts (max_period != 0), it must be programmed with
	 * the remaining period, otherwise it would free-run from whatever
	 * value it held when it was stopped.
	 */
	if (metag_pmu->max_period)
		metag_pmu_event_set_period(event, hwc, hwc->idx);
	cpuc->events[idx] = event;
	metag_pmu->enable(hwc, idx);
}

static void metag_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * If this counter is already stopped, there is nothing to do;
	 * otherwise bring the count up to date before disabling it.
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		metag_pmu_event_update(event, hwc, hwc->idx);
		metag_pmu->disable(hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int metag_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = 0, ret = 0;

	perf_pmu_disable(event->pmu);

	/* Check whether we're counting instructions (config 0x100) */
	if (hwc->config == 0x100) {
		if (__test_and_set_bit(METAG_INST_COUNTER,
				cpuc->used_mask)) {
			ret = -EAGAIN;
			goto out;
		}
		idx = METAG_INST_COUNTER;
	} else {
		/* Check whether we have a spare counter */
		idx = find_first_zero_bit(cpuc->used_mask,
				atomic_read(&metag_pmu->active_events));
		if (idx >= METAG_INST_COUNTER) {
			ret = -EAGAIN;
			goto out;
		}

		__set_bit(idx, cpuc->used_mask);
	}
	hwc->idx = idx;

	/* Make sure the counter is disabled before we program it */
	metag_pmu->disable(hwc, idx);

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		metag_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void metag_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);
	metag_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	__clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void metag_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	metag_pmu_event_update(event, hwc, hwc->idx);
}

static struct pmu pmu = {
	.pmu_enable = metag_pmu_enable,
	.pmu_disable = metag_pmu_disable,

	.event_init = metag_pmu_event_init,

	.add = metag_pmu_add,
	.del = metag_pmu_del,
	.start = metag_pmu_start,
	.stop = metag_pmu_stop,
	.read = metag_pmu_read,
};

/* Mapping of the generic hardware events to Meta counter encodings */
static const int metag_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 0x03,
	[PERF_COUNT_HW_INSTRUCTIONS] = 0x100,
	[PERF_COUNT_HW_CACHE_REFERENCES] = -1,
	[PERF_COUNT_HW_CACHE_MISSES] = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES] = -1,
	[PERF_COUNT_HW_BUS_CYCLES] = -1,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = -1,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = -1,
	[PERF_COUNT_HW_REF_CPU_CYCLES] = -1,
};
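
/*
 * Meta encodings for the generic cache events. Entries of -1 above and
 * the CACHE_OP_UNSUPPORTED markers below are rejected at event init time
 * (see _hw_perf_cache_event() and _hw_perf_event_init()), so they never
 * reach the counter programming code.
 */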
static const int metag_pmu_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x08,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x09,
			[C(RESULT_MISS)] = 0x0a,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0xd0,
			[C(RESULT_MISS)] = 0xd2,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0xd4,
			[C(RESULT_MISS)] = 0xd5,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0xd1,
			[C(RESULT_MISS)] = 0xd3,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

static void _hw_perf_event_destroy(struct perf_event *event)
{
	atomic_t *active_events = &metag_pmu->active_events;
	struct mutex *pmu_mutex = &metag_pmu->reserve_mutex;

	/* The last active event releases the PMU IRQs */
	if (atomic_dec_and_mutex_lock(active_events, pmu_mutex)) {
		release_pmu_hardware();
		mutex_unlock(pmu_mutex);
	}
}

static int _hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	if (!metag_pmu->cache_events)
		return -EINVAL;

	/* Unpack the cache event descriptor: type, op and result */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*metag_pmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*evp = ev;
	return 0;
}
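
/*
 * For example (hypothetical usage): a PERF_TYPE_HW_CACHE config of
 * C(L1I) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16) unpacks to
 * type/op/result indices that select 0x0a (L1 instruction cache misses)
 * from the table above.
 */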

static int _hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int mapping = 0, err;

	switch (attr->type) {
	case PERF_TYPE_HARDWARE:
		if (attr->config >= PERF_COUNT_HW_MAX)
			return -EINVAL;

		mapping = metag_pmu->event_map(attr->config);
		break;

	case PERF_TYPE_HW_CACHE:
		err = _hw_perf_cache_event(attr->config, &mapping);
		if (err)
			return err;
		break;

	case PERF_TYPE_RAW:
		mapping = attr->config;
		break;
	}

	/* Return early if the event is unsupported on this hardware */
	if (mapping == -1)
		return -EINVAL;

	/*
	 * Don't assign an index until the event is placed onto the
	 * hardware; -1 signifies that we're still deciding where to put it.
	 */
	hwc->idx = -1;

	/* Store the event encoding */
	hwc->config |= (unsigned long)mapping;

	/*
	 * For non-sampling runs, limit the sample_period to half of the
	 * counter width. That way, the new counter value is far less likely
	 * to overtake the previous one (unless there are serious IRQ
	 * latency issues).
	 */
	if (metag_pmu->max_period) {
		if (!hwc->sample_period) {
			hwc->sample_period = metag_pmu->max_period >> 1;
			hwc->last_period = hwc->sample_period;
			local64_set(&hwc->period_left, hwc->sample_period);
		}
	}

	return 0;
}

static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
{
	struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events);
	unsigned int config = event->config;
	unsigned int tmp = config & 0xf0;
	unsigned long flags;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Check if we're enabling the instruction counter. It is backed by
	 * the TXTACTCYC core register, which is always running; all we can
	 * do is snapshot its current value into prev_count.
	 */
	if (METAG_INST_COUNTER == idx) {
		WARN_ONCE((config != 0x100),
			"invalid configuration (%d) for counter (%d)\n",
			config, idx);
		local64_set(&event->prev_count, __core_reg_get(TXTACTCYC));
		goto unlock;
	}

	/* Check for a core internal or performance channel event. */
	if (tmp) {
		void *perf_addr;

		/*
		 * Select the sub-event by writing the low nibble of the
		 * config to the relevant core internal or performance
		 * channel register.
		 */
		switch (tmp) {
		case 0xd0:
			perf_addr = (void *)PERF_ICORE(idx);
			break;

		case 0xf0:
			perf_addr = (void *)PERF_CHAN(idx);
			break;

		default:
			perf_addr = NULL;
			break;
		}

		if (perf_addr)
			metag_out32((config & 0x0f), perf_addr);

		/*
		 * Now use the high nibble as the performance event to count.
		 */
		config = tmp >> 4;
	}

	/* Set the event select nibble and this thread's enable bit */
	tmp = ((config & 0xf) << 28) |
			((1 << 24) << hard_processor_id());
	if (metag_pmu->max_period)
		/*
		 * Cores supporting overflow interrupts may have had the
		 * counter set to a specific value that needs preserving.
		 */
		tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
	else
		/*
		 * Older cores reset the counter on write, so prev_count
		 * needs resetting too, so we can calculate a correct delta.
		 */
		local64_set(&event->prev_count, 0);

	metag_out32(tmp, PERF_COUNT(idx));
unlock:
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
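
/*
 * Illustration of the enable value composed above (assuming the register
 * layout sketched near the top of this file): enabling event 0x3 (CPU
 * cycles) on hardware thread 1 writes 0x32000000 plus the preserved
 * 24-bit count, i.e. event nibble 0x3 in bits 31:28 and thread-enable
 * bit (1 << 24) << 1.
 */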

static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx)
{
	struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events);
	unsigned int tmp = 0;
	unsigned long flags;

	/*
	 * The TXTACTCYC-backed instruction counter can't be disabled per se,
	 * as it's a hardware thread register which is always counting, so
	 * just return if that's the counter we're attempting to disable.
	 */
	if (METAG_INST_COUNTER == idx)
		return;

	/*
	 * The counter value _should_ have been read prior to disabling:
	 * on an SMP system the register is shared, so another thread may
	 * have updated it since our last read.
	 *
	 * Disabling is done by clearing the top byte (the event select
	 * nibble and all the thread-enable bits) while preserving the
	 * 24-bit count, under the PMU lock.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	tmp = metag_in32(PERF_COUNT(idx));
	tmp &= 0x00ffffff;
	metag_out32(tmp, PERF_COUNT(idx));

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static u64 metag_pmu_read_counter(int idx)
{
	u32 tmp = 0;

	/* The instruction counter is backed by the TXTACTCYC core register */
	if (METAG_INST_COUNTER == idx) {
		tmp = __core_reg_get(TXTACTCYC);
		goto out;
	}

	tmp = metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
out:
	return tmp;
}

static void metag_pmu_write_counter(int idx, u32 val)
{
	struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events);
	u32 tmp = 0;
	unsigned long flags;

	/*
	 * The TXTACTCYC-backed instruction counter can't be written to, so
	 * silently ignore any attempt to do so.
	 */
	if (METAG_INST_COUNTER == idx)
		return;

	/*
	 * Keep the event select and thread-enable bits from the old value
	 * and replace only the 24-bit count.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	val &= 0x00ffffff;
	tmp = metag_in32(PERF_COUNT(idx)) & 0xff000000;
	val |= tmp;
	metag_out32(val, PERF_COUNT(idx));

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int metag_pmu_event_map(int idx)
{
	return metag_general_events[idx];
}

static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
{
	int idx = (int)dev;
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event = cpuhw->events[idx];
	struct hw_perf_event *hwc = &event->hw;
	struct pt_regs *regs = get_irq_regs();
	struct perf_sample_data sampledata;
	unsigned long flags;
	u32 counter = 0;

	/*
	 * Disable the counter while the overflow is handled, otherwise it
	 * could overflow again before we're done. Clear the top byte (event
	 * select and thread-enable bits) but remember it in "counter" so it
	 * can be restored afterwards.
	 */
	__global_lock2(flags);
	counter = metag_in32(PERF_COUNT(idx));
	metag_out32((counter & 0x00ffffff), PERF_COUNT(idx));
	__global_unlock2(flags);

	/* Update the counts and reset the sample period */
	metag_pmu_event_update(event, hwc, idx);
	perf_sample_data_init(&sampledata, 0, hwc->last_period);
	metag_pmu_event_set_period(event, hwc, idx);

	/*
	 * Enable the counter again once core overflow processing has
	 * completed. perf_event_overflow() returns non-zero if the event
	 * should be throttled, in which case the counter is left disabled.
	 * Note the counter value may have been modified while it was
	 * inactive to set it up ready for the next interrupt.
	 */
	if (!perf_event_overflow(event, &sampledata, regs)) {
		__global_lock2(flags);
		counter = (counter & 0xff000000) |
				(metag_in32(PERF_COUNT(idx)) & 0x00ffffff);
		metag_out32(counter, PERF_COUNT(idx));
		__global_unlock2(flags);
	}

	return IRQ_HANDLED;
}

static struct metag_pmu _metag_pmu = {
	.handle_irq = metag_pmu_counter_overflow,
	.enable = metag_pmu_enable_counter,
	.disable = metag_pmu_disable_counter,
	.read = metag_pmu_read_counter,
	.write = metag_pmu_write_counter,
	.event_map = metag_pmu_event_map,
	.cache_events = &metag_pmu_cache_events,
	.max_period = MAX_PERIOD,
	.max_events = MAX_HWEVENTS,
};
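
/*
 * The remaining metag_pmu fields (name, version, pmu, active_events and
 * reserve_mutex) are filled in by init_hw_perf_events() below.
 */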

/* Reinitialise the per-cpu counter state when a CPU is brought online */
static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned int)hcpu;
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	memset(cpuc, 0, sizeof(struct cpu_hw_events));
	raw_spin_lock_init(&cpuc->pmu_lock);

	return NOTIFY_OK;
}

static struct notifier_block metag_pmu_notifier = {
	.notifier_call = metag_pmu_cpu_notify,
};

/* PMU initialisation */
static int __init init_hw_perf_events(void)
{
	int ret = 0, cpu;
	u32 version = *(u32 *)METAC_ID;
	int major = (version & METAC_ID_MAJOR_BITS) >> METAC_ID_MAJOR_S;
	int min_rev = (version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS))
			>> METAC_ID_REV_S;

	/* This driver only supports Meta 2 cores */
	if (major != 0x02) {
		pr_info("no hardware counter support available\n");
		goto out;
	}

	metag_pmu = &_metag_pmu;

	if (min_rev < 0x0104) {
		/*
		 * A core without overflow interrupts, and with
		 * clear-on-write counters, can't support sampling, so
		 * disable it.
		 */
		metag_pmu->handle_irq = NULL;
		metag_pmu->write = NULL;
		metag_pmu->max_period = 0;
	}

	metag_pmu->name = "meta2";
	metag_pmu->version = version;
	metag_pmu->pmu = pmu;

	pr_info("enabled with %s PMU driver, %d counters available\n",
			metag_pmu->name, metag_pmu->max_events);

	/*
	 * Without overflow interrupts we can't sample; let the core perf
	 * code know so it rejects sampling events up front.
	 */
	if (metag_pmu->max_period == 0)
		metag_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	/* Initialise the active events and reservation mutex */
	atomic_set(&metag_pmu->active_events, 0);
	mutex_init(&metag_pmu->reserve_mutex);

	/* Clear the counters */
	metag_out32(0, PERF_COUNT(0));
	metag_out32(0, PERF_COUNT(1));

	for_each_possible_cpu(cpu) {
		struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

		memset(cpuc, 0, sizeof(struct cpu_hw_events));
		raw_spin_lock_init(&cpuc->pmu_lock);
	}

	register_cpu_notifier(&metag_pmu_notifier);
	ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);
out:
	return ret;
}
early_initcall(init_hw_perf_events);