/*
 * Linux performance counter support for MIPS.
 *
 * The overall structure follows the ARM perf implementation (which in
 * turn derives from the sparc64 and x86 code); hardware counter access
 * is modelled on the MIPS Oprofile code.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h>

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance counter.
	 * MIPS CPUs vary in performance counters. They use this differently,
	 * and even may not use it.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN/CNTR_ODD denotes the counters with even/odd indexes.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static int counter_bits;
static struct mips_pmu mipspmu;
95
96#define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
97 MIPS_PERFCTRL_EVENT)
98#define M_PERFCTL_VPEID(vpe) ((vpe) << MIPS_PERFCTRL_VPEID_S)
99
100#ifdef CONFIG_CPU_BMIPS5000
101#define M_PERFCTL_MT_EN(filter) 0
102#else
103#define M_PERFCTL_MT_EN(filter) (filter)
104#endif
105
106#define M_TC_EN_ALL M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
107#define M_TC_EN_VPE M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
108#define M_TC_EN_TC M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)
109
110#define M_PERFCTL_COUNT_EVENT_WHENEVER (MIPS_PERFCTRL_EXL | \
111 MIPS_PERFCTRL_K | \
112 MIPS_PERFCTRL_U | \
113 MIPS_PERFCTRL_S | \
114 MIPS_PERFCTRL_IE)
115
116#ifdef CONFIG_MIPS_MT_SMP
117#define M_PERFCTL_CONFIG_MASK 0x3fff801f
118#else
119#define M_PERFCTL_CONFIG_MASK 0x1f
120#endif
121
122#define CNTR_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
123
124#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
125static DEFINE_RWLOCK(pmuint_rwlock);
126
#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : cpu_vpe_id(&current_cpu_data))
#endif
134
135
136static unsigned int vpe_shift(void)
137{
138 if (num_possible_cpus() > 1)
139 return 1;
140
141 return 0;
142}
143
144static unsigned int counters_total_to_per_cpu(unsigned int counters)
145{
146 return counters >> vpe_shift();
147}
148
149#else
150#define vpe_id() 0
151
152#endif
153
154static void resume_local_counters(void);
155static void pause_local_counters(void);
156static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
157static int mipsxx_pmu_handle_shared_irq(void);
158

/* 0: Not Loongson-3
 * 1: Loongson-3A1000/3B1000/3B1500
 * 2: Loongson-3A2000/3A3000
 * 3: Loongson-3A4000+
 */

#define LOONGSON_PMU_TYPE0 0
#define LOONGSON_PMU_TYPE1 1
#define LOONGSON_PMU_TYPE2 2
#define LOONGSON_PMU_TYPE3 3
169
170static inline int get_loongson3_pmu_type(void)
171{
172 if (boot_cpu_type() != CPU_LOONGSON64)
173 return LOONGSON_PMU_TYPE0;
174 if ((boot_cpu_data.processor_id & PRID_COMP_MASK) == PRID_COMP_LEGACY)
175 return LOONGSON_PMU_TYPE1;
176 if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C)
177 return LOONGSON_PMU_TYPE2;
178 if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G)
179 return LOONGSON_PMU_TYPE3;
180
181 return LOONGSON_PMU_TYPE0;
182}
183
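/*
 * When the counters are shared between the two VPEs of a core, VPE 1
 * reaches its pair of counters through hardware indexes 2 and 3, so the
 * perf-visible index is remapped (0/1 <-> 2/3) before touching CP0.
 */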
184static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
185{
186 if (vpe_id() == 1)
187 idx = (idx + 2) & 3;
188 return idx;
189}
190
191static u64 mipsxx_pmu_read_counter(unsigned int idx)
192{
193 idx = mipsxx_pmu_swizzle_perf_idx(idx);
194
	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
202 case 1:
203 return (u32)read_c0_perfcntr1();
204 case 2:
205 return (u32)read_c0_perfcntr2();
206 case 3:
207 return (u32)read_c0_perfcntr3();
208 default:
209 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
210 return 0;
211 }
212}
213
214static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
215{
216 u64 mask = CNTR_BIT_MASK(counter_bits);
217 idx = mipsxx_pmu_swizzle_perf_idx(idx);
218
219 switch (idx) {
220 case 0:
221 return read_c0_perfcntr0_64() & mask;
222 case 1:
223 return read_c0_perfcntr1_64() & mask;
224 case 2:
225 return read_c0_perfcntr2_64() & mask;
226 case 3:
227 return read_c0_perfcntr3_64() & mask;
228 default:
229 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
230 return 0;
231 }
232}
233
234static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
235{
236 idx = mipsxx_pmu_swizzle_perf_idx(idx);
237
238 switch (idx) {
239 case 0:
240 write_c0_perfcntr0(val);
241 return;
242 case 1:
243 write_c0_perfcntr1(val);
244 return;
245 case 2:
246 write_c0_perfcntr2(val);
247 return;
248 case 3:
249 write_c0_perfcntr3(val);
250 return;
251 }
252}
253
254static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
255{
256 val &= CNTR_BIT_MASK(counter_bits);
257 idx = mipsxx_pmu_swizzle_perf_idx(idx);
258
259 switch (idx) {
260 case 0:
261 write_c0_perfcntr0_64(val);
262 return;
263 case 1:
264 write_c0_perfcntr1_64(val);
265 return;
266 case 2:
267 write_c0_perfcntr2_64(val);
268 return;
269 case 3:
270 write_c0_perfcntr3_64(val);
271 return;
272 }
273}
274
275static unsigned int mipsxx_pmu_read_control(unsigned int idx)
276{
277 idx = mipsxx_pmu_swizzle_perf_idx(idx);
278
279 switch (idx) {
280 case 0:
281 return read_c0_perfctrl0();
282 case 1:
283 return read_c0_perfctrl1();
284 case 2:
285 return read_c0_perfctrl2();
286 case 3:
287 return read_c0_perfctrl3();
288 default:
289 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
290 return 0;
291 }
292}
293
294static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
295{
296 idx = mipsxx_pmu_swizzle_perf_idx(idx);
297
298 switch (idx) {
299 case 0:
300 write_c0_perfctrl0(val);
301 return;
302 case 1:
303 write_c0_perfctrl1(val);
304 return;
305 case 2:
306 write_c0_perfctrl2(val);
307 return;
308 case 3:
309 write_c0_perfctrl3(val);
310 return;
311 }
312}
313
static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;
	unsigned long cntr_mask;

	/*
	 * We only need to care about the counter mask. The range has been
	 * checked definitely.
	 */
	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
		cntr_mask = (hwc->event_base >> 10) & 0xffff;
	else
		cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others are counted
		 * only by even _or_ odd counters. This introduces an issue
		 * that when the former kind of event takes the counter the
		 * latter kind of event wants to use, the "counter
		 * allocation" for the latter event will fail. If they could
		 * be dynamically swapped, both could be assigned to their
		 * counters, but that is not implemented; we leave this
		 * issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}
347
static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct perf_event *event = container_of(evt, struct perf_event, hw);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned int range = evt->event_base >> 24;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
		cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) |
			(evt->config_base & M_PERFCTL_CONFIG_MASK) |
			/* Make sure interrupt enabled. */
			MIPS_PERFCTRL_IE;
	else
		cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
			(evt->config_base & M_PERFCTL_CONFIG_MASK) |
			/* Make sure interrupt enabled. */
			MIPS_PERFCTRL_IE;

	if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
	} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
		/* The counter is processor wide. Set it up to count all TCs. */
		pr_debug("Enabling perf counter for all TCs\n");
		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
	} else {
		unsigned int cpu, ctrl;

		/*
		 * Set up the counter for a particular CPU when event->cpu is
		 * a valid CPU number. Otherwise set up the counter on the CPU
		 * which this task is running on.
		 */
		cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();

		ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
		ctrl |= M_TC_EN_VPE;
		cpuc->saved_ctrl[idx] |= ctrl;
		pr_debug("Enabling perf counter for CPU%d\n", cpu);
	}

	/*
	 * We do not actually let the counter run. Leaving it until start().
	 */
}
394
395static void mipsxx_pmu_disable_event(int idx)
396{
397 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
398 unsigned long flags;
399
400 WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
401
402 local_irq_save(flags);
403 cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
404 ~M_PERFCTL_COUNT_EVENT_WHENEVER;
405 mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
406 local_irq_restore(flags);
407}
408
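/*
 * Program counter 'idx' for the remaining sample period: the counter is
 * preloaded with (overflow - left) and counts up, so it reaches the
 * overflow bit (and raises the interrupt) after 'left' more events.
 */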
409static int mipspmu_event_set_period(struct perf_event *event,
410 struct hw_perf_event *hwc,
411 int idx)
412{
413 u64 left = local64_read(&hwc->period_left);
414 u64 period = hwc->sample_period;
415 int ret = 0;
416
	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
430
431 if (left > mipspmu.max_period) {
432 left = mipspmu.max_period;
433 local64_set(&hwc->period_left, left);
434 }
435
436 local64_set(&hwc->prev_count, mipspmu.overflow - left);
437
438 if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
439 mipsxx_pmu_write_control(idx,
440 M_PERFCTL_EVENT(hwc->event_base & 0x3ff));
441
442 mipspmu.write_counter(idx, mipspmu.overflow - left);
443
444 perf_event_update_userpage(event);
445
446 return ret;
447}
448
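/*
 * Fold the current hardware count into the perf event: publish the new
 * raw count as prev_count with cmpxchg (retrying if an interrupt updated
 * it underneath us) and add the delta to the event count.
 */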
449static void mipspmu_event_update(struct perf_event *event,
450 struct hw_perf_event *hwc,
451 int idx)
452{
453 u64 prev_raw_count, new_raw_count;
454 u64 delta;
455
456again:
457 prev_raw_count = local64_read(&hwc->prev_count);
458 new_raw_count = mipspmu.read_counter(idx);
459
460 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
461 new_raw_count) != prev_raw_count)
462 goto again;
463
464 delta = new_raw_count - prev_raw_count;
465
466 local64_add(delta, &event->count);
467 local64_sub(delta, &hwc->period_left);
468}
469
470static void mipspmu_start(struct perf_event *event, int flags)
471{
472 struct hw_perf_event *hwc = &event->hw;
473
474 if (flags & PERF_EF_RELOAD)
475 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
476
477 hwc->state = 0;
478
	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
484}
485
486static void mipspmu_stop(struct perf_event *event, int flags)
487{
488 struct hw_perf_event *hwc = &event->hw;
489
490 if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
493 barrier();
494 mipspmu_event_update(event, hwc, hwc->idx);
495 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
496 }
497}
498
499static int mipspmu_add(struct perf_event *event, int flags)
500{
501 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
502 struct hw_perf_event *hwc = &event->hw;
503 int idx;
504 int err = 0;
505
506 perf_pmu_disable(event->pmu);
507
	/* To look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
510 if (idx < 0) {
511 err = idx;
512 goto out;
513 }
514
	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;
522
523 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
524 if (flags & PERF_EF_START)
525 mipspmu_start(event, PERF_EF_RELOAD);
526
	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);
529
530out:
531 perf_pmu_enable(event->pmu);
532 return err;
533}
534
535static void mipspmu_del(struct perf_event *event, int flags)
536{
537 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
538 struct hw_perf_event *hwc = &event->hw;
539 int idx = hwc->idx;
540
541 WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
542
543 mipspmu_stop(event, PERF_EF_UPDATE);
544 cpuc->events[idx] = NULL;
545 clear_bit(idx, cpuc->used_mask);
546
547 perf_event_update_userpage(event);
548}
549
550static void mipspmu_read(struct perf_event *event)
551{
552 struct hw_perf_event *hwc = &event->hw;
553
	/* Don't read disabled counters! */
	if (hwc->idx < 0)
556 return;
557
558 mipspmu_event_update(event, hwc, hwc->idx);
559}
560
561static void mipspmu_enable(struct pmu *pmu)
562{
563#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
564 write_unlock(&pmuint_rwlock);
565#endif
566 resume_local_counters();
567}
568
/*
 * MIPS performance counters can be per-TC. The control registers can
 * not be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross CPU calls. on_each_cpu() can help us, but we
 * can not make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If any counter interrupt raises while
 * we own the write lock, simply pause local counters on that CPU and
 * spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
580static void mipspmu_disable(struct pmu *pmu)
581{
582 pause_local_counters();
583#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
584 write_lock(&pmuint_rwlock);
585#endif
586}
587
588static atomic_t active_events = ATOMIC_INIT(0);
589static DEFINE_MUTEX(pmu_reserve_mutex);
590static int (*save_perf_irq)(void);
591
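/*
 * Hook up the counter overflow interrupt: use a dedicated IRQ line when
 * one exists, otherwise chain the handler through perf_irq so overflows
 * are serviced from the shared CP0 timer/perf count interrupt.
 */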
592static int mipspmu_get_irq(void)
593{
594 int err;
595
596 if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
599 IRQF_PERCPU | IRQF_NOBALANCING |
600 IRQF_NO_THREAD | IRQF_NO_SUSPEND |
601 IRQF_SHARED,
602 "mips_perf_pmu", &mipspmu);
603 if (err) {
604 pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
605 mipspmu.irq);
606 }
607 } else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
611 save_perf_irq = perf_irq;
612 perf_irq = mipsxx_pmu_handle_shared_irq;
613 err = 0;
614 } else {
615 pr_warn("The platform hasn't properly defined its interrupt controller\n");
616 err = -ENOENT;
617 }
618
619 return err;
620}
621
622static void mipspmu_free_irq(void)
623{
624 if (mipspmu.irq >= 0)
625 free_irq(mipspmu.irq, &mipspmu);
626 else if (cp0_perfcount_irq < 0)
627 perf_irq = save_perf_irq;
628}
629
630
631
632
633
634static void reset_counters(void *arg);
635static int __hw_perf_event_init(struct perf_event *event);
636
637static void hw_perf_event_destroy(struct perf_event *event)
638{
639 if (atomic_dec_and_mutex_lock(&active_events,
640 &pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled: on_each_cpu() needs interrupts enabled.
		 */
645 on_each_cpu(reset_counters,
646 (void *)(long)mipspmu.num_counters, 1);
647 mipspmu_free_irq();
648 mutex_unlock(&pmu_reserve_mutex);
649 }
650}
651
652static int mipspmu_event_init(struct perf_event *event)
653{
654 int err = 0;
655
	/* does not support taken branch sampling */
	if (has_branch_stack(event))
658 return -EOPNOTSUPP;
659
660 switch (event->attr.type) {
661 case PERF_TYPE_RAW:
662 case PERF_TYPE_HARDWARE:
663 case PERF_TYPE_HW_CACHE:
664 break;
665
666 default:
667 return -ENOENT;
668 }
669
670 if (event->cpu >= 0 && !cpu_online(event->cpu))
671 return -ENODEV;
672
673 if (!atomic_inc_not_zero(&active_events)) {
674 mutex_lock(&pmu_reserve_mutex);
675 if (atomic_read(&active_events) == 0)
676 err = mipspmu_get_irq();
677
678 if (!err)
679 atomic_inc(&active_events);
680 mutex_unlock(&pmu_reserve_mutex);
681 }
682
683 if (err)
684 return err;
685
686 return __hw_perf_event_init(event);
687}
688
689static struct pmu pmu = {
690 .pmu_enable = mipspmu_enable,
691 .pmu_disable = mipspmu_disable,
692 .event_init = mipspmu_event_init,
693 .add = mipspmu_add,
694 .del = mipspmu_del,
695 .start = mipspmu_start,
696 .stop = mipspmu_stop,
697 .read = mipspmu_read,
698};
699
700static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
701{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
706#ifdef CONFIG_MIPS_MT_SMP
707 if (num_possible_cpus() > 1)
708 return ((unsigned int)pev->range << 24) |
709 (pev->cntr_mask & 0xffff00) |
710 (pev->event_id & 0xff);
711 else
712#endif
713 {
714 if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
715 return (pev->cntr_mask & 0xfffc00) |
716 (pev->event_id & 0x3ff);
717 else
718 return (pev->cntr_mask & 0xffff00) |
719 (pev->event_id & 0xff);
720 }
721}
722
723static const struct mips_perf_event *mipspmu_map_general_event(int idx)
724{
725
726 if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
727 return ERR_PTR(-EOPNOTSUPP);
728 return &(*mipspmu.general_event_map)[idx];
729}
730
731static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
732{
733 unsigned int cache_type, cache_op, cache_result;
734 const struct mips_perf_event *pev;
735
736 cache_type = (config >> 0) & 0xff;
737 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
738 return ERR_PTR(-EINVAL);
739
740 cache_op = (config >> 8) & 0xff;
741 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
742 return ERR_PTR(-EINVAL);
743
744 cache_result = (config >> 16) & 0xff;
745 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
746 return ERR_PTR(-EINVAL);
747
748 pev = &((*mipspmu.cache_event_map)
749 [cache_type]
750 [cache_op]
751 [cache_result]);
752
753 if (pev->cntr_mask == 0)
754 return ERR_PTR(-EOPNOTSUPP);
755
756 return pev;
757
758}
759
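/*
 * Dry-run the counter allocation for the whole group on a scratch
 * cpu_hw_events to check that the group can ever be scheduled together.
 */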
760static int validate_group(struct perf_event *event)
761{
762 struct perf_event *sibling, *leader = event->group_leader;
763 struct cpu_hw_events fake_cpuc;
764
765 memset(&fake_cpuc, 0, sizeof(fake_cpuc));
766
767 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
768 return -EINVAL;
769
770 for_each_sibling_event(sibling, leader) {
771 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
772 return -EINVAL;
773 }
774
775 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
776 return -EINVAL;
777
778 return 0;
779}
780
781
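/*
 * A counter overflowed: fold in the hardware count, reprogram the next
 * period and hand the sample to the generic overflow path, which may ask
 * us to stop the event (e.g. when it is being throttled).
 */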
782static void handle_associated_event(struct cpu_hw_events *cpuc,
783 int idx, struct perf_sample_data *data,
784 struct pt_regs *regs)
785{
786 struct perf_event *event = cpuc->events[idx];
787 struct hw_perf_event *hwc = &event->hw;
788
789 mipspmu_event_update(event, hwc, idx);
790 data->period = event->hw.last_period;
791 if (!mipspmu_event_set_period(event, hwc, idx))
792 return;
793
794 if (perf_event_overflow(event, data, regs))
795 mipsxx_pmu_disable_event(idx);
796}
797
798
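/*
 * Probe the number of counters by walking the 'M' (more counters
 * implemented) bit in each successive perf control register.
 */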
799static int __n_counters(void)
800{
801 if (!cpu_has_perf)
802 return 0;
803 if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
804 return 1;
805 if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
806 return 2;
807 if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
808 return 3;
809
810 return 4;
811}
812
813static int n_counters(void)
814{
815 int counters;
816
817 switch (current_cpu_type()) {
818 case CPU_R10000:
819 counters = 2;
820 break;
821
822 case CPU_R12000:
823 case CPU_R14000:
824 case CPU_R16000:
825 counters = 4;
826 break;
827
828 default:
829 counters = __n_counters();
830 }
831
832 return counters;
833}
834
835static void loongson3_reset_counters(void *arg)
836{
837 int counters = (int)(long)arg;
838
839 switch (counters) {
840 case 4:
841 mipsxx_pmu_write_control(3, 0);
842 mipspmu.write_counter(3, 0);
843 mipsxx_pmu_write_control(3, 127<<5);
844 mipspmu.write_counter(3, 0);
845 mipsxx_pmu_write_control(3, 191<<5);
846 mipspmu.write_counter(3, 0);
847 mipsxx_pmu_write_control(3, 255<<5);
848 mipspmu.write_counter(3, 0);
849 mipsxx_pmu_write_control(3, 319<<5);
850 mipspmu.write_counter(3, 0);
851 mipsxx_pmu_write_control(3, 383<<5);
852 mipspmu.write_counter(3, 0);
853 mipsxx_pmu_write_control(3, 575<<5);
854 mipspmu.write_counter(3, 0);
855 fallthrough;
856 case 3:
857 mipsxx_pmu_write_control(2, 0);
858 mipspmu.write_counter(2, 0);
859 mipsxx_pmu_write_control(2, 127<<5);
860 mipspmu.write_counter(2, 0);
861 mipsxx_pmu_write_control(2, 191<<5);
862 mipspmu.write_counter(2, 0);
863 mipsxx_pmu_write_control(2, 255<<5);
864 mipspmu.write_counter(2, 0);
865 mipsxx_pmu_write_control(2, 319<<5);
866 mipspmu.write_counter(2, 0);
867 mipsxx_pmu_write_control(2, 383<<5);
868 mipspmu.write_counter(2, 0);
869 mipsxx_pmu_write_control(2, 575<<5);
870 mipspmu.write_counter(2, 0);
871 fallthrough;
872 case 2:
873 mipsxx_pmu_write_control(1, 0);
874 mipspmu.write_counter(1, 0);
875 mipsxx_pmu_write_control(1, 127<<5);
876 mipspmu.write_counter(1, 0);
877 mipsxx_pmu_write_control(1, 191<<5);
878 mipspmu.write_counter(1, 0);
879 mipsxx_pmu_write_control(1, 255<<5);
880 mipspmu.write_counter(1, 0);
881 mipsxx_pmu_write_control(1, 319<<5);
882 mipspmu.write_counter(1, 0);
883 mipsxx_pmu_write_control(1, 383<<5);
884 mipspmu.write_counter(1, 0);
885 mipsxx_pmu_write_control(1, 575<<5);
886 mipspmu.write_counter(1, 0);
887 fallthrough;
888 case 1:
889 mipsxx_pmu_write_control(0, 0);
890 mipspmu.write_counter(0, 0);
891 mipsxx_pmu_write_control(0, 127<<5);
892 mipspmu.write_counter(0, 0);
893 mipsxx_pmu_write_control(0, 191<<5);
894 mipspmu.write_counter(0, 0);
895 mipsxx_pmu_write_control(0, 255<<5);
896 mipspmu.write_counter(0, 0);
897 mipsxx_pmu_write_control(0, 319<<5);
898 mipspmu.write_counter(0, 0);
899 mipsxx_pmu_write_control(0, 383<<5);
900 mipspmu.write_counter(0, 0);
901 mipsxx_pmu_write_control(0, 575<<5);
902 mipspmu.write_counter(0, 0);
903 break;
904 }
905}
906
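/*
 * Stop and zero every counter on the calling CPU; run via on_each_cpu()
 * at init time and when the last event is destroyed.
 */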
907static void reset_counters(void *arg)
908{
909 int counters = (int)(long)arg;
910
911 if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
912 loongson3_reset_counters(arg);
913 return;
914 }
915
916 switch (counters) {
917 case 4:
918 mipsxx_pmu_write_control(3, 0);
919 mipspmu.write_counter(3, 0);
920 fallthrough;
921 case 3:
922 mipsxx_pmu_write_control(2, 0);
923 mipspmu.write_counter(2, 0);
924 fallthrough;
925 case 2:
926 mipsxx_pmu_write_control(1, 0);
927 mipspmu.write_counter(1, 0);
928 fallthrough;
929 case 1:
930 mipsxx_pmu_write_control(0, 0);
931 mipspmu.write_counter(0, 0);
932 break;
933 }
934}
935
/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
937static const struct mips_perf_event mipsxxcore_event_map
938 [PERF_COUNT_HW_MAX] = {
939 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
940 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
941 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
942 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
943};
944
/* 74K/proAptiv core has different branch event code. */
946static const struct mips_perf_event mipsxxcore_event_map2
947 [PERF_COUNT_HW_MAX] = {
948 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
949 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
950 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
951 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
952};
953
954static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
955 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD },
956 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD },
	/* These only count dcache, not icache */
958 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x45, CNTR_EVEN | CNTR_ODD },
959 [PERF_COUNT_HW_CACHE_MISSES] = { 0x48, CNTR_EVEN | CNTR_ODD },
960 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
961 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x16, CNTR_EVEN | CNTR_ODD },
962};
963
964static const struct mips_perf_event loongson3_event_map1[PERF_COUNT_HW_MAX] = {
965 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
966 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
967 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
968 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
969};
970
971static const struct mips_perf_event loongson3_event_map2[PERF_COUNT_HW_MAX] = {
972 [PERF_COUNT_HW_CPU_CYCLES] = { 0x80, CNTR_ALL },
973 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x81, CNTR_ALL },
974 [PERF_COUNT_HW_CACHE_MISSES] = { 0x18, CNTR_ALL },
975 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x94, CNTR_ALL },
976 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x9c, CNTR_ALL },
977};
978
979static const struct mips_perf_event loongson3_event_map3[PERF_COUNT_HW_MAX] = {
980 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_ALL },
981 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_ALL },
982 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x1c, CNTR_ALL },
983 [PERF_COUNT_HW_CACHE_MISSES] = { 0x1d, CNTR_ALL },
984 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_ALL },
985 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x08, CNTR_ALL },
986};
987
988static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
989 [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
990 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
991 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
992 [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
993 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
994 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
995 [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
996};
997
998static const struct mips_perf_event bmips5000_event_map
999 [PERF_COUNT_HW_MAX] = {
1000 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
1001 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
1002 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
1003};
1004
1005static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
1006 [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
1007 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL },
1008 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL },
1009 [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL },
1010 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL },
1011 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL },
1012};
1013
/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
1015static const struct mips_perf_event mipsxxcore_cache_map
1016 [PERF_COUNT_HW_CACHE_MAX]
1017 [PERF_COUNT_HW_CACHE_OP_MAX]
1018 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1019[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
1026 [C(OP_READ)] = {
1027 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
1028 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
1029 },
1030 [C(OP_WRITE)] = {
1031 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
1032 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
1033 },
1034},
1035[C(L1I)] = {
1036 [C(OP_READ)] = {
1037 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
1038 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
1039 },
1040 [C(OP_WRITE)] = {
1041 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
1042 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
1043 },
1044 [C(OP_PREFETCH)] = {
1045 [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
1050 },
1051},
1052[C(LL)] = {
1053 [C(OP_READ)] = {
1054 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
1055 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
1056 },
1057 [C(OP_WRITE)] = {
1058 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
1059 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
1060 },
1061},
1062[C(DTLB)] = {
1063 [C(OP_READ)] = {
1064 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
1065 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
1066 },
1067 [C(OP_WRITE)] = {
1068 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
1069 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
1070 },
1071},
1072[C(ITLB)] = {
1073 [C(OP_READ)] = {
1074 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
1075 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
1076 },
1077 [C(OP_WRITE)] = {
1078 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
1079 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
1080 },
1081},
1082[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
1084 [C(OP_READ)] = {
1085 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
1086 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
1087 },
1088 [C(OP_WRITE)] = {
1089 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
1090 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
1091 },
1092},
1093};
1094
/* 74K/proAptiv core has completely different cache event map. */
1096static const struct mips_perf_event mipsxxcore_cache_map2
1097 [PERF_COUNT_HW_CACHE_MAX]
1098 [PERF_COUNT_HW_CACHE_OP_MAX]
1099 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1100[C(L1D)] = {
1101
1102
1103
1104
1105
1106
1107 [C(OP_READ)] = {
1108 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
1109 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
1110 },
1111 [C(OP_WRITE)] = {
1112 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
1113 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
1114 },
1115},
1116[C(L1I)] = {
1117 [C(OP_READ)] = {
1118 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
1119 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
1120 },
1121 [C(OP_WRITE)] = {
1122 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
1123 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
1124 },
1125 [C(OP_PREFETCH)] = {
1126 [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
1127
1128
1129
1130
1131 },
1132},
1133[C(LL)] = {
1134 [C(OP_READ)] = {
1135 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
1136 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
1137 },
1138 [C(OP_WRITE)] = {
1139 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
1140 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
1141 },
1142},
/*
 * 74K core does not have specific DTLB events. proAptiv core has
 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
 * not included here. One can use raw events if really needed.
 */
1148[C(ITLB)] = {
1149 [C(OP_READ)] = {
1150 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
1151 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
1152 },
1153 [C(OP_WRITE)] = {
1154 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
1155 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
1156 },
1157},
1158[C(BPU)] = {
1159
1160 [C(OP_READ)] = {
1161 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
1162 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
1163 },
1164 [C(OP_WRITE)] = {
1165 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
1166 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
1167 },
1168},
1169};
1170
1171static const struct mips_perf_event i6x00_cache_map
1172 [PERF_COUNT_HW_CACHE_MAX]
1173 [PERF_COUNT_HW_CACHE_OP_MAX]
1174 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1175[C(L1D)] = {
1176 [C(OP_READ)] = {
1177 [C(RESULT_ACCESS)] = { 0x46, CNTR_EVEN | CNTR_ODD },
1178 [C(RESULT_MISS)] = { 0x49, CNTR_EVEN | CNTR_ODD },
1179 },
1180 [C(OP_WRITE)] = {
1181 [C(RESULT_ACCESS)] = { 0x47, CNTR_EVEN | CNTR_ODD },
1182 [C(RESULT_MISS)] = { 0x4a, CNTR_EVEN | CNTR_ODD },
1183 },
1184},
1185[C(L1I)] = {
1186 [C(OP_READ)] = {
1187 [C(RESULT_ACCESS)] = { 0x84, CNTR_EVEN | CNTR_ODD },
1188 [C(RESULT_MISS)] = { 0x85, CNTR_EVEN | CNTR_ODD },
1189 },
1190},
1191[C(DTLB)] = {
1192
1193 [C(OP_READ)] = {
1194 [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
1195 [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
1196 },
1197 [C(OP_WRITE)] = {
1198 [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
1199 [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
1200 },
1201},
1202[C(BPU)] = {
1203
1204 [C(OP_READ)] = {
1205 [C(RESULT_ACCESS)] = { 0x15, CNTR_EVEN | CNTR_ODD },
1206 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN | CNTR_ODD },
1207 },
1208},
1209};
1210
1211static const struct mips_perf_event loongson3_cache_map1
1212 [PERF_COUNT_HW_CACHE_MAX]
1213 [PERF_COUNT_HW_CACHE_OP_MAX]
1214 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1215[C(L1D)] = {
1216
1217
1218
1219
1220
1221
1222 [C(OP_READ)] = {
1223 [C(RESULT_MISS)] = { 0x04, CNTR_ODD },
1224 },
1225 [C(OP_WRITE)] = {
1226 [C(RESULT_MISS)] = { 0x04, CNTR_ODD },
1227 },
1228},
1229[C(L1I)] = {
1230 [C(OP_READ)] = {
1231 [C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
1232 },
1233 [C(OP_WRITE)] = {
1234 [C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
1235 },
1236},
1237[C(DTLB)] = {
1238 [C(OP_READ)] = {
1239 [C(RESULT_MISS)] = { 0x09, CNTR_ODD },
1240 },
1241 [C(OP_WRITE)] = {
1242 [C(RESULT_MISS)] = { 0x09, CNTR_ODD },
1243 },
1244},
1245[C(ITLB)] = {
1246 [C(OP_READ)] = {
1247 [C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
1248 },
1249 [C(OP_WRITE)] = {
1250 [C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
1251 },
1252},
1253[C(BPU)] = {
1254
1255 [C(OP_READ)] = {
1256 [C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN },
1257 [C(RESULT_MISS)] = { 0x01, CNTR_ODD },
1258 },
1259 [C(OP_WRITE)] = {
1260 [C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN },
1261 [C(RESULT_MISS)] = { 0x01, CNTR_ODD },
1262 },
1263},
1264};
1265
1266static const struct mips_perf_event loongson3_cache_map2
1267 [PERF_COUNT_HW_CACHE_MAX]
1268 [PERF_COUNT_HW_CACHE_OP_MAX]
1269 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1270[C(L1D)] = {
1271
1272
1273
1274
1275
1276
1277 [C(OP_READ)] = {
1278 [C(RESULT_ACCESS)] = { 0x156, CNTR_ALL },
1279 },
1280 [C(OP_WRITE)] = {
1281 [C(RESULT_ACCESS)] = { 0x155, CNTR_ALL },
1282 [C(RESULT_MISS)] = { 0x153, CNTR_ALL },
1283 },
1284},
1285[C(L1I)] = {
1286 [C(OP_READ)] = {
1287 [C(RESULT_MISS)] = { 0x18, CNTR_ALL },
1288 },
1289 [C(OP_WRITE)] = {
1290 [C(RESULT_MISS)] = { 0x18, CNTR_ALL },
1291 },
1292},
1293[C(LL)] = {
1294 [C(OP_READ)] = {
1295 [C(RESULT_ACCESS)] = { 0x1b6, CNTR_ALL },
1296 },
1297 [C(OP_WRITE)] = {
1298 [C(RESULT_ACCESS)] = { 0x1b7, CNTR_ALL },
1299 },
1300 [C(OP_PREFETCH)] = {
1301 [C(RESULT_ACCESS)] = { 0x1bf, CNTR_ALL },
1302 },
1303},
1304[C(DTLB)] = {
1305 [C(OP_READ)] = {
1306 [C(RESULT_MISS)] = { 0x92, CNTR_ALL },
1307 },
1308 [C(OP_WRITE)] = {
1309 [C(RESULT_MISS)] = { 0x92, CNTR_ALL },
1310 },
1311},
1312[C(ITLB)] = {
1313 [C(OP_READ)] = {
1314 [C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
1315 },
1316 [C(OP_WRITE)] = {
1317 [C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
1318 },
1319},
1320[C(BPU)] = {
1321
1322 [C(OP_READ)] = {
1323 [C(RESULT_ACCESS)] = { 0x94, CNTR_ALL },
1324 [C(RESULT_MISS)] = { 0x9c, CNTR_ALL },
1325 },
1326},
1327};
1328
1329static const struct mips_perf_event loongson3_cache_map3
1330 [PERF_COUNT_HW_CACHE_MAX]
1331 [PERF_COUNT_HW_CACHE_OP_MAX]
1332 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1333[C(L1D)] = {
1334
1335
1336
1337
1338
1339
1340 [C(OP_READ)] = {
1341 [C(RESULT_ACCESS)] = { 0x1e, CNTR_ALL },
1342 [C(RESULT_MISS)] = { 0x1f, CNTR_ALL },
1343 },
1344 [C(OP_PREFETCH)] = {
1345 [C(RESULT_ACCESS)] = { 0xaa, CNTR_ALL },
1346 [C(RESULT_MISS)] = { 0xa9, CNTR_ALL },
1347 },
1348},
1349[C(L1I)] = {
1350 [C(OP_READ)] = {
1351 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ALL },
1352 [C(RESULT_MISS)] = { 0x1d, CNTR_ALL },
1353 },
1354},
1355[C(LL)] = {
1356 [C(OP_READ)] = {
1357 [C(RESULT_ACCESS)] = { 0x2e, CNTR_ALL },
1358 [C(RESULT_MISS)] = { 0x2f, CNTR_ALL },
1359 },
1360},
1361[C(DTLB)] = {
1362 [C(OP_READ)] = {
1363 [C(RESULT_ACCESS)] = { 0x14, CNTR_ALL },
1364 [C(RESULT_MISS)] = { 0x1b, CNTR_ALL },
1365 },
1366},
1367[C(ITLB)] = {
1368 [C(OP_READ)] = {
1369 [C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
1370 },
1371},
1372[C(BPU)] = {
1373
1374 [C(OP_READ)] = {
1375 [C(RESULT_ACCESS)] = { 0x02, CNTR_ALL },
1376 [C(RESULT_MISS)] = { 0x08, CNTR_ALL },
1377 },
1378},
1379};
1380
1381
1382static const struct mips_perf_event bmips5000_cache_map
1383 [PERF_COUNT_HW_CACHE_MAX]
1384 [PERF_COUNT_HW_CACHE_OP_MAX]
1385 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1386[C(L1D)] = {
1387
1388
1389
1390
1391
1392
1393 [C(OP_READ)] = {
1394 [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
1395 [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
1396 },
1397 [C(OP_WRITE)] = {
1398 [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
1399 [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
1400 },
1401},
1402[C(L1I)] = {
1403 [C(OP_READ)] = {
1404 [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
1405 [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
1406 },
1407 [C(OP_WRITE)] = {
1408 [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
1409 [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
1410 },
1411 [C(OP_PREFETCH)] = {
1412 [C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T },
1413
1414
1415
1416
1417 },
1418},
1419[C(LL)] = {
1420 [C(OP_READ)] = {
1421 [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
1422 [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
1423 },
1424 [C(OP_WRITE)] = {
1425 [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
1426 [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
1427 },
1428},
1429[C(BPU)] = {
1430
1431 [C(OP_READ)] = {
1432 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
1433 },
1434 [C(OP_WRITE)] = {
1435 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
1436 },
1437},
1438};
1439
1440static const struct mips_perf_event octeon_cache_map
1441 [PERF_COUNT_HW_CACHE_MAX]
1442 [PERF_COUNT_HW_CACHE_OP_MAX]
1443 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1444[C(L1D)] = {
1445 [C(OP_READ)] = {
1446 [C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
1447 [C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
1448 },
1449 [C(OP_WRITE)] = {
1450 [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
1451 },
1452},
1453[C(L1I)] = {
1454 [C(OP_READ)] = {
1455 [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
1456 },
1457 [C(OP_PREFETCH)] = {
1458 [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
1459 },
1460},
1461[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
	 */
1466 [C(OP_READ)] = {
1467 [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
1468 },
1469 [C(OP_WRITE)] = {
1470 [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
1471 },
1472},
1473[C(ITLB)] = {
1474 [C(OP_READ)] = {
1475 [C(RESULT_MISS)] = { 0x37, CNTR_ALL },
1476 },
1477},
1478};
1479
1480static const struct mips_perf_event xlp_cache_map
1481 [PERF_COUNT_HW_CACHE_MAX]
1482 [PERF_COUNT_HW_CACHE_OP_MAX]
1483 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1484[C(L1D)] = {
1485 [C(OP_READ)] = {
1486 [C(RESULT_ACCESS)] = { 0x31, CNTR_ALL },
1487 [C(RESULT_MISS)] = { 0x30, CNTR_ALL },
1488 },
1489 [C(OP_WRITE)] = {
1490 [C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL },
1491 [C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
1492 },
1493},
1494[C(L1I)] = {
1495 [C(OP_READ)] = {
1496 [C(RESULT_ACCESS)] = { 0x04, CNTR_ALL },
1497 [C(RESULT_MISS)] = { 0x07, CNTR_ALL },
1498 },
1499},
1500[C(LL)] = {
1501 [C(OP_READ)] = {
1502 [C(RESULT_ACCESS)] = { 0x35, CNTR_ALL },
1503 [C(RESULT_MISS)] = { 0x37, CNTR_ALL },
1504 },
1505 [C(OP_WRITE)] = {
1506 [C(RESULT_ACCESS)] = { 0x34, CNTR_ALL },
1507 [C(RESULT_MISS)] = { 0x36, CNTR_ALL },
1508 },
1509},
1510[C(DTLB)] = {
1511
1512
1513
1514
1515 [C(OP_READ)] = {
1516 [C(RESULT_MISS)] = { 0x2d, CNTR_ALL },
1517 },
1518 [C(OP_WRITE)] = {
1519 [C(RESULT_MISS)] = { 0x2d, CNTR_ALL },
1520 },
1521},
1522[C(ITLB)] = {
1523 [C(OP_READ)] = {
1524 [C(RESULT_MISS)] = { 0x08, CNTR_ALL },
1525 },
1526 [C(OP_WRITE)] = {
1527 [C(RESULT_MISS)] = { 0x08, CNTR_ALL },
1528 },
1529},
1530[C(BPU)] = {
1531 [C(OP_READ)] = {
1532 [C(RESULT_MISS)] = { 0x25, CNTR_ALL },
1533 },
1534},
1535};
1536
1537static int __hw_perf_event_init(struct perf_event *event)
1538{
1539 struct perf_event_attr *attr = &event->attr;
1540 struct hw_perf_event *hwc = &event->hw;
1541 const struct mips_perf_event *pev;
1542 int err;
1543
	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}
1559
1560 if (IS_ERR(pev)) {
1561 if (PERF_TYPE_RAW == event->attr.type)
1562 mutex_unlock(&raw_event_mutex);
1563 return PTR_ERR(pev);
1564 }
1565
	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = MIPS_PERFCTRL_IE;
1571
1572 hwc->event_base = mipspmu_perf_event_encode(pev);
1573 if (PERF_TYPE_RAW == event->attr.type)
1574 mutex_unlock(&raw_event_mutex);
1575
1576 if (!attr->exclude_user)
1577 hwc->config_base |= MIPS_PERFCTRL_U;
1578 if (!attr->exclude_kernel) {
1579 hwc->config_base |= MIPS_PERFCTRL_K;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= MIPS_PERFCTRL_EXL;
1582 }
1583 if (!attr->exclude_hv)
1584 hwc->config_base |= MIPS_PERFCTRL_S;
1585
1586 hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
1592 hwc->config = 0;
1593
1594 if (!hwc->sample_period) {
1595 hwc->sample_period = mipspmu.max_period;
1596 hwc->last_period = hwc->sample_period;
1597 local64_set(&hwc->period_left, hwc->sample_period);
1598 }
1599
1600 err = 0;
1601 if (event->group_leader != event)
1602 err = validate_group(event);
1603
1604 event->destroy = hw_perf_event_destroy;
1605
1606 if (err)
1607 event->destroy(event);
1608
1609 return err;
1610}
1611
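/* Stop all counters on the local CPU, remembering their control settings. */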
1612static void pause_local_counters(void)
1613{
1614 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1615 int ctr = mipspmu.num_counters;
1616 unsigned long flags;
1617
1618 local_irq_save(flags);
1619 do {
1620 ctr--;
1621 cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
1622 mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
1623 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1624 } while (ctr > 0);
1625 local_irq_restore(flags);
1626}
1627
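/* Restart all counters on the local CPU from the saved control settings. */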
1628static void resume_local_counters(void)
1629{
1630 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1631 int ctr = mipspmu.num_counters;
1632
1633 do {
1634 ctr--;
1635 mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
1636 } while (ctr > 0);
1637}
1638
1639static int mipsxx_pmu_handle_shared_irq(void)
1640{
1641 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1642 struct perf_sample_data data;
1643 unsigned int counters = mipspmu.num_counters;
1644 u64 counter;
1645 int n, handled = IRQ_NONE;
1646 struct pt_regs *regs;
1647
1648 if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
1649 return handled;
1650
	/*
	 * Pause local counters first so they hold a stable value while we
	 * are in the handler, then take the read lock: if another CPU holds
	 * the write lock (a global disable is in progress), we simply wait
	 * here with our counters paused rather than touching shared state.
	 */
1657 pause_local_counters();
1658#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1659 read_lock(&pmuint_rwlock);
1660#endif
1661
1662 regs = get_irq_regs();
1663
1664 perf_sample_data_init(&data, 0, 0);
1665
1666 for (n = counters - 1; n >= 0; n--) {
1667 if (!test_bit(n, cpuc->used_mask))
1668 continue;
1669
1670 counter = mipspmu.read_counter(n);
1671 if (!(counter & mipspmu.overflow))
1672 continue;
1673
1674 handle_associated_event(cpuc, n, &data, regs);
1675 handled = IRQ_HANDLED;
1676 }
1677
1678#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1679 read_unlock(&pmuint_rwlock);
1680#endif
1681 resume_local_counters();
1682
	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
1688 if (handled == IRQ_HANDLED)
1689 irq_work_run();
1690
1691 return handled;
1692}
1693
1694static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1695{
1696 return mipsxx_pmu_handle_shared_irq();
1697}
1698
1699
1700#define IS_BOTH_COUNTERS_24K_EVENT(b) \
1701 ((b) == 0 || (b) == 1 || (b) == 11)
1702
1703
1704#define IS_BOTH_COUNTERS_34K_EVENT(b) \
1705 ((b) == 0 || (b) == 1 || (b) == 11)
1706#ifdef CONFIG_MIPS_MT_SMP
1707#define IS_RANGE_P_34K_EVENT(r, b) \
1708 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
1709 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
1710 (r) == 176 || ((b) >= 50 && (b) <= 55) || \
1711 ((b) >= 64 && (b) <= 67))
1712#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1713#endif
1714
1715
1716#define IS_BOTH_COUNTERS_74K_EVENT(b) \
1717 ((b) == 0 || (b) == 1)
1718
1719
1720#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \
1721 ((b) == 0 || (b) == 1)
1722
1723#define IS_BOTH_COUNTERS_P5600_EVENT(b) \
1724 ((b) == 0 || (b) == 1)
1725
1726
1727#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
1728 ((b) == 0 || (b) == 1 || (b) == 11)
1729#ifdef CONFIG_MIPS_MT_SMP
1730#define IS_RANGE_P_1004K_EVENT(r, b) \
1731 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
1732 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
1733 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
1734 (r) == 188 || (b) == 61 || (b) == 62 || \
1735 ((b) >= 64 && (b) <= 67))
1736#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
1737#endif
1738
1739
1740#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \
1741 ((b) == 0 || (b) == 1 || (b) == 11)
1742#ifdef CONFIG_MIPS_MT_SMP
1743
1744#define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \
1745 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
1746 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \
1747 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \
1748 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \
1749 ((b) >= 64 && (b) <= 67))
1750#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175)
1751#endif
1752
1753
1754#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \
1755 ((b) == 0 || (b) == 1)
/*
 * Most cores accept raw event numbers 0-255: values 0-127 select events
 * counted on even counters and 128-255 the same events on odd counters,
 * i.e. bit 7 of the raw config is the even/odd counter selector. For
 * example, to count event 15 on an odd counter the raw config is
 * 15 + 128 = 143 (0x8f). P5600/P6600 use 9-bit raw events with bit 8 as
 * the selector, while I6400/I6500 and newer Loongson-3 cores take plain
 * event numbers that may run on any counter; see the per-CPU cases
 * below. The decoded result is returned through the file-global
 * 'raw_event', which is why callers serialize on raw_event_mutex.
 */
1769
1770static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1771{
1772
1773 int pmu_type;
1774 unsigned int raw_id = config & 0xff;
1775 unsigned int base_id = raw_id & 0x7f;
1776
1777 switch (current_cpu_type()) {
1778 case CPU_24K:
1779 if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1780 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1781 else
1782 raw_event.cntr_mask =
1783 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1784#ifdef CONFIG_MIPS_MT_SMP
1785
1786
1787
1788
1789 raw_event.range = P;
1790#endif
1791 break;
1792 case CPU_34K:
1793 if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1794 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1795 else
1796 raw_event.cntr_mask =
1797 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1798#ifdef CONFIG_MIPS_MT_SMP
1799 if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1800 raw_event.range = P;
1801 else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1802 raw_event.range = V;
1803 else
1804 raw_event.range = T;
1805#endif
1806 break;
1807 case CPU_74K:
1808 case CPU_1074K:
1809 if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1810 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1811 else
1812 raw_event.cntr_mask =
1813 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1814#ifdef CONFIG_MIPS_MT_SMP
1815 raw_event.range = P;
1816#endif
1817 break;
1818 case CPU_PROAPTIV:
1819 if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
1820 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1821 else
1822 raw_event.cntr_mask =
1823 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1824#ifdef CONFIG_MIPS_MT_SMP
1825 raw_event.range = P;
1826#endif
1827 break;
1828 case CPU_P5600:
1829 case CPU_P6600:
1830
1831 raw_id = config & 0x1ff;
1832 base_id = raw_id & 0xff;
1833 if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
1834 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1835 else
1836 raw_event.cntr_mask =
1837 raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
1838#ifdef CONFIG_MIPS_MT_SMP
1839 raw_event.range = P;
1840#endif
1841 break;
1842 case CPU_I6400:
1843 case CPU_I6500:
1844
1845 base_id = config & 0xff;
1846 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1847 break;
1848 case CPU_1004K:
1849 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1850 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1851 else
1852 raw_event.cntr_mask =
1853 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1854#ifdef CONFIG_MIPS_MT_SMP
1855 if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1856 raw_event.range = P;
1857 else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1858 raw_event.range = V;
1859 else
1860 raw_event.range = T;
1861#endif
1862 break;
1863 case CPU_INTERAPTIV:
1864 if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
1865 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1866 else
1867 raw_event.cntr_mask =
1868 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1869#ifdef CONFIG_MIPS_MT_SMP
1870 if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
1871 raw_event.range = P;
1872 else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
1873 raw_event.range = V;
1874 else
1875 raw_event.range = T;
1876#endif
1877 break;
1878 case CPU_BMIPS5000:
1879 if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1880 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1881 else
1882 raw_event.cntr_mask =
1883 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1884 break;
1885 case CPU_LOONGSON64:
1886 pmu_type = get_loongson3_pmu_type();
1887
1888 switch (pmu_type) {
1889 case LOONGSON_PMU_TYPE1:
1890 raw_event.cntr_mask =
1891 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1892 break;
1893 case LOONGSON_PMU_TYPE2:
1894 base_id = config & 0x3ff;
1895 raw_event.cntr_mask = CNTR_ALL;
1896
1897 if ((base_id >= 1 && base_id < 28) ||
1898 (base_id >= 64 && base_id < 90) ||
1899 (base_id >= 128 && base_id < 164) ||
1900 (base_id >= 192 && base_id < 200) ||
1901 (base_id >= 256 && base_id < 275) ||
1902 (base_id >= 320 && base_id < 361) ||
1903 (base_id >= 384 && base_id < 574))
1904 break;
1905
1906 return ERR_PTR(-EOPNOTSUPP);
1907 case LOONGSON_PMU_TYPE3:
1908 base_id = raw_id;
1909 raw_event.cntr_mask = CNTR_ALL;
1910 break;
1911 }
1912 break;
1913 }
1914
1915 raw_event.event_id = base_id;
1916
1917 return &raw_event;
1918}
1919
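/*
 * Octeon raw events: any event number up to the model-specific maximum is
 * accepted, apart from the reserved encodings filtered out below.
 */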
1920static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1921{
1922 unsigned int base_id = config & 0x7f;
1923 unsigned int event_max;
1924
1925
1926 raw_event.cntr_mask = CNTR_ALL;
1927 raw_event.event_id = base_id;
1928
1929 if (current_cpu_type() == CPU_CAVIUM_OCTEON3)
1930 event_max = 0x5f;
1931 else if (current_cpu_type() == CPU_CAVIUM_OCTEON2)
1932 event_max = 0x42;
1933 else
1934 event_max = 0x3a;
1935
1936 if (base_id > event_max) {
1937 return ERR_PTR(-EOPNOTSUPP);
1938 }
1939
1940 switch (base_id) {
1941 case 0x00:
1942 case 0x0f:
1943 case 0x1e:
1944 case 0x1f:
1945 case 0x2f:
1946 case 0x34:
1947 case 0x3e ... 0x3f:
1948 return ERR_PTR(-EOPNOTSUPP);
1949 default:
1950 break;
1951 }
1952
1953 return &raw_event;
1954}
1955
1956static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
1957{
1958 unsigned int raw_id = config & 0xff;
1959
	/* Only 1-63 are defined */
	if ((raw_id < 0x01) || (raw_id > 0x3f))
1962 return ERR_PTR(-EOPNOTSUPP);
1963
1964 raw_event.cntr_mask = CNTR_ALL;
1965 raw_event.event_id = raw_id;
1966
1967 return &raw_event;
1968}
1969
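/*
 * Early-init PMU probe: count the hardware counters, select the
 * CPU-specific event tables, counter width and IRQ, reset everything and
 * register with the perf core as the "cpu" PMU.
 */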
1970static int __init
1971init_hw_perf_events(void)
1972{
1973 int counters, irq, pmu_type;
1974
1975 pr_info("Performance counters: ");
1976
1977 counters = n_counters();
1978 if (counters == 0) {
1979 pr_cont("No available PMU.\n");
1980 return -ENODEV;
1981 }
1982
1983#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1984 if (!cpu_has_mipsmt_pertccounters)
1985 counters = counters_total_to_per_cpu(counters);
1986#endif
1987
1988 if (get_c0_perfcount_int)
1989 irq = get_c0_perfcount_int();
1990 else if (cp0_perfcount_irq >= 0)
1991 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1992 else
1993 irq = -1;
1994
1995 mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1996
1997 switch (current_cpu_type()) {
1998 case CPU_24K:
1999 mipspmu.name = "mips/24K";
2000 mipspmu.general_event_map = &mipsxxcore_event_map;
2001 mipspmu.cache_event_map = &mipsxxcore_cache_map;
2002 break;
2003 case CPU_34K:
2004 mipspmu.name = "mips/34K";
2005 mipspmu.general_event_map = &mipsxxcore_event_map;
2006 mipspmu.cache_event_map = &mipsxxcore_cache_map;
2007 break;
2008 case CPU_74K:
2009 mipspmu.name = "mips/74K";
2010 mipspmu.general_event_map = &mipsxxcore_event_map2;
2011 mipspmu.cache_event_map = &mipsxxcore_cache_map2;
2012 break;
2013 case CPU_PROAPTIV:
2014 mipspmu.name = "mips/proAptiv";
2015 mipspmu.general_event_map = &mipsxxcore_event_map2;
2016 mipspmu.cache_event_map = &mipsxxcore_cache_map2;
2017 break;
2018 case CPU_P5600:
2019 mipspmu.name = "mips/P5600";
2020 mipspmu.general_event_map = &mipsxxcore_event_map2;
2021 mipspmu.cache_event_map = &mipsxxcore_cache_map2;
2022 break;
2023 case CPU_P6600:
2024 mipspmu.name = "mips/P6600";
2025 mipspmu.general_event_map = &mipsxxcore_event_map2;
2026 mipspmu.cache_event_map = &mipsxxcore_cache_map2;
2027 break;
2028 case CPU_I6400:
2029 mipspmu.name = "mips/I6400";
2030 mipspmu.general_event_map = &i6x00_event_map;
2031 mipspmu.cache_event_map = &i6x00_cache_map;
2032 break;
2033 case CPU_I6500:
2034 mipspmu.name = "mips/I6500";
2035 mipspmu.general_event_map = &i6x00_event_map;
2036 mipspmu.cache_event_map = &i6x00_cache_map;
2037 break;
2038 case CPU_1004K:
2039 mipspmu.name = "mips/1004K";
2040 mipspmu.general_event_map = &mipsxxcore_event_map;
2041 mipspmu.cache_event_map = &mipsxxcore_cache_map;
2042 break;
2043 case CPU_1074K:
2044 mipspmu.name = "mips/1074K";
2045 mipspmu.general_event_map = &mipsxxcore_event_map;
2046 mipspmu.cache_event_map = &mipsxxcore_cache_map;
2047 break;
2048 case CPU_INTERAPTIV:
2049 mipspmu.name = "mips/interAptiv";
2050 mipspmu.general_event_map = &mipsxxcore_event_map;
2051 mipspmu.cache_event_map = &mipsxxcore_cache_map;
2052 break;
2053 case CPU_LOONGSON32:
2054 mipspmu.name = "mips/loongson1";
2055 mipspmu.general_event_map = &mipsxxcore_event_map;
2056 mipspmu.cache_event_map = &mipsxxcore_cache_map;
2057 break;
2058 case CPU_LOONGSON64:
2059 mipspmu.name = "mips/loongson3";
2060 pmu_type = get_loongson3_pmu_type();
2061
2062 switch (pmu_type) {
2063 case LOONGSON_PMU_TYPE1:
2064 counters = 2;
2065 mipspmu.general_event_map = &loongson3_event_map1;
2066 mipspmu.cache_event_map = &loongson3_cache_map1;
2067 break;
2068 case LOONGSON_PMU_TYPE2:
2069 counters = 4;
2070 mipspmu.general_event_map = &loongson3_event_map2;
2071 mipspmu.cache_event_map = &loongson3_cache_map2;
2072 break;
2073 case LOONGSON_PMU_TYPE3:
2074 counters = 4;
2075 mipspmu.general_event_map = &loongson3_event_map3;
2076 mipspmu.cache_event_map = &loongson3_cache_map3;
2077 break;
2078 }
2079 break;
2080 case CPU_CAVIUM_OCTEON:
2081 case CPU_CAVIUM_OCTEON_PLUS:
2082 case CPU_CAVIUM_OCTEON2:
2083 case CPU_CAVIUM_OCTEON3:
2084 mipspmu.name = "octeon";
2085 mipspmu.general_event_map = &octeon_event_map;
2086 mipspmu.cache_event_map = &octeon_cache_map;
2087 mipspmu.map_raw_event = octeon_pmu_map_raw_event;
2088 break;
2089 case CPU_BMIPS5000:
2090 mipspmu.name = "BMIPS5000";
2091 mipspmu.general_event_map = &bmips5000_event_map;
2092 mipspmu.cache_event_map = &bmips5000_cache_map;
2093 break;
2094 case CPU_XLP:
2095 mipspmu.name = "xlp";
2096 mipspmu.general_event_map = &xlp_event_map;
2097 mipspmu.cache_event_map = &xlp_cache_map;
2098 mipspmu.map_raw_event = xlp_pmu_map_raw_event;
2099 break;
2100 default:
2101 pr_cont("Either hardware does not support performance "
2102 "counters, or not yet implemented.\n");
2103 return -ENODEV;
2104 }
2105
2106 mipspmu.num_counters = counters;
2107 mipspmu.irq = irq;
2108
2109 if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
2110 if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
2111 counter_bits = 48;
2112 mipspmu.max_period = (1ULL << 47) - 1;
2113 mipspmu.valid_count = (1ULL << 47) - 1;
2114 mipspmu.overflow = 1ULL << 47;
2115 } else {
2116 counter_bits = 64;
2117 mipspmu.max_period = (1ULL << 63) - 1;
2118 mipspmu.valid_count = (1ULL << 63) - 1;
2119 mipspmu.overflow = 1ULL << 63;
2120 }
2121 mipspmu.read_counter = mipsxx_pmu_read_counter_64;
2122 mipspmu.write_counter = mipsxx_pmu_write_counter_64;
2123 } else {
2124 counter_bits = 32;
2125 mipspmu.max_period = (1ULL << 31) - 1;
2126 mipspmu.valid_count = (1ULL << 31) - 1;
2127 mipspmu.overflow = 1ULL << 31;
2128 mipspmu.read_counter = mipsxx_pmu_read_counter;
2129 mipspmu.write_counter = mipsxx_pmu_write_counter;
2130 }
2131
2132 on_each_cpu(reset_counters, (void *)(long)counters, 1);
2133
2134 pr_cont("%s PMU enabled, %d %d-bit counters available to each "
2135 "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
2136 irq < 0 ? " (share with timer interrupt)" : "");
2137
2138 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
2139
2140 return 0;
2141}
2142early_initcall(init_hw_perf_events);
2143