// SPDX-License-Identifier: GPL-2.0
/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 */
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h>

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
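
/*
 * Per-CPU bookkeeping for the events that are currently scheduled onto
 * the local hardware performance counters.
 */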
struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

/*
 * Scratch descriptor filled in by the map_raw_event() callbacks;
 * serialized by raw_event_mutex.
 */
static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;
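
/*
 * There is a single, system-wide PMU description; init_hw_perf_events()
 * fills it in with the counter accessors and event maps that match the
 * CPU type detected at boot.
 */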

#define M_PERFCTL_EVENT(event)	(((event) << MIPS_PERFCTRL_EVENT_S) & \
				 MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)	((vpe) << MIPS_PERFCTRL_VPEID_S)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)	0
#else
#define M_PERFCTL_MT_EN(filter)	(filter)
#endif

#define M_TC_EN_ALL	M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
#define M_TC_EN_VPE	M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
#define M_TC_EN_TC	M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(MIPS_PERFCTRL_EXL |	\
					 MIPS_PERFCTRL_K |	\
					 MIPS_PERFCTRL_U |	\
					 MIPS_PERFCTRL_S |	\
					 MIPS_PERFCTRL_IE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK	0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK	0x1f
#endif
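
/*
 * On multi-threaded cores without per-TC counters, the hardware counters
 * are shared between the VPEs of a core, so global control needs the
 * rwlock below; vpe_id() selects which half of the counter set the local
 * CPU may use.
 */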
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : cpu_vpe_id(&current_cpu_data))
#endif

static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

/*
 * Map a logical counter index to the physical counter: the second VPE
 * of a core uses the upper half of the shared counter set.
 */
static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}
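
/*
 * Pick a free hardware counter for @hwc on @cpuc, honouring the counter
 * mask encoded in hwc->event_base (some events may only be counted on
 * even or on odd numbered counters).
 */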
static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care the counter mask. The range has been
	 * verified in __hw_perf_event_init().
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many other events are
		 * only counted by even _or_ odd counters. This introduces
		 * an issue that when the former kind of event takes the
		 * counter the latter kind of event wants to use, then the
		 * "counter allocation" for the latter event will fail. In
		 * fact if they can be dynamically swapped, they both can
		 * be assigned. But here we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct perf_event *event = container_of(evt, struct perf_event, hw);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
#ifdef CONFIG_MIPS_MT_SMP
	unsigned int range = evt->event_base >> 24;
#endif /* CONFIG_MIPS_MT_SMP */

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure interrupt enabled. */
		MIPS_PERFCTRL_IE;

#ifdef CONFIG_CPU_BMIPS5000
	{
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
	}
#else
#ifdef CONFIG_MIPS_MT_SMP
	if (range > V) {
		/* The counter is processor wide. Set it up to count all TCs. */
		pr_debug("Enabling perf counter for all TCs\n");
		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
	} else
#endif /* CONFIG_MIPS_MT_SMP */
	{
		unsigned int cpu, ctrl;

		/*
		 * Set up the counter for a particular CPU when event->cpu is
		 * a valid CPU number. Otherwise set up the counter for the CPU
		 * scheduling this thread.
		 */
		cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();

		ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
		ctrl |= M_TC_EN_VPE;
		cpuc->saved_ctrl[idx] |= ctrl;
		pr_debug("Enabling perf counter for CPU%d\n", cpu);
	}
#endif /* CONFIG_CPU_BMIPS5000 */
	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}

static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
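
/*
 * pmu::add claims a free hardware counter for the event and leaves it
 * stopped; the perf core then asks for it to be started (here via
 * PERF_EF_START) when the event should actually begin counting.
 */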
static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* To look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers can
 * not be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross CPU calls. on_each_cpu() can help us, but we
 * can not make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If any counter interrupt raises while
 * we own the write lock, simply pause local counters on that CPU and
 * spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}
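
/*
 * The performance counter interrupt is requested on first use and
 * released again when the last event is destroyed; active_events and
 * pmu_reserve_mutex implement that reference counting.
 */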
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING |
				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				  IRQF_SHARED,
				  "mips_perf_pmu", &mipspmu);
		if (err) {
			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
				mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warn("The platform hasn't properly defined its interrupt controller\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, &mipspmu);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= 0 && !cpu_online(event->cpu))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	if (num_possible_cpus() > 1)
		return ((unsigned int)pev->range << 24) |
			(pev->cntr_mask & 0xffff00) |
			(pev->event_id & 0xff);
	else
#endif /* CONFIG_MIPS_MT_SMP */
		return ((pev->cntr_mask & 0xffff00) |
			(pev->event_id & 0xff));
}

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}
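
/*
 * Group validation: simulate scheduling the whole group onto a pristine
 * fake cpu_hw_events; if any member fails to get a counter, the group
 * can never run together and must be rejected up front.
 */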
static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}

/*
 * Probe how many counters the core implements by walking the M
 * (more-counters) bit in each control register.
 */
static int __n_counters(void)
{
	if (!cpu_has_perf)
		return 0;
	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
		return 1;
	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
		return 2;
	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}
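
/*
 * Clear the control and count registers of the first @arg counters;
 * run on each CPU via on_each_cpu(). Each case deliberately falls
 * through so the lower-numbered counters are reset as well.
 */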
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
		/* fall through */
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
		/* fall through */
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
		/* fall through */
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K/proAptiv core has different branch event code. */
static const struct mips_perf_event mipsxxcore_event_map2
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
	/* These only count dcache, not icache */
	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
};

static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

static const struct mips_perf_event bmips5000_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL },
};

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

/* 74K/proAptiv core has completely different cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map2
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
	},
},
/*
 * The 74K core does not have specific DTLB events; proAptiv DTLB events
 * are not included here either. Raw events can be used if really needed.
 */
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event i6x00_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
	},
},
[C(DTLB)] = {
	/* Can't distinguish read & write */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
	},
},
[C(BPU)] = {
	/* Branch instructions / mispredicted */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
	},
},
};

static const struct mips_perf_event loongson3_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_EVEN },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x04, CNTR_EVEN },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x0c, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x0c, CNTR_ODD },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD },
	},
},
};

/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted. Use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
},
};

static const struct mips_perf_event xlp_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted. Use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
	},
},
};
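
/*
 * Finish perf event setup once the type-specific descriptor has been
 * looked up: encode it into hwc->event_base, translate the exclude_*
 * attributes into control register bits and set a default sample period.
 */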
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = MIPS_PERFCTRL_IE;

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= MIPS_PERFCTRL_U;
	if (!attr->exclude_kernel) {
		hwc->config_base |= MIPS_PERFCTRL_K;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= MIPS_PERFCTRL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= MIPS_PERFCTRL_S;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period = mipspmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}
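
/*
 * Shared interrupt handler: scan the counters that are in use on this
 * CPU and fold any that overflowed back into their events. The counters
 * are paused across the scan so the readings are stable.
 */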
static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_pmu_disable(), the timer interrupt handler will be delayed.
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}
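
/*
 * The IS_BOTH_COUNTERS_*_EVENT() and IS_RANGE_*_EVENT() macros below
 * encode, per core type, which raw event numbers may be counted on both
 * even and odd counters, and which ones are processor (P) or VPE (V)
 * wide rather than per-TC (T).
 */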
/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* proAptiv */
#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1)
/* P5600 */
#define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/* interAptiv */
#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
#endif

/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
	((b) == 0 || (b) == 1)

/*
 * For most cores the user can use 0-255 raw events, where 0-127 are for
 * the events of even counters, and 128-255 for odd counters. Bit 7
 * selects the counter bank: for example, to count event number 15 on an
 * odd counter, pass 15 | 128 = 143 (0x8f) as the raw config.
 *
 * Some newer cores have more events; there the user can use 0-511 raw
 * events, where 0-255 are for the events of even counters and 256-511
 * are for odd counters.
 */
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	/* currently most cores have 7-bit event numbers */
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
	case CPU_1074K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_PROAPTIV:
		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_P5600:
	case CPU_P6600:
		/* 8-bit event numbers */
		raw_id = config & 0x1ff;
		base_id = raw_id & 0xff;
		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_I6400:
	case CPU_I6500:
		/* 8-bit event numbers */
		base_id = config & 0xff;
		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_INTERAPTIV:
		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_BMIPS5000:
		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		break;
	case CPU_LOONGSON3:
		raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		break;
	}

	raw_event.event_id = base_id;

	return &raw_event;
}

static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some event numbers in the valid range are not implemented. */
	switch (base_id) {
	case 0x00:
	case 0x0f:
	case 0x1e:
	case 0x1f:
	case 0x2f:
	case 0x34:
	case 0x3b ... 0x3f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;

	/* Only 1-63 are defined. */
	if ((raw_id < 0x01) || (raw_id > 0x3f))
		return ERR_PTR(-EOPNOTSUPP);

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = raw_id;

	return &raw_event;
}
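
/*
 * Boot-time probe: count the hardware counters, pick the counter IRQ,
 * select the per-core event tables and register the PMU with the perf
 * core.
 */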
static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

	if (get_c0_perfcount_int)
		irq = get_c0_perfcount_int();
	else if (cp0_perfcount_irq >= 0)
		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		irq = -1;

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_PROAPTIV:
		mipspmu.name = "mips/proAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P5600:
		mipspmu.name = "mips/P5600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P6600:
		mipspmu.name = "mips/P6600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_I6400:
		mipspmu.name = "mips/I6400";
		mipspmu.general_event_map = &i6x00_event_map;
		mipspmu.cache_event_map = &i6x00_cache_map;
		break;
	case CPU_I6500:
		mipspmu.name = "mips/I6500";
		mipspmu.general_event_map = &i6x00_event_map;
		mipspmu.cache_event_map = &i6x00_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_1074K:
		mipspmu.name = "mips/1074K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_INTERAPTIV:
		mipspmu.name = "mips/interAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON1:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON3:
		mipspmu.name = "mips/loongson3";
		mipspmu.general_event_map = &loongson3_event_map;
		mipspmu.cache_event_map = &loongson3_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	case CPU_BMIPS5000:
		mipspmu.name = "BMIPS5000";
		mipspmu.general_event_map = &bmips5000_event_map;
		mipspmu.cache_event_map = &bmips5000_cache_map;
		break;
	case CPU_XLP:
		mipspmu.name = "xlp";
		mipspmu.general_event_map = &xlp_event_map;
		mipspmu.cache_event_map = &xlp_cache_map;
		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);