/*
 * Performance event support - powerpc architecture code
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	unsigned long mmcr[3];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int group_flag;
	int n_txn_start;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct power_pmu *ppmu;
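
/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */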
static unsigned int freeze_events_kernel = MMCR0_FCS;
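
/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */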
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

#endif /* CONFIG_PPC32 */
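
/*
 * Things that are specific to 64-bit implementations.
 */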
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}
	return 0;
}
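
/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
 */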
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync;

	if (ppmu->flags & PPMU_SIAR_VALID)
		sdsync = POWER7P_MMCRA_SDAR_VALID;
	else if (ppmu->flags & PPMU_ALT_SIPR)
		sdsync = POWER6_MMCRA_SDSYNC;
	else
		sdsync = MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}

static bool mmcra_sihv(unsigned long mmcra)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(mmcra & sihv);
}

static bool mmcra_sipr(unsigned long mmcra)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(mmcra & sipr);
}

static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long use_siar = regs->result;

	if (!use_siar)
		return perf_flags_from_msr(regs);
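
	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results.
	 */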
	if (ppmu->flags & PPMU_NO_SIPR) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (siar >= PAGE_OFFSET)
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (mmcra_sipr(mmcra))
		return PERF_RECORD_MISC_USER;
	if (mmcra_sihv(mmcra) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}
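
/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */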
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts disabled, so for those we use the SIAR.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!(ppmu->flags & PPMU_NO_SIPR) && mmcra_sipr(mmcra))
		use_siar = 0;
	else
		use_siar = 1;

	regs->dsisr = mmcra;
	regs->result = use_siar;
}
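
/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */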
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}
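
/*
 * On processors like POWER7+ that have the SIAR-Valid bit, marked
 * instructions must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the
 * SIAR-Valid bit, assume that SIAR is valid.
 */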
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
		return mmcra & POWER7P_MMCRA_SIAR_VALID;

	return 1;
}

#endif /* CONFIG_PPC64 */

static void perf_event_interrupt(struct pt_regs *regs);

void perf_event_print_debug(void)
{
}
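
/*
 * Read one performance monitor counter (PMC).
 */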
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}
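
/*
 * Write one PMC.
 */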
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
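
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasibility depends on the type of each event and on the
 * constraints among the events and what the PMU supports.
 */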
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * its first alternative.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
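
/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */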
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values, if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless we rolled a counter over.  If a counter is
	 * rolled back, it will be smaller, but within 256, which is the
	 * maximum number of events to roll back at once.  If we detect a
	 * rollback, return 0.  This can lead to a small lack of precision in
	 * the counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}

static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;

	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs, and retry if an interrupt
	 * updated prev_count underneath us.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}
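
/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `pmcnum' refers to such a "limited" PMC.
 */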
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		if (check_and_compute_delta(prev, val))
			local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}
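
/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events.  We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */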
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
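
/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */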
static void power_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled.
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}
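
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */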
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_events == 0)
			ppc_set_pmu_inuse(0);
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of events.
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first event.
	 * We have already checked that all events have the
	 * same values for these bits as the first event.
	 */
	event = cpuhw->event[0];
	if (event->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (event->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_events_kernel;
	if (event->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (event->hw.sample_period) {
			left = local64_read(&event->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		local64_set(&event->hw.prev_count, val);
		event->hw.idx = idx;
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		write_pmc(idx, val);
		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary.
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
	return n;
}
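
/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */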
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

	/*
	 * This event may have been disabled/stopped in record_and_restart()
	 * because we exceeded the ->event_limit. If re-starting the event,
	 * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
	 * notification is re-enabled.
	 */
	if (!(ef_flags & PERF_EF_START))
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	else
		event->hw.state = 0;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuhw->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;
	event->hw.config = cpuhw->events[n0];

nocheck:
	++cpuhw->n_events;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}
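
/*
 * Remove an event from the PMU.
 */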
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

	cpuhw = &__get_cpu_var(cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events) {
				cpuhw->event[i-1] = cpuhw->event[i];
				cpuhw->events[i-1] = cpuhw->events[i];
				cpuhw->flags[i-1] = cpuhw->flags[i];
			}
			--cpuhw->n_events;
			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
				event->hw.idx = 0;
			}
			perf_event_update_userpage(event);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
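
/*
 * Re-enable a stopped event: program the PMC so that it will
 * overflow after another period_left events, and clear the
 * STOPPED/UPTODATE state.
 */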
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;
	unsigned long val;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);

	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;

	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
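
/*
 * Start group events scheduling transaction.
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time.
 */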
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}
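
/*
 * Stop group events scheduling transaction.
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */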
void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}
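
/*
 * Commit group events scheduling transaction.
 * Perform the group schedulability test as a whole.
 * Return 0 on success.
 */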
int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;

	if (!ppmu)
		return -EAGAIN;
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
	if (i < 0)
		return -EAGAIN;

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}
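
/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * which a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */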
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}
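
/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */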
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
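
/*
 * Release the PMU if this is the last perf_event.
 */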
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
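
/*
 * Translate a generic cache event_id config to a raw event_id code.
 */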
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}

static int power_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags;
	struct perf_event *ctrs[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int cflags[MAX_HWEVENTS];
	int n;
	int err;
	struct cpu_hw_events *cpuhw;

	if (!ppmu)
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;
	default:
		return -ENOENT;
	}

	event->hw.config_base = ev;
	event->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		event->attr.exclude_hv = 0;

	/*
	 * If this is a per-task event, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 */
	flags = 0;
	if (event->attach_state & PERF_ATTACH_TASK)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited events, check whether this
	 * event_id could go on a limited event.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(event, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event_id is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return -EINVAL;
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into the group list yet.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = ev;
	ctrs[n] = event;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return -EINVAL;

	cpuhw = &get_cpu_var(cpu_hw_events);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);
	put_cpu_var(cpu_hw_events);
	if (err)
		return -EINVAL;

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	return err;
}

static int power_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx;
}

ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

struct pmu power_pmu = {
	.pmu_enable	= power_pmu_enable,
	.pmu_disable	= power_pmu_disable,
	.event_init	= power_pmu_event_init,
	.add		= power_pmu_add,
	.del		= power_pmu_del,
	.start		= power_pmu_start,
	.stop		= power_pmu_stop,
	.read		= power_pmu_read,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
	.event_idx	= power_pmu_event_idx,
};
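
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */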
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (delta == 0)
		left++;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = siar_valid(regs);
			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (perf_event_overflow(event, &data, regs))
			power_pmu_stop(event, 0);
	}
}
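
/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */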
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}
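
/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */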
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long use_siar = regs->result;

	if (use_siar && siar_valid(regs))
		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		return 0;
	else
		return regs->nip;
}

static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass over the counters fails to find
	 * any overflowing PMCs, because a user might set a period of less
	 * than 256 and we don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}

static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	return false;
}
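
/*
 * Performance monitor interrupt stuff
 */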
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i, j;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val[8];
	int found, active;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	/* Read all the PMCs since we'll need them a bunch of times */
	for (i = 0; i < ppmu->n_counter; ++i)
		val[i] = read_pmc(i + 1);

	/* Try to find what caused the IRQ */
	found = 0;
	for (i = 0; i < ppmu->n_counter; ++i) {
		if (!pmc_overflow(val[i]))
			continue;
		if (is_limited_pmc(i + 1))
			continue; /* these won't generate IRQs */
		/*
		 * We've found one that's overflowed.  For active
		 * counters we need to log this.  For inactive
		 * counters, we need to reset it anyway.
		 */
		found = 1;
		active = 0;
		for (j = 0; j < cpuhw->n_events; ++j) {
			event = cpuhw->event[j];
			if (event->hw.idx == (i + 1)) {
				active = 1;
				record_and_restart(event, val[i], regs);
				break;
			}
		}
		if (!active)
			/* event has overflowed in a buggy way? */
			write_pmc(i + 1, 0);
	}
	if (!found && pvr_version_is(PVR_POWER7)) {
		/* check active counters for special buggy p7 overflow */
		for (i = 0; i < cpuhw->n_events; ++i) {
			event = cpuhw->event[i];
			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
				continue;
			if (pmc_overflow_power7(val[event->hw.idx - 1])) {
				/* event has overflowed */
				found = 1;
				record_and_restart(event,
						   val[event->hw.idx - 1],
						   regs);
			}
		}
	}
	if (!found && printk_ratelimit())
		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

static void power_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		power_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

int __cpuinit register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* MSR_HV */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(power_pmu_notifier);

	return 0;
}