/*
 * Performance event support - powerpc architecture code.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>

#define BHRB_MAX_ENTRIES	32
#define BHRB_TARGET		0x0000000000000002
#define BHRB_PREDICTION		0x0000000000000001
#define BHRB_EA			0xFFFFFFFFFFFFFFFC

struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	unsigned long mmcr[3];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int group_flag;
	int n_txn_start;

	u64 bhrb_filter;
	int bhrb_users;
	void *bhrb_context;
	struct perf_branch_stack bhrb_stack;
	struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct power_pmu *ppmu;
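
/*
 * Normally we use the FCS (freeze counters in supervisor mode) bit in
 * MMCR0 to ignore kernel events; if the kernel runs with the MSR HV
 * bit set, we use FCHV (freeze counters in hypervisor mode) instead,
 * see register_power_pmu() below.
 */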
static unsigned int freeze_events_kernel = MMCR0_FCS;
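
/*
 * 32-bit processors have no MMCRA and no sampling or BHRB facilities,
 * so provide stub versions of the sampling-related helpers.
 */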
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
void power_pmu_flush_branch_stack(void) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
#endif /* CONFIG_PPC32 */

static bool regs_use_siar(struct pt_regs *regs)
{
	return !!regs->result;
}
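
/*
 * Things that are specific to 64-bit implementations.
 */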
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}

	return 0;
}
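
/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC,
 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit
 * in SIER.
 */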
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	bool sdar_valid;

	if (ppmu->flags & PPMU_HAS_SIER)
		sdar_valid = regs->dar & SIER_SDAR_VALID;
	else {
		unsigned long sdsync;

		if (ppmu->flags & PPMU_SIAR_VALID)
			sdsync = POWER7P_MMCRA_SDAR_VALID;
		else if (ppmu->flags & PPMU_ALT_SIPR)
			sdsync = POWER6_MMCRA_SDSYNC;
		else
			sdsync = MMCRA_SDSYNC;

		sdar_valid = mmcra & sdsync;
	}

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
		*addrp = mfspr(SPRN_SDAR);
}

static bool regs_sihv(struct pt_regs *regs)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIHV);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(regs->dsisr & sihv);
}

static bool regs_sipr(struct pt_regs *regs)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIPR);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(regs->dsisr & sipr);
}

static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results.
	 */
	if (ppmu->flags & PPMU_NO_SIPR) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (siar >= PAGE_OFFSET)
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (regs_sipr(regs))
		return PERF_RECORD_MISC_USER;

	if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;

	return PERF_RECORD_MISC_KERNEL;
}
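
/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->dar to store SIER if we have it.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero) for the SIAR and SDAR.
 */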
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	regs->dsisr = mmcra;

	if (ppmu->flags & PPMU_HAS_SIER)
		regs->dar = mfspr(SPRN_SIER);

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;			/* not a PMU interrupt */
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
		use_siar = 0;
	else
		use_siar = 1;

	regs->result = use_siar;
}
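
/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */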
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}
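
/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the
 * SIAR-Valid bit, assume that SIAR is valid.
 */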
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if (marked) {
		if (ppmu->flags & PPMU_HAS_SIER)
			return regs->dar & SIER_SIAR_VALID;

		if (ppmu->flags & PPMU_SIAR_VALID)
			return mmcra & POWER7P_MMCRA_SIAR_VALID;
	}

	return 1;
}

/* Reset all possible BHRB entries */
static void power_pmu_bhrb_reset(void)
{
	asm volatile(PPC_CLRBHRB);
}

static void power_pmu_bhrb_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	/* Clear BHRB if we changed task context to avoid data leaks */
	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
		power_pmu_bhrb_reset();
		cpuhw->bhrb_context = event->ctx;
	}
	cpuhw->bhrb_users++;
}

static void power_pmu_bhrb_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	cpuhw->bhrb_users--;
	WARN_ON_ONCE(cpuhw->bhrb_users < 0);

	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
		/* BHRB cannot be turned off when other
		 * events are active on the PMU.
		 */

		/* avoid stale pointer */
		cpuhw->bhrb_context = NULL;
	}
}

/* Called from ctxsw to prevent one process's branch entries
 * mingling with another process's entries during context switch.
 */
void power_pmu_flush_branch_stack(void)
{
	if (ppmu->bhrb_nr)
		power_pmu_bhrb_reset();
}

static __u64 power_pmu_bhrb_to(u64 addr)
{
	unsigned int instr;
	int ret;
	__u64 target;

	if (is_kernel_addr(addr))
		return branch_target((unsigned int *)addr);

	/* Userspace: need to copy the instruction here, then translate it */
	pagefault_disable();
	ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
	if (ret) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	target = branch_target(&instr);
	if ((!target) || (instr & BRANCH_ABSOLUTE))
		return target;

	/* Translate relative branch target from kernel to user address */
	return target - (unsigned long)&instr + addr;
}

/* Processing BHRB entries */
void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
{
	u64 val;
	u64 addr;
	int r_index, u_index, pred;

	r_index = 0;
	u_index = 0;
	while (r_index < ppmu->bhrb_nr) {
		/* Assembly read function */
		val = read_bhrb(r_index++);
		if (!val)
			/* Terminal marker: end of valid BHRB entries */
			break;
		else {
			addr = val & BHRB_EA;
			pred = val & BHRB_PREDICTION;

			if (!addr)
				/* invalid entry */
				continue;

			/* Branches are read most recent first (ie. mfbhrb 0
			 * is the most recent branch).
			 * There are two types of valid entries:
			 * 1) a target entry which is the to address of a
			 *    computed goto like a blr,bctr,btar.  The next
			 *    entry read from the bhrb will be the branch
			 *    corresponding to this target (ie. the actual
			 *    blr/bctr/btar instruction).
			 * 2) a from address which is an actual branch.  If a
			 *    target entry precedes this, then this is the
			 *    matching branch for that target.  If this is not
			 *    preceded by a target entry, then this is a
			 *    branch where the target is given as an immediate
			 *    field in the instruction (ie. an i or b form
			 *    branch).  In this case we don't want the target.
			 */
			if (val & BHRB_TARGET) {
				/* Target branches use two entries
				 * (ie. computed gotos/XL form)
				 */
				cpuhw->bhrb_entries[u_index].to = addr;
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;

				/* Get from address in next entry */
				val = read_bhrb(r_index++);
				addr = val & BHRB_EA;
				if (val & BHRB_TARGET) {
					/* Shouldn't have two targets in a
					 * row; reset index and try again.
					 */
					r_index--;
					addr = 0;
				}
				cpuhw->bhrb_entries[u_index].from = addr;
			} else {
				/* Branches to immediate field
				 * (ie. I or B form)
				 */
				cpuhw->bhrb_entries[u_index].from = addr;
				cpuhw->bhrb_entries[u_index].to =
					power_pmu_bhrb_to(addr);
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;
			}
			u_index++;
		}
	}
	cpuhw->bhrb_stack.nr = u_index;
	return;
}

#endif /* CONFIG_PPC64 */

static void perf_event_interrupt(struct pt_regs *regs);

void perf_event_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
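
/*
 * Check whether a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * Feasibility is decided by the per-PMU get_constraint() callback
 * together with the add_fields/test_adder bit-vector arithmetic below.
 */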
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;

	/* The straightforward assignment didn't work; gather alternatives */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}

		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
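
/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */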
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values, if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless we rolled a counter over.  If a counter is
	 * rolled back, it will be smaller, but within 256, which is the
	 * maximum number of events to rollback at once.  If we detect a
	 * rollback return 0.  This can lead to a small lack of precision in
	 * the counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}

static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs, and use a cmpxchg loop
	 * so a concurrent update of prev_count is never lost.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}
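
/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `event' is using such a PMC.
 */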
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		if (check_and_compute_delta(prev, val))
			local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}
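
/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events.  We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */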
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
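
/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */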
static void power_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled.
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}
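
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */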
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_events == 0)
			ppc_set_pmu_inuse(0);
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of events.
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first event.
	 * We have already checked that all events have the
	 * same values for these bits as the first event.
	 */
	event = cpuhw->event[0];
	if (event->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (event->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_events_kernel;
	if (event->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (event->hw.sample_period) {
			left = local64_read(&event->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		local64_set(&event->hw.prev_count, val);
		event->hw.idx = idx;
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		write_pmc(idx, val);
		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary.
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	if (cpuhw->bhrb_users)
		ppmu->config_bhrb(cpuhw->bhrb_filter);

	local_irq_restore(flags);
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
	return n;
}
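
/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */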
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

	/*
	 * This event may have been disabled/stopped in record_and_restart()
	 * because we exceeded the ->event_limit. If re-starting the event,
	 * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
	 * notification is re-enabled.
	 */
	if (!(ef_flags & PERF_EF_START))
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	else
		event->hw.state = 0;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuhw->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;
	event->hw.config = cpuhw->events[n0];

nocheck:
	++cpuhw->n_events;
	++cpuhw->n_added;

	ret = 0;
 out:
	if (has_branch_stack(event))
		power_pmu_bhrb_enable(event);

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove an event from the PMU.
 */
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

	cpuhw = &__get_cpu_var(cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events) {
				cpuhw->event[i-1] = cpuhw->event[i];
				cpuhw->events[i-1] = cpuhw->events[i];
				cpuhw->flags[i-1] = cpuhw->flags[i];
			}
			--cpuhw->n_events;
			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
				event->hw.idx = 0;
			}
			perf_event_update_userpage(event);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	if (has_branch_stack(event))
		power_pmu_bhrb_disable(event);

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
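
/*
 * Re-start a stopped event: reprogram its PMC so that it next
 * overflows after the remaining period, then clear the STOPPED
 * and UPTODATE state bits.
 */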
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;
	unsigned long val;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);

	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;

	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
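
/*
 * Start group events scheduling transaction.
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time.
 */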
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}
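
/*
 * Stop group events scheduling transaction.
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */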
void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}
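
/*
 * Commit group events scheduling transaction.
 * Perform the group schedulability test as a whole,
 * return 0 on success.
 */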
int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;

	if (!ppmu)
		return -EAGAIN;
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
	if (i < 0)
		return -EAGAIN;

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}
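
/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */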
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}
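
/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * for a given event_id (ie. a limited event_id).
 */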
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}

static int power_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags;
	struct perf_event *ctrs[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int cflags[MAX_HWEVENTS];
	int n;
	int err;
	struct cpu_hw_events *cpuhw;

	if (!ppmu)
		return -ENOENT;

	if (has_branch_stack(event)) {
		/* PMU must have BHRB support */
		if (!(ppmu->flags & PPMU_BHRB))
			return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;
	default:
		return -ENOENT;
	}

	event->hw.config_base = ev;
	event->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		event->attr.exclude_hv = 0;

	/*
	 * If this is a per-task event, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 */
	flags = 0;
	if (event->attach_state & PERF_ATTACH_TASK)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited events, check whether this
	 * event_id could go on a limited event.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(event, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event_id is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return -EINVAL;
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = ev;
	ctrs[n] = event;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return -EINVAL;

	cpuhw = &get_cpu_var(cpu_hw_events);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);

	if (has_branch_stack(event)) {
		cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
					event->attr.branch_sample_type);

		if (cpuhw->bhrb_filter == -1) {
			put_cpu_var(cpu_hw_events);
			return -EOPNOTSUPP;
		}
	}

	put_cpu_var(cpu_hw_events);
	if (err)
		return -EINVAL;

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	return err;
}

static int power_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx;
}

ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

struct pmu power_pmu = {
	.pmu_enable	= power_pmu_enable,
	.pmu_disable	= power_pmu_disable,
	.event_init	= power_pmu_event_init,
	.add		= power_pmu_add,
	.del		= power_pmu_del,
	.start		= power_pmu_start,
	.stop		= power_pmu_stop,
	.read		= power_pmu_read,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
	.event_idx	= power_pmu_event_idx,
	.flush_branch_stack = power_pmu_flush_branch_stack,
};
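
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */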
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update the event count and the period to the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (delta == 0)
		left++;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = siar_valid(regs);
			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
			struct cpu_hw_events *cpuhw;
			cpuhw = &__get_cpu_var(cpu_hw_events);
			power_pmu_bhrb_read(cpuhw);
			data.br_stack = &cpuhw->bhrb_stack;
		}

		if (perf_event_overflow(event, &data, regs))
			power_pmu_stop(event, 0);
	}
}

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (use_siar && siar_valid(regs))
		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		return 0;		/* no valid instruction pointer */
	else
		return regs->nip;
}

static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}

static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	return false;
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i, j;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val[8];
	int found, active;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	/* Read all the PMCs since we'll need them a bunch of times */
	for (i = 0; i < ppmu->n_counter; ++i)
		val[i] = read_pmc(i + 1);

	/* Try to find what caused the IRQ */
	found = 0;
	for (i = 0; i < ppmu->n_counter; ++i) {
		if (!pmc_overflow(val[i]))
			continue;
		if (is_limited_pmc(i + 1))
			continue; /* these won't interrupt */
		/*
		 * We've found one that's overflowed.  For active
		 * counters we need to log this.  For inactive
		 * counters, we need to reset it anyway.
		 */
		found = 1;
		active = 0;
		for (j = 0; j < cpuhw->n_events; ++j) {
			event = cpuhw->event[j];
			if (event->hw.idx == (i + 1)) {
				active = 1;
				record_and_restart(event, val[i], regs);
				break;
			}
		}
		if (!active)
			/* reset non active counters that have overflowed */
			write_pmc(i + 1, 0);
	}
	if (!found && pvr_version_is(PVR_POWER7)) {
		/* check active counters for special buggy p7 overflow */
		for (i = 0; i < cpuhw->n_events; ++i) {
			event = cpuhw->event[i];
			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
				continue;
			if (pmc_overflow_power7(val[event->hw.idx - 1])) {
				/* event has overflowed in a buggy way? */
				found = 1;
				record_and_restart(event,
						   val[event->hw.idx - 1],
						   regs);
			}
		}
	}
	if (!found && !nmi && printk_ratelimit())
		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

static void power_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		power_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

int __cpuinit register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* MSR_HV */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(power_pmu_notifier);

	return 0;
}