1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/errno.h>
16#include <linux/bug.h>
17#include <linux/spinlock.h>
18#include <linux/export.h>
19
20#include <asm/processor.h>
21#include <asm/cputable.h>
22#include <asm/pmc.h>
23
/*
 * Not all platform headers define MMCR0_PMAO.  Define it as 0 there so
 * that masking it out below (~(MMCR0_PMXE|MMCR0_PMAO)) compiles and is
 * a harmless no-op on those platforms.
 */
#ifndef MMCR0_PMAO
#define MMCR0_PMAO 0
#endif
27
28static void dummy_perf(struct pt_regs *regs)
29{
30#if defined(CONFIG_FSL_EMB_PERFMON)
31 mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE);
32#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
33 if (cur_cpu_spec->pmc_type == PPC_PMC_IBM)
34 mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO));
35#else
36 mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_PMXE);
37#endif
38}
39
40
/* Protects pmc_owner_caller and perf_irq below. */
static DEFINE_RAW_SPINLOCK(pmc_owner_lock);
/* Return address of the current reserver; NULL means the PMCs are free. */
static void *pmc_owner_caller;
/* Active PMC interrupt handler; reset to dummy_perf when unreserved. */
perf_irq_t perf_irq = dummy_perf;
44
45int reserve_pmc_hardware(perf_irq_t new_perf_irq)
46{
47 int err = 0;
48
49 raw_spin_lock(&pmc_owner_lock);
50
51 if (pmc_owner_caller) {
52 printk(KERN_WARNING "reserve_pmc_hardware: "
53 "PMC hardware busy (reserved by caller %p)\n",
54 pmc_owner_caller);
55 err = -EBUSY;
56 goto out;
57 }
58
59 pmc_owner_caller = __builtin_return_address(0);
60 perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
61
62 out:
63 raw_spin_unlock(&pmc_owner_lock);
64 return err;
65}
66EXPORT_SYMBOL_GPL(reserve_pmc_hardware);
67
68void release_pmc_hardware(void)
69{
70 raw_spin_lock(&pmc_owner_lock);
71
72 WARN_ON(! pmc_owner_caller);
73
74 pmc_owner_caller = NULL;
75 perf_irq = dummy_perf;
76
77 raw_spin_unlock(&pmc_owner_lock);
78}
79EXPORT_SYMBOL_GPL(release_pmc_hardware);
80
#ifdef CONFIG_PPC64
/*
 * Enable the performance monitor counters on POWER4-class CPUs by
 * setting a bit in HID0.  1UL << (63 - 20) is bit 20 in IBM MSB-first
 * bit numbering of the 64-bit register.
 */
void power4_enable_pmcs(void)
{
	unsigned long hid0;

	hid0 = mfspr(SPRN_HID0);
	hid0 |= 1UL << (63 - 20);


	/*
	 * sync, mtspr, then six back-to-back reads of HID0 before isync:
	 * presumably the HID0-update sequence required by the POWER4
	 * errata — NOTE(review): confirm against the POWER4 user manual.
	 * Do not reorder or shorten this sequence.
	 */
	asm volatile(
		"sync\n"
		"mtspr %1, %0\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
		"memory");
}
#endif
103