// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 PMUv3 Performance Events handling code.
 */

#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL				0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST			0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS		0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS		0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS		0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS		0xED

/*
 * ARMv8 Architectural defined events, not all of these may
 * be supported on any given implementation. Unsupported events will
 * be disabled at run-time based on the PMCEID registers.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						   [PERF_COUNT_HW_CACHE_OP_MAX]
						   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};

static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}

#define ARMV8_EVENT_ATTR(name, config)					\
	PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)

static struct attribute *armv8_pmuv3_event_attrs[] = {
	ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
	ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
	ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
	ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
	ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
	ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
	ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
	ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
	ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
	ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
	ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
	ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
	ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
	ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
	ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
	ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
	ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
	ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
	ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
	ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
	ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
	/* Don't expose the chain event in /sys, since it's useless in isolation */
	ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
	ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
	ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
	ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
	ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
	ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
	ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
	ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
	ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
	ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
	ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
	ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
	ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
	ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
	ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
	ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
	ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
	ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
	ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
	ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
	ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
	ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
	ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
	ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
	ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
	ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
	ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
	ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
	ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
	ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
	ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
	ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
	ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
	NULL,
};

static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
		u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;

		if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
		    test_bit(id, cpu_pmu->pmceid_ext_bitmap))
			return attr->mode;
	}

	return 0;
}

static const struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};
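
/*
 * Illustrative note (not from the original source): the "events" group above
 * is exposed through sysfs, so each supported common event appears as a file
 * such as (the PMU instance name depends on the probed CPU):
 *
 *   /sys/bus/event_source/devices/armv8_pmuv3_0/events/inst_retired
 *
 * Reading the file yields the "event=0x...." string produced by
 * armv8pmu_events_sysfs_show(), which the perf tool parses back into
 * perf_event_attr::config.
 */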

PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");
PMU_FORMAT_ATTR(rdpmc, "config1:1");

static int sysctl_perf_user_access __read_mostly;

static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}

static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
{
	return event->attr.config1 & 0x2;
}

static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	&format_attr_rdpmc.attr,
	NULL,
};

static const struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};
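
/*
 * Illustrative note (not from the original source): the "format" group tells
 * user space how to encode perf_event_attr for this PMU - the event number
 * lives in config[15:0], bit 0 of config1 requests a 64-bit ("long") counter
 * and bit 1 requests user-space counter access. A minimal user-space sketch,
 * assuming the dynamic PMU type has been read from sysfs, would be:
 *
 *	struct perf_event_attr attr = {
 *		.size    = sizeof(attr),
 *		.type    = pmu_type,	// from .../<pmu>/type in sysfs
 *		.config  = 0x08,	// INST_RETIRED
 *		.config1 = 0x3,		// long counter + user access
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */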

static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
			  char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;

	return sysfs_emit(page, "0x%08x\n", slots);
}

static DEVICE_ATTR_RO(slots);

static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
			& ARMV8_PMU_BUS_SLOTS_MASK;

	return sysfs_emit(page, "0x%08x\n", bus_slots);
}

static DEVICE_ATTR_RO(bus_slots);

static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
			& ARMV8_PMU_BUS_WIDTH_MASK;
	u32 val = 0;

	/* Encoded as Log2(number of bytes), plus one */
	if (bus_width > 2 && bus_width < 13)
		val = 1 << (bus_width - 1);

	return sysfs_emit(page, "0x%08x\n", val);
}

static DEVICE_ATTR_RO(bus_width);

static struct attribute *armv8_pmuv3_caps_attrs[] = {
	&dev_attr_slots.attr,
	&dev_attr_bus_slots.attr,
	&dev_attr_bus_width.attr,
	NULL,
};

static const struct attribute_group armv8_pmuv3_caps_attr_group = {
	.name = "caps",
	.attrs = armv8_pmuv3_caps_attrs,
};

/*
 * Perf Events' indices
 */
#define	ARMV8_IDX_CYCLE_COUNTER		0
#define	ARMV8_IDX_COUNTER0		1
#define	ARMV8_IDX_CYCLE_COUNTER_USER	32

/*
 * We unconditionally enable ARMv8.5-PMU long event counter support
 * (64-bit events) where supported. Indicate if this arm_pmu has long
 * event counter support.
 */
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
	return (cpu_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_5);
}

static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
{
	return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
}

/*
 * We must chain two programmable counters for 64 bit events,
 * except when we have allocated the 64bit cycle counter (for CPU
 * cycles event) or when user space counter access is enabled.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	return !armv8pmu_event_has_user_read(event) &&
	       armv8pmu_event_is_64bit(event) &&
	       !armv8pmu_has_long_event(cpu_pmu) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}
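
/*
 * Worked example (illustrative, not from the original source): a 64-bit
 * event on a PMU without ARMv8.5 long counters is allocated two adjacent
 * 32-bit counters. The even-numbered counter counts the event itself and
 * the odd-numbered counter counts the CHAIN event, which increments when
 * the even counter overflows, giving an effective 64-bit count.
 */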

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)

/*
 * Expand a runtime counter index into a switch over the individual
 * PMEV* system registers, which must be named at compile time.
 */
#define PMEVN_CASE(n, case_macro) \
	case n: case_macro(n); break

#define PMEVN_SWITCH(x, case_macro)				\
	do {							\
		switch (x) {					\
		PMEVN_CASE(0,  case_macro);			\
		PMEVN_CASE(1,  case_macro);			\
		PMEVN_CASE(2,  case_macro);			\
		PMEVN_CASE(3,  case_macro);			\
		PMEVN_CASE(4,  case_macro);			\
		PMEVN_CASE(5,  case_macro);			\
		PMEVN_CASE(6,  case_macro);			\
		PMEVN_CASE(7,  case_macro);			\
		PMEVN_CASE(8,  case_macro);			\
		PMEVN_CASE(9,  case_macro);			\
		PMEVN_CASE(10, case_macro);			\
		PMEVN_CASE(11, case_macro);			\
		PMEVN_CASE(12, case_macro);			\
		PMEVN_CASE(13, case_macro);			\
		PMEVN_CASE(14, case_macro);			\
		PMEVN_CASE(15, case_macro);			\
		PMEVN_CASE(16, case_macro);			\
		PMEVN_CASE(17, case_macro);			\
		PMEVN_CASE(18, case_macro);			\
		PMEVN_CASE(19, case_macro);			\
		PMEVN_CASE(20, case_macro);			\
		PMEVN_CASE(21, case_macro);			\
		PMEVN_CASE(22, case_macro);			\
		PMEVN_CASE(23, case_macro);			\
		PMEVN_CASE(24, case_macro);			\
		PMEVN_CASE(25, case_macro);			\
		PMEVN_CASE(26, case_macro);			\
		PMEVN_CASE(27, case_macro);			\
		PMEVN_CASE(28, case_macro);			\
		PMEVN_CASE(29, case_macro);			\
		PMEVN_CASE(30, case_macro);			\
		default: WARN(1, "Invalid PMEV* index\n");	\
		}						\
	} while (0)

#define RETURN_READ_PMEVCNTRN(n) \
	return read_sysreg(pmevcntr##n##_el0)
static unsigned long read_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	return 0;
}

#define WRITE_PMEVCNTRN(n) \
	write_sysreg(val, pmevcntr##n##_el0)
static void write_pmevcntrn(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}

#define WRITE_PMEVTYPERN(n) \
	write_sysreg(val, pmevtyper##n##_el0)
static void write_pmevtypern(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}
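
/*
 * Note (illustrative, not from the original source): the PMEVN_SWITCH()
 * dance above exists because PMEVCNTR<n>_EL0 and PMEVTYPER<n>_EL0 must be
 * encoded as fixed register names in the mrs/msr instructions emitted by
 * read_sysreg()/write_sysreg(), so a runtime counter number cannot simply
 * be used as an index and has to be expanded into one case per register.
 */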

static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_sysreg(val, pmcr_el0);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline u64 armv8pmu_read_evcntr(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	return read_pmevcntrn(counter);
}

static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = armv8pmu_read_evcntr(idx);

	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}

/*
 * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
 * is set, the event counters also become 64-bit counters. Unless the
 * user has requested a long counter (attr.config1) then we want to
 * interrupt upon 32-bit overflow - we achieve this by applying a bias.
 */
static bool armv8pmu_event_needs_bias(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_64bit(event))
		return false;

	if (armv8pmu_has_long_event(cpu_pmu) ||
	    idx == ARMV8_IDX_CYCLE_COUNTER)
		return true;

	return false;
}

static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value |= GENMASK(63, 32);

	return value;
}

static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value &= ~GENMASK(63, 32);

	return value;
}
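
/*
 * Worked example (illustrative, not from the original source): for a
 * regular 32-bit event placed on the 64-bit cycle counter (or a long event
 * counter), a programmed value of 0xFFFF0000 is biased to
 * 0xFFFFFFFF_FFFF0000, so the counter still overflows - and raises its
 * interrupt - after 0x10000 increments, exactly as a true 32-bit counter
 * would. The bias is stripped again on read so the perf core only ever
 * sees the 32-bit value it asked for.
 */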

static u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value;

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_sysreg(pmccntr_el0);
	else
		value = armv8pmu_read_hw_counter(event);

	return armv8pmu_unbias_long_counter(event, value);
}

static inline void armv8pmu_write_evcntr(int idx, u64 value)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	write_pmevcntrn(counter, value);
}

static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}

static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	value = armv8pmu_bias_long_counter(event, value);

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		write_sysreg(value, pmccntr_el0);
	else
		armv8pmu_write_hw_counter(event, value);
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	val &= ARMV8_PMU_EVTYPE_MASK;
	write_pmevtypern(counter, val);
}

static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with the CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		if (idx == ARMV8_IDX_CYCLE_COUNTER)
			write_sysreg(hwc->config_base, pmccfiltr_el0);
		else
			armv8pmu_write_evtype(idx, hwc->config_base);
	}
}

static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
{
	int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	u32 mask = BIT(counter);

	if (armv8pmu_event_is_chained(event))
		mask |= BIT(counter - 1);
	return mask;
}

static inline void armv8pmu_enable_counter(u32 mask)
{
	/*
	 * Make sure event configuration register writes are visible before we
	 * enable the counter.
	 */
	isb();
	write_sysreg(mask, pmcntenset_el0);
}

static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_set_pmu_events(mask, attr);

	/* We rely on the hypervisor switch code to enable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_enable_counter(mask);
}

static inline void armv8pmu_disable_counter(u32 mask)
{
	write_sysreg(mask, pmcntenclr_el0);
	/*
	 * Make sure the effects of disabling the counter are visible before we
	 * start configuring the event.
	 */
	isb();
}

static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_clr_pmu_events(mask);

	/* We rely on the hypervisor switch code to disable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_disable_counter(mask);
}

static inline void armv8pmu_enable_intens(u32 mask)
{
	write_sysreg(mask, pmintenset_el1);
}

static inline void armv8pmu_enable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	armv8pmu_enable_intens(BIT(counter));
}

static inline void armv8pmu_disable_intens(u32 mask)
{
	write_sysreg(mask, pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(mask, pmovsclr_el0);
	isb();
}

static inline void armv8pmu_disable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	armv8pmu_disable_intens(BIT(counter));
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read the overflow status register */
	value = read_sysreg(pmovsclr_el0);

	/* Write back to clear the flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}

static void armv8pmu_disable_user_access(void)
{
	write_sysreg(0, pmuserenr_el0);
}

static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
{
	int i;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);

	/* Clear any unused counters to avoid leaking their contents */
	for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
		if (i == ARMV8_IDX_CYCLE_COUNTER)
			write_sysreg(0, pmccntr_el0);
		else
			armv8pmu_write_evcntr(i, 0);
	}

	write_sysreg(0, pmuserenr_el0);
	write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0);
}

static void armv8pmu_enable_event(struct perf_event *event)
{
	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Set event.
	 */
	armv8pmu_write_event_type(event);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_event_irq(event);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_event_counter(event);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_event_irq(event);
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	struct perf_event_context *task_ctx =
		this_cpu_ptr(cpu_pmu->pmu.pmu_cpu_context)->task_ctx;

	if (sysctl_perf_user_access && task_ctx && task_ctx->nr_user)
		armv8pmu_enable_user_access(cpu_pmu);
	else
		armv8pmu_disable_user_access();

	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
}

static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		/*
		 * Perf event overflow will queue the processing of the event as
		 * an irq_work which will be taken care of in the handling of
		 * IPI_IRQ_WORK.
		 */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	return IRQ_HANDLED;
}

static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
				   struct arm_pmu *cpu_pmu)
{
	int idx;

	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}
	return -EAGAIN;
}

static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
				  struct arm_pmu *cpu_pmu)
{
	int idx;

	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
	for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
		if (!test_and_set_bit(idx, cpuc->used_mask)) {
			/* Check if the preceding even counter is available */
			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
				return idx;
			/* Release the odd counter again */
			clear_bit(idx, cpuc->used_mask);
		}
	}
	return -EAGAIN;
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
		else if (armv8pmu_event_is_64bit(event) &&
			 armv8pmu_event_want_user_access(event) &&
			 !armv8pmu_has_long_event(cpu_pmu))
			return -EAGAIN;
	}

	/*
	 * Otherwise use the event counters
	 */
	if (armv8pmu_event_is_chained(event))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
	else
		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}

static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	int idx = event->hw.idx;

	clear_bit(idx, cpuc->used_mask);
	if (armv8pmu_event_is_chained(event))
		clear_bit(idx - 1, cpuc->used_mask);
}

static int armv8pmu_user_event_idx(struct perf_event *event)
{
	if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event))
		return 0;

	/*
	 * We remap the cycle counter index to 32 to
	 * match the offset applied to the rest of
	 * the counter indices.
	 */
	if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER)
		return ARMV8_IDX_CYCLE_COUNTER_USER;

	return event->hw.idx;
}
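
/*
 * Note (illustrative, not from the original source): the value returned
 * here is published to user space as "index" in the mmap'ed
 * perf_event_mmap_page. By convention user space subtracts one from it to
 * obtain the hardware counter number, so event counters report idx (which
 * is already counter + 1) and the cycle counter is remapped to 32 so that
 * it does not collide with the event counter range. Zero means no user
 * access is permitted for this event.
 */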

/*
 * Add an event filter to a given event.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
		if (attr->exclude_guest)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (attr->exclude_host)
			config_base |= ARMV8_PMU_EXCLUDE_EL0;
	} else {
		if (!attr->exclude_hv && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}

	/*
	 * Filter out !VHE kernels and guest kernels
	 */
	if (attr->exclude_kernel)
		config_base |= ARMV8_PMU_EXCLUDE_EL1;

	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
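
/*
 * Example (illustrative, not from the original source): a user-only event
 * such as "perf stat -e cycles:u" arrives here with exclude_kernel set,
 * which becomes ARMV8_PMU_EXCLUDE_EL1 in config_base and hence in the
 * event type register, so the counter is suppressed while the CPU executes
 * at EL1.
 */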

static int armv8pmu_filter_match(struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
	return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
}

static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 pmcr;

	/* The counter and interrupt enable registers are unknown at reset. */
	armv8pmu_disable_counter(U32_MAX);
	armv8pmu_disable_intens(U32_MAX);

	/* Clear the counters we flip at guest entry/exit */
	kvm_clr_pmu_events(U32_MAX);

	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * 64 bit cycle counter but cheat in armv8pmu_read_counter().
	 */
	pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;

	/* Enable long event counter support where available */
	if (armv8pmu_has_long_event(cpu_pmu))
		pmcr |= ARMV8_PMU_PMCR_LP;

	armv8pmu_pmcr_write(pmcr);
}

static int __armv8_pmuv3_map_event(struct perf_event *event,
				   const unsigned (*extra_event_map)
						  [PERF_COUNT_HW_MAX],
				   const unsigned (*extra_cache_map)
						  [PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	int hw_event_id;
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
				       &armv8_pmuv3_perf_cache_map,
				       ARMV8_PMU_EVTYPE_EVENT);

	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/*
	 * User events must be allocated into a single counter, and so
	 * must not be chained.
	 *
	 * Most 64-bit events require long counter support, but 64-bit
	 * CPU_CYCLES events can be placed into the dedicated cycle
	 * counter when this is free.
	 */
	if (armv8pmu_event_want_user_access(event)) {
		if (!(event->attach_state & PERF_ATTACH_TASK))
			return -EINVAL;
		if (armv8pmu_event_is_64bit(event) &&
		    (hw_event_id != ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
		    !armv8pmu_has_long_event(armpmu))
			return -EOPNOTSUPP;

		event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
	}

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, NULL);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}

static int armv8_a73_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_thunder_perf_cache_map);
}

static int armv8_vulcan_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_vulcan_perf_cache_map);
}

struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;
};

static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 dfr0;
	u64 pmceid_raw[2];
	u32 pmceid[2];
	int pmuver;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
			ID_AA64DFR0_PMUVER_SHIFT);
	if (pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || pmuver == 0)
		return;

	cpu_pmu->pmuver = pmuver;
	probe->present = true;

	/* Read the nb of CNTx counters supported from PMNC */
	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
		& ARMV8_PMU_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	cpu_pmu->num_events += 1;

	pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
	pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);

	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	pmceid[0] = pmceid_raw[0] >> 32;
	pmceid[1] = pmceid_raw[1] >> 32;

	bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	/* store PMMIR_EL1 register for sysfs */
	if (pmuver >= ID_AA64DFR0_PMUVER_8_4 && (pmceid_raw[1] & BIT(31)))
		cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
	else
		cpu_pmu->reg_pmmir = 0;
}

static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}

static void armv8pmu_disable_user_access_ipi(void *unused)
{
	armv8pmu_disable_user_access();
}

static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write || sysctl_perf_user_access)
		return ret;

	on_each_cpu(armv8pmu_disable_user_access_ipi, NULL, 1);
	return 0;
}

static struct ctl_table armv8_pmu_sysctl_table[] = {
	{
		.procname	= "perf_user_access",
		.data		= &sysctl_perf_user_access,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= armv8pmu_proc_user_access_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static void armv8_pmu_register_sysctl_table(void)
{
	static u32 tbl_registered = 0;

	if (!cmpxchg_relaxed(&tbl_registered, 0, 1))
		register_sysctl("kernel", armv8_pmu_sysctl_table);
}
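
/*
 * Usage note (illustrative, not from the original source): with the table
 * above registered, user-space counter access is gated by a sysctl, e.g.:
 *
 *   # echo 1 > /proc/sys/kernel/perf_user_access
 *
 * Writing 0 disables user access again and, via the proc handler above,
 * clears PMUSERENR_EL0 on every CPU with an IPI.
 */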

static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
			  int (*map_event)(struct perf_event *event),
			  const struct attribute_group *events,
			  const struct attribute_group *format,
			  const struct attribute_group *caps)
{
	int ret = armv8pmu_probe_pmu(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->handle_irq		= armv8pmu_handle_irq;
	cpu_pmu->enable			= armv8pmu_enable_event;
	cpu_pmu->disable		= armv8pmu_disable_event;
	cpu_pmu->read_counter		= armv8pmu_read_counter;
	cpu_pmu->write_counter		= armv8pmu_write_counter;
	cpu_pmu->get_event_idx		= armv8pmu_get_event_idx;
	cpu_pmu->clear_event_idx	= armv8pmu_clear_event_idx;
	cpu_pmu->start			= armv8pmu_start;
	cpu_pmu->stop			= armv8pmu_stop;
	cpu_pmu->reset			= armv8pmu_reset;
	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
	cpu_pmu->filter_match		= armv8pmu_filter_match;

	cpu_pmu->pmu.event_idx		= armv8pmu_user_event_idx;

	cpu_pmu->name			= name;
	cpu_pmu->map_event		= map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
			events : &armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
			format : &armv8_pmuv3_format_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
			caps : &armv8_pmuv3_caps_attr_group;

	armv8_pmu_register_sysctl_table();
	return 0;
}

static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
				   int (*map_event)(struct perf_event *event))
{
	return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
}

#define PMUV3_INIT_SIMPLE(name)						\
static int name##_pmu_init(struct arm_pmu *cpu_pmu)			\
{									\
	return armv8_pmu_init_nogroups(cpu_pmu, #name, armv8_pmuv3_map_event);\
}

PMUV3_INIT_SIMPLE(armv8_pmuv3)

PMUV3_INIT_SIMPLE(armv8_cortex_a34)
PMUV3_INIT_SIMPLE(armv8_cortex_a55)
PMUV3_INIT_SIMPLE(armv8_cortex_a65)
PMUV3_INIT_SIMPLE(armv8_cortex_a75)
PMUV3_INIT_SIMPLE(armv8_cortex_a76)
PMUV3_INIT_SIMPLE(armv8_cortex_a77)
PMUV3_INIT_SIMPLE(armv8_cortex_a78)
PMUV3_INIT_SIMPLE(armv9_cortex_a510)
PMUV3_INIT_SIMPLE(armv9_cortex_a710)
PMUV3_INIT_SIMPLE(armv8_cortex_x1)
PMUV3_INIT_SIMPLE(armv9_cortex_x2)
PMUV3_INIT_SIMPLE(armv8_neoverse_e1)
PMUV3_INIT_SIMPLE(armv8_neoverse_n1)
PMUV3_INIT_SIMPLE(armv9_neoverse_n2)
PMUV3_INIT_SIMPLE(armv8_neoverse_v1)

PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)

static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
				       armv8_a53_map_event);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
				       armv8_a53_map_event);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
				       armv8_a57_map_event);
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
				       armv8_a57_map_event);
}

static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
				       armv8_a73_map_event);
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
				       armv8_thunder_map_event);
}

static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
				       armv8_vulcan_map_event);
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_pmu_init},
	{.compatible = "arm,cortex-a34-pmu",	.data = armv8_cortex_a34_pmu_init},
	{.compatible = "arm,cortex-a35-pmu",	.data = armv8_a35_pmu_init},
	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a55-pmu",	.data = armv8_cortex_a55_pmu_init},
	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
	{.compatible = "arm,cortex-a65-pmu",	.data = armv8_cortex_a65_pmu_init},
	{.compatible = "arm,cortex-a72-pmu",	.data = armv8_a72_pmu_init},
	{.compatible = "arm,cortex-a73-pmu",	.data = armv8_a73_pmu_init},
	{.compatible = "arm,cortex-a75-pmu",	.data = armv8_cortex_a75_pmu_init},
	{.compatible = "arm,cortex-a76-pmu",	.data = armv8_cortex_a76_pmu_init},
	{.compatible = "arm,cortex-a77-pmu",	.data = armv8_cortex_a77_pmu_init},
	{.compatible = "arm,cortex-a78-pmu",	.data = armv8_cortex_a78_pmu_init},
	{.compatible = "arm,cortex-a510-pmu",	.data = armv9_cortex_a510_pmu_init},
	{.compatible = "arm,cortex-a710-pmu",	.data = armv9_cortex_a710_pmu_init},
	{.compatible = "arm,cortex-x1-pmu",	.data = armv8_cortex_x1_pmu_init},
	{.compatible = "arm,cortex-x2-pmu",	.data = armv9_cortex_x2_pmu_init},
	{.compatible = "arm,neoverse-e1-pmu",	.data = armv8_neoverse_e1_pmu_init},
	{.compatible = "arm,neoverse-n1-pmu",	.data = armv8_neoverse_n1_pmu_init},
	{.compatible = "arm,neoverse-n2-pmu",	.data = armv9_neoverse_n2_pmu_init},
	{.compatible = "arm,neoverse-v1-pmu",	.data = armv8_neoverse_v1_pmu_init},
	{.compatible = "cavium,thunder-pmu",	.data = armv8_thunder_pmu_init},
	{.compatible = "brcm,vulcan-pmu",	.data = armv8_vulcan_pmu_init},
	{.compatible = "nvidia,carmel-pmu",	.data = armv8_nvidia_carmel_pmu_init},
	{.compatible = "nvidia,denver-pmu",	.data = armv8_nvidia_denver_pmu_init},
	{},
};

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver		= {
		.name	= ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe		= armv8_pmu_device_probe,
};

static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);
	else
		return arm_pmu_acpi_probe(armv8_pmuv3_pmu_init);
}
device_initcall(armv8_pmu_driver_init)

void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 ns;

	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_time_short = 0;
	userpg->cap_user_rdpmc = armv8pmu_event_has_user_read(event);

	if (userpg->cap_user_rdpmc) {
		if (event->hw.flags & ARMPMU_EVT_64BIT)
			userpg->pmc_width = 64;
		else
			userpg->pmc_width = 32;
	}

	do {
		rd = sched_clock_read_begin(&seq);

		if (rd->read_sched_clock != arch_timer_read_counter)
			return;

		userpg->time_mult = rd->mult;
		userpg->time_shift = rd->shift;
		userpg->time_zero = rd->epoch_ns;
		userpg->time_cycles = rd->epoch_cyc;
		userpg->time_mask = rd->sched_clock_mask;

		/*
		 * Subtract the cycle base, such that software that
		 * doesn't know about cap_user_time_short still 'works'
		 * assuming no wraps.
		 */
		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
		userpg->time_zero -= ns;

	} while (sched_clock_read_retry(seq));

	userpg->time_offset = userpg->time_zero - now;

	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (userpg->time_shift == 32) {
		userpg->time_shift = 31;
		userpg->time_mult >>= 1;
	}

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	userpg->cap_user_time = 1;
	userpg->cap_user_time_zero = 1;
	userpg->cap_user_time_short = 1;
}