/*
 * Performance event support framework for SuperH hardware counters.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <linux/export.h>
#include <asm/processor.h>

struct cpu_hw_events {
	struct perf_event	*events[MAX_HWEVENTS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct sh_pmu *sh_pmu __read_mostly;

/* Number of events currently counting hardware events. */
static atomic_t num_events;

/* Serializes reserve_pmc_hardware() / release_pmc_hardware(). */
static DEFINE_MUTEX(pmc_reserve_mutex);

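/*
 * These are stubs for now: reserving the PMC hardware always succeeds
 * and releasing it is a no-op.
 */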
int reserve_pmc_hardware(void)
{
	return 0;
}

void release_pmc_hardware(void)
{
}

static inline int sh_pmu_initialized(void)
{
	return !!sh_pmu;
}

const char *perf_pmu_name(void)
{
	if (!sh_pmu)
		return NULL;

	return sh_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	if (!sh_pmu)
		return 0;

	return sh_pmu->num_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

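/*
 * Drop a reference on the counter hardware; release the PMC hardware
 * once the last event has been destroyed.
 */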
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static int hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	if (!sh_pmu->cache_events)
		return -EINVAL;

	/* Unpack the generic cache event descriptor. */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*sh_pmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*evp = ev;
	return 0;
}

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int config = -1;
	int err;

	if (!sh_pmu_initialized())
		return -ENODEV;

	/*
	 * See if we need to reserve the counter hardware.
	 *
	 * If no events are currently in use, take the mutex so that we
	 * do not race with another task calling reserve_pmc_hardware()
	 * or release_pmc_hardware().
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}

	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		config = attr->config & sh_pmu->raw_event_mask;
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(attr->config, &config);
		if (err)
			return err;
		break;
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sh_pmu->max_events)
			return -EINVAL;

		config = sh_pmu->event_map(attr->config);
		break;
	}

	if (config == -1)
		return -EINVAL;

	hwc->config |= config;

	return 0;
}

static void sh_perf_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;
	int shift = 0;

	/*
	 * Atomically read and exchange the new raw count so that a
	 * concurrent update of prev_count underneath us is detected
	 * and retried. As these counters raise no overflow interrupt,
	 * this is the simplest way to keep the count consistent.
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = sh_pmu->read(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * prev_count is now up to date; fold the (new - prev) delta
	 * into the generic event count. The shift handles hardware
	 * that does not sign-extend above the physical counter width.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

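/*
 * Stop the counter and, if PERF_EF_UPDATE is set, fold the final
 * hardware count into the event.
 */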
static void sh_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sh_pmu->disable(hwc, idx);
		cpuc->events[idx] = NULL;
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		sh_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

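/* (Re)enable the counter at the index assigned by sh_pmu_add(). */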
static void sh_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	cpuc->events[idx] = event;
	event->hw.state = 0;
	sh_pmu->enable(hwc, idx);
}

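/* Stop the event and return its counter to the free pool. */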
static void sh_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	sh_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

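/*
 * Claim a hardware counter for the event, preferring its previously
 * assigned index, and optionally start it counting.
 */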
static int sh_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret = -EAGAIN;

	perf_pmu_disable(event->pmu);

	if (__test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
		if (idx == sh_pmu->num_events)
			goto out;

		__set_bit(idx, cpuc->used_mask);
		hwc->idx = idx;
	}

	sh_pmu->disable(hwc, idx);

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		sh_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void sh_pmu_read(struct perf_event *event)
{
	sh_perf_event_update(event, &event->hw, event->hw.idx);
}

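/*
 * Validate and translate the event attributes; only hardware,
 * hw-cache and raw event types are handled by this PMU.
 */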
static int sh_pmu_event_init(struct perf_event *event)
{
	int err;

	/* Taken branch sampling is not supported. */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_HARDWARE:
		err = __hw_perf_event_init(event);
		break;

	default:
		return -ENOENT;
	}

	if (unlikely(err)) {
		if (event->destroy)
			event->destroy(event);
	}

	return err;
}

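/* Enable or disable all counters on the current CPU at once. */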
static void sh_pmu_enable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->enable_all();
}

static void sh_pmu_disable(struct pmu *pmu)
{
	if (!sh_pmu_initialized())
		return;

	sh_pmu->disable_all();
}

static struct pmu pmu = {
	.pmu_enable	= sh_pmu_enable,
	.pmu_disable	= sh_pmu_disable,
	.event_init	= sh_pmu_event_init,
	.add		= sh_pmu_add,
	.del		= sh_pmu_del,
	.start		= sh_pmu_start,
	.stop		= sh_pmu_stop,
	.read		= sh_pmu_read,
};

static int sh_pmu_prepare_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
	return 0;
}

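/*
 * Register a CPU-specific sh_pmu description; only one PMU may be
 * registered at a time.
 */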
int register_sh_pmu(struct sh_pmu *_pmu)
{
	if (sh_pmu)
		return -EBUSY;
	sh_pmu = _pmu;

	pr_info("Performance Events: %s support registered\n", _pmu->name);

	/*
	 * These counters raise no overflow interrupt, so the PMU cannot
	 * support sampling; advertise that limitation to the core.
	 */
	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	WARN_ON(_pmu->num_events > MAX_HWEVENTS);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu,
			  NULL);
	return 0;
}