#undef DEBUG

/*
 * ARM hardware performance counter support for the perf events subsystem.
 *
 * The counter handling follows the usual perf model: a per-CPU set of
 * hardware counters is multiplexed between events, and the callchain code
 * walks the frame pointers saved on the stack.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

static struct platform_device *pmu_device;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
static DEFINE_RAW_SPINLOCK(pmu_lock);

/*
 * The largest number of hardware counters on any supported PMU: counter
 * index 0 is reserved, leaving room for up to 32 real counters.
 */
#define ARMPMU_MAX_HWEVENTS	33

struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index.
	 * Index 0 is reserved.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used
	 * for an event. A 0 means that the counter can be used.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

	/*
	 * A 1 bit for an index indicates that the counter is actively
	 * being used.
	 */
	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

/*
 * Per-implementation PMU operations and event maps, filled in at boot
 * for the CPU we are actually running on.
 */
struct arm_pmu {
	enum arm_perf_pmu_ids id;
	const char	*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct hw_perf_event *evt, int idx);
	void		(*disable)(struct hw_perf_event *evt, int idx);
	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
					 struct hw_perf_event *hwc);
	u32		(*read_counter)(int idx);
	void		(*write_counter)(int idx, u32 val);
	void		(*start)(void);
	void		(*stop)(void);
	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
				    [PERF_COUNT_HW_CACHE_OP_MAX]
				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
	u32		raw_event_mask;
	int		num_events;
	u64		max_period;
};

/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;

enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
	int id = -ENODEV;

	if (armpmu != NULL)
		id = armpmu->id;

	return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

int
armpmu_get_max_events(void)
{
	int max_events = 0;

	if (armpmu != NULL)
		max_events = armpmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
	return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF

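/*
 * Map a generic cache event (type, op and result packed into attr.config)
 * to the PMU-specific counter encoding, or a negative errno if the
 * combination is out of range or unsupported.
 */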
static int
armpmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_event(u64 config)
{
	int mapping;

	/* Reject out-of-range configs before indexing the event map. */
	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*armpmu->event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
}

static int
armpmu_map_raw_event(u64 config)
{
	return (int)(config & armpmu->raw_event_mask);
}

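/*
 * (Re)program the period for a counter. The hardware counters count up
 * and interrupt on overflow, so the counter is preloaded with the negated
 * period. Returns nonzero if a new period was started.
 */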
static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

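/*
 * Fold the delta since the last read of a hardware counter into the
 * generic event count. The subtraction is done modulo 2^32 so a 32-bit
 * counter wrapping between reads is handled correctly.
 */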
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	int shift = 64 - 32;
	s64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!armpmu)
		return;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier();
		armpmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void
armpmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!armpmu)
		return;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}

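/*
 * Remove an event from the PMU: stop the counter, fold in the final
 * count and release the counter index.
 */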
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	clear_bit(idx, cpuc->active_mask);
	armpmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

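/*
 * Allocate a hardware counter for the event and, if PERF_EF_START is set,
 * start it counting.
 */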
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static struct pmu pmu;

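/*
 * Check whether 'event' could be scheduled onto the fake, initially empty
 * PMU state in 'cpuc'. Events belonging to other PMUs or in the OFF state
 * always validate successfully.
 */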
static int
validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
{
	struct hw_perf_event fake_event = event->hw;

	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}

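/*
 * Claim the PMU platform device and request each of its IRQs. On failure
 * everything acquired so far is released again.
 */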
static int
armpmu_reserve_hardware(void)
{
	int i, err = -ENODEV, irq;

	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (IS_ERR(pmu_device)) {
		pr_warning("unable to reserve pmu\n");
		return PTR_ERR(pmu_device);
	}

	init_pmu(ARM_PMU_DEVICE_CPU);

	if (pmu_device->num_resources < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < pmu_device->num_resources; ++i) {
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		err = request_irq(irq, armpmu->handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
				  "armpmu", NULL);
		if (err) {
			pr_warning("unable to request IRQ%d for ARM perf "
				   "counters\n", irq);
			break;
		}
	}

	if (err) {
		/* Unwind: free any IRQs we managed to request. */
		for (i = i - 1; i >= 0; --i) {
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, NULL);
		}
		release_pmu(pmu_device);
		pmu_device = NULL;
	}

	return err;
}

static void
armpmu_release_hardware(void)
{
	int i, irq;

	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, NULL);
	}
	armpmu->stop();

	release_pmu(pmu_device);
	pmu_device = NULL;
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);

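/*
 * Called when the last reference to an event is dropped; once no events
 * remain, release the PMU hardware and its IRQs.
 */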
static void
hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
		armpmu_release_hardware();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	/* Decode the generic type into an ARM event identifier. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		mapping = armpmu_map_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		mapping = armpmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		mapping = armpmu_map_raw_event(event->attr.config);
	} else {
		pr_debug("event type %x not supported\n", event->attr.type);
		return -EOPNOTSUPP;
	}

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 * The ARM performance counters are on all of the time so if someone
	 * is asking us for exclusion then we have to fail it.
	 */
	if (event->attr.exclude_kernel || event->attr.exclude_user ||
	    event->attr.exclude_hv || event->attr.exclude_idle) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put
	 * it yet.
	 */
	hwc->idx = -1;

	/*
	 * Store the event encoding into the config_base field. config and
	 * event_base are unused as the only 'configuration' we have is
	 * the event mapping.
	 */
	hwc->config_base = (unsigned long)mapping;
	hwc->config = 0;
	hwc->event_base = 0;

	/* For non-sampling events, use the largest period we can. */
	if (!hwc->sample_period) {
		hwc->sample_period = armpmu->max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}

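/*
 * pmu::event_init callback. Reject event types we cannot handle, reserve
 * the hardware on first use and perform the PMU-specific setup.
 */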
static int armpmu_event_init(struct perf_event *event)
{
	int err = 0;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!armpmu)
		return -ENODEV;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = armpmu_reserve_hardware();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	/* Enable all of the perf events on hardware. */
	int idx;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!armpmu)
		return;

	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event)
			continue;

		armpmu->enable(&event->hw, idx);
	}

	armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
	if (armpmu)
		armpmu->stop();
}

static struct pmu pmu = {
	.pmu_enable	= armpmu_enable,
	.pmu_disable	= armpmu_disable,
	.event_init	= armpmu_event_init,
	.add		= armpmu_add,
	.del		= armpmu_del,
	.start		= armpmu_start,
	.stop		= armpmu_stop,
	.read		= armpmu_read,
};

/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

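/*
 * Read the CPU ID register and hook up the PMU implementation matching
 * the core we are running on.
 */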
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);

	/* ARM Ltd CPUs. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			armpmu = armv6pmu_init();
			break;
		case 0xB020:	/* ARM11mpcore */
			armpmu = armv6mpcore_pmu_init();
			break;
		case 0xC080:	/* Cortex-A8 */
			armpmu = armv7_a8_pmu_init();
			break;
		case 0xC090:	/* Cortex-A9 */
			armpmu = armv7_a9_pmu_init();
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
			armpmu = xscale1pmu_init();
			break;
		case 2:
			armpmu = xscale2pmu_init();
			break;
		}
	}

	if (armpmu)
		pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
	else
		pr_info("no hardware support available\n");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure, so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to
 * the next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}

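/*
 * Record a user-space callchain by following the saved frame pointers,
 * starting from the frame below the one described by regs->ARM_fp.
 */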
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while (tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be
 * called even when the callchain is deeper than PERF_MAX_STACK_DEPTH.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}