/*
 * Performance events x86 architecture code
 *
 * Core x86 PMU support shared by the vendor-specific (Intel/AMD)
 * implementations: counter scheduling, NMI handling, sysfs attributes
 * and the callchain/instruction-pointer helpers.
 *
 * For licensing details see kernel-base/COPYING
 */
15#include <linux/perf_event.h>
16#include <linux/capability.h>
17#include <linux/notifier.h>
18#include <linux/hardirq.h>
19#include <linux/kprobes.h>
20#include <linux/module.h>
21#include <linux/kdebug.h>
22#include <linux/sched.h>
23#include <linux/uaccess.h>
24#include <linux/slab.h>
25#include <linux/cpu.h>
26#include <linux/bitops.h>
27#include <linux/device.h>
28
29#include <asm/apic.h>
30#include <asm/stacktrace.h>
31#include <asm/nmi.h>
32#include <asm/smp.h>
33#include <asm/alternative.h>
34#include <asm/timer.h>
35#include <asm/desc.h>
36#include <asm/ldt.h>
37
38#include "perf_event.h"
39
40struct x86_pmu x86_pmu __read_mostly;
41
42DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
43 .enabled = 1,
44};
45
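/*
 * Generic hw_cache event translation tables, indexed by
 * (cache id, operation, result) and filled in by the vendor-specific
 * PMU init code. A zero entry means "not supported", -1 means the
 * combination is invalid (see set_ext_hw_attr() below).
 */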
46u64 __read_mostly hw_cache_event_ids
47 [PERF_COUNT_HW_CACHE_MAX]
48 [PERF_COUNT_HW_CACHE_OP_MAX]
49 [PERF_COUNT_HW_CACHE_RESULT_MAX];
50u64 __read_mostly hw_cache_extra_regs
51 [PERF_COUNT_HW_CACHE_MAX]
52 [PERF_COUNT_HW_CACHE_OP_MAX]
53 [PERF_COUNT_HW_CACHE_RESULT_MAX];
54
/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
60u64 x86_perf_event_update(struct perf_event *event)
61{
62 struct hw_perf_event *hwc = &event->hw;
63 int shift = 64 - x86_pmu.cntval_bits;
64 u64 prev_raw_count, new_raw_count;
65 int idx = hwc->idx;
66 s64 delta;
67
68 if (idx == INTEL_PMC_IDX_FIXED_BTS)
69 return 0;
70
 /*
  * Careful: an NMI might modify the previous event value.
  *
  * Our tactic to handle this is to first atomically read and
  * exchange a new raw count - if the prev_count we based it on
  * changed underneath us, retry.
  */
78again:
79 prev_raw_count = local64_read(&hwc->prev_count);
80 rdpmcl(hwc->event_base_rdpmc, new_raw_count);
81
82 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
83 new_raw_count) != prev_raw_count)
84 goto again;
85
 /*
  * Now we have the new raw value and have updated the prev
  * timestamp already. We can now calculate the elapsed delta
  * (event-)time and add that to the generic event.
  *
  * Careful, not all hw sign-extends above the physical width
  * of the count.
  */
94 delta = (new_raw_count << shift) - (prev_raw_count << shift);
95 delta >>= shift;
96
97 local64_add(delta, &event->count);
98 local64_sub(delta, &hwc->period_left);
99
100 return new_raw_count;
101}
102
/*
 * Find and validate any extra registers to set up.
 */
106static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
107{
108 struct hw_perf_event_extra *reg;
109 struct extra_reg *er;
110
111 reg = &event->hw.extra_reg;
112
113 if (!x86_pmu.extra_regs)
114 return 0;
115
116 for (er = x86_pmu.extra_regs; er->msr; er++) {
117 if (er->event != (config & er->config_mask))
118 continue;
119 if (event->attr.config1 & ~er->valid_mask)
120 return -EINVAL;
121
122 if (!er->extra_msr_access)
123 return -ENXIO;
124
125 reg->idx = er->idx;
126 reg->config = event->attr.config1;
127 reg->reg = er->msr;
128 break;
129 }
130 return 0;
131}
132
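/*
 * active_events counts the events currently using the PMC hardware;
 * the 0 <-> 1 transitions (reserving and releasing the counters and
 * DS buffers) are serialized by pmc_reserve_mutex.
 */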
133static atomic_t active_events;
134static DEFINE_MUTEX(pmc_reserve_mutex);
135
136#ifdef CONFIG_X86_LOCAL_APIC
137
138static bool reserve_pmc_hardware(void)
139{
140 int i;
141
142 for (i = 0; i < x86_pmu.num_counters; i++) {
143 if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
144 goto perfctr_fail;
145 }
146
147 for (i = 0; i < x86_pmu.num_counters; i++) {
148 if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
149 goto eventsel_fail;
150 }
151
152 return true;
153
154eventsel_fail:
155 for (i--; i >= 0; i--)
156 release_evntsel_nmi(x86_pmu_config_addr(i));
157
158 i = x86_pmu.num_counters;
159
160perfctr_fail:
161 for (i--; i >= 0; i--)
162 release_perfctr_nmi(x86_pmu_event_addr(i));
163
164 return false;
165}
166
167static void release_pmc_hardware(void)
168{
169 int i;
170
171 for (i = 0; i < x86_pmu.num_counters; i++) {
172 release_perfctr_nmi(x86_pmu_event_addr(i));
173 release_evntsel_nmi(x86_pmu_config_addr(i));
174 }
175}
176
177#else
178
179static bool reserve_pmc_hardware(void) { return true; }
180static void release_pmc_hardware(void) {}
181
182#endif
183
184static bool check_hw_exists(void)
185{
 u64 val, val_fail, val_new = ~0;
187 int i, reg, reg_fail, ret = 0;
188 int bios_fail = 0;
189
 /*
  * Check to see if the BIOS enabled any of the counters, if so
  * complain and bail.
  */
194 for (i = 0; i < x86_pmu.num_counters; i++) {
195 reg = x86_pmu_config_addr(i);
196 ret = rdmsrl_safe(reg, &val);
197 if (ret)
198 goto msr_fail;
199 if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
200 bios_fail = 1;
201 val_fail = val;
202 reg_fail = reg;
203 }
204 }
205
206 if (x86_pmu.num_counters_fixed) {
207 reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
208 ret = rdmsrl_safe(reg, &val);
209 if (ret)
210 goto msr_fail;
211 for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
212 if (val & (0x03 << i*4)) {
213 bios_fail = 1;
214 val_fail = val;
215 reg_fail = reg;
216 }
217 }
218 }
219
 /*
  * Read the current value, change it and read it back to see if it
  * matches, this is needed to detect certain hardware emulators
  * (qemu/kvm) that don't trap on the MSR access and always return 0s.
  */
225 reg = x86_pmu_event_addr(0);
226 if (rdmsrl_safe(reg, &val))
227 goto msr_fail;
228 val ^= 0xffffUL;
229 ret = wrmsrl_safe(reg, val);
230 ret |= rdmsrl_safe(reg, &val_new);
231 if (ret || val != val_new)
232 goto msr_fail;
 /*
  * We still allow the PMU driver to operate:
  */
237 if (bios_fail) {
238 printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
239 printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail);
240 }
241
242 return true;
243
244msr_fail:
245 printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
246 printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
247 boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
248 reg, val_new);
249
250 return false;
251}
252
253static void hw_perf_event_destroy(struct perf_event *event)
254{
255 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
256 release_pmc_hardware();
257 release_ds_buffers();
258 mutex_unlock(&pmc_reserve_mutex);
259 }
260}
261
262static inline int x86_pmu_initialized(void)
263{
264 return x86_pmu.handle_irq != NULL;
265}
266
267static inline int
268set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
269{
270 struct perf_event_attr *attr = &event->attr;
271 unsigned int cache_type, cache_op, cache_result;
272 u64 config, val;
273
274 config = attr->config;
275
276 cache_type = (config >> 0) & 0xff;
277 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
278 return -EINVAL;
279
280 cache_op = (config >> 8) & 0xff;
281 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
282 return -EINVAL;
283
284 cache_result = (config >> 16) & 0xff;
285 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
286 return -EINVAL;
287
288 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
289
290 if (val == 0)
291 return -ENOENT;
292
293 if (val == -1)
294 return -EINVAL;
295
296 hwc->config |= val;
297 attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
298 return x86_pmu_extra_regs(val, event);
299}
300
301int x86_setup_perfctr(struct perf_event *event)
302{
303 struct perf_event_attr *attr = &event->attr;
304 struct hw_perf_event *hwc = &event->hw;
305 u64 config;
306
307 if (!is_sampling_event(event)) {
308 hwc->sample_period = x86_pmu.max_period;
309 hwc->last_period = hwc->sample_period;
310 local64_set(&hwc->period_left, hwc->sample_period);
311 }
312
313 if (attr->type == PERF_TYPE_RAW)
314 return x86_pmu_extra_regs(event->attr.config, event);
315
316 if (attr->type == PERF_TYPE_HW_CACHE)
317 return set_ext_hw_attr(hwc, event);
318
319 if (attr->config >= x86_pmu.max_events)
320 return -EINVAL;
321
 /*
  * The generic map:
  */
325 config = x86_pmu.event_map(attr->config);
326
327 if (config == 0)
328 return -ENOENT;
329
330 if (config == -1LL)
331 return -EINVAL;
332
 /*
  * Branch tracing:
  */
336 if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
337 !attr->freq && hwc->sample_period == 1) {
 /* BTS is not supported by this architecture. */
339 if (!x86_pmu.bts_active)
340 return -EOPNOTSUPP;
 /* BTS is currently only allowed for user-mode. */
343 if (!attr->exclude_kernel)
344 return -EOPNOTSUPP;
345 }
346
347 hwc->config |= config;
348
349 return 0;
350}
351
/*
 * check that branch_sample_type is compatible with
 * settings needed for precise_ip > 1 which implies
 * using the LBR to capture ALL taken branches at the
 * priv levels of the measurement
 */
358static inline int precise_br_compat(struct perf_event *event)
359{
360 u64 m = event->attr.branch_sample_type;
361 u64 b = 0;
362
363
364 if (!(m & PERF_SAMPLE_BRANCH_ANY))
365 return 0;
366
367 m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;
368
369 if (!event->attr.exclude_user)
370 b |= PERF_SAMPLE_BRANCH_USER;
371
372 if (!event->attr.exclude_kernel)
373 b |= PERF_SAMPLE_BRANCH_KERNEL;
374
375
376
377
378
379 return m == b;
380}
381
382int x86_pmu_hw_config(struct perf_event *event)
383{
384 if (event->attr.precise_ip) {
385 int precise = 0;
386
387
388 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
389 precise++;
390
391
392 if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
393 precise++;
394 }
395
396 if (event->attr.precise_ip > precise)
397 return -EOPNOTSUPP;
398
 /*
  * check that PEBS LBR correction does not conflict with
  * whatever the user is asking with attr->branch_sample_type
  */
402 if (event->attr.precise_ip > 1 &&
403 x86_pmu.intel_cap.pebs_format < 2) {
404 u64 *br_type = &event->attr.branch_sample_type;
405
406 if (has_branch_stack(event)) {
407 if (!precise_br_compat(event))
408 return -EOPNOTSUPP;
409
410
411
412 } else {
 /*
  * user did not specify branch_sample_type
  *
  * For PEBS fixups, we capture all
  * the branches at the priv level of the
  * event.
  */
420 *br_type = PERF_SAMPLE_BRANCH_ANY;
421
422 if (!event->attr.exclude_user)
423 *br_type |= PERF_SAMPLE_BRANCH_USER;
424
425 if (!event->attr.exclude_kernel)
426 *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
427 }
428 }
429 }
430
 /*
  * Generate PMC IRQs:
  * (keep 'enabled' bit clear for now)
  */
435 event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
436
 /*
  * Count user and OS events unless requested not to
  */
440 if (!event->attr.exclude_user)
441 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
442 if (!event->attr.exclude_kernel)
443 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
444
445 if (event->attr.type == PERF_TYPE_RAW)
446 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
447
448 return x86_setup_perfctr(event);
449}
450
/*
 * Setup the hardware configuration for a given attr_type
 */
454static int __x86_pmu_event_init(struct perf_event *event)
455{
456 int err;
457
458 if (!x86_pmu_initialized())
459 return -ENODEV;
460
461 err = 0;
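 /*
  * Take a reference on the PMC hardware; the first user reserves the
  * counter/eventsel MSRs and the DS (BTS/PEBS) buffers.
  */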
462 if (!atomic_inc_not_zero(&active_events)) {
463 mutex_lock(&pmc_reserve_mutex);
464 if (atomic_read(&active_events) == 0) {
465 if (!reserve_pmc_hardware())
466 err = -EBUSY;
467 else
468 reserve_ds_buffers();
469 }
470 if (!err)
471 atomic_inc(&active_events);
472 mutex_unlock(&pmc_reserve_mutex);
473 }
474 if (err)
475 return err;
476
477 event->destroy = hw_perf_event_destroy;
478
479 event->hw.idx = -1;
480 event->hw.last_cpu = -1;
481 event->hw.last_tag = ~0ULL;
482
483
484 event->hw.extra_reg.idx = EXTRA_REG_NONE;
485 event->hw.branch_reg.idx = EXTRA_REG_NONE;
486
487 return x86_pmu.hw_config(event);
488}
489
490void x86_pmu_disable_all(void)
491{
492 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
493 int idx;
494
495 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
496 u64 val;
497
498 if (!test_bit(idx, cpuc->active_mask))
499 continue;
500 rdmsrl(x86_pmu_config_addr(idx), val);
501 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
502 continue;
503 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
504 wrmsrl(x86_pmu_config_addr(idx), val);
505 }
506}
507
508static void x86_pmu_disable(struct pmu *pmu)
509{
510 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
511
512 if (!x86_pmu_initialized())
513 return;
514
515 if (!cpuc->enabled)
516 return;
517
518 cpuc->n_added = 0;
519 cpuc->enabled = 0;
520 barrier();
521
522 x86_pmu.disable_all();
523}
524
525void x86_pmu_enable_all(int added)
526{
527 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
528 int idx;
529
530 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
531 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
532
533 if (!test_bit(idx, cpuc->active_mask))
534 continue;
535
536 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
537 }
538}
539
540static struct pmu pmu;
541
542static inline int is_x86_event(struct perf_event *event)
543{
544 return event->pmu == &pmu;
545}
546
/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
 int weight;
 int event;      /* event index */
 int counter;    /* counter index */
 int unassigned; /* number of events to be assigned left */
 unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};
561
562
563#define SCHED_STATES_MAX 2
564
565struct perf_sched {
566 int max_weight;
567 int max_events;
568 struct perf_event **events;
569 struct sched_state state;
570 int saved_states;
571 struct sched_state saved[SCHED_STATES_MAX];
572};
573
/*
 * Initialize iterator that runs through all events and counters.
 */
577static void perf_sched_init(struct perf_sched *sched, struct perf_event **events,
578 int num, int wmin, int wmax)
579{
580 int idx;
581
582 memset(sched, 0, sizeof(*sched));
583 sched->max_events = num;
584 sched->max_weight = wmax;
585 sched->events = events;
586
587 for (idx = 0; idx < num; idx++) {
588 if (events[idx]->hw.constraint->weight == wmin)
589 break;
590 }
591
592 sched->state.event = idx;
593 sched->state.weight = wmin;
594 sched->state.unassigned = num;
595}
596
597static void perf_sched_save_state(struct perf_sched *sched)
598{
599 if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
600 return;
601
602 sched->saved[sched->saved_states] = sched->state;
603 sched->saved_states++;
604}
605
606static bool perf_sched_restore_state(struct perf_sched *sched)
607{
608 if (!sched->saved_states)
609 return false;
610
611 sched->saved_states--;
612 sched->state = sched->saved[sched->saved_states];
613
614
615 clear_bit(sched->state.counter++, sched->state.used);
616
617 return true;
618}
619
/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
624static bool __perf_sched_find_counter(struct perf_sched *sched)
625{
626 struct event_constraint *c;
627 int idx;
628
629 if (!sched->state.unassigned)
630 return false;
631
632 if (sched->state.event >= sched->max_events)
633 return false;
634
635 c = sched->events[sched->state.event]->hw.constraint;
636
637 if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
638 idx = INTEL_PMC_IDX_FIXED;
639 for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
640 if (!__test_and_set_bit(idx, sched->state.used))
641 goto done;
642 }
643 }
644
645 idx = sched->state.counter;
646 for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
647 if (!__test_and_set_bit(idx, sched->state.used))
648 goto done;
649 }
650
651 return false;
652
653done:
654 sched->state.counter = idx;
655
656 if (c->overlap)
657 perf_sched_save_state(sched);
658
659 return true;
660}
661
662static bool perf_sched_find_counter(struct perf_sched *sched)
663{
664 while (!__perf_sched_find_counter(sched)) {
665 if (!perf_sched_restore_state(sched))
666 return false;
667 }
668
669 return true;
670}
671
/*
 * Go through all unassigned events and find the next one to
 * schedule. Take events with the least weight first. Return true on
 * success.
 */
676static bool perf_sched_next_event(struct perf_sched *sched)
677{
678 struct event_constraint *c;
679
680 if (!sched->state.unassigned || !--sched->state.unassigned)
681 return false;
682
683 do {
684
685 sched->state.event++;
686 if (sched->state.event >= sched->max_events) {
687
688 sched->state.event = 0;
689 sched->state.weight++;
690 if (sched->state.weight > sched->max_weight)
691 return false;
692 }
693 c = sched->events[sched->state.event]->hw.constraint;
694 } while (c->weight != sched->state.weight);
695
696 sched->state.counter = 0;
697
698 return true;
699}
700
/*
 * Assign a counter for each event.
 */
704int perf_assign_events(struct perf_event **events, int n,
705 int wmin, int wmax, int *assign)
706{
707 struct perf_sched sched;
708
709 perf_sched_init(&sched, events, n, wmin, wmax);
710
711 do {
712 if (!perf_sched_find_counter(&sched))
713 break;
714 if (assign)
715 assign[sched.state.event] = sched.state.counter;
716 } while (perf_sched_next_event(&sched));
717
718 return sched.state.unassigned;
719}
720EXPORT_SYMBOL_GPL(perf_assign_events);
721
722int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
723{
724 struct event_constraint *c;
725 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
726 struct perf_event *e;
727 int i, wmin, wmax, num = 0;
728 struct hw_perf_event *hwc;
729
730 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
731
732 for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
733 hwc = &cpuc->event_list[i]->hw;
734 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
735 hwc->constraint = c;
736
737 wmin = min(wmin, c->weight);
738 wmax = max(wmax, c->weight);
739 }
740
 /*
  * fastpath, try to reuse previous register
  */
744 for (i = 0; i < n; i++) {
745 hwc = &cpuc->event_list[i]->hw;
746 c = hwc->constraint;
747
748
749 if (hwc->idx == -1)
750 break;
751
752
753 if (!test_bit(hwc->idx, c->idxmsk))
754 break;
755
756
757 if (test_bit(hwc->idx, used_mask))
758 break;
759
760 __set_bit(hwc->idx, used_mask);
761 if (assign)
762 assign[i] = hwc->idx;
763 }
764
765
766 if (i != n)
767 num = perf_assign_events(cpuc->event_list, n, wmin,
768 wmax, assign);
 /*
  * Mark the event as committed, so we do not put_constraint()
  * in case new events are added and fail scheduling.
  */
774 if (!num && assign) {
775 for (i = 0; i < n; i++) {
776 e = cpuc->event_list[i];
777 e->hw.flags |= PERF_X86_EVENT_COMMITTED;
778 }
779 }
 /*
  * scheduling failed or is just a simulation,
  * free resources if necessary
  */
784 if (!assign || num) {
785 for (i = 0; i < n; i++) {
786 e = cpuc->event_list[i];
 /*
  * do not put_constraint() on committed events,
  * because they are good to go
  */
791 if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
792 continue;
793
794 if (x86_pmu.put_event_constraints)
795 x86_pmu.put_event_constraints(cpuc, e);
796 }
797 }
798 return num ? -EINVAL : 0;
799}
800
/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
805static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
806{
807 struct perf_event *event;
808 int n, max_count;
809
810 max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
811
812
813 n = cpuc->n_events;
814
815 if (is_x86_event(leader)) {
816 if (n >= max_count)
817 return -EINVAL;
818 cpuc->event_list[n] = leader;
819 n++;
820 }
821 if (!dogrp)
822 return n;
823
824 list_for_each_entry(event, &leader->sibling_list, group_entry) {
825 if (!is_x86_event(event) ||
826 event->state <= PERF_EVENT_STATE_OFF)
827 continue;
828
829 if (n >= max_count)
830 return -EINVAL;
831
832 cpuc->event_list[n] = event;
833 n++;
834 }
835 return n;
836}
837
838static inline void x86_assign_hw_event(struct perf_event *event,
839 struct cpu_hw_events *cpuc, int i)
840{
841 struct hw_perf_event *hwc = &event->hw;
842
843 hwc->idx = cpuc->assign[i];
844 hwc->last_cpu = smp_processor_id();
845 hwc->last_tag = ++cpuc->tags[i];
846
847 if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
848 hwc->config_base = 0;
849 hwc->event_base = 0;
850 } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
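 /*
  * Fixed-function counters live behind MSR_ARCH_PERFMON_FIXED_CTR_CTRL
  * and MSR_ARCH_PERFMON_FIXED_CTR0; for RDPMC they are selected by
  * setting bit 30 of the counter index.
  */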
851 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
852 hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
853 hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
854 } else {
855 hwc->config_base = x86_pmu_config_addr(hwc->idx);
856 hwc->event_base = x86_pmu_event_addr(hwc->idx);
857 hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
858 }
859}
860
861static inline int match_prev_assignment(struct hw_perf_event *hwc,
862 struct cpu_hw_events *cpuc,
863 int i)
864{
865 return hwc->idx == cpuc->assign[i] &&
866 hwc->last_cpu == smp_processor_id() &&
867 hwc->last_tag == cpuc->tags[i];
868}
869
870static void x86_pmu_start(struct perf_event *event, int flags);
871
872static void x86_pmu_enable(struct pmu *pmu)
873{
874 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
875 struct perf_event *event;
876 struct hw_perf_event *hwc;
877 int i, added = cpuc->n_added;
878
879 if (!x86_pmu_initialized())
880 return;
881
882 if (cpuc->enabled)
883 return;
884
885 if (cpuc->n_added) {
886 int n_running = cpuc->n_events - cpuc->n_added;
 /*
  * apply assignment obtained either from
  * hw_perf_group_sched_in() or x86_pmu_enable()
  *
  * step1: save events moving to new counters
  */
893 for (i = 0; i < n_running; i++) {
894 event = cpuc->event_list[i];
895 hwc = &event->hw;
896
 /*
  * we can avoid reprogramming counter if:
  * - assigned same counter as last time
  * - running on same CPU as last time
  * - no other event has used the counter since
  */
903 if (hwc->idx == -1 ||
904 match_prev_assignment(hwc, cpuc, i))
905 continue;
906
 /*
  * Ensure we don't accidentally enable a stopped
  * counter simply because we rescheduled.
  */
911 if (hwc->state & PERF_HES_STOPPED)
912 hwc->state |= PERF_HES_ARCH;
913
914 x86_pmu_stop(event, PERF_EF_UPDATE);
915 }
916
 /*
  * step2: reprogram moved events into new counters
  */
920 for (i = 0; i < cpuc->n_events; i++) {
921 event = cpuc->event_list[i];
922 hwc = &event->hw;
923
924 if (!match_prev_assignment(hwc, cpuc, i))
925 x86_assign_hw_event(event, cpuc, i);
926 else if (i < n_running)
927 continue;
928
929 if (hwc->state & PERF_HES_ARCH)
930 continue;
931
932 x86_pmu_start(event, PERF_EF_RELOAD);
933 }
934 cpuc->n_added = 0;
935 perf_events_lapic_init();
936 }
937
938 cpuc->enabled = 1;
939 barrier();
940
941 x86_pmu.enable_all(added);
942}
943
944static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
945
/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
950int x86_perf_event_set_period(struct perf_event *event)
951{
952 struct hw_perf_event *hwc = &event->hw;
953 s64 left = local64_read(&hwc->period_left);
954 s64 period = hwc->sample_period;
955 int ret = 0, idx = hwc->idx;
956
957 if (idx == INTEL_PMC_IDX_FIXED_BTS)
958 return 0;
959
 /*
  * If we are way outside a reasonable range then just skip forward:
  */
963 if (unlikely(left <= -period)) {
964 left = period;
965 local64_set(&hwc->period_left, left);
966 hwc->last_period = period;
967 ret = 1;
968 }
969
970 if (unlikely(left <= 0)) {
971 left += period;
972 local64_set(&hwc->period_left, left);
973 hwc->last_period = period;
974 ret = 1;
975 }
 /*
  * Quirk: certain CPUs don't like it if just 1 hw_event is left:
  */
979 if (unlikely(left < 2))
980 left = 2;
981
982 if (left > x86_pmu.max_period)
983 left = x86_pmu.max_period;
984
985 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 /*
  * The hw event starts counting from this event offset,
  * mark it to be able to extract future deltas:
  */
991 local64_set(&hwc->prev_count, (u64)-left);
992
993 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
994
 /*
  * Due to erratum on certain CPUs we need
  * a second write to be sure the register
  * is updated properly
  */
1000 if (x86_pmu.perfctr_second_write) {
1001 wrmsrl(hwc->event_base,
1002 (u64)(-left) & x86_pmu.cntval_mask);
1003 }
1004
1005 perf_event_update_userpage(event);
1006
1007 return ret;
1008}
1009
1010void x86_pmu_enable_event(struct perf_event *event)
1011{
1012 if (__this_cpu_read(cpu_hw_events.enabled))
1013 __x86_pmu_enable_event(&event->hw,
1014 ARCH_PERFMON_EVENTSEL_ENABLE);
1015}
1016
/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
1023static int x86_pmu_add(struct perf_event *event, int flags)
1024{
1025 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1026 struct hw_perf_event *hwc;
1027 int assign[X86_PMC_IDX_MAX];
1028 int n, n0, ret;
1029
1030 hwc = &event->hw;
1031
1032 perf_pmu_disable(event->pmu);
1033 n0 = cpuc->n_events;
1034 ret = n = collect_events(cpuc, event, false);
1035 if (ret < 0)
1036 goto out;
1037
1038 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1039 if (!(flags & PERF_EF_START))
1040 hwc->state |= PERF_HES_ARCH;
1041
 /*
  * If group events scheduling transaction was started,
  * skip the schedulability test here, it will be performed
  * at commit time (->commit_txn) as a whole.
  */
1047 if (cpuc->group_flag & PERF_EVENT_TXN)
1048 goto done_collect;
1049
1050 ret = x86_pmu.schedule_events(cpuc, n, assign);
1051 if (ret)
1052 goto out;
1053
1054
1055
1056
1057 memcpy(cpuc->assign, assign, n*sizeof(int));
1058
1059done_collect:
1060
1061
1062
1063
1064 cpuc->n_events = n;
1065 cpuc->n_added += n - n0;
1066 cpuc->n_txn += n - n0;
1067
1068 ret = 0;
1069out:
1070 perf_pmu_enable(event->pmu);
1071 return ret;
1072}
1073
1074static void x86_pmu_start(struct perf_event *event, int flags)
1075{
1076 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1077 int idx = event->hw.idx;
1078
1079 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1080 return;
1081
1082 if (WARN_ON_ONCE(idx == -1))
1083 return;
1084
1085 if (flags & PERF_EF_RELOAD) {
1086 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1087 x86_perf_event_set_period(event);
1088 }
1089
1090 event->hw.state = 0;
1091
1092 cpuc->events[idx] = event;
1093 __set_bit(idx, cpuc->active_mask);
1094 __set_bit(idx, cpuc->running);
1095 x86_pmu.enable(event);
1096 perf_event_update_userpage(event);
1097}
1098
1099void perf_event_print_debug(void)
1100{
1101 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1102 u64 pebs;
1103 struct cpu_hw_events *cpuc;
1104 unsigned long flags;
1105 int cpu, idx;
1106
1107 if (!x86_pmu.num_counters)
1108 return;
1109
1110 local_irq_save(flags);
1111
1112 cpu = smp_processor_id();
1113 cpuc = &per_cpu(cpu_hw_events, cpu);
1114
1115 if (x86_pmu.version >= 2) {
1116 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1117 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1118 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1119 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1120 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1121
1122 pr_info("\n");
1123 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1124 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1125 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1126 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
1127 pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
1128 }
1129 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1130
1131 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1132 rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
1133 rdmsrl(x86_pmu_event_addr(idx), pmc_count);
1134
1135 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1136
1137 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
1138 cpu, idx, pmc_ctrl);
1139 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
1140 cpu, idx, pmc_count);
1141 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
1142 cpu, idx, prev_left);
1143 }
1144 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
1145 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1146
1147 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1148 cpu, idx, pmc_count);
1149 }
1150 local_irq_restore(flags);
1151}
1152
1153void x86_pmu_stop(struct perf_event *event, int flags)
1154{
1155 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1156 struct hw_perf_event *hwc = &event->hw;
1157
1158 if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1159 x86_pmu.disable(event);
1160 cpuc->events[hwc->idx] = NULL;
1161 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1162 hwc->state |= PERF_HES_STOPPED;
1163 }
1164
1165 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
 /*
  * Drain the remaining delta count out of an event
  * that we are disabling:
  */
1170 x86_perf_event_update(event);
1171 hwc->state |= PERF_HES_UPTODATE;
1172 }
1173}
1174
1175static void x86_pmu_del(struct perf_event *event, int flags)
1176{
1177 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1178 int i;
1179
1180
1181
1182
1183 event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
1184
 /*
  * If we're called during a txn, we don't need to do anything.
  * The events never got scheduled and ->cancel_txn will truncate
  * the event_list.
  *
  * XXX assumes any ->del() called during a TXN will only be on
  * an event added during that same TXN.
  */
1193 if (cpuc->group_flag & PERF_EVENT_TXN)
1194 return;
1195
1196
1197
1198
1199 x86_pmu_stop(event, PERF_EF_UPDATE);
1200
1201 for (i = 0; i < cpuc->n_events; i++) {
1202 if (event == cpuc->event_list[i])
1203 break;
1204 }
1205
1206 if (WARN_ON_ONCE(i == cpuc->n_events))
1207 return;
1208
1209
1210 if (i >= cpuc->n_events - cpuc->n_added)
1211 --cpuc->n_added;
1212
1213 if (x86_pmu.put_event_constraints)
1214 x86_pmu.put_event_constraints(cpuc, event);
1215
1216
1217 while (++i < cpuc->n_events)
1218 cpuc->event_list[i-1] = cpuc->event_list[i];
1219 --cpuc->n_events;
1220
1221 perf_event_update_userpage(event);
1222}
1223
1224int x86_pmu_handle_irq(struct pt_regs *regs)
1225{
1226 struct perf_sample_data data;
1227 struct cpu_hw_events *cpuc;
1228 struct perf_event *event;
1229 int idx, handled = 0;
1230 u64 val;
1231
1232 cpuc = this_cpu_ptr(&cpu_hw_events);
1233
 /*
  * Some chipsets need to unmask the LVTPC in a particular spot
  * inside the nmi handler. As a result, the unmasking was pushed
  * into all the nmi handlers.
  *
  * This generic handler doesn't seem to have any issues where the
  * unmasking occurs so it was left at the top.
  */
1242 apic_write(APIC_LVTPC, APIC_DM_NMI);
1243
1244 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1245 if (!test_bit(idx, cpuc->active_mask)) {
 /*
  * Though we deactivated the counter some cpus
  * might still deliver spurious interrupts still
  * in flight. Catch them:
  */
1251 if (__test_and_clear_bit(idx, cpuc->running))
1252 handled++;
1253 continue;
1254 }
1255
1256 event = cpuc->events[idx];
1257
1258 val = x86_perf_event_update(event);
1259 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
1260 continue;
1261
1262
1263
1264
1265 handled++;
1266 perf_sample_data_init(&data, 0, event->hw.last_period);
1267
1268 if (!x86_perf_event_set_period(event))
1269 continue;
1270
1271 if (perf_event_overflow(event, &data, regs))
1272 x86_pmu_stop(event, 0);
1273 }
1274
1275 if (handled)
1276 inc_irq_stat(apic_perf_irqs);
1277
1278 return handled;
1279}
1280
1281void perf_events_lapic_init(void)
1282{
1283 if (!x86_pmu.apic || !x86_pmu_initialized())
1284 return;
1285
 /*
  * Always use NMI for PMU
  */
1289 apic_write(APIC_LVTPC, APIC_DM_NMI);
1290}
1291
1292static int
1293perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1294{
1295 u64 start_clock;
1296 u64 finish_clock;
1297 int ret;
1298
1299 if (!atomic_read(&active_events))
1300 return NMI_DONE;
1301
1302 start_clock = sched_clock();
1303 ret = x86_pmu.handle_irq(regs);
1304 finish_clock = sched_clock();
1305
1306 perf_sample_event_took(finish_clock - start_clock);
1307
1308 return ret;
1309}
1310NOKPROBE_SYMBOL(perf_event_nmi_handler);
1311
1312struct event_constraint emptyconstraint;
1313struct event_constraint unconstrained;
1314
1315static int
1316x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1317{
1318 unsigned int cpu = (long)hcpu;
1319 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1320 int ret = NOTIFY_OK;
1321
1322 switch (action & ~CPU_TASKS_FROZEN) {
1323 case CPU_UP_PREPARE:
1324 cpuc->kfree_on_online = NULL;
1325 if (x86_pmu.cpu_prepare)
1326 ret = x86_pmu.cpu_prepare(cpu);
1327 break;
1328
1329 case CPU_STARTING:
1330 if (x86_pmu.attr_rdpmc)
1331 set_in_cr4(X86_CR4_PCE);
1332 if (x86_pmu.cpu_starting)
1333 x86_pmu.cpu_starting(cpu);
1334 break;
1335
1336 case CPU_ONLINE:
1337 kfree(cpuc->kfree_on_online);
1338 break;
1339
1340 case CPU_DYING:
1341 if (x86_pmu.cpu_dying)
1342 x86_pmu.cpu_dying(cpu);
1343 break;
1344
1345 case CPU_UP_CANCELED:
1346 case CPU_DEAD:
1347 if (x86_pmu.cpu_dead)
1348 x86_pmu.cpu_dead(cpu);
1349 break;
1350
1351 default:
1352 break;
1353 }
1354
1355 return ret;
1356}
1357
1358static void __init pmu_check_apic(void)
1359{
1360 if (cpu_has_apic)
1361 return;
1362
1363 x86_pmu.apic = 0;
1364 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1365 pr_info("no hardware sampling interrupt available.\n");
1366
 /*
  * If we have a PMU initialized but no APIC
  * interrupts, we cannot sample hardware
  * events (user-space has to fall back and
  * sample via a hrtimer based software event):
  */
1373 pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
1374
1375}
1376
1377static struct attribute_group x86_pmu_format_group = {
1378 .name = "format",
1379 .attrs = NULL,
1380};
1381
/*
 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 * out of events_attr attributes.
 */
1386static void __init filter_events(struct attribute **attrs)
1387{
1388 struct device_attribute *d;
1389 struct perf_pmu_events_attr *pmu_attr;
1390 int i, j;
1391
1392 for (i = 0; attrs[i]; i++) {
1393 d = (struct device_attribute *)attrs[i];
1394 pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
1395
1396 if (pmu_attr->event_str)
1397 continue;
1398 if (x86_pmu.event_map(i))
1399 continue;
1400
1401 for (j = i; attrs[j]; j++)
1402 attrs[j] = attrs[j + 1];
1403
1404
1405 i--;
1406 }
1407}
1408
/* Merge two pointer arrays */
1410static __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
1411{
1412 struct attribute **new;
1413 int j, i;
1414
1415 for (j = 0; a[j]; j++)
1416 ;
1417 for (i = 0; b[i]; i++)
1418 j++;
1419 j++;
1420
1421 new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
1422 if (!new)
1423 return NULL;
1424
1425 j = 0;
1426 for (i = 0; a[i]; i++)
1427 new[j++] = a[i];
1428 for (i = 0; b[i]; i++)
1429 new[j++] = b[i];
1430 new[j] = NULL;
1431
1432 return new;
1433}
1434
1435ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
1436 char *page)
1437{
1438 struct perf_pmu_events_attr *pmu_attr = \
1439 container_of(attr, struct perf_pmu_events_attr, attr);
1440 u64 config = x86_pmu.event_map(pmu_attr->id);
1441
1442
1443 if (pmu_attr->event_str)
1444 return sprintf(page, "%s", pmu_attr->event_str);
1445
1446 return x86_pmu.events_sysfs_show(page, config);
1447}
1448
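/*
 * The EVENT_ATTR() entries below generate the generic event aliases
 * (e.g. "cpu-cycles") exported under the PMU's "events" sysfs group;
 * events_sysfs_show() formats each one via x86_pmu.event_map().
 */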
1449EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1450EVENT_ATTR(instructions, INSTRUCTIONS );
1451EVENT_ATTR(cache-references, CACHE_REFERENCES );
1452EVENT_ATTR(cache-misses, CACHE_MISSES );
1453EVENT_ATTR(branch-instructions, BRANCH_INSTRUCTIONS );
1454EVENT_ATTR(branch-misses, BRANCH_MISSES );
1455EVENT_ATTR(bus-cycles, BUS_CYCLES );
1456EVENT_ATTR(stalled-cycles-frontend, STALLED_CYCLES_FRONTEND );
1457EVENT_ATTR(stalled-cycles-backend, STALLED_CYCLES_BACKEND );
1458EVENT_ATTR(ref-cycles, REF_CPU_CYCLES );
1459
1460static struct attribute *empty_attrs;
1461
1462static struct attribute *events_attr[] = {
1463 EVENT_PTR(CPU_CYCLES),
1464 EVENT_PTR(INSTRUCTIONS),
1465 EVENT_PTR(CACHE_REFERENCES),
1466 EVENT_PTR(CACHE_MISSES),
1467 EVENT_PTR(BRANCH_INSTRUCTIONS),
1468 EVENT_PTR(BRANCH_MISSES),
1469 EVENT_PTR(BUS_CYCLES),
1470 EVENT_PTR(STALLED_CYCLES_FRONTEND),
1471 EVENT_PTR(STALLED_CYCLES_BACKEND),
1472 EVENT_PTR(REF_CPU_CYCLES),
1473 NULL,
1474};
1475
1476static struct attribute_group x86_pmu_events_group = {
1477 .name = "events",
1478 .attrs = events_attr,
1479};
1480
1481ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
1482{
1483 u64 umask = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
1484 u64 cmask = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
1485 bool edge = (config & ARCH_PERFMON_EVENTSEL_EDGE);
1486 bool pc = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
1487 bool any = (config & ARCH_PERFMON_EVENTSEL_ANY);
1488 bool inv = (config & ARCH_PERFMON_EVENTSEL_INV);
1489 ssize_t ret;
1490
1491
1492
1493
1494
1495 ret = sprintf(page, "event=0x%02llx", event);
1496
1497 if (umask)
1498 ret += sprintf(page + ret, ",umask=0x%02llx", umask);
1499
1500 if (edge)
1501 ret += sprintf(page + ret, ",edge");
1502
1503 if (pc)
1504 ret += sprintf(page + ret, ",pc");
1505
1506 if (any)
1507 ret += sprintf(page + ret, ",any");
1508
1509 if (inv)
1510 ret += sprintf(page + ret, ",inv");
1511
1512 if (cmask)
1513 ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
1514
1515 ret += sprintf(page + ret, "\n");
1516
1517 return ret;
1518}
1519
1520static int __init init_hw_perf_events(void)
1521{
1522 struct x86_pmu_quirk *quirk;
1523 int err;
1524
1525 pr_info("Performance Events: ");
1526
1527 switch (boot_cpu_data.x86_vendor) {
1528 case X86_VENDOR_INTEL:
1529 err = intel_pmu_init();
1530 break;
1531 case X86_VENDOR_AMD:
1532 err = amd_pmu_init();
1533 break;
1534 default:
1535 err = -ENOTSUPP;
1536 }
1537 if (err != 0) {
1538 pr_cont("no PMU driver, software events only.\n");
1539 return 0;
1540 }
1541
1542 pmu_check_apic();
1543
1544
1545 if (!check_hw_exists())
1546 return 0;
1547
1548 pr_cont("%s PMU driver.\n", x86_pmu.name);
1549
1550 x86_pmu.attr_rdpmc = 1;
1551
1552 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1553 quirk->func();
1554
1555 if (!x86_pmu.intel_ctrl)
1556 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1557
1558 perf_events_lapic_init();
1559 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
1560
1561 unconstrained = (struct event_constraint)
1562 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1563 0, x86_pmu.num_counters, 0, 0);
1564
1565 x86_pmu_format_group.attrs = x86_pmu.format_attrs;
1566
1567 if (x86_pmu.event_attrs)
1568 x86_pmu_events_group.attrs = x86_pmu.event_attrs;
1569
1570 if (!x86_pmu.events_sysfs_show)
1571 x86_pmu_events_group.attrs = &empty_attrs;
1572 else
1573 filter_events(x86_pmu_events_group.attrs);
1574
1575 if (x86_pmu.cpu_events) {
1576 struct attribute **tmp;
1577
1578 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
1579 if (!WARN_ON(!tmp))
1580 x86_pmu_events_group.attrs = tmp;
1581 }
1582
1583 pr_info("... version: %d\n", x86_pmu.version);
1584 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1585 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1586 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
1587 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
1588 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
1589 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
1590
1591 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1592 perf_cpu_notifier(x86_pmu_notifier);
1593
1594 return 0;
1595}
1596early_initcall(init_hw_perf_events);
1597
1598static inline void x86_pmu_read(struct perf_event *event)
1599{
1600 x86_perf_event_update(event);
1601}
1602
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
1608static void x86_pmu_start_txn(struct pmu *pmu)
1609{
1610 perf_pmu_disable(pmu);
1611 __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
1612 __this_cpu_write(cpu_hw_events.n_txn, 0);
1613}
1614
/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
1620static void x86_pmu_cancel_txn(struct pmu *pmu)
1621{
1622 __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
1623
1624
1625
1626
1627 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1628 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
1629 perf_pmu_enable(pmu);
1630}
1631
/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 *
 * Does not cancel the transaction on failure; expects the caller to do this.
 */
1639static int x86_pmu_commit_txn(struct pmu *pmu)
1640{
1641 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1642 int assign[X86_PMC_IDX_MAX];
1643 int n, ret;
1644
1645 n = cpuc->n_events;
1646
1647 if (!x86_pmu_initialized())
1648 return -EAGAIN;
1649
1650 ret = x86_pmu.schedule_events(cpuc, n, assign);
1651 if (ret)
1652 return ret;
1653
1654
1655
1656
1657
1658 memcpy(cpuc->assign, assign, n*sizeof(int));
1659
1660 cpuc->group_flag &= ~PERF_EVENT_TXN;
1661 perf_pmu_enable(pmu);
1662 return 0;
1663}
1664
/*
 * a fake_cpuc is used to validate event groups. Due to
 * the extra reg logic, we need to also allocate a fake
 * per_core and per_cpu structure. Otherwise, group events
 * using extra reg may conflict without the kernel being
 * able to catch this when the last event gets added to
 * the group.
 */
1672static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1673{
1674 kfree(cpuc->shared_regs);
1675 kfree(cpuc);
1676}
1677
1678static struct cpu_hw_events *allocate_fake_cpuc(void)
1679{
1680 struct cpu_hw_events *cpuc;
1681 int cpu = raw_smp_processor_id();
1682
1683 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1684 if (!cpuc)
1685 return ERR_PTR(-ENOMEM);
1686
1687
1688 if (x86_pmu.extra_regs) {
1689 cpuc->shared_regs = allocate_shared_regs(cpu);
1690 if (!cpuc->shared_regs)
1691 goto error;
1692 }
1693 cpuc->is_fake = 1;
1694 return cpuc;
1695error:
1696 free_fake_cpuc(cpuc);
1697 return ERR_PTR(-ENOMEM);
1698}
1699
/*
 * validate that we can schedule this event
 */
1703static int validate_event(struct perf_event *event)
1704{
1705 struct cpu_hw_events *fake_cpuc;
1706 struct event_constraint *c;
1707 int ret = 0;
1708
1709 fake_cpuc = allocate_fake_cpuc();
1710 if (IS_ERR(fake_cpuc))
1711 return PTR_ERR(fake_cpuc);
1712
1713 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1714
1715 if (!c || !c->weight)
1716 ret = -EINVAL;
1717
1718 if (x86_pmu.put_event_constraints)
1719 x86_pmu.put_event_constraints(fake_cpuc, event);
1720
1721 free_fake_cpuc(fake_cpuc);
1722
1723 return ret;
1724}
1725
/*
 * validate a single event group
 *
 * validation includes:
 *        - check events are compatible with each other
 *        - events do not compete for the same counter
 *        - number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
1737static int validate_group(struct perf_event *event)
1738{
1739 struct perf_event *leader = event->group_leader;
1740 struct cpu_hw_events *fake_cpuc;
1741 int ret = -EINVAL, n;
1742
1743 fake_cpuc = allocate_fake_cpuc();
1744 if (IS_ERR(fake_cpuc))
1745 return PTR_ERR(fake_cpuc);
1746
 /*
  * the event is not yet connected with its
  * siblings therefore we must first collect
  * existing siblings, then add the new event
  * before we can simulate the scheduling
  */
1752 n = collect_events(fake_cpuc, leader, true);
1753 if (n < 0)
1754 goto out;
1755
1756 fake_cpuc->n_events = n;
1757 n = collect_events(fake_cpuc, event, false);
1758 if (n < 0)
1759 goto out;
1760
1761 fake_cpuc->n_events = n;
1762
1763 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
1764
1765out:
1766 free_fake_cpuc(fake_cpuc);
1767 return ret;
1768}
1769
1770static int x86_pmu_event_init(struct perf_event *event)
1771{
1772 struct pmu *tmp;
1773 int err;
1774
1775 switch (event->attr.type) {
1776 case PERF_TYPE_RAW:
1777 case PERF_TYPE_HARDWARE:
1778 case PERF_TYPE_HW_CACHE:
1779 break;
1780
1781 default:
1782 return -ENOENT;
1783 }
1784
1785 err = __x86_pmu_event_init(event);
1786 if (!err) {
 /*
  * we temporarily connect event to its pmu
  * such that validate_group() can classify
  * it as an x86 event using is_x86_event()
  */
1792 tmp = event->pmu;
1793 event->pmu = &pmu;
1794
1795 if (event->group_leader != event)
1796 err = validate_group(event);
1797 else
1798 err = validate_event(event);
1799
1800 event->pmu = tmp;
1801 }
1802 if (err) {
1803 if (event->destroy)
1804 event->destroy(event);
1805 }
1806
1807 return err;
1808}
1809
1810static int x86_pmu_event_idx(struct perf_event *event)
1811{
1812 int idx = event->hw.idx;
1813
1814 if (!x86_pmu.attr_rdpmc)
1815 return 0;
1816
1817 if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
1818 idx -= INTEL_PMC_IDX_FIXED;
1819 idx |= 1 << 30;
1820 }
1821
1822 return idx + 1;
1823}
1824
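/*
 * The "rdpmc" device attribute below lets the admin toggle userspace
 * RDPMC access (CR4.PCE) system-wide; since the PMU is registered as
 * "cpu", it is typically exposed as
 * /sys/bus/event_source/devices/cpu/rdpmc (exact path is an assumption
 * based on the registration below, not spelled out in this file).
 */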
1825static ssize_t get_attr_rdpmc(struct device *cdev,
1826 struct device_attribute *attr,
1827 char *buf)
1828{
1829 return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
1830}
1831
1832static void change_rdpmc(void *info)
1833{
1834 bool enable = !!(unsigned long)info;
1835
1836 if (enable)
1837 set_in_cr4(X86_CR4_PCE);
1838 else
1839 clear_in_cr4(X86_CR4_PCE);
1840}
1841
1842static ssize_t set_attr_rdpmc(struct device *cdev,
1843 struct device_attribute *attr,
1844 const char *buf, size_t count)
1845{
1846 unsigned long val;
1847 ssize_t ret;
1848
1849 ret = kstrtoul(buf, 0, &val);
1850 if (ret)
1851 return ret;
1852
1853 if (x86_pmu.attr_rdpmc_broken)
1854 return -ENOTSUPP;
1855
1856 if (!!val != !!x86_pmu.attr_rdpmc) {
1857 x86_pmu.attr_rdpmc = !!val;
1858 on_each_cpu(change_rdpmc, (void *)val, 1);
1859 }
1860
1861 return count;
1862}
1863
1864static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
1865
1866static struct attribute *x86_pmu_attrs[] = {
1867 &dev_attr_rdpmc.attr,
1868 NULL,
1869};
1870
1871static struct attribute_group x86_pmu_attr_group = {
1872 .attrs = x86_pmu_attrs,
1873};
1874
1875static const struct attribute_group *x86_pmu_attr_groups[] = {
1876 &x86_pmu_attr_group,
1877 &x86_pmu_format_group,
1878 &x86_pmu_events_group,
1879 NULL,
1880};
1881
1882static void x86_pmu_flush_branch_stack(void)
1883{
1884 if (x86_pmu.flush_branch_stack)
1885 x86_pmu.flush_branch_stack();
1886}
1887
1888void perf_check_microcode(void)
1889{
1890 if (x86_pmu.check_microcode)
1891 x86_pmu.check_microcode();
1892}
1893EXPORT_SYMBOL_GPL(perf_check_microcode);
1894
1895static struct pmu pmu = {
1896 .pmu_enable = x86_pmu_enable,
1897 .pmu_disable = x86_pmu_disable,
1898
1899 .attr_groups = x86_pmu_attr_groups,
1900
1901 .event_init = x86_pmu_event_init,
1902
1903 .add = x86_pmu_add,
1904 .del = x86_pmu_del,
1905 .start = x86_pmu_start,
1906 .stop = x86_pmu_stop,
1907 .read = x86_pmu_read,
1908
1909 .start_txn = x86_pmu_start_txn,
1910 .cancel_txn = x86_pmu_cancel_txn,
1911 .commit_txn = x86_pmu_commit_txn,
1912
1913 .event_idx = x86_pmu_event_idx,
1914 .flush_branch_stack = x86_pmu_flush_branch_stack,
1915};
1916
1917void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1918{
1919 struct cyc2ns_data *data;
1920
1921 userpg->cap_user_time = 0;
1922 userpg->cap_user_time_zero = 0;
1923 userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
1924 userpg->pmc_width = x86_pmu.cntval_bits;
1925
1926 if (!sched_clock_stable())
1927 return;
1928
1929 data = cyc2ns_read_begin();
1930
1931 userpg->cap_user_time = 1;
1932 userpg->time_mult = data->cyc2ns_mul;
1933 userpg->time_shift = data->cyc2ns_shift;
1934 userpg->time_offset = data->cyc2ns_offset - now;
1935
1936 userpg->cap_user_time_zero = 1;
1937 userpg->time_zero = data->cyc2ns_offset;
1938
1939 cyc2ns_read_end(data);
1940}
1941
1942
/*
 * callchain support
 */
1946static int backtrace_stack(void *data, char *name)
1947{
1948 return 0;
1949}
1950
1951static void backtrace_address(void *data, unsigned long addr, int reliable)
1952{
1953 struct perf_callchain_entry *entry = data;
1954
1955 perf_callchain_store(entry, addr);
1956}
1957
1958static const struct stacktrace_ops backtrace_ops = {
1959 .stack = backtrace_stack,
1960 .address = backtrace_address,
1961 .walk_stack = print_context_stack_bp,
1962};
1963
1964void
1965perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1966{
1967 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1968
1969 return;
1970 }
1971
1972 perf_callchain_store(entry, regs->ip);
1973
1974 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
1975}
1976
1977static inline int
1978valid_user_frame(const void __user *fp, unsigned long size)
1979{
1980 return (__range_not_ok(fp, size, TASK_SIZE) == 0);
1981}
1982
1983static unsigned long get_segment_base(unsigned int segment)
1984{
1985 struct desc_struct *desc;
1986 int idx = segment >> 3;
1987
1988 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1989 if (idx > LDT_ENTRIES)
1990 return 0;
1991
1992 if (idx > current->active_mm->context.size)
1993 return 0;
1994
1995 desc = current->active_mm->context.ldt;
1996 } else {
1997 if (idx > GDT_ENTRIES)
1998 return 0;
1999
2000 desc = raw_cpu_ptr(gdt_page.gdt);
2001 }
2002
2003 return get_desc_base(desc + idx);
2004}
2005
2006#ifdef CONFIG_COMPAT
2007
2008#include <asm/compat.h>
2009
2010static inline int
2011perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
2012{
2013
2014 unsigned long ss_base, cs_base;
2015 struct stack_frame_ia32 frame;
2016 const void __user *fp;
2017
2018 if (!test_thread_flag(TIF_IA32))
2019 return 0;
2020
2021 cs_base = get_segment_base(regs->cs);
2022 ss_base = get_segment_base(regs->ss);
2023
2024 fp = compat_ptr(ss_base + regs->bp);
2025 while (entry->nr < PERF_MAX_STACK_DEPTH) {
2026 unsigned long bytes;
2027 frame.next_frame = 0;
2028 frame.return_address = 0;
2029
2030 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
2031 if (bytes != 0)
2032 break;
2033
2034 if (!valid_user_frame(fp, sizeof(frame)))
2035 break;
2036
2037 perf_callchain_store(entry, cs_base + frame.return_address);
2038 fp = compat_ptr(ss_base + frame.next_frame);
2039 }
2040 return 1;
2041}
2042#else
2043static inline int
2044perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
2045{
2046 return 0;
2047}
2048#endif
2049
2050void
2051perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
2052{
2053 struct stack_frame frame;
2054 const void __user *fp;
2055
2056 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2057
2058 return;
2059 }
2060
 /*
  * We don't know what to do with VM86 stacks.. ignore them for now.
  */
2064 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
2065 return;
2066
2067 fp = (void __user *)regs->bp;
2068
2069 perf_callchain_store(entry, regs->ip);
2070
2071 if (!current->mm)
2072 return;
2073
2074 if (perf_callchain_user32(regs, entry))
2075 return;
2076
2077 while (entry->nr < PERF_MAX_STACK_DEPTH) {
2078 unsigned long bytes;
2079 frame.next_frame = NULL;
2080 frame.return_address = 0;
2081
2082 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
2083 if (bytes != 0)
2084 break;
2085
2086 if (!valid_user_frame(fp, sizeof(frame)))
2087 break;
2088
2089 perf_callchain_store(entry, frame.return_address);
2090 fp = frame.next_frame;
2091 }
2092}
2093
/*
 * Deal with code segments that are not flat: return the base address
 * of regs->cs so that perf_instruction_pointer() can turn the
 * (segmented) instruction pointer into a linear address. This matters
 * for VM86 mode and for 32-bit code running with a non-default %cs.
 */
2107static unsigned long code_segment_base(struct pt_regs *regs)
2108{
2109
2110
2111
2112
2113 if (regs->flags & X86_VM_MASK)
2114 return 0x10 * regs->cs;
2115
2116
2117
2118
2119
2120#ifdef CONFIG_X86_32
2121 if (user_mode(regs) && regs->cs != __USER_CS)
2122 return get_segment_base(regs->cs);
2123#else
2124 if (test_thread_flag(TIF_IA32)) {
2125 if (user_mode(regs) && regs->cs != __USER32_CS)
2126 return get_segment_base(regs->cs);
2127 }
2128#endif
2129 return 0;
2130}
2131
2132unsigned long perf_instruction_pointer(struct pt_regs *regs)
2133{
2134 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
2135 return perf_guest_cbs->get_guest_ip();
2136
2137 return regs->ip + code_segment_base(regs);
2138}
2139
2140unsigned long perf_misc_flags(struct pt_regs *regs)
2141{
2142 int misc = 0;
2143
2144 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2145 if (perf_guest_cbs->is_user_mode())
2146 misc |= PERF_RECORD_MISC_GUEST_USER;
2147 else
2148 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
2149 } else {
2150 if (user_mode(regs))
2151 misc |= PERF_RECORD_MISC_USER;
2152 else
2153 misc |= PERF_RECORD_MISC_KERNEL;
2154 }
2155
2156 if (regs->flags & PERF_EFLAGS_EXACT)
2157 misc |= PERF_RECORD_MISC_EXACT_IP;
2158
2159 return misc;
2160}
2161
2162void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
2163{
2164 cap->version = x86_pmu.version;
2165 cap->num_counters_gp = x86_pmu.num_counters;
2166 cap->num_counters_fixed = x86_pmu.num_counters_fixed;
2167 cap->bit_width_gp = x86_pmu.cntval_bits;
2168 cap->bit_width_fixed = x86_pmu.cntval_bits;
2169 cap->events_mask = (unsigned int)x86_pmu.events_maskl;
2170 cap->events_mask_len = x86_pmu.events_mask_len;
2171}
2172EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
2173