/*
 * Performance events x86 architecture header
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/intel_ds.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,	/* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS	0x0800 /* use large PEBS */


struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 */
#define LARGE_PEBS_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
	PERF_SAMPLE_PERIOD)

/*
 * PERF_REG_X86_* are register indices, not bit masks; build the mask
 * of general-purpose registers exposed in PEBS samples explicitly.
 */
#define PEBS_REGS				\
	((1ULL << PERF_REG_X86_AX)    |		\
	 (1ULL << PERF_REG_X86_BX)    |		\
	 (1ULL << PERF_REG_X86_CX)    |		\
	 (1ULL << PERF_REG_X86_DX)    |		\
	 (1ULL << PERF_REG_X86_DI)    |		\
	 (1ULL << PERF_REG_X86_SI)    |		\
	 (1ULL << PERF_REG_X86_SP)    |		\
	 (1ULL << PERF_REG_X86_BP)    |		\
	 (1ULL << PERF_REG_X86_IP)    |		\
	 (1ULL << PERF_REG_X86_FLAGS) |		\
	 (1ULL << PERF_REG_X86_R8)    |		\
	 (1ULL << PERF_REG_X86_R9)    |		\
	 (1ULL << PERF_REG_X86_R10)   |		\
	 (1ULL << PERF_REG_X86_R11)   |		\
	 (1ULL << PERF_REG_X86_R12)   |		\
	 (1ULL << PERF_REG_X86_R13)   |		\
	 (1ULL << PERF_REG_X86_R14)   |		\
	 (1ULL << PERF_REG_X86_R15))

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED = 0,    /* counter is unused */
	INTEL_EXCL_SHARED = 1,    /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # last events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	void			*ds_pebs_vaddr;
	void			*ds_bts_vaddr;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;
	struct x86_perf_task_context	*last_task_ctx;
	int				last_log_id;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthread
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. With the overlap flag set it rolls back and retries with
 * a different assignment instead of failing outright; since this redo
 * path is bounded, constraints needing the flag should stay rare.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
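
/*
 * Illustrative sketch (not from the model tables): restricting event
 * 0xc0 (INST_RETIRED.ANY_P) to counters 0 and 1 would read
 *
 *	INTEL_EVENT_CONSTRAINT(0xc0, 0x3)
 *
 * i.e. .code = 0xc0, .idxmsk64 = 0x3, .cmask = ARCH_PERFMON_EVENTSEL_EVENT
 * (only the event field is compared) and .weight = HWEIGHT(0x3) = 2.
 */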

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
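
/*
 * Example (as used by the Intel constraint tables): the architectural
 * events pinned to their fixed counters, where fixed counter n is bit
 * 32+n of the index mask:
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)	- INST_RETIRED.ANY
 *	FIXED_EVENT_CONSTRAINT(0x003c, 1)	- CPU_CLK_UNHALTED.CORE
 *	FIXED_EVENT_CONSTRAINT(0x0300, 2)	- CPU_CLK_UNHALTED.REF
 */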

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n, \
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
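
/*
 * Typical iteration (sketch, modelled on intel_pebs_constraints()):
 * scan a model's constraint table until the { .weight = -1 } marker:
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.pebs_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */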

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The arbitration
 * state is kept in the per-core er_account structures above.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
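
/*
 * Sketch of a per-model table (cf. the SNB tables in intel/core.c):
 * the OFFCORE_RESPONSE events 0x01b7/0x01bb carry their extra MSR and
 * the table is terminated by EVENT_EXTRA_END. The valid_mask value is
 * model specific and only illustrative here:
 *
 *	static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 *		INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 *		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 *		EVENT_EXTRA_END
 *	};
 */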

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value

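/*
 * Usage sketch: intel/core.c builds raw config values this way, e.g.
 * the PEBS alias for INST_RETIRED.TOTAL_CYCLES:
 *
 *	X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16)
 */
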
enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	void		(*read)(struct perf_event *event);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	u64		(*limit_period)(struct perf_event *event, u64 l);

	/* PMI handler bits */
	unsigned int	late_ack		:1,
			counter_freezing	:1;
	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct attribute **caps_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	unsigned long	attr_freeze_on_smi;
	struct attribute **attrs;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts			:1,
			bts_active		:1,
			pebs			:1,
			pebs_active		:1,
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;
	unsigned long	large_pebs_flags;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int valid_lbrs;
	int lbr_callstack_users;
	int lbr_stack_state;
	int log_id;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
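
/*
 * Usage sketch: model setup code registers quirks at init time, e.g.
 * intel_pmu_init() does
 *
 *	x86_add_quirk(intel_sandybridge_quirk);
 *
 * and every registered func is run once at the end of PMU init.
 */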

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL		0x10 /* all counters support PEBS */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};
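
/*
 * Usage sketch (cf. intel/core.c): export an event encoding string
 * through sysfs:
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * which becomes /sys/devices/cpu/events/mem-loads.
 */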

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return x86_pmu.lbr_sel_map &&
	       x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event tables, filled in on a per
 * model basis. A generic cache event is translated by looking up its
 * (cache, op, result) triple here; 0 means the combination is not
 * available on this model, -1 that it is not a valid combination.
 */
#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
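
/*
 * Example: with no addr_offset() hook the MSRs are contiguous; on the
 * Intel core PMU (eventsel base MSR_ARCH_PERFMON_EVENTSEL0 == 0x186,
 * counter base MSR_ARCH_PERFMON_PERFCTR0 == 0xc1):
 *
 *	x86_pmu_config_addr(1) == 0x187
 *	x86_pmu_event_addr(1)  == 0xc2
 *
 * AMD Fam15h instead installs an addr_offset() hook because its
 * eventsel/counter MSR pairs are interleaved (spaced 2 apart).
 */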

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}
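
/*
 * Sketch of the common caller: the generic enable path just ORs in the
 * architectural enable bit,
 *
 *	__x86_pmu_enable_event(&event->hw, ARCH_PERFMON_EVENTSEL_ENABLE);
 *
 * while perf_ctr_virt_mask lets virtualized setups force selected bits
 * (e.g. the host-only bit on AMD) to stay cleared.
 */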

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much else but the address.
 *
 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

/*
 * A branch-instructions event with a sample period of 1 (and no freq
 * throttling) can be serviced by BTS instead of a generic counter.
 */
static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return false;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	return hw_event == bts_event && hwc->sample_period == 1;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_auto_reload_read(struct perf_event *event);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */