/*
 * Performance events x86 architecture header
 */

#include <linux/perf_event.h>

/* Debugging aid: when enabled, trace every wrmsrl() with its MSR and value. */
#if 0
#undef wrmsrl
#define wrmsrl(msr, val)						\
do {									\
	unsigned int _msr = (msr);					\
	u64 _val = (val);						\
	trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),	\
		     (unsigned long long)(_val));			\
	native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));	\
} while (0)
#endif

/*
 * Extra registers that some events need beyond the event-select MSR:
 * offcore response, LBR select, load-latency threshold and frontend.
 * There are only a few of them, so their slots are pre-allocated in
 * the per-CPU, per-core shared register tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,	/* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};

/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING	0x0800 /* use freerunning PEBS */

struct amd_nb {
	int nb_id;	/* NorthBridge id */
	int refcnt;	/* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 */
#define PEBS_FREERUNNING_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # of last events; never yet enabled */

	int			n_txn;    /* the # of events added in the current transaction */

	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint		*constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;	  /* per-core exclusive counters */
	int				excl_thread_id;	  /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
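
/*
 * Usage sketch (values hypothetical): constrain event code 0x3c to
 * counters 0 and 1. The counter bitmask 0x3 selects PMCs 0-1, and
 * HWEIGHT(0x3) == 2 becomes the constraint's scheduling weight:
 *
 *	static struct event_constraint example_constraint =
 *		INTEL_EVENT_CONSTRAINT(0x3c, 0x3);
 */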

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
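
/*
 * Example: pin the INST_RETIRED.ANY pseudo event (code 0x00c0) to fixed
 * counter 0; the resulting idxmsk bit is 1ULL << 32, i.e. the first
 * fixed-counter index:
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 */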

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on the Event code + UMask, matching all event flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
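
/*
 * Typical lookup loop (sketch, mirroring x86_get_event_constraints()):
 * walk a constraint table until the end marker, returning the first
 * entry whose masked event code matches:
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */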

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
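
/*
 * Illustrative table (register/values as on Nehalem-class CPUs): the
 * offcore response event 0x01b7 needs MSR_OFFCORE_RSP_0, and every
 * table must be terminated with EVENT_EXTRA_END:
 *
 *	static struct extra_reg example_extra_regs[] __initconst = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0,
 *				       0xffff, RSP_0),
 *		EVENT_EXTRA_END
 *	};
 */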

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
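
/*
 * Example: build a raw config for event 0xc0 counted with an inverted
 * cmask of 16, the kind of encoding the model-specific PEBS alias code
 * uses to rewrite a cycles event:
 *
 *	X86_CONFIG(.event=0xc0, .inv=1, .cmask=16)
 */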

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	unsigned	(*limit_period)(struct perf_event *event, unsigned l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts		:1,
			bts_active	:1,
			pebs		:1,
			pebs_active	:1,
			pebs_broken	:1;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;
	unsigned long	free_running_flags;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
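
/*
 * Usage sketch (hypothetical quirk function): register a one-shot
 * fixup from a PMU init routine; the quirk list is walked once during
 * core initialization:
 *
 *	static __init void example_quirk(void)
 *	{
 *		x86_pmu.pebs_broken = 1;
 *	}
 *	...
 *	x86_add_quirk(example_quirk);
 */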

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};
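
/*
 * Example (as used by Intel model-specific code): publish a named event
 * string, readable under /sys/bus/event_source/devices/cpu/events/:
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 */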

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return	x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hardware-cache event tables, filled in on a per-model
 * basis: [cache][operation][result] -> raw event id.
 */
#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
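
/*
 * Example: with a NULL addr_offset (the common case), counter 2's
 * registers are simply x86_pmu.eventsel + 2 and x86_pmu.perfctr + 2.
 * PMUs whose counter MSRs are not contiguous (e.g. AMD Fam15h, where
 * they are interleaved) supply an addr_offset() hook instead.
 */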

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	/* On 64-bit, kernel addresses have the sign bit set. */
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the
 * reported IP into full context; segment registers in particular are
 * typically not supplied. Assuming the address is a linear address,
 * fake up CS (and vm86 mode) so that kernel/user checks against the
 * regs keep working.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	/* BTS takes branch events sampled at a fixed period of 1 */
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */