/*
 * Performance events x86 architecture header
 */
#include <linux/perf_event.h>

#include <asm/intel_ds.h>

/* To enable MSR tracing please use the generic trace points. */
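
/*
 * Extra registers that can be attached to an event over and above the
 * event selector itself: the two offcore-response MSRs, LBR_SELECT,
 * the PEBS load-latency threshold MSR and the frontend MSR.
 * EXTRA_REG_MAX sizes the er_account array in struct intel_shared_regs
 * below.
 */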
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,	/* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64		code;
	u64		cmask;
	int		weight;
	int		overlap;
	int		flags;
	unsigned int	size;
};

static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}
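
/*
 * Illustrative sketch (not part of the original header): because the
 * subtraction above is unsigned, constraint_match() accepts exactly
 * the inclusive event-code range [code, code + size]; any smaller
 * ecode wraps around to a huge value and fails the comparison.
 *
 *	struct event_constraint c = {
 *		.code  = 0xc0,
 *		.cmask = ARCH_PERFMON_EVENTSEL_EVENT,
 *		.size  = 0x4,
 *	};
 *	constraint_match(&c, 0xc2);	// true:  0xc2 - 0xc0 = 2 <= 4
 *	constraint_match(&c, 0xd0);	// false: 0xd0 - 0xc0 = 0x10 > 4
 *	constraint_match(&c, 0xb0);	// false: wraps to a huge value
 */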

/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0008 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0040 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0080 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 */
#define LARGE_PEBS_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
	PERF_SAMPLE_PERIOD)

#define PEBS_GP_REGS			\
	((1ULL << PERF_REG_X86_AX)    | \
	 (1ULL << PERF_REG_X86_BX)    | \
	 (1ULL << PERF_REG_X86_CX)    | \
	 (1ULL << PERF_REG_X86_DX)    | \
	 (1ULL << PERF_REG_X86_DI)    | \
	 (1ULL << PERF_REG_X86_SI)    | \
	 (1ULL << PERF_REG_X86_SP)    | \
	 (1ULL << PERF_REG_X86_BP)    | \
	 (1ULL << PERF_REG_X86_IP)    | \
	 (1ULL << PERF_REG_X86_FLAGS) | \
	 (1ULL << PERF_REG_X86_R8)    | \
	 (1ULL << PERF_REG_X86_R9)    | \
	 (1ULL << PERF_REG_X86_R10)   | \
	 (1ULL << PERF_REG_X86_R11)   | \
	 (1ULL << PERF_REG_X86_R12)   | \
	 (1ULL << PERF_REG_X86_R13)   | \
	 (1ULL << PERF_REG_X86_R14)   | \
	 (1ULL << PERF_REG_X86_R15))

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account regs[EXTRA_REG_MAX];
	int		  refcnt;	/* per-core: #HT threads */
	unsigned	  core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # of events in the below arrays;
					     added in the current transaction */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	void			*ds_pebs_vaddr;
	void			*ds_bts_vaddr;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;

	/* Current super set of events hardware configuration */
	u64			pebs_data_cfg;
	u64			active_pebs_data_cfg;
	int			pebs_record_size;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	int				lbr_pebs_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;
	struct x86_perf_task_context	*last_task_ctx;
	int				last_log_id;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs	*excl_cntrs; /* per-core exclusive counters */
	int excl_thread_id; /* 0 or 1 */

	/*
	 * SKL TSX_FORCE_ABORT shadow
	 */
	u64				tfa_shadow;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.size = (e) - (c),		\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) events codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
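
/*
 * Worked example (illustrative): FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 * builds a constraint whose idxmsk64 is 1ULL << (32 + 0), i.e. the
 * first fixed counter, since fixed counters start at index 32
 * (INTEL_PMC_IDX_FIXED) in the generic counter index space, and whose
 * cmask demands an exact match of the full raw event including the
 * filter bits listed above.
 */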

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
	__EVENT_CONSTRAINT_RANGE(code, end, n,		\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
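
/*
 * Usage sketch (illustrative; "foo_event_constraints" is a made-up
 * name): model code defines a table terminated by EVENT_CONSTRAINT_END
 * and scheduling code walks it with for_each_event_constraint():
 *
 *	static struct event_constraint foo_event_constraints[] = {
 *		FIXED_EVENT_CONSTRAINT(0x00c0, 0),  // INST_RETIRED.ANY, fixed counter 0
 *		INTEL_EVENT_CONSTRAINT(0xcd, 0x3),  // event 0xcd, GP counters 0-1 only
 *		EVENT_CONSTRAINT_END
 *	};
 *
 *	struct event_constraint *c;
 *	for_each_event_constraint(c, foo_event_constraints) {
 *		if (constraint_match(c, event->hw.config))
 *			return c;
 *	}
 */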

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
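
/*
 * Usage sketch (illustrative; the table name and valid_mask value are
 * made up): extra_reg tables are terminated by EVENT_EXTRA_END, e.g.
 *
 *	static struct extra_reg foo_extra_regs[] = {
 *		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffULL, RSP_0),
 *		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 *		EVENT_EXTRA_END
 *	};
 *
 * The first entry routes the offcore-response event 0x01b7 to
 * MSR_OFFCORE_RSP_0; the second attaches the PEBS load-latency
 * threshold MSR to event 0x01cd.
 */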

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
		u64	pebs_baseline:1;
	};
	u64	capabilities;
};
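
/*
 * Illustrative only: PMU init code fills this union from
 * MSR_IA32_PERF_CAPABILITIES and the rest of the code tests individual
 * feature bits, e.g.
 *
 *	if (x86_pmu.intel_cap.pebs_baseline)
 *		... // adaptive PEBS (pebs_data_cfg) is available
 */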

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
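
/*
 * Illustrative use of X86_CONFIG (the event values here are examples,
 * not taken from this file): build a raw event-select word with
 * designated initializers,
 *
 *	u64 config = X86_CONFIG(.event = 0xd0, .umask = 0x81,
 *				.usr = 1, .os = 1);
 *
 * which is equivalent to open-coding the event/umask/USR/OS bit
 * positions of IA32_PERFEVTSELx.
 */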

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	void		(*read)(struct perf_event *event);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	u64		(*limit_period)(struct perf_event *event, u64 l);

	/* PMI handler bits */
	unsigned int	late_ack		:1,
			counter_freezing	:1;
	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct attribute **caps_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	unsigned long	attr_freeze_on_smi;
	struct attribute **attrs;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64		intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts			:1,
			bts_active		:1,
			pebs			:1,
			pebs_active		:1,
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1,
			pebs_no_isolation	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	int		max_pebs_events;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	unsigned long	large_pebs_flags;
	u64		rtm_abort_event;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period) (struct perf_event *event, u64 period);
};
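
/*
 * Illustrative only: there is a single live instance of this structure
 * (x86_pmu, declared below). Vendor init code copies a template into
 * it and then applies model-specific fixups, roughly:
 *
 *	x86_pmu = intel_pmu;	// or core_pmu / amd_pmu
 *	x86_pmu.lbr_nr = 32;	// example model fixup
 *
 * after which the core code dispatches through the callbacks above.
 */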

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int valid_lbrs;
	int lbr_callstack_users;
	int lbr_stack_state;
	int log_id;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
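
/*
 * Illustrative only ("foo_quirk" is a made-up name): quirks are
 * registered from __init code and each registered ->func() is run once
 * when the PMU is initialized:
 *
 *	static __init void foo_quirk(void)
 *	{
 *		// patch x86_pmu fields for an errata-affected model
 *	}
 *	...
 *	x86_add_quirk(foo_quirk);
 */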

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA		0x20 /* deal with TSX force abort */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}

struct pmu *x86_get_pmu(void);
extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return x86_pmu.lbr_sel_map &&
	       x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A hw_event hit in the hw cache
 * results in the use of a hardware cache event (hw_event :=
 * 'cache event') and a hardware cache miss results in a
 * 'cache miss' hw_event.
 */
#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
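
/*
 * Illustrative only: on a PMU without an ->addr_offset() helper,
 * x86_pmu_config_addr(2) is simply x86_pmu.eventsel + 2, i.e. the
 * third event-select MSR, and x86_pmu_event_addr(2) names the matching
 * counter MSR. PMUs whose MSRs are not contiguous (AMD Fam15h, for
 * example) install an addr_offset() callback to map the index instead.
 */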

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}
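
/*
 * Sketch of the calling convention (illustrative): vendor ->enable()
 * implementations pass the architectural enable bit, e.g.
 *
 *	__x86_pmu_enable_event(&event->hw, ARCH_PERFMON_EVENTSEL_ENABLE);
 *
 * perf_ctr_virt_mask holds control bits that must stay cleared on this
 * host (used by AMD virtualization support), hence the final masking.
 */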

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect eflags.VM, or we simply use the kernel segment.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}
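
/*
 * Illustrative only: sample-drain code that has nothing but a linear
 * instruction pointer (a PEBS or IBS record, say) reports it with
 *
 *	set_linear_ip(&regs, sample_ip);
 *
 * so that the faked cs/eflags stay consistent with the new ->ip.
 */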

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return false;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	return hw_event == bts_event && period == 1;
}

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return intel_pmu_has_bts_period(event, hwc->sample_period);
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

extern struct event_constraint intel_icl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_auto_reload_read(struct perf_event *event);

void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
	return 0;
}

static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */