/*
 * Performance events x86 architecture header
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>

#include <asm/intel_ds.h>
/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE		= -1, /* not used */

	EXTRA_REG_RSP_0		=  0, /* offcore_response_0 */
	EXTRA_REG_RSP_1		=  1, /* offcore_response_1 */
	EXTRA_REG_LBR		=  2, /* lbr_select */
	EXTRA_REG_LDLAT		=  3, /* ld_lat_threshold */
	EXTRA_REG_FE		=  4, /* fe_* */

	EXTRA_REG_MAX		      /* number of entries needed */
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64		code;
	u64		cmask;
	int		weight;
	int		overlap;
	int		flags;
	unsigned int	size;
};

static inline bool constraint_match(struct event_constraint *c, u64 ecode)
{
	return ((ecode & c->cmask) - c->code) <= (u64)c->size;
}
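
/*
 * Illustrative example (not part of the original source): a range
 * constraint built with INTEL_EVENT_CONSTRAINT_RANGE(0xc0, 0xc4, 0xf)
 * stores .code = 0xc0 and .size = 0x04, so for ecode = 0xc2:
 *
 *	((0xc2 & 0xff) - 0xc0) = 0x02 <= 0x04	-> match
 *
 * while ecode = 0xc5 yields 0x05 > 0x04 -> no match. The unsigned
 * compare also rejects any ecode below .code, since the subtraction
 * wraps around to a huge u64 value.
 */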

/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0008 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0010 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0020 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0040 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0080 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0100 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0200 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */
#define PERF_X86_EVENT_PEBS_VIA_PT	0x0800 /* use PT buffer for PEBS */
#define PERF_X86_EVENT_PAIR		0x1000 /* Large Increment per Cycle */
#define PERF_X86_EVENT_LBR_SELECT	0x2000 /* Save/Restore MSR_LBR_SELECT */
#define PERF_X86_EVENT_TOPDOWN		0x4000 /* Count Topdown slots/metrics events */

static inline bool is_topdown_count(struct perf_event *event)
{
	return event->hw.flags & PERF_X86_EVENT_TOPDOWN;
}

static inline bool is_metric_event(struct perf_event *event)
{
	u64 config = event->attr.config;

	return ((config & ARCH_PERFMON_EVENTSEL_EVENT) == 0) &&
		((config & INTEL_ARCH_EVENT_MASK) >= INTEL_TD_METRIC_RETIRING) &&
		((config & INTEL_ARCH_EVENT_MASK) <= INTEL_TD_METRIC_MAX);
}

static inline bool is_slots_event(struct perf_event *event)
{
	return (event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_TD_SLOTS;
}

static inline bool is_topdown_event(struct perf_event *event)
{
	return is_metric_event(event) || is_slots_event(event);
}
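
/*
 * Illustrative example (assuming the Ice Lake encodings
 * INTEL_TD_SLOTS == 0x0400 and INTEL_TD_METRIC_RETIRING == 0x8000):
 * config 0x0400 (event 0x00, umask 0x04) is the slots event, while
 * config 0x8000 (event 0x00, umask 0x80) is the retiring metric; both
 * are therefore topdown events.
 */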

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
#define PEBS_PMI_AFTER_EACH_RECORD BIT_ULL(60)
#define PEBS_OUTPUT_OFFSET	61
#define PEBS_OUTPUT_MASK	(3ull << PEBS_OUTPUT_OFFSET)
#define PEBS_OUTPUT_PT		(1ull << PEBS_OUTPUT_OFFSET)
#define PEBS_VIA_PT_MASK	(PEBS_OUTPUT_PT | PEBS_PMI_AFTER_EACH_RECORD)
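
/*
 * Illustrative check (not from this file): PEBS output to Intel PT is
 * selected when bits 62:61 of the PEBS enable value hold 1, i.e.
 *
 *	(config & PEBS_OUTPUT_MASK) == PEBS_OUTPUT_PT
 *
 * which is what cpu_hw_events::pebs_output tracks below.
 */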

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 *
 */
#define LARGE_PEBS_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
	PERF_SAMPLE_PERIOD)

#define PEBS_GP_REGS			\
	((1ULL << PERF_REG_X86_AX)    | \
	 (1ULL << PERF_REG_X86_BX)    | \
	 (1ULL << PERF_REG_X86_CX)    | \
	 (1ULL << PERF_REG_X86_DX)    | \
	 (1ULL << PERF_REG_X86_DI)    | \
	 (1ULL << PERF_REG_X86_SI)    | \
	 (1ULL << PERF_REG_X86_SP)    | \
	 (1ULL << PERF_REG_X86_BP)    | \
	 (1ULL << PERF_REG_X86_IP)    | \
	 (1ULL << PERF_REG_X86_FLAGS) | \
	 (1ULL << PERF_REG_X86_R8)    | \
	 (1ULL << PERF_REG_X86_R9)    | \
	 (1ULL << PERF_REG_X86_R10)   | \
	 (1ULL << PERF_REG_X86_R11)   | \
	 (1ULL << PERF_REG_X86_R12)   | \
	 (1ULL << PERF_REG_X86_R13)   | \
	 (1ULL << PERF_REG_X86_R14)   | \
	 (1ULL << PERF_REG_X86_R15))

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES		32

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_TIME,
};

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # of last events; never enabled yet */

	int			n_txn;    /* the # of events added in the current transaction */

	int			n_txn_pair;
	int			n_txn_metric;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	void			*ds_pebs_vaddr;
	void			*ds_bts_vaddr;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;
	int			n_pebs_via_pt;
	int			pebs_output;

	/* PEBS_DATA_CFG: requested vs. currently written value */
	u64			pebs_data_cfg;
	u64			active_pebs_data_cfg;
	int			pebs_record_size;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	int				lbr_pebs_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	union {
		struct er_account		*lbr_sel;
		struct er_account		*lbr_ctl;
	};
	u64				br_sel;
	void				*last_task_ctx;
	int				last_log_id;
	int				lbr_select;
	void				*lbr_xsave;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthread
	 */
	struct event_constraint *constraint_list; /* in enable order */
	struct intel_excl_cntrs	*excl_cntrs;
	int excl_thread_id; /* 0 or 1 */

	/*
	 * SKL TSX_FORCE_ABORT shadow
	 */
	u64				tfa_shadow;

	/*
	 * Perf Metrics
	 */
	/* number of accepted metrics events */
	int				n_metric;

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;
	int				n_pair; /* Large increment events */

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) {	\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.size = (e) - (c),		\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
	__EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)

#define EVENT_CONSTRAINT(c, n, m) \
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
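
/*
 * Illustrative example (constraint values are hypothetical): an event
 * 0x48 that may only run on general-purpose counters 0 and 1 would be
 * declared as
 *
 *	INTEL_EVENT_CONSTRAINT(0x48, 0x3)
 *
 * where 0x3 is the counter bitmask and HWEIGHT(0x3) == 2 becomes the
 * constraint's scheduling weight.
 */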

/*
 * The constraint_match() function only works for 'simple' event codes
 * and not for extended (AMD64_EVENTSEL_EVENT) events codes.
 */
#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
	__EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It will fail to select the right counter. To avoid this
 * the overlap flag is set on constraints with overlapping counter
 * masks, which makes the scheduler retry with a different starting
 * counter when an assignment using such a constraint fails. This adds
 * scheduling overhead, so the flag should only be set when strictly
 * needed.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on a range of Event codes
 */
#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
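
/*
 * Illustrative example (assuming the architectural encoding of
 * INST_RETIRED.ANY, event 0xc0 umask 0x00): fixed counter 0 counts
 * retired instructions, so its constraint is
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 *
 * which yields an idxmsk64 of (1ULL << 32), i.e. the first fixed
 * counter in the scheduler's counter index space.
 */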

/*
 * The special metric counters do not actually exist. They are calculated from
 * the combination of the FxCtr3 + MSR_PERF_METRICS.
 *
 * The special metric counters are mapped to a dummy offset for the scheduler.
 * Sharing the same metric between multiple users without multiplexing is not
 * allowed, even though the PERF_METRICS MSR itself is shared.
 */
#define METRIC_EVENT_CONSTRAINT(c, n)					\
	EVENT_CONSTRAINT(c, (1ULL << (INTEL_PMC_IDX_METRIC_BASE + n)),	\
			 INTEL_ARCH_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n)			\
	EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
	__EVENT_CONSTRAINT_RANGE(code, end, n,				\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
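
/*
 * Illustrative usage (hypothetical constraint table, not from this
 * file):
 *
 *	static struct event_constraint my_constraints[] = {
 *		INTEL_EVENT_CONSTRAINT(0x48, 0x3),
 *		FIXED_EVENT_CONSTRAINT(0x00c0, 0),
 *		EVENT_CONSTRAINT_END
 *	};
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, my_constraints)
 *		pr_debug("code=0x%llx weight=%d\n", c->code, c->weight);
 */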

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMU of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * access to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c)		\
	INTEL_UEVENT_EXTRA_REG(c,			\
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff,			\
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
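
/*
 * Illustrative table entry (the mask value here is hypothetical): the
 * offcore response events pair an event/umask with an extra config
 * MSR, e.g.
 *
 *	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0)
 *
 * matches event 0xb7 umask 0x01 and routes the event's attr.config1
 * value into MSR_OFFCORE_RSP_0, validated against the given mask.
 */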

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
		u64	pebs_baseline:1;
		u64	perf_metrics:1;
		u64	pebs_output_pt_available:1;
		u64	anythread_deprecated:1;
	};
	u64	capabilities;
};
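
/*
 * Illustrative: the Intel PMU init code fills this union straight from
 * the capabilities MSR, roughly
 *
 *	rdmsrl(MSR_IA32_PERF_CAPABILITIES, x86_pmu.intel_cap.capabilities);
 *
 * after which individual feature bits can be tested via the bitfields.
 */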

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
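
/*
 * Illustrative example (event/umask are the architectural LLC-miss
 * encoding, not taken from this file):
 *
 *	u64 config = X86_CONFIG(.event = 0x2e, .umask = 0x41,
 *				.usr = 1, .os = 1);
 *
 * builds the raw value 0x43412e, laid out exactly like the contents of
 * a PERFEVTSELx MSR.
 */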

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	void		(*read)(struct perf_event *event);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	u64		(*limit_period)(struct perf_event *event, u64 l);

	/* PMI handler bits */
	unsigned int	late_ack		:1,
			enabled_ack		:1,
			counter_freezing	:1;
	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	const struct attribute_group **attr_update;

	unsigned long	attr_freeze_on_smi;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts			:1,
			bts_active		:1,
			pebs			:1,
			pebs_active		:1,
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1,
			pebs_no_isolation	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	int		max_pebs_events;
	void		(*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	unsigned long	large_pebs_flags;
	u64		rtm_abort_event;

	/*
	 * Intel LBR
	 */
	unsigned int	lbr_tos, lbr_from, lbr_to,
			lbr_info, lbr_nr;	   /* LBR base regs and size */
	union {
		u64	lbr_sel_mask;		   /* LBR_SELECT valid bits */
		u64	lbr_ctl_mask;		   /* LBR_CTL valid bits */
	};
	union {
		const int	*lbr_sel_map;	   /* lbr_select mappings */
		int		*lbr_ctl_map;	   /* LBR_CTL mappings */
	};
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel Architectural LBR CPUID Enumeration
	 */
	unsigned int	lbr_depth_mask:8;
	unsigned int	lbr_deep_c_reset:1;
	unsigned int	lbr_lip:1;
	unsigned int	lbr_cpl:1;
	unsigned int	lbr_filter:1;
	unsigned int	lbr_call_stack:1;
	unsigned int	lbr_mispred:1;
	unsigned int	lbr_timed_lbr:1;
	unsigned int	lbr_br_type:1;

	void		(*lbr_reset)(void);
	void		(*lbr_read)(struct cpu_hw_events *cpuc);
	void		(*lbr_save)(void *ctx);
	void		(*lbr_restore)(void *ctx);

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * Intel perf metrics
	 */
	u64		(*update_topdown_event)(struct perf_event *event);
	int		(*set_topdown_event_period)(struct perf_event *event);

	/*
	 * perf task context (i.e. struct perf_event_context::task_ctx_data)
	 * switch helper to bridge calls from perf/core to perf/x86.
	 * See struct x86_perf_task_context in arch/x86/events/intel/lbr.c.
	 */
	void		(*swap_task_ctx)(struct perf_event_context *prev,
					 struct perf_event_context *next);

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;
	u64		perf_ctr_pair_en;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period) (struct perf_event *event, u64 period);

	int (*aux_output_match) (struct perf_event *event);
};

struct x86_perf_task_context_opt {
	int lbr_callstack_users;
	int lbr_stack_state;
	int log_id;
};

struct x86_perf_task_context {
	u64 lbr_sel;
	int tos;
	int valid_lbrs;
	struct x86_perf_task_context_opt opt;
	struct lbr_entry lbr[MAX_LBR_ENTRIES];
};

struct x86_perf_task_context_arch_lbr {
	struct x86_perf_task_context_opt opt;
	struct lbr_entry entries[];
};

/*
 * Add padding to guarantee the 64-byte alignment of the state buffer.
 *
 * The structure is dynamically allocated. The size of the LBR state may
 * vary based on the number of LBR registers.
 *
 * Do not put anything after the LBR state.
 */
struct x86_perf_task_context_arch_lbr_xsave {
	struct x86_perf_task_context_opt		opt;

	union {
		struct xregs_state			xsave;
		struct {
			struct fxregs_state		i387;
			struct xstate_header		header;
			struct arch_lbr_state		lbr;
		} __attribute__ ((packed, aligned (XSAVE_ALIGNMENT)));
	};
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL		0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA		0x20 /* deal with TSX force abort */
#define PMU_FL_PAIR		0x40 /* merge counters for large incr. events */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}

struct pmu *x86_get_pmu(void);
extern struct x86_pmu x86_pmu __read_mostly;

static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{
	if (static_cpu_has(X86_FEATURE_ARCH_LBR))
		return &((struct x86_perf_task_context_arch_lbr *)ctx)->opt;

	return &((struct x86_perf_task_context *)ctx)->opt;
}
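
/*
 * Illustrative use: the LBR code accesses the format-independent part
 * of a saved task context this way, e.g.
 *
 *	task_context_opt(ctx)->lbr_callstack_users++;
 *
 * which works for both the legacy and the architectural-LBR layout.
 */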

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return x86_pmu.lbr_sel_map &&
	       x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis.
 */
#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
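
/*
 * Illustrative (architectural MSR layout, no addr_offset hook): with
 * x86_pmu.eventsel == MSR_ARCH_PERFMON_EVENTSEL0 (0x186),
 * x86_pmu_config_addr(2) returns 0x188, the event-select MSR of
 * counter 2. PMUs with a different MSR spacing (e.g. AMD fam15h+)
 * supply an addr_offset() hook instead.
 */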

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline bool is_counter_pair(struct hw_perf_event *hwc)
{
	return hwc->flags & PERF_X86_EVENT_PAIR;
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);

	/*
	 * Add enabled Merge event on next counter
	 * if large increment event being enabled on this counter
	 */
	if (is_counter_pair(hwc))
		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), x86_pmu.perf_ctr_pair_en);

	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);

	if (is_counter_pair(hwc))
		wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	return (long)ip < 0;
#endif
}
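
/*
 * Illustrative: on 64-bit, kernel text such as 0xffffffff81000000 has
 * the sign bit set, so (long)ip < 0 is true, while a user address like
 * 0x00007f0012345678 is positive and reports false.
 */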

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much else but the segment base address. That presumably implies a user-mode
 * pointer, so we need to try and avoid including kernel addresses in the
 * sample.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

static inline int is_pebs_pt(struct perf_event *event)
{
	return !!(event->hw.flags & PERF_X86_EVENT_PEBS_VIA_PT);
}

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return false;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	return hw_event == bts_event && period == 1;
}

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return intel_pmu_has_bts_period(event, hwc->sample_period);
}
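
/*
 * Illustrative: a hardware branch-instructions event opened with a
 * fixed sample_period of 1 (and !attr.freq) matches the criteria
 * above, which is how the driver decides to steer an event to the
 * Branch Trace Store instead of a generic counter.
 */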

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

void release_lbr_buffers(void);

extern struct event_constraint bts_constraint;
extern struct event_constraint vlbr_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

extern struct event_constraint intel_icl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_auto_reload_read(struct perf_event *event);

void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);

void intel_ds_init(void);

void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
				 struct perf_event_context *next);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_reset_32(void);

void intel_pmu_lbr_reset_64(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);

void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc);

void intel_pmu_lbr_save(void *ctx);

void intel_pmu_lbr_restore(void *ctx);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_arch_lbr_init(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline void release_lbr_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
	return 0;
}

static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */

#if ((defined CONFIG_CPU_SUP_CENTAUR) || (defined CONFIG_CPU_SUP_ZHAOXIN))
int zhaoxin_pmu_init(void);
#else
static inline int zhaoxin_pmu_init(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_CENTAUR || CONFIG_CPU_SUP_ZHAOXIN */