/*
 * Performance events x86 architecture header.
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/*
 * The extra registers (offcore response, LBR select, load latency
 * threshold) are shared either between hyperthreads or among all
 * events on a core, depending on the microarchitecture.  Since there
 * are only a few of them, their bookkeeping slots can be
 * pre-allocated in the per-cpu/per-core reg tables.
 */
enum extra_reg_type {
	EXTRA_REG_NONE  = -1,	/* not used */

	EXTRA_REG_RSP_0 = 0,	/* offcore_response_0 */
	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
	EXTRA_REG_LBR   = 2,	/* lbr_select */
	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
	EXTRA_REG_FE    = 4,	/* fe_* */

	EXTRA_REG_MAX		/* number of entries needed */
};
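
/*
 * An event_constraint restricts which hardware counters an event may
 * occupy: idxmsk is the bitmask of usable counter indices, code/cmask
 * select which raw config values the constraint applies to, weight
 * caches the number of bits set in idxmsk, and overlap/flags modify
 * how the scheduler treats it (see the constraint macros below).
 */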
struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
	int	overlap;
	int	flags;
};

/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT	0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST		0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW	0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED	0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW	0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW	0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL		0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC		0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED	0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT	0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD	0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING	0x0800 /* use freerunning PEBS */

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS		8
#define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 */
#define PEBS_FREERUNNING_FLAGS \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
	PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
	PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR)
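
/*
 * A PEBS event may run in the large/free-running buffer mode (no PMI
 * per sample) only if it requests nothing outside these sample flags,
 * i.e. (event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS) == 0.
 */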

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

/*
 * Per register state.
 */
struct er_account {
	raw_spinlock_t	lock;	/* per-core: protect structure */
	u64		config;	/* extra MSR config */
	u64		reg;	/* extra MSR number */
	atomic_t	ref;	/* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED    = 0, /* counter is unused */
	INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
	INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];
	bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
	raw_spinlock_t	lock;

	struct intel_excl_states states[2];

	union {
		u16	has_exclusive[2];
		u32	exclusive_present;
	};

	int		refcnt;		/* per-core: #HT threads */
	unsigned	core_id;	/* per-core: core id */
};

#define MAX_LBR_ENTRIES		32

enum {
	X86_PERF_KFREE_SHARED = 0,
	X86_PERF_KFREE_EXCL   = 1,
	X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events; /* the # of events in the below arrays */
	int			n_added;  /* the # last events in the below arrays;
					     they've never been enabled yet */
	int			n_txn;    /* the # of txn events in the below arrays */
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];

	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct event_constraint	*event_constraint[X86_PMC_IDX_MAX];

	int			n_excl; /* the number of exclusive events */

	unsigned int		txn_flags;
	int			is_fake;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;
	int			n_pebs;
	int			n_large_pebs;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];
	struct er_account		*lbr_sel;
	u64				br_sel;

	/*
	 * Intel host/guest exclude bits
	 */
	u64				intel_ctrl_guest_mask;
	u64				intel_ctrl_host_mask;
	struct perf_guest_switch_msr	guest_switch_msrs[X86_PMC_IDX_MAX];

	/*
	 * Intel checkpoint mask
	 */
	u64				intel_cp_status;

	/*
	 * manage shared (per-core, per-cpu) registers
	 * used on Intel NHM/WSM/SNB
	 */
	struct intel_shared_regs	*shared_regs;
	/*
	 * manage exclusive counter access between hyperthreads
	 */
	struct event_constraint		*constraint_list; /* in enable order */
	struct intel_excl_cntrs		*excl_cntrs;
	int				excl_thread_id; /* 0 or 1 */

	/*
	 * AMD specific bits
	 */
	struct amd_nb			*amd_nb;
	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
	u64				perf_ctr_virt_mask;

	void				*kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
	.overlap = (o),			\
	.flags = f,			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
			   0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events adequately or exit
 * early.  The overlap flag hints the scheduler to retry such events
 * with a different counter assignment.  Since redoing the scheduling
 * is expensive, keep constraints with overlapping counter masks rare.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
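
/*
 * For example, the Intel PMU setup code pins the architectural
 * INST_RETIRED.ANY event (code 0x00c0) to fixed counter 0, whose
 * counter-bitmask index is 32 + 0:
 *
 *	FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 */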

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)	\
	__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			   HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)	\
	EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, \
			  PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n,			\
			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight != -1; (e)++)
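
/*
 * Typical use: walk a model's constraint table looking for the first
 * constraint whose code/cmask matches the event, essentially what the
 * generic x86_get_event_constraints() does:
 *
 *	struct event_constraint *c;
 *
 *	for_each_event_constraint(c, x86_pmu.event_constraints) {
 *		if ((event->hw.config & c->cmask) == c->code)
 *			return c;
 *	}
 */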

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
	unsigned int		event;
	unsigned int		msr;
	u64			config_mask;
	u64			valid_mask;
	int			idx;  /* per_xxx->regs[] reg index */
	bool			extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
	.event = (e),			\
	.msr = (ms),			\
	.config_mask = (m),		\
	.valid_mask = (vm),		\
	.idx = EXTRA_REG_##i,		\
	.extra_msr_access = true,	\
	}

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
	EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
			ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
	INTEL_UEVENT_EXTRA_REG(c, \
			       MSR_PEBS_LD_LAT_THRESHOLD, \
			       0xffff, \
			       LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

union perf_capabilities {
	struct {
		u64	lbr_format:6;
		u64	pebs_trap:1;
		u64	pebs_arch_reg:1;
		u64	pebs_format:4;
		u64	smm_freeze:1;
		/*
		 * PMU supports separate counter range for writing
		 * values > 32bit.
		 */
		u64	full_width_write:1;
	};
	u64	capabilities;
};

struct x86_pmu_quirk {
	struct x86_pmu_quirk *next;
	void (*func)(void);
};

union x86_pmu_config {
	struct {
		u64 event:8,
		    umask:8,
		    usr:1,
		    os:1,
		    edge:1,
		    pc:1,
		    interrupt:1,
		    __reserved1:1,
		    en:1,
		    inv:1,
		    cmask:8,
		    event2:4,
		    __reserved2:4,
		    go:1,
		    ho:1;
	} bits;
	u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
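
/*
 * X86_CONFIG() assembles a raw config value from named PERFEVTSEL
 * bitfields; e.g. the architectural instructions-retired event
 * (event 0xc0, umask 0x00):
 *
 *	u64 config = X86_CONFIG(.event = 0xc0, .umask = 0x00);
 */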

enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_bts,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	void		(*add)(struct perf_event *);
	void		(*del)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	int		(*addr_offset)(int index, bool eventsel);
	int		(*rdpmc_index)(int index);
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	union {
			unsigned long events_maskl;
			unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
	};
	int		events_mask_len;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 int idx,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*start_scheduling)(struct cpu_hw_events *cpuc);

	void		(*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

	void		(*stop_scheduling)(struct cpu_hw_events *cpuc);

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	bool		late_ack;
	unsigned	(*limit_period)(struct perf_event *event, unsigned l);

	/*
	 * sysfs attrs
	 */
	int		attr_rdpmc_broken;
	int		attr_rdpmc;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct attribute **caps_attrs;

	ssize_t		(*events_sysfs_show)(char *page, u64 config);
	struct attribute **cpu_events;

	unsigned long	attr_freeze_on_smi;
	struct attribute **attrs;

	/*
	 * CPU Hotplug hooks
	 */
	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	void		(*check_microcode)(void);
	void		(*sched_task)(struct perf_event_context *ctx,
				      bool sched_in);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	unsigned int	bts			:1,
			bts_active		:1,
			pebs			:1,
			pebs_active		:1,
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;
	void		(*pebs_aliases)(struct perf_event *event);
	int		max_pebs_events;
	unsigned long	free_running_flags;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs */
	int		lbr_nr;			   /* hardware stack size */
	u64		lbr_sel_mask;		   /* LBR_SELECT valid bits */
	const int	*lbr_sel_map;		   /* lbr_select mappings */
	bool		lbr_double_abort;	   /* duplicated lbr aborts */
	bool		lbr_pt_coexist;		   /* (LBR|BTS) may coexist with PT */

	/*
	 * Intel PT/LBR/BTS are exclusive
	 */
	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

	/*
	 * AMD bits
	 */
	unsigned int	amd_nb_constraints : 1;

	/*
	 * Extra registers for events
	 */
	struct extra_reg *extra_regs;
	unsigned int flags;

	/*
	 * Intel host/guest support (KVM)
	 */
	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
	int tos;
	int lbr_callstack_users;
	int lbr_stack_state;
};

#define x86_add_quirk(func_)						\
do {									\
	static struct x86_pmu_quirk __quirk __initdata = {		\
		.func = func_,						\
	};								\
	__quirk.next = x86_pmu.quirks;					\
	x86_pmu.quirks = &__quirk;					\
} while (0)
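
/*
 * Quirks are one-shot fixups run once at PMU init time, e.g. as the
 * Intel init code in events/intel/core.c does:
 *
 *	x86_add_quirk(intel_clovertown_quirk);
 */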

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING	0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1	0x2 /* has 2 equivalent offcore_rsp regs */
#define PMU_FL_EXCL_CNTRS	0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED	0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)						\
static struct perf_pmu_events_attr EVENT_VAR(_id) = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= PERF_COUNT_HW_##_id,				\
	.event_str	= NULL,						\
};

#define EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {			\
	.attr		= __ATTR(_name, 0444, events_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= str,						\
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
static struct perf_pmu_events_ht_attr event_attr_##v = {		\
	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
	.id		= 0,						\
	.event_str_noht	= noht,						\
	.event_str_ht	= ht,						\
}
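
/*
 * Example: export an event alias whose encoding is given as a string,
 * as the Intel code does for memory loads:
 *
 *	EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 */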

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
	return x86_pmu.lbr_sel_map &&
		x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related event tables, filled in on a
 * per-model basis.  The [cache][op][result] indices follow the
 * generic PERF_TYPE_HW_CACHE encoding; a zero entry means the
 * combination is not supported, -1 means it is invalid on this model.
 */
#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
				   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
				  x86_pmu.addr_offset(index, false) : index);
}
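
/*
 * The two helpers above yield the MSR address of counter @index's
 * event-select and counter register.  PMUs with a contiguous MSR
 * layout just add the index to the base; PMUs with interleaved banks
 * (e.g. AMD Fam15h) provide an addr_offset() callback instead.
 */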

static inline int x86_pmu_rdpmc_index(int index)
{
	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);
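
/*
 * Program an event's config MSR with its enable bit set.  Any extra
 * shared register (e.g. offcore response) is written first, and
 * perf_ctr_virt_mask strips the Host-Only/Guest-Only bits when AMD
 * virtualization support has reserved them.
 */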
static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

	if (hwc->extra_reg.reg)
		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
	return ip > PAGE_OFFSET;
#else
	/* On 64-bit, kernel addresses occupy the upper half, so the sign bit is set. */
	return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the
 * reported IP into full context. Specifically segment registers are
 * typically not supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the
 * CS and vm86 mode using the known zero-based code segment and 'fix up'
 * the registers to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address,
 * nothing much else, but the segment base address is supplied in
 * individual cases.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
	if (regs->flags & X86_VM_MASK)
		regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

/*
 * BTS can stand in for a branch-instructions event only when every
 * single taken branch is sampled: a fixed period of exactly 1.
 */
static inline bool intel_pmu_has_bts(struct perf_event *event)
{
	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !event->attr.freq && event->hw.sample_period == 1)
		return true;

	return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			  struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];
struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
	return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
	return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */