#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

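/*
 * Performance event hw details:
 */
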
#define INTEL_PMC_MAX_GENERIC 32
#define INTEL_PMC_MAX_FIXED 4
#define INTEL_PMC_IDX_FIXED 32

#define X86_PMC_IDX_MAX 64

#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187

#define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL

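/*
 * Haswell TSX and Ice Lake adaptive-PEBS control bits. The HSW_IN_TX*
 * bits and ICL_EVENTSEL_ADAPTIVE live in the event select MSRs;
 * ICL_FIXED_0_ADAPTIVE is the adaptive-record enable for fixed counter 0
 * in MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
 */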
#define HSW_IN_TX (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE (1ULL << 32)

#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT \
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

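/*
 * AMD L3 cache PMU: slice/core/thread filtering fields in the high bits
 * of the L3 event select.
 */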
#define AMD64_L3_SLICE_SHIFT 48
#define AMD64_L3_SLICE_MASK \
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK \
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT 56
#define AMD64_L3_THREAD_MASK \
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK \
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT 42
#define AMD64_L3_COREID_MASK \
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_EVENT | \
	 ARCH_PERFMON_EVENTSEL_UMASK | \
	 ARCH_PERFMON_EVENTSEL_EDGE | \
	 ARCH_PERFMON_EVENTSEL_INV | \
	 ARCH_PERFMON_EVENTSEL_CMASK)

#define X86_ALL_EVENT_FLAGS \
	(ARCH_PERFMON_EVENTSEL_EDGE | \
	 ARCH_PERFMON_EVENTSEL_INV | \
	 ARCH_PERFMON_EVENTSEL_CMASK | \
	 ARCH_PERFMON_EVENTSEL_ANY | \
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL | \
	 HSW_IN_TX | \
	 HSW_IN_TX_CHECKPOINTED)

#define AMD64_RAW_EVENT_MASK \
	(X86_RAW_EVENT_MASK | \
	 AMD64_EVENTSEL_EVENT)

#define AMD64_RAW_EVENT_MASK_NB \
	(AMD64_EVENTSEL_EVENT | \
	 ARCH_PERFMON_EVENTSEL_UMASK)

#define AMD64_NUM_COUNTERS 4
#define AMD64_NUM_COUNTERS_CORE 6
#define AMD64_NUM_COUNTERS_NB 4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
#define ARCH_PERFMON_EVENTS_COUNT 7

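/*
 * Adaptive PEBS: PEBS_DATA_CFG bits selecting which record groups
 * (memory info, GPRs, XMMs, LBRs) the hardware writes into each record.
 */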
#define PEBS_DATACFG_MEMINFO BIT_ULL(0)
#define PEBS_DATACFG_GP BIT_ULL(1)
#define PEBS_DATACFG_XMMS BIT_ULL(2)
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24
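/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */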
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};

struct x86_pmu_capability {
	int version;
	int num_counters_gp;
	int num_counters_fixed;
	int bit_width_gp;
	int bit_width_fixed;
	unsigned int events_mask;
	int events_mask_len;
};
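/*
 * Fixed-purpose performance events:
 */

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */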
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d
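/*
 * The counts are available in separate MSRs, one per fixed counter:
 */

/* Instr_Retired.Any: */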
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)
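/* CPU_CLK_Unhalted.Core: */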
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)
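/* CPU_CLK_Unhalted.Ref: */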
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
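/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */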
#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF BIT_ULL(61)
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
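/*
 * Adaptive PEBS v4
 */
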
struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];
};

struct pebs_lbr_entry {
	u64 from, to, info;
};

struct pebs_lbr {
	struct pebs_lbr_entry lbr[]; /* variable-length array of LBR records */
};
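/*
 * IBS cpuid feature detection
 */
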
#define IBS_CPUID_FEATURES 0x8000001b
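/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */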
#define IBS_CAPS_AVAIL (1U<<0)
#define IBS_CAPS_FETCHSAM (1U<<1)
#define IBS_CAPS_OPSAM (1U<<2)
#define IBS_CAPS_RDWROPCNT (1U<<3)
#define IBS_CAPS_OPCNT (1U<<4)
#define IBS_CAPS_BRNTRGT (1U<<5)
#define IBS_CAPS_OPCNTEXT (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
#define IBS_CAPS_OPBRNFUSE (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
#define IBS_CAPS_OPDATA4 (1U<<10)

#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
			  | IBS_CAPS_FETCHSAM \
			  | IBS_CAPS_OPSAM)
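/*
 * IBS APIC setup
 */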
#define IBSCTL 0x1cc
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F
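/* IbsFetchCtl MSR bits/masks: */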
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
#define IBS_FETCH_CNT 0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
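/*
 * IbsOpCtl MSR bits/masks. The lower 7 bits of the current count
 * (IBS_OP_CUR_CNT_RAND) are random bits preloaded by hardware and
 * ignored by software.
 */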
#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
#define IBS_OP_MAX_CNT 0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL
#define IBS_RIP_INVALID (1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);
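/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_flags() in irqflags.h
 */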
#define PERF_EFLAGS_EXACT (1UL << 3)
#define PERF_EFLAGS_VM (1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs regs;
	u64 *xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)

#include <asm/stacktrace.h>
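/*
 * Take a minimal snapshot of the caller's register state: only ip, sp,
 * cs and flags are filled in; perf does not consume the remaining
 * pt_regs fields from this path.
 */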
#define perf_arch_fetch_caller_regs(regs, __ip) { \
	(regs)->ip = (__ip); \
	(regs)->sp = (unsigned long)__builtin_frame_address(0); \
	(regs)->cs = __KERNEL_CS; \
	(regs)->flags = 0; \
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */