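/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
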
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>

#include "../perf_event.h"

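/*
 * Intel PerfMon, used on Core and later.
 */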
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300,
};

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2),
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),
	INTEL_EVENT_CONSTRAINT(0x19, 0x2),
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),
	INTEL_EVENT_CONSTRAINT(0x11, 0x2),
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),
	INTEL_EVENT_CONSTRAINT(0x18, 0x1),
	INTEL_EVENT_CONSTRAINT(0x19, 0x2),
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1),
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1),
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_EVENT_CONSTRAINT(0x40, 0x3),
	INTEL_EVENT_CONSTRAINT(0x41, 0x3),
	INTEL_EVENT_CONSTRAINT(0x42, 0x3),
	INTEL_EVENT_CONSTRAINT(0x43, 0x3),
	INTEL_EVENT_CONSTRAINT(0x48, 0x3),
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3),
	INTEL_EVENT_CONSTRAINT(0x51, 0x3),
	INTEL_EVENT_CONSTRAINT(0x63, 0x3),
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
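	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */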
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_EVENT_CONSTRAINT(0x51, 0x3),
	INTEL_EVENT_CONSTRAINT(0x60, 0x1),
	INTEL_EVENT_CONSTRAINT(0x63, 0x3),
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4),
	INTEL_EVENT_CONSTRAINT(0x48, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4),
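	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */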
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),
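	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */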
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
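	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */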
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),
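	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */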
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
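	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */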
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
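	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */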
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
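	/*
	 * The low 8 bits of MSR_PEBS_FRONTEND are not one continuous field;
	 * some bits #GP when written, hence the 0x7fff17 mask below.
	 */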
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_icl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	FIXED_EVENT_CONSTRAINT(0x0400, 3),
	INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
	INTEL_EVENT_CONSTRAINT(0x32, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),
	INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),
	INTEL_EVENT_CONSTRAINT(0xa3, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

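/*
 * topdown events for Intel Core CPUs.
 *
 * The events are all in slots, which is a free slot in a 4 wide
 * pipeline. Some events are already reported in slots, for cycle
 * events we multiply by the pipeline width (4).
 *
 * With Hyper Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the average case the metric is multiplied by the number of threads
 * used (HT is on: 2), which is why the HT variants below carry any=1 and
 * a halved .scale.
 */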
EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0",
	"event=0x3c,umask=0x0,any=1");
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1");
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2");
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1");
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1",
	"event=0xd,umask=0x3,cmask=1,any=1");
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");

static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};

static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
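	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */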
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),
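	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */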
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

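/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */
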
#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)

static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS)   ] = 0x151,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x283,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS)   ] = 0x1b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS)   ] = 0x1b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS)   ] = 0xe08,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS)   ] = 0xe49,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085,
		[ C(RESULT_MISS)   ] = 0xe85,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,
		[ C(RESULT_MISS)   ] = 0xc5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS)   ] = 0x1b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS)   ] = 0x1b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS|SKL_ANY_SNOOP|
				       SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0,
		[ C(RESULT_MISS)   ] = 0x0151,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0,
		[ C(RESULT_MISS)   ] = 0x0851,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS)   ] = 0x0108,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS)   ] = 0x0149,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085,
		[ C(RESULT_MISS)   ] = 0x0185,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS)   ] = 0x00c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

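/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */
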
#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)

static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS)   ] = 0x151,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS)   ] = 0x1b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS)   ] = 0x1b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS)   ] = 0x108,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS)   ] = 0x149,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085,
		[ C(RESULT_MISS)   ] = 0x185,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,
		[ C(RESULT_MISS)   ] = 0xc5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS)   ] = 0x1b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS)   ] = 0x1b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,
		[ C(RESULT_MISS)   ] = 0x0151,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,
		[ C(RESULT_MISS)   ] = 0x0251,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e,
		[ C(RESULT_MISS)   ] = 0x024e,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,
		[ C(RESULT_MISS)   ] = 0x0280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
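	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */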
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,
		[ C(RESULT_MISS)   ] = 0x0108,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,
		[ C(RESULT_MISS)   ] = 0x010c,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0,
		[ C(RESULT_MISS)   ] = 0x0185,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS)   ] = 0x03e8,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

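/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */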
#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)

#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,
		[ C(RESULT_MISS)   ] = 0x0151,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,
		[ C(RESULT_MISS)   ] = 0x0251,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e,
		[ C(RESULT_MISS)   ] = 0x024e,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,
		[ C(RESULT_MISS)   ] = 0x0280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
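	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */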
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,
		[ C(RESULT_MISS)   ] = 0x0108,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,
		[ C(RESULT_MISS)   ] = 0x010c,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0,
		[ C(RESULT_MISS)   ] = 0x20c8,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS)   ] = 0x03e8,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,
		[ C(RESULT_MISS)   ] = 0x0140,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,
		[ C(RESULT_MISS)   ] = 0x0141,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080,
		[ C(RESULT_MISS)   ] = 0x0081,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29,
		[ C(RESULT_MISS)   ] = 0x4129,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A,
		[ C(RESULT_MISS)   ] = 0x412A,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,
		[ C(RESULT_MISS)   ] = 0x0208,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,
		[ C(RESULT_MISS)   ] = 0x0808,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,
		[ C(RESULT_MISS)   ] = 0x1282,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS)   ] = 0x00c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,
		[ C(RESULT_MISS)   ] = 0x0280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29,
		[ C(RESULT_MISS)   ] = 0x4129,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A,
		[ C(RESULT_MISS)   ] = 0x412A,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140,
		[ C(RESULT_MISS)   ] = 0x0508,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240,
		[ C(RESULT_MISS)   ] = 0x0608,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,
		[ C(RESULT_MISS)   ] = 0x0282,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS)   ] = 0x00c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");

EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
	"event=0xca,umask=0x50");
EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");

EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
	"event=0xc2,umask=0x10");

EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
	"event=0xc2,umask=0x10");

static struct attribute *slm_events_attrs[] = {
	EVENT_PTR(td_total_slots_slm),
	EVENT_PTR(td_total_slots_scale_slm),
	EVENT_PTR(td_fetch_bubbles_slm),
	EVENT_PTR(td_fetch_bubbles_scale_slm),
	EVENT_PTR(td_slots_issued_slm),
	EVENT_PTR(td_slots_retired_slm),
	NULL
};

static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
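	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */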
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
	EVENT_EXTRA_END
};

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};

static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0104,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,
		[ C(RESULT_MISS)   ] = 0x0280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0804,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,
		[ C(RESULT_MISS)   ] = 0x40205,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS)   ] = 0x00c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");

EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");

EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");

EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");

EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");

static struct attribute *glm_events_attrs[] = {
	EVENT_PTR(td_total_slots_glm),
	EVENT_PTR(td_total_slots_scale_glm),
	EVENT_PTR(td_fetch_bubbles_glm),
	EVENT_PTR(td_recovery_bubbles_glm),
	EVENT_PTR(td_slots_issued_glm),
	EVENT_PTR(td_slots_retired_glm),
	NULL
};

static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
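	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */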
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
	EVENT_EXTRA_END
};

#define GLM_DEMAND_DATA_RD		BIT_ULL(0)
#define GLM_DEMAND_RFO			BIT_ULL(1)
#define GLM_ANY_RESPONSE		BIT_ULL(16)
#define GLM_SNP_NONE_OR_MISS		BIT_ULL(33)
#define GLM_DEMAND_READ			GLM_DEMAND_DATA_RD
#define GLM_DEMAND_WRITE		GLM_DEMAND_RFO
#define GLM_DEMAND_PREFETCH		(SNB_PF_DATA_RD|SNB_PF_RFO)
#define GLM_LLC_ACCESS			GLM_ANY_RESPONSE
#define GLM_SNP_ANY			(GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
#define GLM_LLC_MISS			(GLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 glm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x81d0,
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x82d0,
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x0380,
			[C(RESULT_MISS)]	= 0x0280,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,
			[C(RESULT_MISS)]	= 0x1b7,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,
			[C(RESULT_MISS)]	= 0x1b7,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,
			[C(RESULT_MISS)]	= 0x1b7,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x81d0,
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x82d0,
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x00c0,
			[C(RESULT_MISS)]	= 0x0481,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x00c4,
			[C(RESULT_MISS)]	= 0x00c5,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
	},
};

static __initconst const u64 glm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
						  GLM_LLC_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
						  GLM_LLC_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_PREFETCH|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_PREFETCH|
						  GLM_LLC_MISS,
		},
	},
};

static __initconst const u64 glp_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x81d0,
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x82d0,
			[C(RESULT_MISS)]	= 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x0380,
			[C(RESULT_MISS)]	= 0x0280,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,
			[C(RESULT_MISS)]	= 0x1b7,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x1b7,
			[C(RESULT_MISS)]	= 0x1b7,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x81d0,
			[C(RESULT_MISS)]	= 0xe08,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x82d0,
			[C(RESULT_MISS)]	= 0xe49,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x00c0,
			[C(RESULT_MISS)]	= 0x0481,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x00c4,
			[C(RESULT_MISS)]	= 0x00c5,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= -1,
			[C(RESULT_MISS)]	= -1,
		},
	},
};

static __initconst const u64 glp_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_READ|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_READ|
						  GLM_LLC_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= GLM_DEMAND_WRITE|
						  GLM_LLC_ACCESS,
			[C(RESULT_MISS)]	= GLM_DEMAND_WRITE|
						  GLM_LLC_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
};

#define TNT_LOCAL_DRAM			BIT_ULL(26)
#define TNT_DEMAND_READ			GLM_DEMAND_DATA_RD
#define TNT_DEMAND_WRITE		GLM_DEMAND_RFO
#define TNT_LLC_ACCESS			GLM_ANY_RESPONSE
#define TNT_SNP_ANY			(SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
					 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
#define TNT_LLC_MISS			(TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)

static __initconst const u64 tnt_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= TNT_DEMAND_READ|
						  TNT_LLC_ACCESS,
			[C(RESULT_MISS)]	= TNT_DEMAND_READ|
						  TNT_LLC_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= TNT_DEMAND_WRITE|
						  TNT_LLC_ACCESS,
			[C(RESULT_MISS)]	= TNT_DEMAND_WRITE|
						  TNT_LLC_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x0,
			[C(RESULT_MISS)]	= 0x0,
		},
	},
};

static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
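	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */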
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1),
	EVENT_EXTRA_END
};

#define KNL_OT_L2_HITE		BIT_ULL(19)
#define KNL_OT_L2_HITF		BIT_ULL(20)
#define KNL_MCDRAM_LOCAL	BIT_ULL(21)
#define KNL_MCDRAM_FAR		BIT_ULL(22)
#define KNL_DDR_LOCAL		BIT_ULL(23)
#define KNL_DDR_FAR		BIT_ULL(24)
#define KNL_DRAM_ANY		(KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
				 KNL_DDR_LOCAL | KNL_DDR_FAR)
#define KNL_L2_READ		SLM_DMND_READ
#define KNL_L2_WRITE		SLM_DMND_WRITE
#define KNL_L2_PREFETCH		SLM_DMND_PREFETCH
#define KNL_L2_ACCESS		SLM_LLC_ACCESS
#define KNL_L2_MISS		(KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
				 KNL_DRAM_ANY | SNB_SNP_ANY | \
				 SNB_NON_DRAM)

static __initconst const u64 knl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
			[C(RESULT_MISS)]   = 0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
			[C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
			[C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS,
		},
	},
};

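/*
 * Used from PMIs where the LBRs are already disabled.
 *
 * This function could be called consecutively. It is required to remain in
 * disabled state if called consecutively.
 *
 * During consecutive calls, the same disable value will be written to related
 * registers, so the PMU state remains unchanged.
 *
 * intel_bts events don't coexist with intel PMU's BTS events because of
 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
 * cross-checked.
 */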
static void __intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
}

static void intel_pmu_disable_all(void)
{
	__intel_pmu_disable_all();
	intel_pmu_lbr_disable_all();
}

static void __intel_pmu_enable_all(int added, bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all(pmi);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static void intel_pmu_enable_all(int added)
{
	__intel_pmu_enable_all(added, false);
}

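/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on two PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */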
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

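	/*
	 * The workaround below: save the current counts of the first
	 * four generic counters, program the magic events on counters
	 * 0-3 with zeroed PMCs, pulse bits 0-3 of GLOBAL_CTRL so the
	 * magic events are armed once, then restore the original events
	 * and periods, clearing any counter that is not in use.
	 */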
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
{
	u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;

	if (cpuc->tfa_shadow != val) {
		cpuc->tfa_shadow = val;
		wrmsrl(MSR_TSX_FORCE_ABORT, val);
	}
}

static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
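	/*
	 * We're going to use PMC3, make sure TFA is set before we touch it.
	 */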
	if (cntr == 3)
		intel_set_tfa(cpuc, true);
}

static void intel_tfa_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

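	/*
	 * If we find PMC3 is no longer used when we enable the PMU, we can
	 * clear TFA.
	 */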
	if (!test_bit(3, cpuc->active_mask))
		intel_set_tfa(cpuc, false);

	intel_pmu_enable_all(added);
}

static void enable_counter_freeze(void)
{
	update_debugctlmsr(get_debugctlmsr() |
			DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
}

static void disable_counter_freeze(void)
{
	update_debugctlmsr(get_debugctlmsr() &
			~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static inline bool event_is_checkpointed(struct perf_event *event)
{
	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
	cpuc->intel_cp_status &= ~(1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
		intel_pmu_disable_fixed(hwc);
	else
		x86_pmu_disable_event(event);

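	/*
	 * Needs to be called after x86_pmu_disable_event,
	 * so we don't trigger the event without PEBS bit set.
	 */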
2172 if (unlikely(event->attr.precise_ip))
2173 intel_pmu_pebs_disable(event);
2174}
2175
2176static void intel_pmu_del_event(struct perf_event *event)
2177{
2178 if (needs_branch_stack(event))
2179 intel_pmu_lbr_del(event);
2180 if (event->attr.precise_ip)
2181 intel_pmu_pebs_del(event);
2182}
2183
2184static void intel_pmu_read_event(struct perf_event *event)
2185{
2186 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2187 intel_pmu_auto_reload_read(event);
2188 else
2189 x86_perf_event_update(event);
2190}
2191
2192static void intel_pmu_enable_fixed(struct perf_event *event)
2193{
2194 struct hw_perf_event *hwc = &event->hw;
2195 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2196 u64 ctrl_val, mask, bits = 0;
2197
	/*
	 * Enable IRQ generation (0x8), if not PEBS,
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
2203 if (!event->attr.precise_ip)
2204 bits |= 0x8;
2205 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2206 bits |= 0x2;
2207 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2208 bits |= 0x1;
2209
	/*
	 * ANY bit is supported in v3 and up
	 */
2213 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2214 bits |= 0x4;
2215
2216 bits <<= (idx * 4);
2217 mask = 0xfULL << (idx * 4);
2218
2219 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2220 bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2221 mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2222 }
2223
2224 rdmsrl(hwc->config_base, ctrl_val);
2225 ctrl_val &= ~mask;
2226 ctrl_val |= bits;
2227 wrmsrl(hwc->config_base, ctrl_val);
2228}
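
/*
 * Worked example (illustrative only): a fixed counter at idx 1 counting
 * both ring 0 and ring 3 with PMI enabled gets bits = 0x8 | 0x2 | 0x1 = 0xb,
 * shifted into its 4-bit control field:
 *
 *	bits <<= (1 * 4);		// 0xb0
 *	mask  = 0xfULL << (1 * 4);	// 0xf0
 *	ctrl_val = (ctrl_val & ~0xf0) | 0xb0;
 *
 * i.e. only the field belonging to FIXED_CTR1 inside
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL is rewritten.
 */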
2229
2230static void intel_pmu_enable_event(struct perf_event *event)
2231{
2232 struct hw_perf_event *hwc = &event->hw;
2233 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2234
2235 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2236 if (!__this_cpu_read(cpu_hw_events.enabled))
2237 return;
2238
2239 intel_pmu_enable_bts(hwc->config);
2240 return;
2241 }
2242
2243 if (event->attr.exclude_host)
2244 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
2245 if (event->attr.exclude_guest)
2246 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
2247
2248 if (unlikely(event_is_checkpointed(event)))
2249 cpuc->intel_cp_status |= (1ull << hwc->idx);
2250
2251 if (unlikely(event->attr.precise_ip))
2252 intel_pmu_pebs_enable(event);
2253
2254 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2255 intel_pmu_enable_fixed(event);
2256 return;
2257 }
2258
2259 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2260}
2261
2262static void intel_pmu_add_event(struct perf_event *event)
2263{
2264 if (event->attr.precise_ip)
2265 intel_pmu_pebs_add(event);
2266 if (needs_branch_stack(event))
2267 intel_pmu_lbr_add(event);
2268}
2269
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
2274int intel_pmu_save_and_restart(struct perf_event *event)
2275{
2276 x86_perf_event_update(event);
2277
	/*
	 * For a checkpointed counter always reset back to 0.  This
	 * avoids a situation where the counter overflows, aborts the
	 * transaction and is then set back to shortly before the
	 * overflow, and overflows and aborts again.
	 */
2283 if (unlikely(event_is_checkpointed(event))) {
		/* No race with NMIs because the counter should not be armed */
2285 wrmsrl(event->hw.event_base, 0);
2286 local64_set(&event->hw.prev_count, 0);
2287 }
2288 return x86_perf_event_set_period(event);
2289}
2290
2291static void intel_pmu_reset(void)
2292{
2293 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2294 unsigned long flags;
2295 int idx;
2296
2297 if (!x86_pmu.num_counters)
2298 return;
2299
2300 local_irq_save(flags);
2301
2302 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2303
2304 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2305 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2306 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2307 }
2308 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
2309 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2310
2311 if (ds)
2312 ds->bts_index = ds->bts_buffer_base;
2313
	/* Ack all overflows and disable fixed counters */
2315 if (x86_pmu.version >= 2) {
2316 intel_pmu_ack_status(intel_pmu_get_status());
2317 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2318 }
2319
	/* Reset LBRs and LBR freezing */
2321 if (x86_pmu.lbr_nr) {
2322 update_debugctlmsr(get_debugctlmsr() &
2323 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2324 }
2325
2326 local_irq_restore(flags);
2327}
2328
2329static int handle_pmi_common(struct pt_regs *regs, u64 status)
2330{
2331 struct perf_sample_data data;
2332 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2333 int bit;
2334 int handled = 0;
2335
2336 inc_irq_stat(apic_perf_irqs);
2337
	/*
	 * Ignore a range of extra bits in status that do not indicate
	 * overflow by themselves.
	 */
2342 status &= ~(GLOBAL_STATUS_COND_CHG |
2343 GLOBAL_STATUS_ASIF |
2344 GLOBAL_STATUS_LBRS_FROZEN);
2345 if (!status)
2346 return 0;
2347
	/*
	 * In case multiple PEBS events are sampled at the same time,
	 * it is possible to have GLOBAL_STATUS bit 62 set indicating PEBS
	 * buffer overflow and, at the same time, PMCx overflow bits set
	 * for the PEBS-enabled counters themselves.
	 *
	 * Those counters are handled by drain_pebs() below, which consumes
	 * the PEBS records and updates the events from them, so mask them
	 * out of @status here to avoid handling the same overflow twice.
	 *
	 * With PMU_FL_PEBS_ALL every general purpose counter can be a PEBS
	 * counter, so the whole pebs_enabled mask is cleared; on older
	 * parts only the low four counters (PEBS_COUNTER_MASK) can be.
	 */
2367 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2368 status &= ~cpuc->pebs_enabled;
2369 else
2370 status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2371
	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
2375 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
2376 handled++;
2377 x86_pmu.drain_pebs(regs);
2378 status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2379 }
2380
	/*
	 * Intel PT
	 */
2384 if (__test_and_clear_bit(55, (unsigned long *)&status)) {
2385 handled++;
2386 if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
2387 perf_guest_cbs->handle_intel_pt_intr))
2388 perf_guest_cbs->handle_intel_pt_intr();
2389 else
2390 intel_pt_interrupt();
2391 }
2392
	/*
	 * Checkpointed counters can lead to 'spurious' PMIs because the
	 * rollback caused by the PMI will have cleared the overflow status
	 * bit. Therefore always force probe these counters.
	 */
2398 status |= cpuc->intel_cp_status;
2399
2400 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2401 struct perf_event *event = cpuc->events[bit];
2402
2403 handled++;
2404
2405 if (!test_bit(bit, cpuc->active_mask))
2406 continue;
2407
2408 if (!intel_pmu_save_and_restart(event))
2409 continue;
2410
2411 perf_sample_data_init(&data, 0, event->hw.last_period);
2412
2413 if (has_branch_stack(event))
2414 data.br_stack = &cpuc->lbr_stack;
2415
2416 if (perf_event_overflow(event, &data, regs))
2417 x86_pmu_stop(event, 0);
2418 }
2419
2420 return handled;
2421}
2422
2423static bool disable_counter_freezing = true;
2424static int __init intel_perf_counter_freezing_setup(char *s)
2425{
2426 bool res;
2427
2428 if (kstrtobool(s, &res))
2429 return -EINVAL;
2430
2431 disable_counter_freezing = !res;
2432 return 1;
2433}
2434__setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
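
/*
 * Usage example: counter freezing can be controlled from the kernel
 * command line, e.g. booting with
 *
 *	perf_v4_pmi=0
 *
 * keeps the legacy intel_pmu_handle_irq() NMI handler even on an
 * Arch Perfmon v4 PMU (kstrtobool() also accepts y/n/on/off forms).
 */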
2435
/*
 * Simplified handler for Arch Perfmon v4:
 * - We rely on counter freezing/unfreezing to enable/disable the PMU.
 *   This is done automatically on PMU ack.
 * - Ack the PMU only after the APIC.
 */
2443static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
2444{
2445 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2446 int handled = 0;
2447 bool bts = false;
2448 u64 status;
2449 int pmu_enabled = cpuc->enabled;
2450 int loops = 0;
2451
	/* PMU has been disabled because of counter freezing */
2453 cpuc->enabled = 0;
2454 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2455 bts = true;
2456 intel_bts_disable_local();
2457 handled = intel_pmu_drain_bts_buffer();
2458 handled += intel_bts_interrupt();
2459 }
2460 status = intel_pmu_get_status();
2461 if (!status)
2462 goto done;
2463again:
2464 intel_pmu_lbr_read();
2465 if (++loops > 100) {
2466 static bool warned;
2467
2468 if (!warned) {
2469 WARN(1, "perfevents: irq loop stuck!\n");
2470 perf_event_print_debug();
2471 warned = true;
2472 }
2473 intel_pmu_reset();
2474 goto done;
2475 }
2476
2477
2478 handled += handle_pmi_common(regs, status);
2479done:
	/* Ack the PMI in the APIC */
2481 apic_write(APIC_LVTPC, APIC_DM_NMI);
2482
	/*
	 * The counters start counting immediately while ack the status.
	 * Make it as close as possible to IRET. This avoids bogus
	 * freezing on Skylake CPUs.
	 */
2488 if (status) {
2489 intel_pmu_ack_status(status);
2490 } else {
		/*
		 * The CPU may issue two PMIs very close to each other.
		 * When the PMI handler services the first one, the
		 * GLOBAL_STATUS is already updated to reflect both.
		 * When it IRETs, the second PMI is immediately
		 * handled and sees a clear status. In the meantime
		 * there may be a third PMI, because the freezing bit
		 * isn't set since the ack in the first PMI handler.
		 * Double check if there is more work to be done.
		 */
2501 status = intel_pmu_get_status();
2502 if (status)
2503 goto again;
2504 }
2505
2506 if (bts)
2507 intel_bts_enable_local();
2508 cpuc->enabled = pmu_enabled;
2509 return handled;
2510}
2511
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
2516static int intel_pmu_handle_irq(struct pt_regs *regs)
2517{
2518 struct cpu_hw_events *cpuc;
2519 int loops;
2520 u64 status;
2521 int handled;
2522 int pmu_enabled;
2523
2524 cpuc = this_cpu_ptr(&cpu_hw_events);
2525
	/*
	 * Save the PMU state.
	 * It needs to be restored when leaving the handler.
	 */
2530 pmu_enabled = cpuc->enabled;
2531
	/*
	 * No known reason to not always do late ACK,
	 * but just in case do it opt-in.
	 */
2535 if (!x86_pmu.late_ack)
2536 apic_write(APIC_LVTPC, APIC_DM_NMI);
2537 intel_bts_disable_local();
2538 cpuc->enabled = 0;
2539 __intel_pmu_disable_all();
2540 handled = intel_pmu_drain_bts_buffer();
2541 handled += intel_bts_interrupt();
2542 status = intel_pmu_get_status();
2543 if (!status)
2544 goto done;
2545
2546 loops = 0;
2547again:
2548 intel_pmu_lbr_read();
2549 intel_pmu_ack_status(status);
2550 if (++loops > 100) {
2551 static bool warned;
2552
2553 if (!warned) {
2554 WARN(1, "perfevents: irq loop stuck!\n");
2555 perf_event_print_debug();
2556 warned = true;
2557 }
2558 intel_pmu_reset();
2559 goto done;
2560 }
2561
2562 handled += handle_pmi_common(regs, status);
2563
	/*
	 * Repeat if there is more work to be done:
	 */
2567 status = intel_pmu_get_status();
2568 if (status)
2569 goto again;
2570
2571done:
	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
2573 cpuc->enabled = pmu_enabled;
2574 if (pmu_enabled)
2575 __intel_pmu_enable_all(0, true);
2576 intel_bts_enable_local();
2577
	/*
	 * Only unmask the NMI after the overflow counters
	 * have been reset. This avoids spurious NMIs on
	 * Haswell CPUs.
	 */
2583 if (x86_pmu.late_ack)
2584 apic_write(APIC_LVTPC, APIC_DM_NMI);
2585 return handled;
2586}
2587
2588static struct event_constraint *
2589intel_bts_constraints(struct perf_event *event)
2590{
2591 if (unlikely(intel_pmu_has_bts(event)))
2592 return &bts_constraint;
2593
2594 return NULL;
2595}
2596
2597static int intel_alt_er(int idx, u64 config)
2598{
2599 int alt_idx = idx;
2600
2601 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
2602 return idx;
2603
2604 if (idx == EXTRA_REG_RSP_0)
2605 alt_idx = EXTRA_REG_RSP_1;
2606
2607 if (idx == EXTRA_REG_RSP_1)
2608 alt_idx = EXTRA_REG_RSP_0;
2609
2610 if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
2611 return idx;
2612
2613 return alt_idx;
2614}
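
/*
 * Worked example (illustrative only): two events both wanting
 * MSR_OFFCORE_RSP_0 with different configs. The second one fails the
 * era->config match below, calls intel_alt_er(EXTRA_REG_RSP_0, config)
 * and, provided the config fits the RSP_1 valid_mask, retries against
 * EXTRA_REG_RSP_1 / MSR_OFFCORE_RSP_1 instead; the two offcore response
 * MSRs are symmetric on PMU_FL_HAS_RSP_1 parts.
 */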
2615
2616static void intel_fixup_er(struct perf_event *event, int idx)
2617{
2618 event->hw.extra_reg.idx = idx;
2619
2620 if (idx == EXTRA_REG_RSP_0) {
2621 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2622 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
2623 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
2624 } else if (idx == EXTRA_REG_RSP_1) {
2625 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2626 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
2627 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
2628 }
2629}
2630
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
2638static struct event_constraint *
2639__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
2640 struct perf_event *event,
2641 struct hw_perf_event_extra *reg)
2642{
2643 struct event_constraint *c = &emptyconstraint;
2644 struct er_account *era;
2645 unsigned long flags;
2646 int idx = reg->idx;
2647
	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint. Also see the comment
	 * below.
	 */
2653 if (reg->alloc && !cpuc->is_fake)
2654 return NULL;
2655
2656again:
2657 era = &cpuc->shared_regs->regs[idx];
2658
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
2662 raw_spin_lock_irqsave(&era->lock, flags);
2663
2664 if (!atomic_read(&era->ref) || era->config == reg->config) {
2665
		/*
		 * If it's a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
2676 if (!cpuc->is_fake) {
2677 if (idx != reg->idx)
2678 intel_fixup_er(event, idx);
2679
			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling. reg->alloc ensures we only do the ER
			 * allocation once.
			 */
2686 reg->alloc = 1;
2687 }
2688
		/* lock in msr value */
2690 era->config = reg->config;
2691 era->reg = reg->reg;
2692
		/* one more user */
2694 atomic_inc(&era->ref);
2695
		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
2700 c = NULL;
2701 } else {
2702 idx = intel_alt_er(idx, reg->config);
2703 if (idx != reg->idx) {
2704 raw_spin_unlock_irqrestore(&era->lock, flags);
2705 goto again;
2706 }
2707 }
2708 raw_spin_unlock_irqrestore(&era->lock, flags);
2709
2710 return c;
2711}
2712
2713static void
2714__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
2715 struct hw_perf_event_extra *reg)
2716{
2717 struct er_account *era;
2718
	/*
	 * Only put constraint if extra reg was actually allocated. Also
	 * takes care of events which do not use an extra shared reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc
	 * state either since it'll be thrown out.
	 */
2727 if (!reg->alloc || cpuc->is_fake)
2728 return;
2729
2730 era = &cpuc->shared_regs->regs[reg->idx];
2731
	/* one fewer user */
2733 atomic_dec(&era->ref);
2734
	/* allocate again next time */
2736 reg->alloc = 0;
2737}
2738
2739static struct event_constraint *
2740intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
2741 struct perf_event *event)
2742{
2743 struct event_constraint *c = NULL, *d;
2744 struct hw_perf_event_extra *xreg, *breg;
2745
2746 xreg = &event->hw.extra_reg;
2747 if (xreg->idx != EXTRA_REG_NONE) {
2748 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
2749 if (c == &emptyconstraint)
2750 return c;
2751 }
2752 breg = &event->hw.branch_reg;
2753 if (breg->idx != EXTRA_REG_NONE) {
2754 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
2755 if (d == &emptyconstraint) {
2756 __intel_shared_reg_put_constraints(cpuc, xreg);
2757 c = d;
2758 }
2759 }
2760 return c;
2761}
2762
2763struct event_constraint *
2764x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2765 struct perf_event *event)
2766{
2767 struct event_constraint *c;
2768
2769 if (x86_pmu.event_constraints) {
2770 for_each_event_constraint(c, x86_pmu.event_constraints) {
2771 if (constraint_match(c, event->hw.config)) {
2772 event->hw.flags |= c->flags;
2773 return c;
2774 }
2775 }
2776 }
2777
2778 return &unconstrained;
2779}
2780
2781static struct event_constraint *
2782__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2783 struct perf_event *event)
2784{
2785 struct event_constraint *c;
2786
2787 c = intel_bts_constraints(event);
2788 if (c)
2789 return c;
2790
2791 c = intel_shared_regs_constraints(cpuc, event);
2792 if (c)
2793 return c;
2794
2795 c = intel_pebs_constraints(event);
2796 if (c)
2797 return c;
2798
2799 return x86_get_event_constraints(cpuc, idx, event);
2800}
2801
2802static void
2803intel_start_scheduling(struct cpu_hw_events *cpuc)
2804{
2805 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2806 struct intel_excl_states *xl;
2807 int tid = cpuc->excl_thread_id;
2808
	/*
	 * nothing needed if in group validation mode
	 */
2812 if (cpuc->is_fake || !is_ht_workaround_enabled())
2813 return;
2814
	/*
	 * no exclusion needed
	 */
2818 if (WARN_ON_ONCE(!excl_cntrs))
2819 return;
2820
2821 xl = &excl_cntrs->states[tid];
2822
2823 xl->sched_started = true;
2824
	/*
	 * lock shared state until we are done scheduling
	 * in stop_event_scheduling()
	 * makes scheduling appear as a transaction
	 */
2829 raw_spin_lock(&excl_cntrs->lock);
2830}
2831
2832static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2833{
2834 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2835 struct event_constraint *c = cpuc->event_constraint[idx];
2836 struct intel_excl_states *xl;
2837 int tid = cpuc->excl_thread_id;
2838
2839 if (cpuc->is_fake || !is_ht_workaround_enabled())
2840 return;
2841
2842 if (WARN_ON_ONCE(!excl_cntrs))
2843 return;
2844
2845 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
2846 return;
2847
2848 xl = &excl_cntrs->states[tid];
2849
2850 lockdep_assert_held(&excl_cntrs->lock);
2851
2852 if (c->flags & PERF_X86_EVENT_EXCL)
2853 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
2854 else
2855 xl->state[cntr] = INTEL_EXCL_SHARED;
2856}
2857
2858static void
2859intel_stop_scheduling(struct cpu_hw_events *cpuc)
2860{
2861 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2862 struct intel_excl_states *xl;
2863 int tid = cpuc->excl_thread_id;
2864
	/*
	 * nothing needed if in group validation mode
	 */
2868 if (cpuc->is_fake || !is_ht_workaround_enabled())
2869 return;
2870
	/*
	 * no exclusion needed
	 */
2873 if (WARN_ON_ONCE(!excl_cntrs))
2874 return;
2875
2876 xl = &excl_cntrs->states[tid];
2877
2878 xl->sched_started = false;
2879
	/*
	 * release shared state lock (acquired in intel_start_scheduling())
	 */
2882 raw_spin_unlock(&excl_cntrs->lock);
2883}
2884
2885static struct event_constraint *
2886dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
2887{
2888 WARN_ON_ONCE(!cpuc->constraint_list);
2889
2890 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
2891 struct event_constraint *cx;
2892
		/*
		 * grab pre-allocated constraint entry
		 */
2896 cx = &cpuc->constraint_list[idx];
2897
		/*
		 * initialize dynamic constraint
		 * with static constraint
		 */
2902 *cx = *c;
2903
		/*
		 * mark constraint as dynamic
		 */
2907 cx->flags |= PERF_X86_EVENT_DYNAMIC;
2908 c = cx;
2909 }
2910
2911 return c;
2912}
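
/*
 * Example (illustrative sketch only, assuming cpuc->constraint_list was
 * allocated, i.e. PMU_FL_EXCL_CNTRS or PMU_FL_TFA is set): cloning keeps
 * the static constraint tables const.
 *
 *	struct event_constraint *c = &intel_snb_event_constraints[n];
 *	c = dyn_constraint(cpuc, c, idx);	// now a per-cpu copy
 *	c->idxmsk64 &= ~(1ULL << 3);		// safe: doesn't touch the table
 *
 * Callers such as tfa_get_event_constraints() below rely on this before
 * modifying the counter mask.
 */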
2913
2914static struct event_constraint *
2915intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2916 int idx, struct event_constraint *c)
2917{
2918 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2919 struct intel_excl_states *xlo;
2920 int tid = cpuc->excl_thread_id;
2921 int is_excl, i, w;
2922
	/*
	 * validating a group does not require
	 * enforcing cross-thread exclusion
	 */
2927 if (cpuc->is_fake || !is_ht_workaround_enabled())
2928 return c;
2929
	/*
	 * no exclusion needed
	 */
2933 if (WARN_ON_ONCE(!excl_cntrs))
2934 return c;
2935
	/*
	 * because we modify the constraint, we need
	 * to make a copy. Static constraints come
	 * from static const tables.
	 *
	 * only needed when constraint has not yet
	 * been cloned (marked dynamic)
	 */
2944 c = dyn_constraint(cpuc, c, idx);
2945
	/*
	 * From here on, the constraint is dynamic.
	 * Either it was just allocated above, or it
	 * was allocated during an earlier invocation
	 * of this function
	 */

	/*
	 * state of sibling HT
	 */
2956 xlo = &excl_cntrs->states[tid ^ 1];
2957
	/*
	 * event requires exclusive counter access
	 * across HT threads
	 */
2962 is_excl = c->flags & PERF_X86_EVENT_EXCL;
2963 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2964 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2965 if (!cpuc->n_excl++)
2966 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2967 }
2968
	/*
	 * Modify static constraint with current dynamic
	 * state of thread
	 *
	 * EXCLUSIVE: sibling counter measuring exclusive event
	 * SHARED   : sibling counter measuring non-exclusive event
	 * UNUSED   : sibling counter unused
	 */
2977 w = c->weight;
2978 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
		/*
		 * exclusive event in sibling counter
		 * our corresponding counter cannot be used
		 * regardless of our event
		 */
2984 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
2985 __clear_bit(i, c->idxmsk);
2986 w--;
2987 continue;
2988 }
2989
		/*
		 * if measuring an exclusive event, sibling
		 * measuring non-exclusive, then counter cannot
		 * be used
		 */
2994 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
2995 __clear_bit(i, c->idxmsk);
2996 w--;
2997 continue;
2998 }
2999 }
3000
	/*
	 * if we return an empty mask, then switch
	 * back to static empty constraint to avoid
	 * the cost of freeing later on
	 */
3006 if (!w)
3007 c = &emptyconstraint;
3008
3009 c->weight = w;
3010
3011 return c;
3012}
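
/*
 * Worked example (illustrative only): our event has idxmsk = 0xf
 * (counters 0-3, w = 4) and the sibling thread's state is
 *
 *	xlo->state[] = { EXCLUSIVE, SHARED, UNUSED, UNUSED }
 *
 * A non-exclusive event loses counter 0 only (idxmsk 0xe, w = 3); an
 * exclusive event additionally loses the SHARED counter 1 (idxmsk 0xc,
 * w = 2). Should everything drop out (w == 0), the event is reported
 * as unschedulable via emptyconstraint.
 */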
3013
3014static struct event_constraint *
3015intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3016 struct perf_event *event)
3017{
3018 struct event_constraint *c1, *c2;
3019
3020 c1 = cpuc->event_constraint[idx];
3021
	/*
	 * first time only
	 * - static constraint: no change across incremental scheduling calls
	 * - dynamic constraint: handled by intel_get_excl_constraints()
	 */
3027 c2 = __intel_get_event_constraints(cpuc, idx, event);
3028 if (c1) {
3029 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3030 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3031 c1->weight = c2->weight;
3032 c2 = c1;
3033 }
3034
3035 if (cpuc->excl_cntrs)
3036 return intel_get_excl_constraints(cpuc, event, idx, c2);
3037
3038 return c2;
3039}
3040
3041static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3042 struct perf_event *event)
3043{
3044 struct hw_perf_event *hwc = &event->hw;
3045 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3046 int tid = cpuc->excl_thread_id;
3047 struct intel_excl_states *xl;
3048
	/*
	 * nothing needed if in group validation mode
	 */
3052 if (cpuc->is_fake)
3053 return;
3054
3055 if (WARN_ON_ONCE(!excl_cntrs))
3056 return;
3057
3058 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3059 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3060 if (!--cpuc->n_excl)
3061 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3062 }
3063
	/*
	 * If event was actually assigned, then mark the counter state as
	 * unused now.
	 */
3068 if (hwc->idx >= 0) {
3069 xl = &excl_cntrs->states[tid];
3070
		/*
		 * put_constraint may be called from x86_schedule_events()
		 * which already has the lock held so here make locking
		 * conditional.
		 */
3076 if (!xl->sched_started)
3077 raw_spin_lock(&excl_cntrs->lock);
3078
3079 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3080
3081 if (!xl->sched_started)
3082 raw_spin_unlock(&excl_cntrs->lock);
3083 }
3084}
3085
3086static void
3087intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3088 struct perf_event *event)
3089{
3090 struct hw_perf_event_extra *reg;
3091
3092 reg = &event->hw.extra_reg;
3093 if (reg->idx != EXTRA_REG_NONE)
3094 __intel_shared_reg_put_constraints(cpuc, reg);
3095
3096 reg = &event->hw.branch_reg;
3097 if (reg->idx != EXTRA_REG_NONE)
3098 __intel_shared_reg_put_constraints(cpuc, reg);
3099}
3100
3101static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3102 struct perf_event *event)
3103{
3104 intel_put_shared_regs_event_constraints(cpuc, event);
3105
	/*
	 * If the PMU has exclusive counter restrictions, then
	 * all events are subject to them and must call the
	 * put_excl_constraints() routine.
	 */
3111 if (cpuc->excl_cntrs)
3112 intel_put_excl_constraints(cpuc, event);
3113}
3114
3115static void intel_pebs_aliases_core2(struct perf_event *event)
3116{
3117 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retires
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
3136 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3137
3138 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3139 event->hw.config = alt_config;
3140 }
3141}
3142
3143static void intel_pebs_aliases_snb(struct perf_event *event)
3144{
3145 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use UOPS_RETIRED.ALL
		 * (0x01c2), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * UOPS_RETIRED.ALL counts the number of cycles that retires
		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
		 * larger than the maximum number of micro-ops that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less micro-ops, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
3164 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3165
3166 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3167 event->hw.config = alt_config;
3168 }
3169}
3170
3171static void intel_pebs_aliases_precdist(struct perf_event *event)
3172{
3173 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
		 * (0x01c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * The PREC_DIST event has special support to minimize sample
		 * shadowing effects. One drawback is that it can only be
		 * programmed on counter 1, but that seems like an
		 * acceptable trade off.
		 */
3188 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3189
3190 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3191 event->hw.config = alt_config;
3192 }
3193}
3194
3195static void intel_pebs_aliases_ivb(struct perf_event *event)
3196{
3197 if (event->attr.precise_ip < 3)
3198 return intel_pebs_aliases_snb(event);
3199 return intel_pebs_aliases_precdist(event);
3200}
3201
3202static void intel_pebs_aliases_skl(struct perf_event *event)
3203{
3204 if (event->attr.precise_ip < 3)
3205 return intel_pebs_aliases_core2(event);
3206 return intel_pebs_aliases_precdist(event);
3207}
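
/*
 * Worked example (illustrative only) of the SNB alias above:
 *
 *	X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16)
 *	  = 0x01c2 | (1 << 23) | (16 << 24)
 *	  = 0x108001c2
 *
 * i.e. UOPS_RETIRED.ALL with the invert bit set and a counter mask of
 * 16, which fires every cycle and is PEBS capable.
 */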
3208
3209static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3210{
3211 unsigned long flags = x86_pmu.large_pebs_flags;
3212
3213 if (event->attr.use_clockid)
3214 flags &= ~PERF_SAMPLE_TIME;
3215 if (!event->attr.exclude_kernel)
3216 flags &= ~PERF_SAMPLE_REGS_USER;
3217 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3218 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3219 return flags;
3220}
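
/*
 * Example (illustrative only, assuming PERF_SAMPLE_TIME is part of
 * x86_pmu.large_pebs_flags on this part): a kernel-excluded event with
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TIME and no use_clockid
 * keeps both bits in the returned mask, so the multi-entry PEBS buffer
 * can supply everything the sample needs and the large-PEBS path stays
 * enabled in intel_pmu_hw_config() below.
 */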
3221
3222static int intel_pmu_bts_config(struct perf_event *event)
3223{
3224 struct perf_event_attr *attr = &event->attr;
3225
3226 if (unlikely(intel_pmu_has_bts(event))) {
		/* BTS is not supported by this architecture. */
3228 if (!x86_pmu.bts_active)
3229 return -EOPNOTSUPP;
3230
		/* BTS is currently only allowed for user-mode. */
3232 if (!attr->exclude_kernel)
3233 return -EOPNOTSUPP;
3234
		/* BTS is not allowed for precise events. */
3236 if (attr->precise_ip)
3237 return -EOPNOTSUPP;
3238
		/* disallow bts if conflicting events are present */
3240 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3241 return -EBUSY;
3242
3243 event->destroy = hw_perf_lbr_event_destroy;
3244 }
3245
3246 return 0;
3247}
3248
3249static int core_pmu_hw_config(struct perf_event *event)
3250{
3251 int ret = x86_pmu_hw_config(event);
3252
3253 if (ret)
3254 return ret;
3255
3256 return intel_pmu_bts_config(event);
3257}
3258
3259static int intel_pmu_hw_config(struct perf_event *event)
3260{
3261 int ret = x86_pmu_hw_config(event);
3262
3263 if (ret)
3264 return ret;
3265
3266 ret = intel_pmu_bts_config(event);
3267 if (ret)
3268 return ret;
3269
3270 if (event->attr.precise_ip) {
3271 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3272 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3273 if (!(event->attr.sample_type &
3274 ~intel_pmu_large_pebs_flags(event)))
3275 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3276 }
3277 if (x86_pmu.pebs_aliases)
3278 x86_pmu.pebs_aliases(event);
3279
3280 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3281 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3282 }
3283
3284 if (needs_branch_stack(event)) {
3285 ret = intel_pmu_setup_lbr_filter(event);
3286 if (ret)
3287 return ret;
3288
		/*
		 * BTS is set up earlier in this path, so don't account twice
		 */
3292 if (!unlikely(intel_pmu_has_bts(event))) {
			/* disallow lbr if conflicting events are present */
3294 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3295 return -EBUSY;
3296
3297 event->destroy = hw_perf_lbr_event_destroy;
3298 }
3299 }
3300
3301 if (event->attr.type != PERF_TYPE_RAW)
3302 return 0;
3303
3304 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3305 return 0;
3306
3307 if (x86_pmu.version < 3)
3308 return -EINVAL;
3309
3310 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3311 return -EACCES;
3312
3313 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3314
3315 return 0;
3316}
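
/*
 * Example (illustrative only): the ANY-thread check above is what gates
 * raw events such as
 *
 *	perf stat -e cpu/event=0x3c,any=1/ ...
 *
 * On a v3+ PMU this is allowed for CAP_SYS_ADMIN (or a non-paranoid
 * perf_event_paranoid setting); otherwise the bit is refused rather
 * than silently dropped.
 */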
3317
3318struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
3319{
3320 if (x86_pmu.guest_get_msrs)
3321 return x86_pmu.guest_get_msrs(nr);
3322 *nr = 0;
3323 return NULL;
3324}
3325EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
3326
3327static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3328{
3329 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3330 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3331
3332 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3333 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3334 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3335 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
3336 arr[0].guest &= ~cpuc->pebs_enabled;
3337 else
3338 arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
3339 *nr = 1;
3340
3341 if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
		/*
		 * If PMU counter has PEBS enabled it is not enough to
		 * disable counter on a guest entry since PEBS memory
		 * write can overshoot guest entry and corrupt guest
		 * memory. Disabling PEBS solves the problem.
		 *
		 * Don't do this if the CPU already enforces it.
		 */
3350 arr[1].msr = MSR_IA32_PEBS_ENABLE;
3351 arr[1].host = cpuc->pebs_enabled;
3352 arr[1].guest = 0;
3353 *nr = 2;
3354 }
3355
3356 return arr;
3357}
3358
3359static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3360{
3361 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3362 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3363 int idx;
3364
3365 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3366 struct perf_event *event = cpuc->events[idx];
3367
3368 arr[idx].msr = x86_pmu_config_addr(idx);
3369 arr[idx].host = arr[idx].guest = 0;
3370
3371 if (!test_bit(idx, cpuc->active_mask))
3372 continue;
3373
3374 arr[idx].host = arr[idx].guest =
3375 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3376
3377 if (event->attr.exclude_host)
3378 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3379 else if (event->attr.exclude_guest)
3380 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3381 }
3382
3383 *nr = x86_pmu.num_counters;
3384 return arr;
3385}
3386
3387static void core_pmu_enable_event(struct perf_event *event)
3388{
3389 if (!event->attr.exclude_host)
3390 x86_pmu_enable_event(event);
3391}
3392
3393static void core_pmu_enable_all(int added)
3394{
3395 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3396 int idx;
3397
3398 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3399 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3400
3401 if (!test_bit(idx, cpuc->active_mask) ||
3402 cpuc->events[idx]->attr.exclude_host)
3403 continue;
3404
3405 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3406 }
3407}
3408
3409static int hsw_hw_config(struct perf_event *event)
3410{
3411 int ret = intel_pmu_hw_config(event);
3412
3413 if (ret)
3414 return ret;
3415 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3416 return 0;
3417 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3418
	/*
	 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
	 * PEBS or in ANY thread mode. Since the results are non-sensical,
	 * forbid this combination.
	 */
3424 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3425 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3426 event->attr.precise_ip > 0))
3427 return -EOPNOTSUPP;
3428
3429 if (event_is_checkpointed(event)) {
		/*
		 * Sampling of checkpointed events can cause situations where
		 * the CPU constantly aborts because of an overflow, which is
		 * then checkpointed back and ignored.
		 *
		 * Therefore only allow (close to) the maximum period; smaller
		 * periods would overflow, roll back, and overflow again
		 * without making forward progress.
		 */
3439 if (event->attr.sample_period > 0 &&
3440 event->attr.sample_period < 0x7fffffff)
3441 return -EOPNOTSUPP;
3442 }
3443 return 0;
3444}
3445
3446static struct event_constraint counter0_constraint =
3447 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3448
3449static struct event_constraint counter2_constraint =
3450 EVENT_CONSTRAINT(0, 0x4, 0);
3451
3452static struct event_constraint fixed0_constraint =
3453 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
3454
3455static struct event_constraint fixed0_counter0_constraint =
3456 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
3457
3458static struct event_constraint *
3459hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3460 struct perf_event *event)
3461{
3462 struct event_constraint *c;
3463
3464 c = intel_get_event_constraints(cpuc, idx, event);
3465
	/* Handle special quirk on in_tx_checkpointed only in counter 2 */
3467 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
3468 if (c->idxmsk64 & (1U << 2))
3469 return &counter2_constraint;
3470 return &emptyconstraint;
3471 }
3472
3473 return c;
3474}
3475
3476static struct event_constraint *
3477icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3478 struct perf_event *event)
3479{
	/*
	 * Fixed counter 0 has less skid.
	 * Force instruction:ppp in Fixed counter 0
	 */
3484 if ((event->attr.precise_ip == 3) &&
3485 constraint_match(&fixed0_constraint, event->hw.config))
3486 return &fixed0_constraint;
3487
3488 return hsw_get_event_constraints(cpuc, idx, event);
3489}
3490
3491static struct event_constraint *
3492glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3493 struct perf_event *event)
3494{
3495 struct event_constraint *c;
3496
	/* :ppp means to do reduced skid PEBS which is PMC0 only. */
3498 if (event->attr.precise_ip == 3)
3499 return &counter0_constraint;
3500
3501 c = intel_get_event_constraints(cpuc, idx, event);
3502
3503 return c;
3504}
3505
3506static struct event_constraint *
3507tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3508 struct perf_event *event)
3509{
3510 struct event_constraint *c;
3511
	/*
	 * :ppp means to do reduced skid PEBS,
	 * which is available on PMC0 and fixed counter 0.
	 */
3516 if (event->attr.precise_ip == 3) {
		/* Force instruction:ppp on PMC0 and Fixed counter 0 */
3518 if (constraint_match(&fixed0_constraint, event->hw.config))
3519 return &fixed0_counter0_constraint;
3520
3521 return &counter0_constraint;
3522 }
3523
3524 c = intel_get_event_constraints(cpuc, idx, event);
3525
3526 return c;
3527}
3528
3529static bool allow_tsx_force_abort = true;
3530
3531static struct event_constraint *
3532tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3533 struct perf_event *event)
3534{
3535 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
3536
	/*
	 * Without TFA we must not use PMC3.
	 */
3540 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
3541 c = dyn_constraint(cpuc, c, idx);
3542 c->idxmsk64 &= ~(1ULL << 3);
3543 c->weight--;
3544 }
3545
3546 return c;
3547}
3548
/*
 * Broadwell:
 *
 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
 * the two to enforce a minimum period of 128 (the smallest value that has bits
 * 0-5 cleared and >= 100).
 *
 * Because of how the code in x86_perf_event_set_period() works, the truncation
 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
 * to make up for the 'lost' events due to carrying the 'error' in period_left.
 *
 * Therefore the effective (average) period matches the requested period,
 * despite coarser hardware granularity.
 */
3564static u64 bdw_limit_period(struct perf_event *event, u64 left)
3565{
3566 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3567 X86_CONFIG(.event=0xc0, .umask=0x01)) {
3568 if (left < 128)
3569 left = 128;
3570 left &= ~0x3fULL;
3571 }
3572 return left;
3573}
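
/*
 * Worked example (illustrative only): a requested period of 100 is first
 * raised to 128, while 200 is truncated to 192:
 *
 *	left = 100  ->  max(100, 128) & ~0x3f  ->  128
 *	left = 200  ->  200 & ~0x3f            ->  192
 *
 * Both results have bits 0-5 clear (BDM55) and are >= 100 (BDM11).
 */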
3574
3575static u64 nhm_limit_period(struct perf_event *event, u64 left)
3576{
3577 return max(left, 32ULL);
3578}
3579
3580PMU_FORMAT_ATTR(event, "config:0-7" );
3581PMU_FORMAT_ATTR(umask, "config:8-15" );
3582PMU_FORMAT_ATTR(edge, "config:18" );
3583PMU_FORMAT_ATTR(pc, "config:19" );
3584PMU_FORMAT_ATTR(any, "config:21" );
3585PMU_FORMAT_ATTR(inv, "config:23" );
3586PMU_FORMAT_ATTR(cmask, "config:24-31" );
3587PMU_FORMAT_ATTR(in_tx, "config:32");
3588PMU_FORMAT_ATTR(in_tx_cp, "config:33");
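
/*
 * These format strings are what make raw event syntax such as
 *
 *	perf stat -e cpu/event=0xc0,umask=0x01,cmask=16,inv=1/ ...
 *
 * work: each attribute names the config bit-field its value lands in
 * (event -> config:0-7, umask -> config:8-15, and so on). The example
 * above assembles the same 0x108001c0-style encoding used by the PEBS
 * aliases earlier in this file.
 */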
3589
3590static struct attribute *intel_arch_formats_attr[] = {
3591 &format_attr_event.attr,
3592 &format_attr_umask.attr,
3593 &format_attr_edge.attr,
3594 &format_attr_pc.attr,
3595 &format_attr_inv.attr,
3596 &format_attr_cmask.attr,
3597 NULL,
3598};
3599
3600ssize_t intel_event_sysfs_show(char *page, u64 config)
3601{
3602 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
3603
3604 return x86_event_sysfs_show(page, config, event);
3605}
3606
3607static struct intel_shared_regs *allocate_shared_regs(int cpu)
3608{
3609 struct intel_shared_regs *regs;
3610 int i;
3611
3612 regs = kzalloc_node(sizeof(struct intel_shared_regs),
3613 GFP_KERNEL, cpu_to_node(cpu));
3614 if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
3618 for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);
3620
3621 regs->core_id = -1;
3622 }
3623 return regs;
3624}
3625
3626static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
3627{
3628 struct intel_excl_cntrs *c;
3629
3630 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
3631 GFP_KERNEL, cpu_to_node(cpu));
3632 if (c) {
3633 raw_spin_lock_init(&c->lock);
3634 c->core_id = -1;
3635 }
3636 return c;
3637}
3638
3639
3640int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
3641{
3642 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
3643
3644 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
3645 cpuc->shared_regs = allocate_shared_regs(cpu);
3646 if (!cpuc->shared_regs)
3647 goto err;
3648 }
3649
3650 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
3651 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
3652
3653 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
3654 if (!cpuc->constraint_list)
3655 goto err_shared_regs;
3656 }
3657
3658 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3659 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
3660 if (!cpuc->excl_cntrs)
3661 goto err_constraint_list;
3662
3663 cpuc->excl_thread_id = 0;
3664 }
3665
3666 return 0;
3667
3668err_constraint_list:
3669 kfree(cpuc->constraint_list);
3670 cpuc->constraint_list = NULL;
3671
3672err_shared_regs:
3673 kfree(cpuc->shared_regs);
3674 cpuc->shared_regs = NULL;
3675
3676err:
3677 return -ENOMEM;
3678}
3679
3680static int intel_pmu_cpu_prepare(int cpu)
3681{
3682 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
3683}
3684
3685static void flip_smm_bit(void *data)
3686{
3687 unsigned long set = *(unsigned long *)data;
3688
3689 if (set > 0) {
3690 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
3691 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3692 } else {
3693 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
3694 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3695 }
3696}
3697
3698static void intel_pmu_cpu_starting(int cpu)
3699{
3700 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3701 int core_id = topology_core_id(cpu);
3702 int i;
3703
3704 init_debug_store_on_cpu(cpu);
3705
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
3708 intel_pmu_lbr_reset();
3709
3710 cpuc->lbr_sel = NULL;
3711
3712 if (x86_pmu.flags & PMU_FL_TFA) {
3713 WARN_ON_ONCE(cpuc->tfa_shadow);
3714 cpuc->tfa_shadow = ~0ULL;
3715 intel_set_tfa(cpuc, false);
3716 }
3717
3718 if (x86_pmu.version > 1)
3719 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3720
3721 if (x86_pmu.counter_freezing)
3722 enable_counter_freeze();
3723
3724 if (!cpuc->shared_regs)
3725 return;
3726
3727 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
3728 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3729 struct intel_shared_regs *pc;
3730
3731 pc = per_cpu(cpu_hw_events, i).shared_regs;
3732 if (pc && pc->core_id == core_id) {
3733 cpuc->kfree_on_online[0] = cpuc->shared_regs;
3734 cpuc->shared_regs = pc;
3735 break;
3736 }
3737 }
3738 cpuc->shared_regs->core_id = core_id;
3739 cpuc->shared_regs->refcnt++;
3740 }
3741
3742 if (x86_pmu.lbr_sel_map)
3743 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
3744
3745 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3746 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3747 struct cpu_hw_events *sibling;
3748 struct intel_excl_cntrs *c;
3749
3750 sibling = &per_cpu(cpu_hw_events, i);
3751 c = sibling->excl_cntrs;
3752 if (c && c->core_id == core_id) {
3753 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3754 cpuc->excl_cntrs = c;
3755 if (!sibling->excl_thread_id)
3756 cpuc->excl_thread_id = 1;
3757 break;
3758 }
3759 }
3760 cpuc->excl_cntrs->core_id = core_id;
3761 cpuc->excl_cntrs->refcnt++;
3762 }
3763}
3764
3765static void free_excl_cntrs(struct cpu_hw_events *cpuc)
3766{
3767 struct intel_excl_cntrs *c;
3768
3769 c = cpuc->excl_cntrs;
3770 if (c) {
3771 if (c->core_id == -1 || --c->refcnt == 0)
3772 kfree(c);
3773 cpuc->excl_cntrs = NULL;
3774 }
3775
3776 kfree(cpuc->constraint_list);
3777 cpuc->constraint_list = NULL;
3778}
3779
3780static void intel_pmu_cpu_dying(int cpu)
3781{
3782 fini_debug_store_on_cpu(cpu);
3783
3784 if (x86_pmu.counter_freezing)
3785 disable_counter_freeze();
3786}
3787
3788void intel_cpuc_finish(struct cpu_hw_events *cpuc)
3789{
3790 struct intel_shared_regs *pc;
3791
3792 pc = cpuc->shared_regs;
3793 if (pc) {
3794 if (pc->core_id == -1 || --pc->refcnt == 0)
3795 kfree(pc);
3796 cpuc->shared_regs = NULL;
3797 }
3798
3799 free_excl_cntrs(cpuc);
3800}
3801
3802static void intel_pmu_cpu_dead(int cpu)
3803{
3804 intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
3805}
3806
3807static void intel_pmu_sched_task(struct perf_event_context *ctx,
3808 bool sched_in)
3809{
3810 intel_pmu_pebs_sched_task(ctx, sched_in);
3811 intel_pmu_lbr_sched_task(ctx, sched_in);
3812}
3813
3814static int intel_pmu_check_period(struct perf_event *event, u64 value)
3815{
3816 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3817}
3818
3819PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3820
3821PMU_FORMAT_ATTR(ldlat, "config1:0-15");
3822
3823PMU_FORMAT_ATTR(frontend, "config1:0-23");
3824
3825static struct attribute *intel_arch3_formats_attr[] = {
3826 &format_attr_event.attr,
3827 &format_attr_umask.attr,
3828 &format_attr_edge.attr,
3829 &format_attr_pc.attr,
3830 &format_attr_any.attr,
3831 &format_attr_inv.attr,
3832 &format_attr_cmask.attr,
3833 NULL,
3834};
3835
3836static struct attribute *hsw_format_attr[] = {
3837 &format_attr_in_tx.attr,
3838 &format_attr_in_tx_cp.attr,
3839 &format_attr_offcore_rsp.attr,
3840 &format_attr_ldlat.attr,
3841 NULL
3842};
3843
3844static struct attribute *nhm_format_attr[] = {
3845 &format_attr_offcore_rsp.attr,
3846 &format_attr_ldlat.attr,
3847 NULL
3848};
3849
3850static struct attribute *slm_format_attr[] = {
3851 &format_attr_offcore_rsp.attr,
3852 NULL
3853};
3854
3855static struct attribute *skl_format_attr[] = {
3856 &format_attr_frontend.attr,
3857 NULL,
3858};
3859
3860static __initconst const struct x86_pmu core_pmu = {
3861 .name = "core",
3862 .handle_irq = x86_pmu_handle_irq,
3863 .disable_all = x86_pmu_disable_all,
3864 .enable_all = core_pmu_enable_all,
3865 .enable = core_pmu_enable_event,
3866 .disable = x86_pmu_disable_event,
3867 .hw_config = core_pmu_hw_config,
3868 .schedule_events = x86_schedule_events,
3869 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3870 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3871 .event_map = intel_pmu_event_map,
3872 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3873 .apic = 1,
3874 .large_pebs_flags = LARGE_PEBS_FLAGS,
3875
	/*
	 * Intel PMCs cannot be accessed sanely above 32-bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
3881 .max_period = (1ULL<<31) - 1,
3882 .get_event_constraints = intel_get_event_constraints,
3883 .put_event_constraints = intel_put_event_constraints,
3884 .event_constraints = intel_core_event_constraints,
3885 .guest_get_msrs = core_guest_get_msrs,
3886 .format_attrs = intel_arch_formats_attr,
3887 .events_sysfs_show = intel_event_sysfs_show,
3888
	/*
	 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
	 * together with PMU version 1 and thus be using core_pmu with
	 * shared_regs. We need following callbacks here to allocate
	 * it properly.
	 *
	 * XXX: should we move this somewhere else?
	 */
3895 .cpu_prepare = intel_pmu_cpu_prepare,
3896 .cpu_starting = intel_pmu_cpu_starting,
3897 .cpu_dying = intel_pmu_cpu_dying,
3898 .cpu_dead = intel_pmu_cpu_dead,
3899
3900 .check_period = intel_pmu_check_period,
3901};
3902
3903static __initconst const struct x86_pmu intel_pmu = {
3904 .name = "Intel",
3905 .handle_irq = intel_pmu_handle_irq,
3906 .disable_all = intel_pmu_disable_all,
3907 .enable_all = intel_pmu_enable_all,
3908 .enable = intel_pmu_enable_event,
3909 .disable = intel_pmu_disable_event,
3910 .add = intel_pmu_add_event,
3911 .del = intel_pmu_del_event,
3912 .read = intel_pmu_read_event,
3913 .hw_config = intel_pmu_hw_config,
3914 .schedule_events = x86_schedule_events,
3915 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
3916 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
3917 .event_map = intel_pmu_event_map,
3918 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
3919 .apic = 1,
3920 .large_pebs_flags = LARGE_PEBS_FLAGS,
3921
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
3926 .max_period = (1ULL << 31) - 1,
3927 .get_event_constraints = intel_get_event_constraints,
3928 .put_event_constraints = intel_put_event_constraints,
3929 .pebs_aliases = intel_pebs_aliases_core2,
3930
3931 .format_attrs = intel_arch3_formats_attr,
3932 .events_sysfs_show = intel_event_sysfs_show,
3933
3934 .cpu_prepare = intel_pmu_cpu_prepare,
3935 .cpu_starting = intel_pmu_cpu_starting,
3936 .cpu_dying = intel_pmu_cpu_dying,
3937 .cpu_dead = intel_pmu_cpu_dead,
3938
3939 .guest_get_msrs = intel_guest_get_msrs,
3940 .sched_task = intel_pmu_sched_task,
3941
3942 .check_period = intel_pmu_check_period,
3943};
3944
3945static __init void intel_clovertown_quirk(void)
3946{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * BJ68 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ69 is not work-around-able, HOWEVER, it requires the use of
	 * MSR_CORE_PERF_GLOBAL_CTRL, which we don't use.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
3966 pr_warn("PEBS disabled due to CPU errata\n");
3967 x86_pmu.pebs = 0;
3968 x86_pmu.pebs_constraints = NULL;
3969}
3970
3971static const struct x86_cpu_desc isolation_ucodes[] = {
3972 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE, 3, 0x0000001f),
3973 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT, 1, 0x0000001e),
3974 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015),
3975 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
3976 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
3977 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE, 4, 0x00000023),
3978 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E, 1, 0x00000014),
3979 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010),
3980 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009),
3981 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 4, 0x0f000009),
3982 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 5, 0x0e000002),
3983 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
3984 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
3985 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
3986 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE, 3, 0x0000007c),
3987 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP, 3, 0x0000007c),
3988 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 9, 0x0000004e),
3989 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 9, 0x0000004e),
3990 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 10, 0x0000004e),
3991 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 11, 0x0000004e),
3992 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 12, 0x0000004e),
3993 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 10, 0x0000004e),
3994 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 11, 0x0000004e),
3995 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 12, 0x0000004e),
3996 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 13, 0x0000004e),
3997 {}
3998};
3999
4000static void intel_check_pebs_isolation(void)
4001{
4002 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
4003}
4004
4005static __init void intel_pebs_isolation_quirk(void)
4006{
4007 WARN_ON_ONCE(x86_pmu.check_microcode);
4008 x86_pmu.check_microcode = intel_check_pebs_isolation;
4009 intel_check_pebs_isolation();
4010}
4011
4012static const struct x86_cpu_desc pebs_ucodes[] = {
4013 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
4014 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
4015 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
4016 {}
4017};
4018
4019static bool intel_snb_pebs_broken(void)
4020{
4021 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
4022}
4023
4024static void intel_snb_check_microcode(void)
4025{
4026 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
4027 return;
4028
	/*
	 * Serialized by the microcode lock vs concurrent updates.
	 */
4032 if (x86_pmu.pebs_broken) {
4033 pr_info("PEBS enabled due to microcode update\n");
4034 x86_pmu.pebs_broken = 0;
4035 } else {
4036 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
4037 x86_pmu.pebs_broken = 1;
4038 }
4039}
4040
4041static bool is_lbr_from(unsigned long msr)
4042{
4043 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
4044
4045 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
4046}
4047
/*
 * Under certain circumstances, accessing certain MSRs may cause #GP.
 * This function tests whether the input MSR can be safely accessed.
 */
4052static bool check_msr(unsigned long msr, u64 mask)
4053{
4054 u64 val_old, val_new, val_tmp;
4055
	/*
	 * Disable the check for real HW, so we don't
	 * mess with potentially enabled registers:
	 */
4060 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
4061 return true;
4062
	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
4068 if (rdmsrl_safe(msr, &val_old))
4069 return false;
4070
	/*
	 * Only change the bits which can be updated by wrmsrl.
	 */
4074 val_tmp = val_old ^ mask;
4075
4076 if (is_lbr_from(msr))
4077 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
4078
4079 if (wrmsrl_safe(msr, val_tmp) ||
4080 rdmsrl_safe(msr, &val_new))
4081 return false;
4082
	/*
	 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
	 * should equal rdmsrl()'s even with the quirk.
	 */
4087 if (val_new != val_tmp)
4088 return false;
4089
4090 if (is_lbr_from(msr))
4091 val_old = lbr_from_signext_quirk_wr(val_old);
4092
	/*
	 * Here it's sure that the MSR can be safely accessed.
	 * Restore the old value and return.
	 */
4096 wrmsrl(msr, val_old);
4097
4098 return true;
4099}
4100
4101static __init void intel_sandybridge_quirk(void)
4102{
4103 x86_pmu.check_microcode = intel_snb_check_microcode;
4104 cpus_read_lock();
4105 intel_snb_check_microcode();
4106 cpus_read_unlock();
4107}
4108
4109static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
4110 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
4111 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
4112 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
4113 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
4114 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
4115 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
4116 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
4117};
4118
4119static __init void intel_arch_events_quirk(void)
4120{
4121 int bit;
4122
	/* disable events that were reported as not present by cpuid */
4124 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
4125 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
4126 pr_warn("CPUID marked event: \'%s\' unavailable\n",
4127 intel_arch_events_map[bit].name);
4128 }
4129}
4130
4131static __init void intel_nehalem_quirk(void)
4132{
4133 union cpuid10_ebx ebx;
4134
4135 ebx.full = x86_pmu.events_maskl;
4136 if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
4143 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
4144 ebx.split.no_branch_misses_retired = 0;
4145 x86_pmu.events_maskl = ebx.full;
4146 pr_info("CPU erratum AAJ80 worked around\n");
4147 }
4148}
4149
4150static const struct x86_cpu_desc counter_freezing_ucodes[] = {
4151 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e),
4152 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e),
4153 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008),
4154 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X, 1, 0x00000028),
4155 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028),
4156 INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006),
4157 {}
4158};
4159
4160static bool intel_counter_freezing_broken(void)
4161{
4162 return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
4163}
4164
4165static __init void intel_counter_freezing_quirk(void)
4166{
	/* Check if it's already disabled */
4168 if (disable_counter_freezing)
4169 return;
4170
	/*
	 * If the system starts with the wrong ucode, leave the
	 * counter-freezing feature permanently disabled.
	 */
4175 if (intel_counter_freezing_broken()) {
		pr_info("PMU counter freezing disabled due to CPU errata, "
			"please upgrade microcode\n");
4178 x86_pmu.counter_freezing = false;
4179 x86_pmu.handle_irq = intel_pmu_handle_irq;
4180 }
4181}
4182
/*
 * enable software workaround for errata:
 * SNB: BJ122
 * IVB: BV98
 * HSW: HSD29
 *
 * Only needed when HT is enabled. However detecting
 * if HT is enabled is difficult (model specific). So instead,
 * we enable the workaround in the early boot, and verify if
 * it is needed in a later initcall phase once we have valid
 * topology information to check if HT is actually enabled
 */
4195static __init void intel_ht_bug(void)
4196{
4197 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
4198
4199 x86_pmu.start_scheduling = intel_start_scheduling;
4200 x86_pmu.commit_scheduling = intel_commit_scheduling;
4201 x86_pmu.stop_scheduling = intel_stop_scheduling;
4202}
4203
4204EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
4205EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
4206
4207
4208EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
4209EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
4210EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
4211EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
4212EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
4213EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
4214EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
4215EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
4216EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
4217EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
4218EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
4219EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
4220
4221static struct attribute *hsw_events_attrs[] = {
4222 EVENT_PTR(td_slots_issued),
4223 EVENT_PTR(td_slots_retired),
4224 EVENT_PTR(td_fetch_bubbles),
4225 EVENT_PTR(td_total_slots),
4226 EVENT_PTR(td_total_slots_scale),
4227 EVENT_PTR(td_recovery_bubbles),
4228 EVENT_PTR(td_recovery_bubbles_scale),
4229 NULL
4230};
4231
4232static struct attribute *hsw_mem_events_attrs[] = {
4233 EVENT_PTR(mem_ld_hsw),
4234 EVENT_PTR(mem_st_hsw),
4235 NULL,
4236};
4237
4238static struct attribute *hsw_tsx_events_attrs[] = {
4239 EVENT_PTR(tx_start),
4240 EVENT_PTR(tx_commit),
4241 EVENT_PTR(tx_abort),
4242 EVENT_PTR(tx_capacity),
4243 EVENT_PTR(tx_conflict),
4244 EVENT_PTR(el_start),
4245 EVENT_PTR(el_commit),
4246 EVENT_PTR(el_abort),
4247 EVENT_PTR(el_capacity),
4248 EVENT_PTR(el_conflict),
4249 EVENT_PTR(cycles_t),
4250 EVENT_PTR(cycles_ct),
4251 NULL
4252};
4253
4254EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
4255EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
4256EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
4257EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
4258
4259static struct attribute *icl_events_attrs[] = {
4260 EVENT_PTR(mem_ld_hsw),
4261 EVENT_PTR(mem_st_hsw),
4262 NULL,
4263};
4264
4265static struct attribute *icl_tsx_events_attrs[] = {
4266 EVENT_PTR(tx_start),
4267 EVENT_PTR(tx_abort),
4268 EVENT_PTR(tx_commit),
4269 EVENT_PTR(tx_capacity_read),
4270 EVENT_PTR(tx_capacity_write),
4271 EVENT_PTR(tx_conflict),
4272 EVENT_PTR(el_start),
4273 EVENT_PTR(el_abort),
4274 EVENT_PTR(el_commit),
4275 EVENT_PTR(el_capacity_read),
4276 EVENT_PTR(el_capacity_write),
4277 EVENT_PTR(el_conflict),
4278 EVENT_PTR(cycles_t),
4279 EVENT_PTR(cycles_ct),
4280 NULL,
4281};
4282
4283static ssize_t freeze_on_smi_show(struct device *cdev,
4284 struct device_attribute *attr,
4285 char *buf)
4286{
4287 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
4288}
4289
4290static DEFINE_MUTEX(freeze_on_smi_mutex);
4291
4292static ssize_t freeze_on_smi_store(struct device *cdev,
4293 struct device_attribute *attr,
4294 const char *buf, size_t count)
4295{
4296 unsigned long val;
4297 ssize_t ret;
4298
4299 ret = kstrtoul(buf, 0, &val);
4300 if (ret)
4301 return ret;
4302
4303 if (val > 1)
4304 return -EINVAL;
4305
4306 mutex_lock(&freeze_on_smi_mutex);
4307
4308 if (x86_pmu.attr_freeze_on_smi == val)
4309 goto done;
4310
4311 x86_pmu.attr_freeze_on_smi = val;
4312
4313 get_online_cpus();
4314 on_each_cpu(flip_smm_bit, &val, 1);
4315 put_online_cpus();
4316done:
4317 mutex_unlock(&freeze_on_smi_mutex);
4318
4319 return count;
4320}
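
/*
 * Usage example: the attribute pair above appears under the PMU device,
 * so (path assumed from the standard sysfs layout)
 *
 *	echo 1 > /sys/devices/cpu/freeze_on_smi
 *
 * sets DEBUGCTLMSR_FREEZE_IN_SMM on every online CPU via the IPI
 * broadcast of flip_smm_bit().
 */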
4321
4322static void update_tfa_sched(void *ignored)
4323{
4324 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4325
	/*
	 * Check if PMC3 is used
	 * and if so force schedule out for all event types all contexts
	 */
4330 if (test_bit(3, cpuc->active_mask))
4331 perf_pmu_resched(x86_get_pmu());
4332}
4333
4334static ssize_t show_sysctl_tfa(struct device *cdev,
4335 struct device_attribute *attr,
4336 char *buf)
4337{
4338 return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
4339}
4340
4341static ssize_t set_sysctl_tfa(struct device *cdev,
4342 struct device_attribute *attr,
4343 const char *buf, size_t count)
4344{
4345 bool val;
4346 ssize_t ret;
4347
4348 ret = kstrtobool(buf, &val);
4349 if (ret)
4350 return ret;
4351
	/* no change */
4353 if (val == allow_tsx_force_abort)
4354 return count;
4355
4356 allow_tsx_force_abort = val;
4357
4358 get_online_cpus();
4359 on_each_cpu(update_tfa_sched, NULL, 1);
4360 put_online_cpus();
4361
4362 return count;
4363}
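
/*
 * Usage example (path assumed from the standard sysfs layout):
 *
 *	echo 0 > /sys/devices/cpu/allow_tsx_force_abort
 *
 * makes PMC3 unavailable to perf (see tfa_get_event_constraints()) so
 * that TSX transactions are no longer force-aborted while it is in use;
 * update_tfa_sched() reschedules any events currently occupying PMC3.
 */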
4364
4365
4366static DEVICE_ATTR_RW(freeze_on_smi);
4367
4368static ssize_t branches_show(struct device *cdev,
4369 struct device_attribute *attr,
4370 char *buf)
4371{
4372 return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
4373}
4374
4375static DEVICE_ATTR_RO(branches);
4376
4377static struct attribute *lbr_attrs[] = {
4378 &dev_attr_branches.attr,
4379 NULL
4380};
4381
4382static char pmu_name_str[30];
4383
4384static ssize_t pmu_name_show(struct device *cdev,
4385 struct device_attribute *attr,
4386 char *buf)
4387{
4388 return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
4389}
4390
4391static DEVICE_ATTR_RO(pmu_name);
4392
4393static struct attribute *intel_pmu_caps_attrs[] = {
4394 &dev_attr_pmu_name.attr,
4395 NULL
4396};
4397
4398static DEVICE_ATTR(allow_tsx_force_abort, 0644,
4399 show_sysctl_tfa,
4400 set_sysctl_tfa);
4401
4402static struct attribute *intel_pmu_attrs[] = {
4403 &dev_attr_freeze_on_smi.attr,
4404 &dev_attr_allow_tsx_force_abort.attr,
4405 NULL,
4406};
4407
4408static umode_t
4409tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4410{
4411 return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
4412}
4413
4414static umode_t
4415pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4416{
4417 return x86_pmu.pebs ? attr->mode : 0;
4418}
4419
4420static umode_t
4421lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4422{
4423 return x86_pmu.lbr_nr ? attr->mode : 0;
4424}
4425
4426static umode_t
extra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4428{
4429 return x86_pmu.version >= 2 ? attr->mode : 0;
4430}
4431
4432static umode_t
4433default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
4434{
4435 if (attr == &dev_attr_allow_tsx_force_abort.attr)
4436 return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
4437
4438 return attr->mode;
4439}
4440
4441static struct attribute_group group_events_td = {
4442 .name = "events",
4443};
4444
4445static struct attribute_group group_events_mem = {
4446 .name = "events",
4447 .is_visible = pebs_is_visible,
4448};
4449
4450static struct attribute_group group_events_tsx = {
4451 .name = "events",
4452 .is_visible = tsx_is_visible,
4453};
4454
4455static struct attribute_group group_caps_gen = {
4456 .name = "caps",
4457 .attrs = intel_pmu_caps_attrs,
4458};
4459
4460static struct attribute_group group_caps_lbr = {
4461 .name = "caps",
4462 .attrs = lbr_attrs,
4463 .is_visible = lbr_is_visible,
4464};
4465
4466static struct attribute_group group_format_extra = {
4467 .name = "format",
	.is_visible = extra_is_visible,
4469};
4470
4471static struct attribute_group group_format_extra_skl = {
4472 .name = "format",
	.is_visible = extra_is_visible,
4474};
4475
4476static struct attribute_group group_default = {
4477 .attrs = intel_pmu_attrs,
4478 .is_visible = default_is_visible,
4479};
4480
4481static const struct attribute_group *attr_update[] = {
4482 &group_events_td,
4483 &group_events_mem,
4484 &group_events_tsx,
4485 &group_caps_gen,
4486 &group_caps_lbr,
4487 &group_format_extra,
4488 &group_format_extra_skl,
4489 &group_default,
4490 NULL,
4491};
4492
4493static struct attribute *empty_attrs;
4494
4495__init int intel_pmu_init(void)
4496{
4497 struct attribute **extra_skl_attr = &empty_attrs;
4498 struct attribute **extra_attr = &empty_attrs;
4499 struct attribute **td_attr = &empty_attrs;
4500 struct attribute **mem_attr = &empty_attrs;
4501 struct attribute **tsx_attr = &empty_attrs;
4502 union cpuid10_edx edx;
4503 union cpuid10_eax eax;
4504 union cpuid10_ebx ebx;
4505 struct event_constraint *c;
4506 unsigned int unused;
4507 struct extra_reg *er;
4508 bool pmem = false;
4509 int version, i;
4510 char *name;
4511
4512 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
4513 switch (boot_cpu_data.x86) {
4514 case 0x6:
4515 return p6_pmu_init();
4516 case 0xb:
4517 return knc_pmu_init();
4518 case 0xf:
4519 return p4_pmu_init();
4520 }
4521 return -ENODEV;
4522 }
4523
	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
4528 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
4529 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
4530 return -ENODEV;
4531
4532 version = eax.split.version_id;
4533 if (version < 2)
4534 x86_pmu = core_pmu;
4535 else
4536 x86_pmu = intel_pmu;
4537
4538 x86_pmu.version = version;
4539 x86_pmu.num_counters = eax.split.num_counters;
4540 x86_pmu.cntval_bits = eax.split.bit_width;
4541 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
4542
4543 x86_pmu.events_maskl = ebx.full;
4544 x86_pmu.events_mask_len = eax.split.mask_length;
4545
4546 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
4547
	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events, when not running in a hypervisor:
	 */
4552 if (version > 1) {
4553 int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
4554
4555 x86_pmu.num_counters_fixed =
4556 max((int)edx.split.num_counters_fixed, assume);
4557 }
4558
4559 if (version >= 4)
4560 x86_pmu.counter_freezing = !disable_counter_freezing;
4561
4562 if (boot_cpu_has(X86_FEATURE_PDCM)) {
4563 u64 capabilities;
4564
4565 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
4566 x86_pmu.intel_cap.capabilities = capabilities;
4567 }
4568
4569 intel_ds_init();
4570
4571 x86_add_quirk(intel_arch_events_quirk);
4572
	/*
	 * Install the hw-cache-events table:
	 */
4576 switch (boot_cpu_data.x86_model) {
4577 case INTEL_FAM6_CORE_YONAH:
4578 pr_cont("Core events, ");
4579 name = "core";
4580 break;
4581
4582 case INTEL_FAM6_CORE2_MEROM:
4583 x86_add_quirk(intel_clovertown_quirk);
		/* fall through */
4585
4586 case INTEL_FAM6_CORE2_MEROM_L:
4587 case INTEL_FAM6_CORE2_PENRYN:
4588 case INTEL_FAM6_CORE2_DUNNINGTON:
4589 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
4590 sizeof(hw_cache_event_ids));
4591
4592 intel_pmu_lbr_init_core();
4593
4594 x86_pmu.event_constraints = intel_core2_event_constraints;
4595 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
4596 pr_cont("Core2 events, ");
4597 name = "core2";
4598 break;
4599
4600 case INTEL_FAM6_NEHALEM:
4601 case INTEL_FAM6_NEHALEM_EP:
4602 case INTEL_FAM6_NEHALEM_EX:
4603 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
4604 sizeof(hw_cache_event_ids));
4605 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4606 sizeof(hw_cache_extra_regs));
4607
4608 intel_pmu_lbr_init_nhm();
4609
4610 x86_pmu.event_constraints = intel_nehalem_event_constraints;
4611 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4612 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4613 x86_pmu.extra_regs = intel_nehalem_extra_regs;
4614 x86_pmu.limit_period = nhm_limit_period;
4615
4616 mem_attr = nhm_mem_events_attrs;
4617
		/* UOPS_ISSUED.STALLED_CYCLES */
4619 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4620 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4622 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4623 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		x86_add_quirk(intel_nehalem_quirk);
		x86_pmu.pebs_no_tlb = 1;
		extra_attr = nhm_format_attr;

		pr_cont("Nehalem events, ");
		name = "nehalem";
		break;

	case INTEL_FAM6_ATOM_BONNELL:
	case INTEL_FAM6_ATOM_BONNELL_MID:
	case INTEL_FAM6_ATOM_SALTWELL:
	case INTEL_FAM6_ATOM_SALTWELL_MID:
	case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
		pr_cont("Atom events, ");
		name = "bonnell";
		break;

	case INTEL_FAM6_ATOM_SILVERMONT:
	case INTEL_FAM6_ATOM_SILVERMONT_X:
	case INTEL_FAM6_ATOM_SILVERMONT_MID:
	case INTEL_FAM6_ATOM_AIRMONT:
	case INTEL_FAM6_ATOM_AIRMONT_MID:
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_slm();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		td_attr = slm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Silvermont events, ");
		name = "silvermont";
		break;

	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_X:
		x86_add_quirk(intel_counter_freezing_quirk);
		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
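		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 * :pp is identical to :ppp
		 */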
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		td_attr = glm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Goldmont events, ");
		name = "goldmont";
		break;

	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
		x86_add_quirk(intel_counter_freezing_quirk);
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
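		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */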
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_PEBS_ALL;
		x86_pmu.get_event_constraints = glp_get_event_constraints;
		td_attr = glm_events_attrs;
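		/* Goldmont Plus has 4-wide pipeline */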
		event_attr_td_total_slots_scale_glm.event_str = "4";
		extra_attr = slm_format_attr;
		pr_cont("Goldmont plus events, ");
		name = "goldmont_plus";
		break;

	case INTEL_FAM6_ATOM_TREMONT_X:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_tnt_extra_regs;
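		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */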
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.get_event_constraints = tnt_get_event_constraints;
		extra_attr = slm_format_attr;
		pr_cont("Tremont events, ");
		name = "tremont";
		break;

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		extra_attr = nhm_format_attr;
		pr_cont("Westmere events, ");
		name = "westmere";
		break;

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:
		x86_add_quirk(intel_sandybridge_quirk);
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

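		/* both OFFCORE_RSP MSRs are available; no counter sharing across SMT siblings */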
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		td_attr = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("SandyBridge events, ");
		name = "sandybridge";
		break;

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
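		/* dTLB-load-misses on IVB is different than SNB */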
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		td_attr = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("IvyBridge events, ");
		name = "ivybridge";
		break;

	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
		x86_add_quirk(intel_ht_bug);
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;

		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.lbr_double_abort = true;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Haswell events, ");
		name = "haswell";
		break;

	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_XEON_D:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_BROADWELL_X:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

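		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */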
		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] =
			HSW_DEMAND_READ | BDW_L3_MISS | HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] =
			HSW_DEMAND_WRITE | BDW_L3_MISS | HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] =
			HSW_DEMAND_READ | BDW_L3_MISS_LOCAL | HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] =
			HSW_DEMAND_WRITE | BDW_L3_MISS_LOCAL | HSW_SNOOP_DRAM;

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_bdw_event_constraints;
		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;

		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.limit_period = bdw_limit_period;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Broadwell events, ");
		name = "broadwell";
		break;

	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		memcpy(hw_cache_event_ids,
		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs,
		       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_knl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_knl_extra_regs;

		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		extra_attr = slm_format_attr;
		pr_cont("Knights Landing/Mill events, ");
		name = "knights-landing";
		break;

	case INTEL_FAM6_SKYLAKE_X:
		pmem = true;
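		/* fall through */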
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_skl();

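		/* INT_MISC.RECOVERY_CYCLES has umask 0x01 on Skylake */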
		event_attr_td_recovery_bubbles.event_str_noht =
			"event=0xd,umask=0x1,cmask=1";
		event_attr_td_recovery_bubbles.event_str_ht =
			"event=0xd,umask=0x1,cmask=1,any=1";

		x86_pmu.event_constraints = intel_skl_event_constraints;
		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_skl_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
		x86_pmu.pebs_prec_dist = true;

		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		intel_pmu_pebs_data_source_skl(pmem);

		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
			x86_pmu.flags |= PMU_FL_TFA;
			x86_pmu.get_event_constraints = tfa_get_event_constraints;
			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
		}

		pr_cont("Skylake events, ");
		name = "skylake";
		break;

	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_ICELAKE_XEON_D:
		pmem = true;
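		/* fall through */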
	case INTEL_FAM6_ICELAKE_MOBILE:
	case INTEL_FAM6_ICELAKE_DESKTOP:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_icl_event_constraints;
		x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_icl_extra_regs;
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = icl_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = icl_events_attrs;
		tsx_attr = icl_tsx_events_attrs;
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
		x86_pmu.lbr_pt_coexist = true;
		intel_pmu_pebs_data_source_skl(pmem);
		pr_cont("Icelake events, ");
		name = "icelake";
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			name = "generic_arch_v1";
			break;
		default:
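			/*
			 * default constraints for v2 and up
			 */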
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v2+";
			break;
		}
	}

	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

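	/* hook up the model-specific attribute groups selected above */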
	group_events_td.attrs = td_attr;
	group_events_mem.attrs = mem_attr;
	group_events_tsx.attrs = tsx_attr;
	group_format_extra.attrs = extra_attr;
	group_format_extra_skl.attrs = extra_skl_attr;

	x86_pmu.attr_update = attr_update;

	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
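		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */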
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask == FIXED_EVENT_FLAGS
			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			}
			c->idxmsk64 &=
				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
			c->weight = hweight64(c->idxmsk64);
		}
	}

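	/*
	 * Access LBR MSR may cause #GP under certain circumstances.
	 * E.g. KVM doesn't support LBR MSR
	 * Check all LBR MSR here.
	 * Disable LBR access if any LBR MSRs can not be accessed.
	 */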
	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}

	if (x86_pmu.lbr_nr)
		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);

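	/*
	 * Access extra MSRs may cause #GP under certain circumstances.
	 * E.g. KVM doesn't support offcore event
	 * Check all extra_regs here.
	 */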
	if (x86_pmu.extra_regs) {
		for (er = x86_pmu.extra_regs; er->msr; er++) {
			er->extra_msr_access = check_msr(er->msr, 0x11UL);
			/* Disable LBR select mapping */
			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
				x86_pmu.lbr_sel_map = NULL;
		}
	}

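	/* Support full width counters using alternative MSR range */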
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}

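	/*
	 * For arch perfmon 4 use counter freezing to avoid
	 * several MSR accesses in the PMI.
	 */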
	if (x86_pmu.counter_freezing)
		x86_pmu.handle_irq = intel_pmu_handle_irq_v4;

	return 0;
}

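/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled
 * If HT is off, then we disable the workaround
 */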
static __init int fixup_ht_bug(void)
{
	int c;

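	/* problem not present on this CPU model, nothing to do */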
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	cpus_read_lock();

	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	cpus_read_unlock();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}
subsys_initcall(fixup_ht_bug);