// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/cpu_entry_area.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>

#include "../perf_event.h"

/* Waste a full page so it can be mapped into the cpu_entry_area */
DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define PEBS_FIXUP_SIZE		PAGE_SIZE

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */
union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};
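
/*
 * Illustrative decode (not from the SDM): a load status of 0x31 has
 * ld_dse = 0x1, ld_stlb_miss = 1 and ld_locked = 1, i.e. a locked load
 * whose data source is encoding 1 in pebs_data_source[] below and which
 * missed the second-level TLB.
 */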

/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information.
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define LEVEL(x) P(LVLNUM, x)
#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))

/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),	/* 0x00: unknown L3 */
	OP_LH | P(LVL, L1)  | LEVEL(L1)  | P(SNOOP, NONE),	/* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE),	/* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | LEVEL(L2)  | P(SNOOP, NONE),	/* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, NONE),	/* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, MISS),	/* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HIT),	/* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HITM),	/* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),	/* 0x08: remote cache, snoop hit */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM),	/* 0x09: remote cache, snoop hitm */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),		/* 0x0a: local RAM, shared */
	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),	/* 0x0b: remote RAM, shared */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,	/* 0x0c: local RAM, exclusive */
	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS,	/* 0x0d: remote RAM, exclusive */
	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE),	/* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE),	/* 0x0f: uncached */
};

void __init intel_pmu_pebs_data_source_nhm(void)
{
	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
}

void __init intel_pmu_pebs_data_source_skl(bool pmem)
{
	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);

	pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
	pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
	pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
	pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}
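
/*
 * Usage sketch (illustrative): CPU init code for a pmem-capable Skylake
 * derivative would call intel_pmu_pebs_data_source_skl(true), remapping
 * encodings 0x08/0x09 to the PMEM level before any PEBS-LL sample is
 * decoded through load_latency_data() below.
 */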

static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = the store missed the 2nd level TLB,
	 *     so it either hit the walker or the OS;
	 * 0 = it hit the 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
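
/*
 * Worked example (illustrative): status = 0x11 sets st_l1d_hit (bit 0)
 * and st_stlb_miss (bit 4), so the function returns P(OP, STORE) |
 * P(SNOOP, NA) | P(LVL, L1) | P(LVL, HIT) | P(TLB, L2) | P(TLB, MISS).
 */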

static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info only valid for following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}

static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bits 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not support TLB, Lock infos
	 */
	if (x86_pmu.pebs_no_tlb) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}
	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};

union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort		  : 1,
		    rtm_abort		  : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry		  : 1,
		    data_conflict	  : 1,
		    capacity_writes	  : 1,
		    capacity_reads	  : 1;
	};
	u64	value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL

/* Same as HSW, plus TSC */
struct pebs_record_skl {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
	u64 tsc;
};
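
/*
 * Each fixed-format record above strictly extends the previous one;
 * setup_pebs_fixed_sample_data() below relies on this by casting every
 * record to struct pebs_record_skl and only touching the fields that the
 * running pebs_format actually provides.
 */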

void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static DEFINE_PER_CPU(void *, insn_buffer);

static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
	 * all TLB entries for it.
	 */
	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

static void ds_clear_cea(void *cea, size_t size)
{
	unsigned long start = (unsigned long)cea;
	size_t msz = 0;

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);

	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
{
	unsigned int order = get_order(size);
	int node = cpu_to_node(cpu);
	struct page *page;

	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
	return page ? page_address(page) : NULL;
}

static void dsfree_pages(const void *buffer, size_t size)
{
	if (buffer)
		free_pages((unsigned long)buffer, get_order(size));
}

static int alloc_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	size_t bsiz = x86_pmu.pebs_buffer_size;
	int max, node = cpu_to_node(cpu);
	void *buffer, *insn_buff, *cea;

	if (!x86_pmu.pebs)
		return 0;

	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing ip; no need to allocate this
	 * buffer then.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!insn_buff) {
			dsfree_pages(buffer, bsiz);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = insn_buff;
	}
	hwev->ds_pebs_vaddr = buffer;
	/* Update the cpu entry area mapping */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds->pebs_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
	ds->pebs_index = ds->pebs_buffer_base;
	/* Round the buffer down to a whole number of PEBS records. */
	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
	ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
	hwev->ds_pebs_vaddr = NULL;
}

static int alloc_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	void *buffer, *cea;
	int max;

	if (!x86_pmu.bts)
		return 0;

	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}
	hwev->ds_bts_vaddr = buffer;
	/* Update the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds->bts_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
	ds->bts_index = ds->bts_buffer_base;
	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
					max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
					(max / 16) * BTS_RECORD_SIZE;
	return 0;
}
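
/*
 * Illustrative arithmetic, assuming BTS_BUFFER_SIZE is 64KiB: the buffer
 * holds max = 65536 / 24 = 2730 records, and the interrupt threshold is
 * placed (2730 / 16) * 24 = 4080 bytes (170 records) below the absolute
 * maximum, giving the hardware headroom to keep logging while the
 * threshold interrupt is being serviced.
 */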

static void release_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.bts)
		return;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds_clear_cea(cea, BTS_BUFFER_SIZE);
	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
	hwev->ds_bts_vaddr = NULL;
}

static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;

	memset(ds, 0, sizeof(*ds));
	per_cpu(cpu_hw_events, cpu).ds = ds;
	return 0;
}

static void release_ds_buffer(int cpu)
{
	per_cpu(cpu_hw_events, cpu).ds = NULL;
}

void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	for_each_possible_cpu(cpu)
		release_ds_buffer(cpu);

	for_each_possible_cpu(cpu) {
		/*
		 * Again, ignore errors from offline CPUs, they will no longer
		 * observe cpu_hw_events.ds and not program the DS_AREA when
		 * they come up.
		 */
		fini_debug_store_on_cpu(cpu);
	}

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
	}
}

void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_possible_cpu(cpu) {
			/*
			 * Ignore wrmsr_on_cpu() errors for offline CPUs; they
			 * will get this call through intel_pmu_cpu_starting().
			 */
			init_debug_store_on_cpu(cpu);
		}
	}
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	if (config & ARCH_PERFMON_EVENTSEL_INT)
		debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *base, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	unsigned long skip = 0;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top  = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= base)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * BTS leaks kernel addresses in branches across the cpl boundary,
	 * such as traps or system calls, so unless the user is asking for
	 * kernel tracing (and right now it's not possible), we'd need to
	 * filter them out. But first we need to count how many of those we
	 * have in the current batch.
	 */
	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			skip++;
	}

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	rcu_read_lock();
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size *
			      (top - base - skip)))
		goto unlock;

	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			continue;

		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
unlock:
	rcu_read_unlock();
	return 1;
}

static inline void intel_pmu_drain_pebs_buffer(void)
{
	struct pt_regs regs;

	x86_pmu.drain_pebs(&regs);
}

/*
 * PEBS
 */
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glm_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),	    /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),	    /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),	    /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),	    /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),	    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),	    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),	    /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),	    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),	    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),	    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),	    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),	    /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),	    /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),	    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),	    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),	    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),	    /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf),  /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_bdw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),	    /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_skl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),		      /* MEM_TRANS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_icl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL),	/* INST_RETIRED.PREC_DIST */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),	/* SLOTS */

	INTEL_PLD_CONSTRAINT(0x1cd, 0xff),			/* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),	/* MEM_INST_RETIRED.LOAD */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf),	/* MEM_INST_RETIRED.STORE */

	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */

	INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),		/* MEM_INST_RETIRED.* */

	/*
	 * Everything else is handled by PMU_FL_PEBS_ALL, because we
	 * need the full constraints from the main table.
	 */

	EVENT_CONSTRAINT_END
};

struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if (constraint_match(c, event->hw.config)) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	/*
	 * Extended PEBS support
	 * Makes the PEBS code search the normal constraints.
	 */
	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		return NULL;

	return &emptyconstraint;
}

/*
 * We need the sched_task callback even for per-cpu events when we use
 * the large interrupt threshold, such that we can provide PID and TID
 * to PEBS samples.
 */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
	if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
		return false;

	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!sched_in && pebs_needs_sched_cb(cpuc))
		intel_pmu_drain_pebs_buffer();
}

static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	u64 threshold;
	int reserved;

	if (cpuc->n_pebs_via_pt)
		return;

	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
	else
		reserved = x86_pmu.max_pebs_events;

	if (cpuc->n_pebs == cpuc->n_large_pebs) {
		threshold = ds->pebs_absolute_maximum -
			reserved * cpuc->pebs_record_size;
	} else {
		threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
	}

	ds->pebs_interrupt_threshold = threshold;
}
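
/*
 * Illustrative reading of the above: when every PEBS event uses the large
 * threshold, the interrupt line is pulled in by one record slot per
 * reserved counter, so a final record per counter still fits after the
 * threshold PMI; otherwise the threshold sits one record past the base,
 * i.e. the PMI fires for every record.
 */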

static void adaptive_pebs_record_size_update(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 pebs_data_cfg = cpuc->pebs_data_cfg;
	int sz = sizeof(struct pebs_basic);

	if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
		sz += sizeof(struct pebs_meminfo);
	if (pebs_data_cfg & PEBS_DATACFG_GP)
		sz += sizeof(struct pebs_gprs);
	if (pebs_data_cfg & PEBS_DATACFG_XMMS)
		sz += sizeof(struct pebs_xmm);
	if (pebs_data_cfg & PEBS_DATACFG_LBRS)
		sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry);

	cpuc->pebs_record_size = sz;
}
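
/*
 * Size sketch (illustrative; assumes the perf_event.h layouts where
 * pebs_basic and pebs_meminfo are four u64s each and pebs_gprs holds
 * eighteen): PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP yields a record of
 * 32 + 32 + 144 = 208 bytes.
 */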

#define PERF_PEBS_MEMINFO_TYPE	(PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC |   \
				PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
				PERF_SAMPLE_TRANSACTION)

static u64 pebs_update_adaptive_cfg(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 sample_type = attr->sample_type;
	u64 pebs_data_cfg = 0;
	bool gprs, tsx_weight;

	if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
	    attr->precise_ip > 1)
		return pebs_data_cfg;

	if (sample_type & PERF_PEBS_MEMINFO_TYPE)
		pebs_data_cfg |= PEBS_DATACFG_MEMINFO;

	/*
	 * We need GPRs when:
	 * + user requested them
	 * + precise_ip < 2 for the non event IP
	 * + For RTM TSX weight we need GPRs for the abort code.
	 */
	gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
	       (attr->sample_regs_intr & PEBS_GP_REGS);

	tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
		     ((attr->config & INTEL_ARCH_EVENT_MASK) ==
		      x86_pmu.rtm_abort_event);

	if (gprs || (attr->precise_ip < 2) || tsx_weight)
		pebs_data_cfg |= PEBS_DATACFG_GP;

	if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
	    (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
		pebs_data_cfg |= PEBS_DATACFG_XMMS;

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		/*
		 * For now always log all LBRs. Could configure this
		 * later.
		 */
		pebs_data_cfg |= PEBS_DATACFG_LBRS |
			((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
	}

	return pebs_data_cfg;
}
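
/*
 * Example (illustrative): a user running perf record -I ax -e cycles:pp
 * requests PERF_SAMPLE_REGS_INTR with a general-purpose register, so this
 * returns PEBS_DATACFG_GP; additionally asking for data source or address
 * sampling would also set PEBS_DATACFG_MEMINFO.
 */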

static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
		  struct perf_event *event, bool add)
{
	struct pmu *pmu = event->ctx->pmu;
	/*
	 * Make sure we get updated with the first PEBS
	 * event. It will trigger also during removal, but
	 * that does not hurt:
	 */
	bool update = cpuc->n_pebs == 1;

	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
		if (!needed_cb)
			perf_sched_cb_inc(pmu);
		else
			perf_sched_cb_dec(pmu);

		update = true;
	}

	/*
	 * The PEBS record doesn't shrink on pmu::del(). Doing so would require
	 * iterating all the existing events and reconfiguring the hardware, a
	 * particularly expensive proposition.
	 */
	if (x86_pmu.intel_cap.pebs_baseline && add) {
		u64 pebs_data_cfg;

		/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
		if (cpuc->n_pebs == 1) {
			cpuc->pebs_data_cfg = 0;
			cpuc->pebs_record_size = sizeof(struct pebs_basic);
		}

		pebs_data_cfg = pebs_update_adaptive_cfg(event);

		/* Update pebs_record_size if the new event requires more data. */
		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
			cpuc->pebs_data_cfg |= pebs_data_cfg;
			adaptive_pebs_record_size_update();
			update = true;
		}
	}

	if (update)
		pebs_update_threshold(cpuc);
}

void intel_pmu_pebs_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs++;
	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
		cpuc->n_large_pebs++;
	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
		cpuc->n_pebs_via_pt++;

	pebs_update_state(needed_cb, cpuc, event, true);
}

static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!is_pebs_pt(event))
		return;

	if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
		cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
}

static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	if (!is_pebs_pt(event))
		return;

	if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
		cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;

	cpuc->pebs_enabled |= PEBS_OUTPUT_PT;

	wrmsrl(MSR_RELOAD_PMC0 + hwc->idx, ds->pebs_event_reset[hwc->idx]);
}

void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled |= 1ULL << 63;

	if (x86_pmu.intel_cap.pebs_baseline) {
		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
		}
	}

	/*
	 * Use auto-reload if possible to save a MSR write in the PMI.
	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
	 */
	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		unsigned int idx = hwc->idx;

		if (idx >= INTEL_PMC_IDX_FIXED)
			idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
		ds->pebs_event_reset[idx] =
			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
	} else {
		ds->pebs_event_reset[hwc->idx] = 0;
	}

	intel_pmu_pebs_via_pt_enable(event);
}
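
/*
 * Illustrative reset-value arithmetic: with a 48-bit counter and
 * sample_period = 0x1000, the auto-reload value stored above is
 * (-0x1000) & ((1ULL << 48) - 1) = 0xfffffffff000, so the counter
 * overflows again after exactly 0x1000 increments without any MSR write
 * from the PMI handler.
 */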

void intel_pmu_pebs_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs--;
	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
		cpuc->n_large_pebs--;
	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
		cpuc->n_pebs_via_pt--;

	pebs_update_state(needed_cb, cpuc, event, false);
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_pebs == cpuc->n_large_pebs &&
	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
		intel_pmu_drain_pebs_buffer();

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
	    (x86_pmu.version < 5))
		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled &= ~(1ULL << 63);

	intel_pmu_pebs_via_pt_disable(event);

	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}

void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;
	void *kaddr;
	int size;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PEBS_FIXUP_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	size = ip - to;
	if (!kernel_ip(ip)) {
		int bytes;
		u8 *buf = this_cpu_read(insn_buffer);

		/* 'size' must fit our buffer, see above */
		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
		if (bytes != 0)
			return 0;

		kaddr = buf;
	} else {
		kaddr = (void *)to;
	}

	do {
		struct insn insn;

		old_to = to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, size, is_64bit);
		insn_get_length(&insn);
		/*
		 * Make sure there was not a problem decoding the
		 * instruction and we haven't burned past the end of
		 * the buffer.
		 */
		if (!insn.length)
			break;

		to += insn.length;
		kaddr += insn.length;
		size -= insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * (IRQ) could have been modified concurrently such that we decode
	 * past 'ip'; treat the sample as not EXACT.
	 */
	return 0;
}

static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
{
	if (tsx_tuning) {
		union hsw_tsx_tuning tsx = { .value = tsx_tuning };
		return tsx.cycles_last_block;
	}
	return 0;
}

static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
{
	u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;

	/* For RTM XABORTs also log the abort code from AX */
	if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
		txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
	return txn;
}
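
/*
 * Decode sketch (illustrative): bits 39:32 of tsx_tuning line up with the
 * PERF_TXN_* flag bits after the shift, so an rtm_abort record yields
 * PERF_TXN_TRANSACTION; if AX bit 0 is also set, AX bits 31:24 carry the
 * XABORT code and are placed at PERF_TXN_ABORT_SHIFT in the result.
 */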

static inline u64 get_pebs_status(void *n)
{
	if (x86_pmu.intel_cap.pebs_format < 4)
		return ((struct pebs_record_nhm *)n)->status;
	return ((struct pebs_basic *)n)->applicable_counters;
}

#define PERF_X86_EVENT_PEBS_HSW_PREC \
		(PERF_X86_EVENT_PEBS_ST_HSW | \
		 PERF_X86_EVENT_PEBS_LD_HSW | \
		 PERF_X86_EVENT_PEBS_NA_HSW)

static u64 get_data_src(struct perf_event *event, u64 aux)
{
	u64 val = PERF_MEM_NA;
	int fl = event->hw.flags;
	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
		val = load_latency_data(aux);
	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
		val = precise_datala_hsw(event, aux);
	else if (fst)
		val = precise_store_data(aux);
	return val;
}

static void setup_pebs_fixed_sample_data(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	/*
	 * We cast to the biggest pebs_record but are careful not to
	 * unconditionally access the 'extra' entries.
	 */
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_record_skl *pebs = __pebs;
	u64 sample_type;
	int fll;

	if (pebs == NULL)
		return;

	sample_type = event->attr.sample_type;
	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;

	perf_sample_data_init(data, 0, event->hw.last_period);

	data->period = event->hw.last_period;

	/*
	 * Use latency for weight (only avail with PEBS-LL)
	 */
	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
		data->weight = pebs->lat;

	/*
	 * data.data_src encodes the data source
	 */
	if (sample_type & PERF_SAMPLE_DATA_SRC)
		data->data_src.val = get_data_src(event, pebs->dse);

	/*
	 * We must always use iregs for the unwinder to stay sane; the
	 * record BP,SP,IP can point into thin air when the record is
	 * from a previous PMI context or an (I)RET happened between
	 * the record and the PMI.
	 */
	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		data->callchain = perf_callchain(event, iregs);

	/*
	 * We use the interrupt regs as a base because the PEBS record does
	 * not contain a full regs set, specifically segment registers
	 * (current) are not supported.
	 */
	*regs = *iregs;

	/*
	 * Initialize regs->flags from the PEBS record and clear the exact
	 * bit (which reuses x86 EFLAGS reserved bit 3), i.e. do not rely
	 * on it being zero:
	 */
	regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		regs->ax = pebs->ax;
		regs->bx = pebs->bx;
		regs->cx = pebs->cx;
		regs->dx = pebs->dx;
		regs->si = pebs->si;
		regs->di = pebs->di;

		regs->bp = pebs->bp;
		regs->sp = pebs->sp;

#ifndef CONFIG_X86_32
		regs->r8 = pebs->r8;
		regs->r9 = pebs->r9;
		regs->r10 = pebs->r10;
		regs->r11 = pebs->r11;
		regs->r12 = pebs->r12;
		regs->r13 = pebs->r13;
		regs->r14 = pebs->r14;
		regs->r15 = pebs->r15;
#endif
	}

	if (event->attr.precise_ip > 1) {
		/*
		 * Haswell and later processors have an 'eventing IP'
		 * (real IP) which fixes the off-by-1 skid in hardware.
		 * If present, use it.
		 */
		if (x86_pmu.intel_cap.pebs_format >= 2) {
			set_linear_ip(regs, pebs->real_ip);
			regs->flags |= PERF_EFLAGS_EXACT;
		} else {
			/* Otherwise, use PEBS off-by-1 IP: */
			set_linear_ip(regs, pebs->ip);

			/*
			 * With precise_ip >= 2, try to fix up the off-by-1 IP
			 * by decoding forward from the LBR-recorded branch
			 * target (see intel_pmu_pebs_fixup_ip() above).
			 */
			if (intel_pmu_pebs_fixup_ip(regs))
				regs->flags |= PERF_EFLAGS_EXACT;
		}
	} else {
		/*
		 * When precise_ip == 1, return the PEBS off-by-1 IP,
		 * no fixup attempted:
		 */
		set_linear_ip(regs, pebs->ip);
	}

	if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
		data->addr = pebs->dla;

	if (x86_pmu.intel_cap.pebs_format >= 2) {
		/* Only set the TSX weight when no memory weight. */
		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data->weight = intel_get_tsx_weight(pebs->tsx_tuning);

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
							      pebs->ax);
	}

	/*
	 * v3 supplies an accurate time stamp, so we use that
	 * for the time stamp.
	 *
	 * We can only do this for the default trace clock.
	 */
	if (x86_pmu.intel_cap.pebs_format >= 3 &&
	    event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(pebs->tsc);

	if (has_branch_stack(event))
		data->br_stack = &cpuc->lbr_stack;
}

static void adaptive_pebs_save_regs(struct pt_regs *regs,
				    struct pebs_gprs *gprs)
{
	regs->ax = gprs->ax;
	regs->bx = gprs->bx;
	regs->cx = gprs->cx;
	regs->dx = gprs->dx;
	regs->si = gprs->si;
	regs->di = gprs->di;
	regs->bp = gprs->bp;
	regs->sp = gprs->sp;
#ifndef CONFIG_X86_32
	regs->r8 = gprs->r8;
	regs->r9 = gprs->r9;
	regs->r10 = gprs->r10;
	regs->r11 = gprs->r11;
	regs->r12 = gprs->r12;
	regs->r13 = gprs->r13;
	regs->r14 = gprs->r14;
	regs->r15 = gprs->r15;
#endif
}

/*
 * With adaptive PEBS the layout depends on what fields are configured.
 */
static void setup_pebs_adaptive_sample_data(struct perf_event *event,
					    struct pt_regs *iregs, void *__pebs,
					    struct perf_sample_data *data,
					    struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_basic *basic = __pebs;
	void *next_record = basic + 1;
	u64 sample_type;
	u64 format_size;
	struct pebs_meminfo *meminfo = NULL;
	struct pebs_gprs *gprs = NULL;
	struct x86_perf_regs *perf_regs;

	if (basic == NULL)
		return;

	perf_regs = container_of(regs, struct x86_perf_regs, regs);
	perf_regs->xmm_regs = NULL;

	sample_type = event->attr.sample_type;
	format_size = basic->format_size;
	perf_sample_data_init(data, 0, event->hw.last_period);
	data->period = event->hw.last_period;

	if (event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(basic->tsc);

	/*
	 * We must always use iregs for the unwinder to stay sane; the
	 * record BP,SP,IP can point into thin air when the record is
	 * from a previous PMI context or an (I)RET happened between
	 * the record and the PMI.
	 */
	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		data->callchain = perf_callchain(event, iregs);

	*regs = *iregs;
	/* The ip in the basic record is an 'eventing' ip. */
	set_linear_ip(regs, basic->ip);
	regs->flags = PERF_EFLAGS_EXACT;

	/*
	 * The record for MEMINFO is in front of GP
	 * But PERF_SAMPLE_TRANSACTION needs gprs->ax.
	 * Save the pointer here but process later.
	 */
	if (format_size & PEBS_DATACFG_MEMINFO) {
		meminfo = next_record;
		next_record = meminfo + 1;
	}

	if (format_size & PEBS_DATACFG_GP) {
		gprs = next_record;
		next_record = gprs + 1;

		if (event->attr.precise_ip < 2) {
			set_linear_ip(regs, gprs->ip);
			regs->flags &= ~PERF_EFLAGS_EXACT;
		}

		if (sample_type & PERF_SAMPLE_REGS_INTR)
			adaptive_pebs_save_regs(regs, gprs);
	}

	if (format_size & PEBS_DATACFG_MEMINFO) {
		if (sample_type & PERF_SAMPLE_WEIGHT)
			data->weight = meminfo->latency ?:
				intel_get_tsx_weight(meminfo->tsx_tuning);

		if (sample_type & PERF_SAMPLE_DATA_SRC)
			data->data_src.val = get_data_src(event, meminfo->aux);

		if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
			data->addr = meminfo->address;

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
							      gprs ? gprs->ax : 0);
	}

	if (format_size & PEBS_DATACFG_XMMS) {
		struct pebs_xmm *xmm = next_record;

		next_record = xmm + 1;
		perf_regs->xmm_regs = xmm->xmm;
	}

	if (format_size & PEBS_DATACFG_LBRS) {
		struct lbr_entry *lbr = next_record;
		int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
					& 0xff) + 1;
		next_record = next_record + num_lbr * sizeof(struct lbr_entry);

		if (has_branch_stack(event)) {
			intel_pmu_store_pebs_lbrs(lbr);
			data->br_stack = &cpuc->lbr_stack;
		}
	}

	WARN_ONCE(next_record != __pebs + (format_size >> 48),
			"PEBS record size %llu, expected %llu, config %llx\n",
			format_size >> 48,
			(u64)(next_record - __pebs),
			basic->format_size);
}

static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	void *at;
	u64 pebs_status;

	/*
	 * fmt0 does not have a status bitfield (does not use
	 * perf_record_nhm format)
	 */
	if (x86_pmu.intel_cap.pebs_format < 1)
		return base;

	if (base == NULL)
		return NULL;

	for (at = base; at < top; at += cpuc->pebs_record_size) {
		unsigned long status = get_pebs_status(at);

		if (test_bit(bit, (unsigned long *)&status)) {
			/* PEBS v3 has accurate status bits */
			if (x86_pmu.intel_cap.pebs_format >= 3)
				return at;

			if (status == (1 << bit))
				return at;

			/* clear non-PEBS bit and re-check */
			pebs_status = status & cpuc->pebs_enabled;
			pebs_status &= PEBS_COUNTER_MASK;
			if (pebs_status == (1 << bit))
				return at;
		}
	}
	return NULL;
}

void intel_pmu_auto_reload_read(struct perf_event *event)
{
	WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));

	perf_pmu_disable(event->pmu);
	intel_pmu_drain_pebs_buffer();
	perf_pmu_enable(event->pmu);
}

/*
 * Special variant of intel_pmu_save_and_restart() for auto-reload.
 */
static int
intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 period = hwc->sample_period;
	u64 prev_raw_count, new_raw_count;
	s64 new, old;

	WARN_ON(!period);

	/*
	 * drain_pebs() only happens when the PMU is disabled.
	 */
	WARN_ON(this_cpu_read(cpu_hw_events.enabled));

	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
	local64_set(&hwc->prev_count, new_raw_count);

	/*
	 * Since the counter increments a negative counter value and
	 * overflows on the sign switch, giving the interval:
	 *
	 *   [-period, 0]
	 *
	 * the difference between two consecutive reads is:
	 *
	 *   A) value2 - value1;
	 *      when no overflows have happened in between,
	 *
	 *   B) (0 - value1) + (value2 - (-period));
	 *      when one overflow happened in between,
	 *
	 *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
	 *      when @n overflows happened in between.
	 *
	 * Here A) is the obvious difference, B) is the extension to the
	 * discrete interval, where the first term is to the top of the
	 * interval and the second term is from the bottom of the next
	 * interval and C) the extension to multiple intervals, where the
	 * middle term is the whole intervals covered.
	 *
	 * An equivalent of C, by reduction, is:
	 *
	 *   value2 - value1 + n * period
	 */
	new = ((s64)(new_raw_count << shift) >> shift);
	old = ((s64)(prev_raw_count << shift) >> shift);
	local64_add(new - old + count * period, &event->count);

	local64_set(&hwc->period_left, -new);

	perf_event_update_userpage(event);

	return 0;
}
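
/*
 * Numeric check of the formula above (illustrative): period = 0x1000,
 * old = -0xc00, two records drained (count = 2), new = -0xe00. Counting
 * directly: 0xc00 events to the first overflow, 0x1000 to the second,
 * 0x200 since the last reload = 0x1e00. By the formula: new - old +
 * count * period = -0xe00 + 0xc00 + 0x2000 = 0x1e00.
 */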

static void __intel_pmu_pebs_event(struct perf_event *event,
				   struct pt_regs *iregs,
				   void *base, void *top,
				   int bit, int count,
				   void (*setup_sample)(struct perf_event *,
						struct pt_regs *,
						void *,
						struct perf_sample_data *,
						struct pt_regs *))
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct x86_perf_regs perf_regs;
	struct pt_regs *regs = &perf_regs.regs;
	void *at = get_next_pebs_record_by_bit(base, top, bit);

	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		/*
		 * Now, auto-reload is only enabled in fixed period mode.
		 * The reload value is always hwc->sample_period.
		 * May need to change it, if auto-reload is enabled in
		 * freq mode later.
		 */
		intel_pmu_save_and_restart_reload(event, count);
	} else if (!intel_pmu_save_and_restart(event))
		return;

	while (count > 1) {
		setup_sample(event, iregs, at, &data, regs);
		perf_event_output(event, &data, regs);
		at += cpuc->pebs_record_size;
		at = get_next_pebs_record_by_bit(at, top, bit);
		count--;
	}

	setup_sample(event, iregs, at, &data, regs);

	/*
	 * All but the last records are processed.
	 * The last one is left to be able to call the overflow handler.
	 */
	if (perf_event_overflow(event, &data, regs)) {
		x86_pmu_stop(event, 0);
		return;
	}

}

static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at  = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0) {
		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
			intel_pmu_save_and_restart_reload(event, 0);
		return;
	}

	__intel_pmu_pebs_event(event, iregs, at, top, 0, n,
			       setup_pebs_fixed_sample_data);
}

static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
{
	struct perf_event *event;
	int bit;

	/*
	 * The drain_pebs() could be called twice in a short period
	 * for an auto-reload event in pmu::read(), with no overflows
	 * having happened in between. It still needs to call
	 * intel_pmu_save_and_restart_reload() to update event->count
	 * for this case.
	 */
	for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
		event = cpuc->events[bit];
		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
			intel_pmu_save_and_restart_reload(event, 0);
	}
}

static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	int bit, i, size;
	u64 mask;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	mask = (1ULL << x86_pmu.max_pebs_events) - 1;
	size = x86_pmu.max_pebs_events;
	if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
		mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
		size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
	}

	if (unlikely(base >= top)) {
		intel_pmu_pebs_event_update_no_drain(cpuc, size);
		return;
	}

	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
		struct pebs_record_nhm *p = at;
		u64 pebs_status;

		pebs_status = p->status & cpuc->pebs_enabled;
		pebs_status &= mask;

		/* PEBS v3 has more accurate status bits */
		if (x86_pmu.intel_cap.pebs_format >= 3) {
			for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
				counts[bit]++;

			continue;
		}

		/*
		 * On some CPUs the PEBS status can be zero when PEBS is
		 * racing with clearing of GLOBAL_STATUS.
		 *
		 * Normally we would drop that record, but in the
		 * case when there is only a single active PEBS event
		 * we can assume it's for that event.
		 */
		if (!pebs_status && cpuc->pebs_enabled &&
			!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
			pebs_status = cpuc->pebs_enabled;

		bit = find_first_bit((unsigned long *)&pebs_status,
					x86_pmu.max_pebs_events);
		if (bit >= x86_pmu.max_pebs_events)
			continue;

		/*
		 * The PEBS hardware does not deal well with the situation
		 * when events happen near to each other and multiple bits
		 * are set. But it should happen rarely.
		 *
		 * If these events include one PEBS and multiple non-PEBS
		 * events, it doesn't impact the PEBS record. The record will
		 * be handled normally. (slow path)
		 *
		 * If these events include two or more PEBS events, the
		 * records for the events can be collapsed into a single
		 * one, and it's not possible to reconstruct all events
		 * that caused the PEBS record. It's called collision.
		 * If collision happened, the record will be dropped.
		 */
		if (p->status != (1ULL << bit)) {
			for_each_set_bit(i, (unsigned long *)&pebs_status, size)
				error[i]++;
			continue;
		}

		counts[bit]++;
	}

	for_each_set_bit(bit, (unsigned long *)&mask, size) {
		if ((counts[bit] == 0) && (error[bit] == 0))
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		/* log dropped samples number */
		if (error[bit]) {
			perf_log_lost_samples(event, error[bit]);

			if (perf_event_account_interrupt(event))
				x86_pmu_stop(event, 0);
		}

		if (counts[bit]) {
			__intel_pmu_pebs_event(event, iregs, base,
					       top, bit, counts[bit],
					       setup_pebs_fixed_sample_data);
		}
	}
}

static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
{
	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	int bit, size;
	u64 mask;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_basic *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
	       (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
	size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;

	if (unlikely(base >= top)) {
		intel_pmu_pebs_event_update_no_drain(cpuc, size);
		return;
	}

	for (at = base; at < top; at += cpuc->pebs_record_size) {
		u64 pebs_status;

		pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
		pebs_status &= mask;

		for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
			counts[bit]++;
	}

	for_each_set_bit(bit, (unsigned long *)&mask, size) {
		if (counts[bit] == 0)
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		__intel_pmu_pebs_event(event, iregs, base,
				       top, bit, counts[bit],
				       setup_pebs_adaptive_sample_data);
	}
}

/*
 * BTS, PEBS probe and setup
 */

void __init intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
	if (x86_pmu.version <= 4)
		x86_pmu.pebs_no_isolation = 1;

	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
		char *pebs_qual = "";
		int format = x86_pmu.intel_cap.pebs_format;

		if (format < 4)
			x86_pmu.intel_cap.pebs_baseline = 0;

		switch (format) {
		case 0:
			pr_cont("PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			/*
			 * Using >PAGE_SIZE buffers makes the WRMSR to
			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
			 * mysteriously hang on Core2.
			 *
			 * As a workaround, we don't do this.
			 */
			x86_pmu.pebs_buffer_size = PAGE_SIZE;
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			pr_cont("PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 2:
			pr_cont("PEBS fmt2%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 3:
			pr_cont("PEBS fmt3%c, ", pebs_type);
			x86_pmu.pebs_record_size =
						sizeof(struct pebs_record_skl);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
			break;

		case 4:
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
			x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
			if (x86_pmu.intel_cap.pebs_baseline) {
				x86_pmu.large_pebs_flags |=
					PERF_SAMPLE_BRANCH_STACK |
					PERF_SAMPLE_TIME;
				x86_pmu.flags |= PMU_FL_PEBS_ALL;
				pebs_qual = "-baseline";
				x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
			} else {
				/* Only basic record supported */
				x86_pmu.large_pebs_flags &=
					~(PERF_SAMPLE_ADDR |
					  PERF_SAMPLE_TIME |
					  PERF_SAMPLE_DATA_SRC |
					  PERF_SAMPLE_TRANSACTION |
					  PERF_SAMPLE_REGS_USER |
					  PERF_SAMPLE_REGS_INTR);
			}
			pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);

			if (x86_pmu.intel_cap.pebs_output_pt_available) {
				pr_cont("PEBS-via-PT, ");
				x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
			}

			break;

		default:
			pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}

void perf_restore_debug_store(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}