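/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */
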
#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/intel_pt.h>
#include <asm/intel-family.h>

#include "../perf_event.h"
#include "pt.h"

static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;

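/*
 * Capabilities of Intel PT hardware, such as number of address ranges or
 * supported output schemes, are cached and exported to userspace as the
 * "caps" attribute group of the pt pmu device
 * (/sys/bus/event_source/devices/intel_pt/caps/), e.g.
 * caps/topa_output, so that tools can adapt to the hardware at hand.
 */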
#define PT_CAP(_n, _l, _r, _m)                                          \
        [PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,        \
                            .reg = _r, .mask = _m }

static struct pt_cap_desc {
        const char      *name;
        u32             leaf;
        u8              reg;
        u32             mask;
} pt_caps[] = {
        PT_CAP(max_subleaf,             0, CPUID_EAX, 0xffffffff),
        PT_CAP(cr3_filtering,           0, CPUID_EBX, BIT(0)),
        PT_CAP(psb_cyc,                 0, CPUID_EBX, BIT(1)),
        PT_CAP(ip_filtering,            0, CPUID_EBX, BIT(2)),
        PT_CAP(mtc,                     0, CPUID_EBX, BIT(3)),
        PT_CAP(ptwrite,                 0, CPUID_EBX, BIT(4)),
        PT_CAP(power_event_trace,       0, CPUID_EBX, BIT(5)),
        PT_CAP(topa_output,             0, CPUID_ECX, BIT(0)),
        PT_CAP(topa_multiple_entries,   0, CPUID_ECX, BIT(1)),
        PT_CAP(single_range_output,     0, CPUID_ECX, BIT(2)),
        PT_CAP(payloads_lip,            0, CPUID_ECX, BIT(31)),
        PT_CAP(num_address_ranges,      1, CPUID_EAX, 0x3),
        PT_CAP(mtc_periods,             1, CPUID_EAX, 0xffff0000),
        PT_CAP(cycle_thresholds,        1, CPUID_EBX, 0xffff),
        PT_CAP(psb_periods,             1, CPUID_EBX, 0xffff0000),
};

static u32 pt_cap_get(enum pt_capabilities cap)
{
        struct pt_cap_desc *cd = &pt_caps[cap];
        u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
        unsigned int shift = __ffs(cd->mask);

        return (c & cd->mask) >> shift;
}

static ssize_t pt_cap_show(struct device *cdev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct dev_ext_attribute *ea =
                container_of(attr, struct dev_ext_attribute, attr);
        enum pt_capabilities cap = (long)ea->var;

        return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
}

static struct attribute_group pt_cap_group = {
        .name   = "caps",
};

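/*
 * Event configuration bits, exported in the "format" attribute group
 * (/sys/bus/event_source/devices/intel_pt/format/). Each term maps onto a
 * bit or bit field of the IA32_RTIT_CTL MSR. Illustrative perf invocation
 * (term availability depends on this CPU's capabilities, see caps/ above):
 *
 *	perf record -e intel_pt/tsc=1,mtc=1,mtc_period=3/u -- workload
 */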
PMU_FORMAT_ATTR(pt,             "config:0"      );
PMU_FORMAT_ATTR(cyc,            "config:1"      );
PMU_FORMAT_ATTR(pwr_evt,        "config:4"      );
PMU_FORMAT_ATTR(fup_on_ptw,     "config:5"      );
PMU_FORMAT_ATTR(mtc,            "config:9"      );
PMU_FORMAT_ATTR(tsc,            "config:10"     );
PMU_FORMAT_ATTR(noretcomp,      "config:11"     );
PMU_FORMAT_ATTR(ptw,            "config:12"     );
PMU_FORMAT_ATTR(branch,         "config:13"     );
PMU_FORMAT_ATTR(mtc_period,     "config:14-17"  );
PMU_FORMAT_ATTR(cyc_thresh,     "config:19-22"  );
PMU_FORMAT_ATTR(psb_period,     "config:24-27"  );

static struct attribute *pt_formats_attr[] = {
        &format_attr_pt.attr,
        &format_attr_cyc.attr,
        &format_attr_pwr_evt.attr,
        &format_attr_fup_on_ptw.attr,
        &format_attr_mtc.attr,
        &format_attr_tsc.attr,
        &format_attr_noretcomp.attr,
        &format_attr_ptw.attr,
        &format_attr_branch.attr,
        &format_attr_mtc_period.attr,
        &format_attr_cyc_thresh.attr,
        &format_attr_psb_period.attr,
        NULL,
};

static struct attribute_group pt_format_group = {
        .name   = "format",
        .attrs  = pt_formats_attr,
};

static ssize_t
pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
                    char *page)
{
        struct perf_pmu_events_attr *pmu_attr =
                container_of(attr, struct perf_pmu_events_attr, attr);

        switch (pmu_attr->id) {
        case 0:
                return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
        case 1:
                return sprintf(page, "%u:%u\n",
                               pt_pmu.tsc_art_num,
                               pt_pmu.tsc_art_den);
        default:
                break;
        }

        return -EINVAL;
}

PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
               pt_timing_attr_show);
PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
               pt_timing_attr_show);

static struct attribute *pt_timing_attr[] = {
        &timing_attr_max_nonturbo_ratio.attr.attr,
        &timing_attr_tsc_art_ratio.attr.attr,
        NULL,
};

static struct attribute_group pt_timing_group = {
        .attrs  = pt_timing_attr,
};

static const struct attribute_group *pt_attr_groups[] = {
        &pt_cap_group,
        &pt_format_group,
        &pt_timing_group,
        NULL,
};

static int __init pt_pmu_hw_init(void)
{
        struct dev_ext_attribute *de_attrs;
        struct attribute **attrs;
        size_t size;
        u64 reg;
        int ret;
        long i;

        rdmsrl(MSR_PLATFORM_INFO, reg);
        pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;

        /*
         * if available, read in TSC to core crystal clock ratio,
         * otherwise, zero for numerator stands for "not enumerated"
         * as per SDM
         */
        if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
                u32 eax, ebx, ecx, edx;

                cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);

                pt_pmu.tsc_art_num = ebx;
                pt_pmu.tsc_art_den = eax;
        }

        /* model-specific quirks */
        switch (boot_cpu_data.x86_model) {
        case INTEL_FAM6_BROADWELL_CORE:
        case INTEL_FAM6_BROADWELL_XEON_D:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_BROADWELL_X:
                /* not setting BRANCH_EN will #GP, erratum BDM106 */
                pt_pmu.branch_en_always_on = true;
                break;
        default:
                break;
        }

        if (boot_cpu_has(X86_FEATURE_VMX)) {
                /*
                 * Intel SDM, 36.5 "Tracing post-VMXON" says that
                 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
                 * post-VMXON.
                 */
                rdmsrl(MSR_IA32_VMX_MISC, reg);
                if (reg & BIT(14))
                        pt_pmu.vmx = true;
        }

        attrs = NULL;

        /* cache Intel PT capabilities (CPUID leaf 0x14) */
        for (i = 0; i < PT_CPUID_LEAVES; i++) {
                cpuid_count(20, i,
                            &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
                            &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
                            &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
                            &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
        }

        ret = -ENOMEM;
        size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
        attrs = kzalloc(size, GFP_KERNEL);
        if (!attrs)
                goto fail;

        size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
        de_attrs = kzalloc(size, GFP_KERNEL);
        if (!de_attrs)
                goto fail;

        for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
                struct dev_ext_attribute *de_attr = de_attrs + i;

                de_attr->attr.attr.name = pt_caps[i].name;

                sysfs_attr_init(&de_attr->attr.attr);

                de_attr->attr.attr.mode = S_IRUGO;
                de_attr->attr.show      = pt_cap_show;
                de_attr->var            = (void *)i;

                attrs[i] = &de_attr->attr.attr;
        }

        pt_cap_group.attrs = attrs;

        return 0;

fail:
        kfree(attrs);

        return ret;
}

#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC    | \
                          RTIT_CTL_CYC_THRESH  | \
                          RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC     (RTIT_CTL_MTC_EN      | \
                          RTIT_CTL_MTC_RANGE)

#define RTIT_CTL_PTW     (RTIT_CTL_PTW_EN      | \
                          RTIT_CTL_FUP_ON_PTW)

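/*
 * Bit 0 (TraceEn) in the attr.config is meaningless as the
 * corresponding bit in the RTIT_CTL can only be controlled
 * by the driver; therefore, repurpose it to mean: pass
 * through the bit that was previously assumed to be always
 * on for PT, thereby allowing the user to *not* set it if
 * they so wish. See also pt_event_valid() and pt_config().
 */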
#define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN

#define PT_CONFIG_MASK (RTIT_CTL_TRACEEN       | \
                        RTIT_CTL_TSC_EN        | \
                        RTIT_CTL_DISRETC       | \
                        RTIT_CTL_BRANCH_EN     | \
                        RTIT_CTL_CYC_PSB       | \
                        RTIT_CTL_MTC           | \
                        RTIT_CTL_PWR_EVT_EN    | \
                        RTIT_CTL_FUP_ON_PTW    | \
                        RTIT_CTL_PTW_EN)

static bool pt_event_valid(struct perf_event *event)
{
        u64 config = event->attr.config;
        u64 allowed, requested;

        if ((config & PT_CONFIG_MASK) != config)
                return false;

        if (config & RTIT_CTL_CYC_PSB) {
                if (!pt_cap_get(PT_CAP_psb_cyc))
                        return false;

                allowed = pt_cap_get(PT_CAP_psb_periods);
                requested = (config & RTIT_CTL_PSB_FREQ) >>
                        RTIT_CTL_PSB_FREQ_OFFSET;
                if (requested && (!(allowed & BIT(requested))))
                        return false;

                allowed = pt_cap_get(PT_CAP_cycle_thresholds);
                requested = (config & RTIT_CTL_CYC_THRESH) >>
                        RTIT_CTL_CYC_THRESH_OFFSET;
                if (requested && (!(allowed & BIT(requested))))
                        return false;
        }

        if (config & RTIT_CTL_MTC) {
                /*
                 * In the unlikely case that CPUID lists valid mtc periods,
                 * but not the mtc capability, drop out here.
                 *
                 * Spec says that setting mtc period bits while mtc bit in
                 * CPUID is 0 will #GP, so better safe than sorry.
                 */
                if (!pt_cap_get(PT_CAP_mtc))
                        return false;

                allowed = pt_cap_get(PT_CAP_mtc_periods);
                if (!allowed)
                        return false;

                requested = (config & RTIT_CTL_MTC_RANGE) >>
                        RTIT_CTL_MTC_RANGE_OFFSET;

                if (!(allowed & BIT(requested)))
                        return false;
        }

        if (config & RTIT_CTL_PWR_EVT_EN &&
            !pt_cap_get(PT_CAP_power_event_trace))
                return false;

        if (config & RTIT_CTL_PTW) {
                if (!pt_cap_get(PT_CAP_ptwrite))
                        return false;

                /* FUPonPTW without PTW doesn't make sense */
                if ((config & RTIT_CTL_FUP_ON_PTW) &&
                    !(config & RTIT_CTL_PTW_EN))
                        return false;
        }

        /*
         * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
         * clears the assumption that BranchEn must always be enabled,
         * as was the case with the first implementation of PT.
         * If this bit is not set, the legacy behavior is preserved for
         * compatibility with the older userspace.
         *
         * Re-using bit 0 for this purpose is fine because it is never
         * directly set by the user; previous attempts at setting it in
         * the attr.config resulted in -EINVAL.
         */
        if (config & RTIT_CTL_PASSTHROUGH) {
                /*
                 * Disallow not setting BRANCH_EN where BRANCH_EN is
                 * always required.
                 */
                if (pt_pmu.branch_en_always_on &&
                    !(config & RTIT_CTL_BRANCH_EN))
                        return false;
        } else {
                /*
                 * Disallow BRANCH_EN without the PASSTHROUGH.
                 */
                if (config & RTIT_CTL_BRANCH_EN)
                        return false;
        }

        return true;
}

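/*
 * PT configuration helpers
 * These all are cpu affine and operate on a local PT
 */

/* Address ranges and their corresponding msr configuration registers */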
static const struct pt_address_range {
        unsigned long   msr_a;
        unsigned long   msr_b;
        unsigned int    reg_off;
} pt_address_ranges[] = {
        {
                .msr_a   = MSR_IA32_RTIT_ADDR0_A,
                .msr_b   = MSR_IA32_RTIT_ADDR0_B,
                .reg_off = RTIT_CTL_ADDR0_OFFSET,
        },
        {
                .msr_a   = MSR_IA32_RTIT_ADDR1_A,
                .msr_b   = MSR_IA32_RTIT_ADDR1_B,
                .reg_off = RTIT_CTL_ADDR1_OFFSET,
        },
        {
                .msr_a   = MSR_IA32_RTIT_ADDR2_A,
                .msr_b   = MSR_IA32_RTIT_ADDR2_B,
                .reg_off = RTIT_CTL_ADDR2_OFFSET,
        },
        {
                .msr_a   = MSR_IA32_RTIT_ADDR3_A,
                .msr_b   = MSR_IA32_RTIT_ADDR3_B,
                .reg_off = RTIT_CTL_ADDR3_OFFSET,
        }
};

static u64 pt_config_filters(struct perf_event *event)
{
        struct pt_filters *filters = event->hw.addr_filters;
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        unsigned int range = 0;
        u64 rtit_ctl = 0;

        if (!filters)
                return 0;

        perf_event_addr_filters_sync(event);

        for (range = 0; range < filters->nr_filters; range++) {
                struct pt_filter *filter = &filters->filter[range];

                /*
                 * Note, if the range has zero start/end addresses due
                 * to its dynamic object not being loaded yet, we just
                 * go ahead and program zeroed range, which will simply
                 * produce no data. Note^2: if executable code at 0x0
                 * is a concern, we can set up an "invalid" configuration
                 * such as msr_b < msr_a.
                 */

                /* avoid redundant msr writes */
                if (pt->filters.filter[range].msr_a != filter->msr_a) {
                        wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
                        pt->filters.filter[range].msr_a = filter->msr_a;
                }

                if (pt->filters.filter[range].msr_b != filter->msr_b) {
                        wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
                        pt->filters.filter[range].msr_b = filter->msr_b;
                }

                rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
        }

        return rtit_ctl;
}

static void pt_config(struct perf_event *event)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        u64 reg;

        /* First round: clear STATUS, in particular the PSB byte counter. */
        if (!event->hw.config) {
                perf_event_itrace_started(event);
                wrmsrl(MSR_IA32_RTIT_STATUS, 0);
        }

        reg = pt_config_filters(event);
        reg |= RTIT_CTL_TOPA | RTIT_CTL_TRACEEN;

        /*
         * Previously, we had BRANCH_EN on by default, but now that PT has
         * grown features outside of branch tracing, it is useful to allow
         * the user to disable it. Setting bit 0 in the event's attr.config
         * allows BRANCH_EN to pass through instead of being always on. See
         * also the comment in pt_event_valid().
         */
        if (event->attr.config & BIT(0)) {
                reg |= event->attr.config & RTIT_CTL_BRANCH_EN;
        } else {
                reg |= RTIT_CTL_BRANCH_EN;
        }

        if (!event->attr.exclude_kernel)
                reg |= RTIT_CTL_OS;
        if (!event->attr.exclude_user)
                reg |= RTIT_CTL_USR;

        reg |= (event->attr.config & PT_CONFIG_MASK);

        event->hw.config = reg;
        if (READ_ONCE(pt->vmx_on))
                perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
        else
                wrmsrl(MSR_IA32_RTIT_CTL, reg);
}

static void pt_config_stop(struct perf_event *event)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        u64 ctl = READ_ONCE(event->hw.config);

        /* may be already stopped by a PMI */
        if (!(ctl & RTIT_CTL_TRACEEN))
                return;

        ctl &= ~RTIT_CTL_TRACEEN;
        if (!READ_ONCE(pt->vmx_on))
                wrmsrl(MSR_IA32_RTIT_CTL, ctl);

        WRITE_ONCE(event->hw.config, ctl);

        /*
         * A wrmsr that disables trace generation serializes other PT
         * registers and causes all data packets to be written to memory,
         * but a fence is required for the data to become globally visible.
         *
         * The below WMB, separating data store and aux_head store matches
         * the consumer's RMB that separates aux_head load and data load.
         */
        wmb();
}

static void pt_config_buffer(void *buf, unsigned int topa_idx,
                             unsigned int output_off)
{
        u64 reg;

        wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));

        /*
         * Low 7 bits are always set for ToPA output; bits 31:7 encode the
         * current table entry index, bits 63:32 the output region offset.
         */
        reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);

        wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}

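/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */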
#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)

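/**
 * struct topa - page-sized ToPA table with metadata at the top
 * @table: actual ToPA table entries, as understood by PT hardware
 * @list: linkage to struct pt_buffer's list of tables
 * @phys: physical address of this page
 * @offset: offset of the first entry in this table in the buffer
 * @size: total size of all entries in this table
 * @last: index of the last initialized entry in this table
 */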
struct topa {
        struct topa_entry       table[TENTS_PER_PAGE];
        struct list_head        list;
        u64                     phys;
        u64                     offset;
        size_t                  size;
        int                     last;
};

/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])

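/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu: CPU on which to allocate.
 * @gfp: Allocation flags.
 *
 * Return: On success, return the pointer to ToPA table page.
 */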
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
        int node = cpu_to_node(cpu);
        struct topa *topa;
        struct page *p;

        p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
        if (!p)
                return NULL;

        topa = page_address(p);
        topa->last = 0;
        topa->phys = page_to_phys(p);

        /*
         * In case of single-entry ToPA, always put the self-referencing END
         * entry in the second entry, pointing back to the table itself.
         */
        if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
                TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
                TOPA_ENTRY(topa, 1)->end = 1;
        }

        return topa;
}

/**
 * topa_free() - free a page-sized ToPA table
 * @topa: Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
        free_page((unsigned long)topa);
}

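/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf: PT buffer that's being extended.
 * @topa: New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry to @topa to the current
 * "last" table and adjust the last table pointer to @topa.
 */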
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
        struct topa *last = buf->last;

        list_add_tail(&topa->list, &buf->tables);

        if (!buf->first) {
                buf->first = buf->last = buf->cur = topa;
                return;
        }

        topa->offset = last->offset + last->size;
        buf->last = topa;

        if (!pt_cap_get(PT_CAP_topa_multiple_entries))
                return;

        BUG_ON(last->last != TENTS_PER_PAGE - 1);

        TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
        TOPA_ENTRY(last, -1)->end = 1;
}

/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa: ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
        /* single-entry ToPA is a special case */
        if (!pt_cap_get(PT_CAP_topa_multiple_entries))
                return !!topa->last;

        return topa->last == TENTS_PER_PAGE - 1;
}

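/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf: PT buffer being initialized.
 * @gfp: Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return: 0 on success or error code.
 */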
static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
{
        struct topa *topa = buf->last;
        int order = 0;
        struct page *p;

        p = virt_to_page(buf->data_pages[buf->nr_pages]);
        if (PagePrivate(p))
                order = page_private(p);

        if (topa_table_full(topa)) {
                topa = topa_alloc(buf->cpu, gfp);
                if (!topa)
                        return -ENOMEM;

                topa_insert_table(buf, topa);
        }

        TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
        TOPA_ENTRY(topa, -1)->size = order;
        if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
                TOPA_ENTRY(topa, -1)->intr = 1;
                TOPA_ENTRY(topa, -1)->stop = 1;
        }

        topa->last++;
        topa->size += sizes(order);

        buf->nr_pages += 1ul << order;

        return 0;
}

/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf: PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
        struct topa *topa;

        list_for_each_entry(topa, &buf->tables, list) {
                int i;

                pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
                         topa->phys, topa->offset, topa->size);
                for (i = 0; i < TENTS_PER_PAGE; i++) {
                        pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
                                 &topa->table[i],
                                 (unsigned long)topa->table[i].base << TOPA_SHIFT,
                                 sizes(topa->table[i].size),
                                 topa->table[i].end ? 'E' : ' ',
                                 topa->table[i].intr ? 'I' : ' ',
                                 topa->table[i].stop ? 'S' : ' ',
                                 *(u64 *)&topa->table[i]);
                        if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
                             topa->table[i].stop) ||
                            topa->table[i].end)
                                break;
                }
        }
}

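/**
 * pt_buffer_advance() - advance to the next output region
 * @buf: PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */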
static void pt_buffer_advance(struct pt_buffer *buf)
{
        buf->output_off = 0;
        buf->cur_idx++;

        if (buf->cur_idx == buf->cur->last) {
                if (buf->cur == buf->last)
                        buf->cur = buf->first;
                else
                        buf->cur = list_entry(buf->cur->list.next, struct topa,
                                              list);
                buf->cur_idx = 0;
        }
}

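/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt: Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */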
static void pt_update_head(struct pt *pt)
{
        struct pt_buffer *buf = perf_get_aux(&pt->handle);
        u64 topa_idx, base, old;

        /* offset of the first region in this table from the beginning of buf */
        base = buf->cur->offset + buf->output_off;

        /* offset of the current output region within this table */
        for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
                base += sizes(buf->cur->table[topa_idx].size);

        if (buf->snapshot) {
                local_set(&buf->data_size, base);
        } else {
                old = (local64_xchg(&buf->head, base) &
                       ((buf->nr_pages << PAGE_SHIFT) - 1));
                if (base < old)
                        base += buf->nr_pages << PAGE_SHIFT;

                local_add(base - old, &buf->data_size);
        }
}

/**
 * pt_buffer_region() - obtain current output region's address
 * @buf: PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
        return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
}

/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf: PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
        return sizes(buf->cur->table[buf->cur_idx].size);
}

/**
 * pt_handle_status() - take care of possible status conditions
 * @pt: Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
        struct pt_buffer *buf = perf_get_aux(&pt->handle);
        int advance = 0;
        u64 status;

        rdmsrl(MSR_IA32_RTIT_STATUS, status);

        if (status & RTIT_STATUS_ERROR) {
                pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
                pt_topa_dump(buf);
                status &= ~RTIT_STATUS_ERROR;
        }

        if (status & RTIT_STATUS_STOPPED) {
                status &= ~RTIT_STATUS_STOPPED;

                /*
                 * On systems that only do single-entry ToPA, hitting STOP
                 * means we are already losing data; need to let the decoder
                 * know.
                 */
                if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
                    buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
                        perf_aux_output_flag(&pt->handle,
                                             PERF_AUX_FLAG_TRUNCATED);
                        advance++;
                }
        }

        /*
         * Also on single-entry ToPA implementations, interrupt will come
         * before the output reaches its output region's boundary.
         */
        if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
            pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
                void *head = pt_buffer_region(buf);

                /* everything within this margin needs to be zeroed out */
                memset(head + buf->output_off, 0,
                       pt_buffer_region_size(buf) -
                       buf->output_off);
                advance++;
        }

        if (advance)
                pt_buffer_advance(buf);

        wrmsrl(MSR_IA32_RTIT_STATUS, status);
}

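/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf: PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */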
static void pt_read_offset(struct pt_buffer *buf)
{
        u64 offset, base_topa;

        rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
        buf->cur = phys_to_virt(base_topa);

        rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
        /* offset within current output region */
        buf->output_off = offset >> 32;
        /* index of current output region within this table */
        buf->cur_idx = (offset & 0xffffff80) >> 7;
}

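/**
 * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
 * @buf: PT buffer.
 * @pg: Page offset in the buffer.
 *
 * When advancing to the next output region (ToPA entry), given a page offset
 * into the buffer, we need to find the offset of the first page in the next
 * region.
 */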
static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
{
        struct topa_entry *te = buf->topa_index[pg];

        /* one region */
        if (buf->first == buf->last && buf->first->last == 1)
                return pg;

        do {
                pg++;
                pg &= buf->nr_pages - 1;
        } while (buf->topa_index[pg] == te);

        return pg;
}

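/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf: PT buffer.
 * @handle: Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected and waking up the consumer after a certain fraction of
 * the buffer has filled up. Only needed and sensible for non-snapshot counters.
 *
 * This relies on buf::head to figure out buffer markers, so it has to be
 * called after pt_buffer_reset_offsets() and before the hardware tracing is
 * enabled.
 */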
static int pt_buffer_reset_markers(struct pt_buffer *buf,
                                   struct perf_output_handle *handle)
{
        unsigned long head = local64_read(&buf->head);
        unsigned long idx, npages, wakeup;

        /* can't stop in the middle of an output region */
        if (buf->output_off + handle->size + 1 <
            sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
                return -EINVAL;
        }

        /* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
        if (!pt_cap_get(PT_CAP_topa_multiple_entries))
                return 0;

        /* clear STOP and INT from current entry */
        buf->topa_index[buf->stop_pos]->stop = 0;
        buf->topa_index[buf->stop_pos]->intr = 0;
        buf->topa_index[buf->intr_pos]->intr = 0;

        /* how many pages till the STOP marker */
        npages = handle->size >> PAGE_SHIFT;

        /* if it's on a page boundary, fill up one more page */
        if (!offset_in_page(head + handle->size + 1))
                npages++;

        idx = (head >> PAGE_SHIFT) + npages;
        idx &= buf->nr_pages - 1;
        buf->stop_pos = idx;

        wakeup = handle->wakeup >> PAGE_SHIFT;

        /* in the worst case, wake up the consumer one page before hard stop */
        idx = (head >> PAGE_SHIFT) + npages - 1;
        if (idx > wakeup)
                idx = wakeup;

        idx &= buf->nr_pages - 1;
        buf->intr_pos = idx;

        buf->topa_index[buf->stop_pos]->stop = 1;
        buf->topa_index[buf->stop_pos]->intr = 1;
        buf->topa_index[buf->intr_pos]->intr = 1;

        return 0;
}

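/**
 * pt_buffer_setup_topa_index() - build topa_index for pointer lookups
 * @buf: PT buffer.
 *
 * topa_index[] references output regions indexed by offset into the
 * buffer for purposes of quick reverse lookup.
 */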
static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
{
        struct topa *cur = buf->first, *prev = buf->last;
        struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
                *te_prev = TOPA_ENTRY(prev, prev->last - 1);
        int pg = 0, idx = 0;

        while (pg < buf->nr_pages) {
                int tidx;

                /* pages within one topa entry */
                for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
                        buf->topa_index[pg] = te_prev;

                te_prev = te_cur;

                if (idx == cur->last - 1) {
                        /* advance to next topa table */
                        idx = 0;
                        cur = list_entry(cur->list.next, struct topa, list);
                } else {
                        idx++;
                }
                te_cur = TOPA_ENTRY(cur, idx);
        }
}

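/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf: PT buffer.
 * @head: Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly. This is done after we have obtained the
 * current aux_head position from a successful call to perf_aux_output_begin()
 * to make sure the hardware is writing to the right place.
 *
 * This function modifies buf::{cur,cur_idx,output_off} that will be programmed
 * into PT msrs when the tracing is enabled and buf::head and buf::data_size,
 * which are used to determine the position of the write pointer in the buffer.
 */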
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
        int pg;

        if (buf->snapshot)
                head &= (buf->nr_pages << PAGE_SHIFT) - 1;

        pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
        pg = pt_topa_next_entry(buf, pg);

        buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
        buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
                        (unsigned long)buf->cur) / sizeof(struct topa_entry);
        buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);

        local64_set(&buf->head, head);
        local_set(&buf->data_size, 0);
}

/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf: PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
        struct topa *topa, *iter;

        list_for_each_entry_safe(topa, iter, &buf->tables, list) {
                /*
                 * right now, this is in free_aux() path only, so
                 * no need to unlink this table from the list
                 */
                topa_free(topa);
        }
}

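/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf: PT buffer.
 * @nr_pages: Number of pages to map.
 * @gfp: Allocation flags.
 */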
static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
                               gfp_t gfp)
{
        struct topa *topa;
        int err;

        topa = topa_alloc(buf->cpu, gfp);
        if (!topa)
                return -ENOMEM;

        topa_insert_table(buf, topa);

        while (buf->nr_pages < nr_pages) {
                err = topa_insert_pages(buf, gfp);
                if (err) {
                        pt_buffer_fini_topa(buf);
                        return -ENOMEM;
                }
        }

        pt_buffer_setup_topa_index(buf);

        /* link last table to the first one, unless we're double buffering */
        if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
                TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
                TOPA_ENTRY(buf->last, -1)->end = 1;
        }

        pt_topa_dump(buf);
        return 0;
}

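/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @cpu: Cpu on which to allocate, -1 means current.
 * @pages: Array of pointers to buffer pages passed from perf core.
 * @nr_pages: Number of pages in the buffer.
 * @snapshot: If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping related to the buffer's use in an AUX buffer.
 *
 * Return: Our private PT buffer structure.
 */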
static void *
pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
{
        struct pt_buffer *buf;
        int node, ret;

        if (!nr_pages)
                return NULL;

        if (cpu == -1)
                cpu = raw_smp_processor_id();
        node = cpu_to_node(cpu);

        buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
                           GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->cpu = cpu;
        buf->snapshot = snapshot;
        buf->data_pages = pages;

        INIT_LIST_HEAD(&buf->tables);

        ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
        if (ret) {
                kfree(buf);
                return NULL;
        }

        return buf;
}

/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data: PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
        struct pt_buffer *buf = data;

        pt_buffer_fini_topa(buf);
        kfree(buf);
}

static int pt_addr_filters_init(struct perf_event *event)
{
        struct pt_filters *filters;
        int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

        if (!pt_cap_get(PT_CAP_num_address_ranges))
                return 0;

        filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
        if (!filters)
                return -ENOMEM;

        if (event->parent)
                memcpy(filters, event->parent->hw.addr_filters,
                       sizeof(*filters));

        event->hw.addr_filters = filters;

        return 0;
}

static void pt_addr_filters_fini(struct perf_event *event)
{
        kfree(event->hw.addr_filters);
        event->hw.addr_filters = NULL;
}

static inline bool valid_kernel_ip(unsigned long ip)
{
        return virt_addr_valid(ip) && kernel_ip(ip);
}

static int pt_event_addr_filters_validate(struct list_head *filters)
{
        struct perf_addr_filter *filter;
        int range = 0;

        list_for_each_entry(filter, filters, entry) {
                /* PT doesn't support single address triggers */
                if (!filter->range || !filter->size)
                        return -EOPNOTSUPP;

                if (!filter->inode) {
                        if (!valid_kernel_ip(filter->offset))
                                return -EINVAL;

                        if (!valid_kernel_ip(filter->offset + filter->size))
                                return -EINVAL;
                }

                if (++range > pt_cap_get(PT_CAP_num_address_ranges))
                        return -EOPNOTSUPP;
        }

        return 0;
}

static void pt_event_addr_filters_sync(struct perf_event *event)
{
        struct perf_addr_filters_head *head = perf_event_addr_filters(event);
        unsigned long msr_a, msr_b, *offs = event->addr_filters_offs;
        struct pt_filters *filters = event->hw.addr_filters;
        struct perf_addr_filter *filter;
        int range = 0;

        if (!filters)
                return;

        list_for_each_entry(filter, &head->list, entry) {
                if (filter->inode && !offs[range]) {
                        msr_a = msr_b = 0;
                } else {
                        /* apply the offset */
                        msr_a = filter->offset + offs[range];
                        msr_b = filter->size + msr_a - 1;
                }

                filters->filter[range].msr_a  = msr_a;
                filters->filter[range].msr_b  = msr_b;
                filters->filter[range].config = filter->filter ? 1 : 2;
                range++;
        }

        filters->nr_filters = range;
}

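/**
 * intel_pt_interrupt() - PT PMI handler
 */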
void intel_pt_interrupt(void)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf;
        struct perf_event *event = pt->handle.event;

        /*
         * There may be a dangling PT bit in the interrupt status register
         * after PT has been disabled by pt_event_stop(). Make sure we don't
         * do anything (particularly, re-enable) for this event here.
         */
        if (!READ_ONCE(pt->handle_nmi))
                return;

        if (!event)
                return;

        pt_config_stop(event);

        buf = perf_get_aux(&pt->handle);
        if (!buf)
                return;

        pt_read_offset(buf);

        pt_handle_status(pt);

        pt_update_head(pt);

        perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));

        if (!event->hw.state) {
                int ret;

                buf = perf_aux_output_begin(&pt->handle, event);
                if (!buf) {
                        event->hw.state = PERF_HES_STOPPED;
                        return;
                }

                pt_buffer_reset_offsets(buf, pt->handle.head);

                ret = pt_buffer_reset_markers(buf, &pt->handle);
                if (ret) {
                        perf_aux_output_end(&pt->handle, 0);
                        return;
                }

                pt_config_buffer(buf->cur->table, buf->cur_idx,
                                 buf->output_off);
                pt_config(event);
        }
}

void intel_pt_handle_vmx(int on)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct perf_event *event;
        unsigned long flags;

        /* PT plays nice with VMX, do nothing */
        if (pt_pmu.vmx)
                return;

        /*
         * VMXON will clear RTIT_CTL.TraceEn; we need to make
         * sure to not try to set it while VMX is on. Disable
         * interrupts to avoid racing with pmu callbacks;
         * concurrent PMI should be handled fine.
         */
        local_irq_save(flags);
        WRITE_ONCE(pt->vmx_on, on);

        /*
         * If an AUX transaction is in progress, it will contain
         * gap(s), so flag it PARTIAL to inform the user.
         */
        event = pt->handle.event;
        if (event)
                perf_aux_output_flag(&pt->handle,
                                     PERF_AUX_FLAG_PARTIAL);

        /* Turn PTs back on */
        if (!on && event)
                wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config);

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);

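/*
 * PMU callbacks
 */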
static void pt_event_start(struct perf_event *event, int mode)
{
        struct hw_perf_event *hwc = &event->hw;
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf;

        buf = perf_aux_output_begin(&pt->handle, event);
        if (!buf)
                goto fail_stop;

        pt_buffer_reset_offsets(buf, pt->handle.head);
        if (!buf->snapshot) {
                if (pt_buffer_reset_markers(buf, &pt->handle))
                        goto fail_end_stop;
        }

        WRITE_ONCE(pt->handle_nmi, 1);
        hwc->state = 0;

        pt_config_buffer(buf->cur->table, buf->cur_idx,
                         buf->output_off);
        pt_config(event);

        return;

fail_end_stop:
        perf_aux_output_end(&pt->handle, 0);
fail_stop:
        hwc->state = PERF_HES_STOPPED;
}

static void pt_event_stop(struct perf_event *event, int mode)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);

        /*
         * Protect against the PMI racing with disabling wrmsr,
         * see comment in intel_pt_interrupt().
         */
        WRITE_ONCE(pt->handle_nmi, 0);

        pt_config_stop(event);

        if (event->hw.state == PERF_HES_STOPPED)
                return;

        event->hw.state = PERF_HES_STOPPED;

        if (mode & PERF_EF_UPDATE) {
                struct pt_buffer *buf = perf_get_aux(&pt->handle);

                if (!buf)
                        return;

                if (WARN_ON_ONCE(pt->handle.event != event))
                        return;

                pt_read_offset(buf);

                pt_handle_status(pt);

                pt_update_head(pt);

                if (buf->snapshot)
                        pt->handle.head =
                                local_xchg(&buf->data_size,
                                           buf->nr_pages << PAGE_SHIFT);
                perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
        }
}

static void pt_event_del(struct perf_event *event, int mode)
{
        pt_event_stop(event, PERF_EF_UPDATE);
}

static int pt_event_add(struct perf_event *event, int mode)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct hw_perf_event *hwc = &event->hw;
        int ret = -EBUSY;

        if (pt->handle.event)
                goto fail;

        if (mode & PERF_EF_START) {
                pt_event_start(event, 0);
                ret = -EINVAL;
                if (hwc->state == PERF_HES_STOPPED)
                        goto fail;
        } else {
                hwc->state = PERF_HES_STOPPED;
        }

        ret = 0;
fail:

        return ret;
}

static void pt_event_read(struct perf_event *event)
{
}

static void pt_event_destroy(struct perf_event *event)
{
        pt_addr_filters_fini(event);
        x86_del_exclusive(x86_lbr_exclusive_pt);
}

static int pt_event_init(struct perf_event *event)
{
        if (event->attr.type != pt_pmu.pmu.type)
                return -ENOENT;

        if (!pt_event_valid(event))
                return -EINVAL;

        if (x86_add_exclusive(x86_lbr_exclusive_pt))
                return -EBUSY;

        if (pt_addr_filters_init(event)) {
                x86_del_exclusive(x86_lbr_exclusive_pt);
                return -ENOMEM;
        }

        event->destroy = pt_event_destroy;

        return 0;
}

void cpu_emergency_stop_pt(void)
{
        struct pt *pt = this_cpu_ptr(&pt_ctx);

        if (pt->handle.event)
                pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}

static __init int pt_init(void)
{
        int ret, cpu, prior_warn = 0;

        BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

        if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
                return -ENODEV;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                u64 ctl;

                ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
                if (!ret && (ctl & RTIT_CTL_TRACEEN))
                        prior_warn++;
        }
        put_online_cpus();

        if (prior_warn) {
                x86_add_exclusive(x86_lbr_exclusive_pt);
                pr_warn("PT is enabled at boot time, doing nothing\n");

                return -EBUSY;
        }

        ret = pt_pmu_hw_init();
        if (ret)
                return ret;

        if (!pt_cap_get(PT_CAP_topa_output)) {
                pr_warn("ToPA output is not supported on this CPU\n");
                return -ENODEV;
        }

        if (!pt_cap_get(PT_CAP_topa_multiple_entries))
                pt_pmu.pmu.capabilities =
                        PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;

        pt_pmu.pmu.capabilities         |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
        pt_pmu.pmu.attr_groups          = pt_attr_groups;
        pt_pmu.pmu.task_ctx_nr          = perf_sw_context;
        pt_pmu.pmu.event_init           = pt_event_init;
        pt_pmu.pmu.add                  = pt_event_add;
        pt_pmu.pmu.del                  = pt_event_del;
        pt_pmu.pmu.start                = pt_event_start;
        pt_pmu.pmu.stop                 = pt_event_stop;
        pt_pmu.pmu.read                 = pt_event_read;
        pt_pmu.pmu.setup_aux            = pt_buffer_setup_aux;
        pt_pmu.pmu.free_aux             = pt_buffer_free_aux;
        pt_pmu.pmu.addr_filters_sync    = pt_event_addr_filters_sync;
        pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
        pt_pmu.pmu.nr_addr_filters      =
                pt_cap_get(PT_CAP_num_address_ranges);

        ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

        return ret;
}
arch_initcall(pt_init);