/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */
#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/intel_pt.h>

#include "../perf_event.h"
#include "pt.h"

static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;

enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};

/*
 * Capabilities of Intel PT hardware, such as number of address ranges or
 * supported output schemes, are cached and exported to userspace as the
 * "caps" attribute group of the pt pmu device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can
 * store them along with the intel_pt traces for later decoding.
 */
#define PT_CAP(_n, _l, _r, _m)						\
	[PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,	\
			    .reg = _r, .mask = _m }

static struct pt_cap_desc {
	const char	*name;
	u32		leaf;
	u8		reg;
	u32		mask;
} pt_caps[] = {
	PT_CAP(max_subleaf,		0, CR_EAX, 0xffffffff),
	PT_CAP(cr3_filtering,		0, CR_EBX, BIT(0)),
	PT_CAP(psb_cyc,			0, CR_EBX, BIT(1)),
	PT_CAP(ip_filtering,		0, CR_EBX, BIT(2)),
	PT_CAP(mtc,			0, CR_EBX, BIT(3)),
	PT_CAP(topa_output,		0, CR_ECX, BIT(0)),
	PT_CAP(topa_multiple_entries,	0, CR_ECX, BIT(1)),
	PT_CAP(single_range_output,	0, CR_ECX, BIT(2)),
	PT_CAP(payloads_lip,		0, CR_ECX, BIT(31)),
	PT_CAP(num_address_ranges,	1, CR_EAX, 0x3),
	PT_CAP(mtc_periods,		1, CR_EAX, 0xffff0000),
	PT_CAP(cycle_thresholds,	1, CR_EBX, 0xffff),
	PT_CAP(psb_periods,		1, CR_EBX, 0xffff0000),
};

static u32 pt_cap_get(enum pt_capabilities cap)
{
	struct pt_cap_desc *cd = &pt_caps[cap];
	u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
	unsigned int shift = __ffs(cd->mask);

	return (c & cd->mask) >> shift;
}
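
/*
 * Worked example (illustrative, not part of the driver): for
 * PT_CAP(mtc_periods, 1, CR_EAX, 0xffff0000) above, __ffs(0xffff0000)
 * is 16, so a raw CPUID.(EAX=14H, ECX=1):EAX value of 0x02490000 makes
 * pt_cap_get(PT_CAP_mtc_periods) return 0x0249, a bitmask of the
 * supported MTC period encodings.
 */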

static ssize_t pt_cap_show(struct device *cdev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	enum pt_capabilities cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
}

static struct attribute_group pt_cap_group = {
	.name	= "caps",
};

PMU_FORMAT_ATTR(cyc,		"config:1"	);
PMU_FORMAT_ATTR(mtc,		"config:9"	);
PMU_FORMAT_ATTR(tsc,		"config:10"	);
PMU_FORMAT_ATTR(noretcomp,	"config:11"	);
PMU_FORMAT_ATTR(mtc_period,	"config:14-17"	);
PMU_FORMAT_ATTR(cyc_thresh,	"config:19-22"	);
PMU_FORMAT_ATTR(psb_period,	"config:24-27"	);

static struct attribute *pt_formats_attr[] = {
	&format_attr_cyc.attr,
	&format_attr_mtc.attr,
	&format_attr_tsc.attr,
	&format_attr_noretcomp.attr,
	&format_attr_mtc_period.attr,
	&format_attr_cyc_thresh.attr,
	&format_attr_psb_period.attr,
	NULL,
};

static struct attribute_group pt_format_group = {
	.name	= "format",
	.attrs	= pt_formats_attr,
};
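
/*
 * The format attributes above show up under
 * /sys/bus/event_source/devices/intel_pt/format/ and let userspace name
 * config bits symbolically, for example:
 *
 *	perf record -e intel_pt/tsc=1,mtc=1,mtc_period=3/ -- workload
 */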

static ssize_t
pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
		    char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	switch (pmu_attr->id) {
	case 0:
		return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
	case 1:
		return sprintf(page, "%u:%u\n",
			       pt_pmu.tsc_art_num,
			       pt_pmu.tsc_art_den);
	default:
		break;
	}

	return -EINVAL;
}

PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
	       pt_timing_attr_show);
PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
	       pt_timing_attr_show);

static struct attribute *pt_timing_attr[] = {
	&timing_attr_max_nonturbo_ratio.attr.attr,
	&timing_attr_tsc_art_ratio.attr.attr,
	NULL,
};

static struct attribute_group pt_timing_group = {
	.attrs	= pt_timing_attr,
};

static const struct attribute_group *pt_attr_groups[] = {
	&pt_cap_group,
	&pt_format_group,
	&pt_timing_group,
	NULL,
};

static int __init pt_pmu_hw_init(void)
{
	struct dev_ext_attribute *de_attrs;
	struct attribute **attrs;
	size_t size;
	u64 reg;
	int ret;
	long i;

	rdmsrl(MSR_PLATFORM_INFO, reg);
	pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;

	/*
	 * If available, read the TSC to core crystal clock ratio;
	 * otherwise, a zero numerator stands for "not enumerated",
	 * as per the SDM.
	 */
	if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
		u32 eax, ebx, ecx, edx;

		cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);

		pt_pmu.tsc_art_num = ebx;
		pt_pmu.tsc_art_den = eax;
	}

	if (boot_cpu_has(X86_FEATURE_VMX)) {
		/*
		 * Intel SDM, 36.5 "Tracing post-VMXON" says that
		 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
		 * post-VMXON.
		 */
		rdmsrl(MSR_IA32_VMX_MISC, reg);
		if (reg & BIT(14))
			pt_pmu.vmx = true;
	}

	attrs = NULL;

	/* CPUID leaf 20 (0x14) enumerates Intel PT capabilities */
	for (i = 0; i < PT_CPUID_LEAVES; i++) {
		cpuid_count(20, i,
			    &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
	}

	ret = -ENOMEM;
	size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
	attrs = kzalloc(size, GFP_KERNEL);
	if (!attrs)
		goto fail;

	size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
	de_attrs = kzalloc(size, GFP_KERNEL);
	if (!de_attrs)
		goto fail;

	for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
		struct dev_ext_attribute *de_attr = de_attrs + i;

		de_attr->attr.attr.name = pt_caps[i].name;

		sysfs_attr_init(&de_attr->attr.attr);

		de_attr->attr.attr.mode		= S_IRUGO;
		de_attr->attr.show		= pt_cap_show;
		de_attr->var			= (void *)i;

		attrs[i] = &de_attr->attr.attr;
	}

	pt_cap_group.attrs = attrs;

	return 0;

fail:
	kfree(attrs);

	return ret;
}

#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC	| \
			  RTIT_CTL_CYC_THRESH	| \
			  RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC	(RTIT_CTL_MTC_EN	| \
			 RTIT_CTL_MTC_RANGE)

#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN		| \
			RTIT_CTL_DISRETC	| \
			RTIT_CTL_CYC_PSB	| \
			RTIT_CTL_MTC)

static bool pt_event_valid(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 allowed, requested;

	if ((config & PT_CONFIG_MASK) != config)
		return false;

	if (config & RTIT_CTL_CYC_PSB) {
		if (!pt_cap_get(PT_CAP_psb_cyc))
			return false;

		allowed = pt_cap_get(PT_CAP_psb_periods);
		requested = (config & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;

		allowed = pt_cap_get(PT_CAP_cycle_thresholds);
		requested = (config & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;
	}

	if (config & RTIT_CTL_MTC) {
		/*
		 * In the unlikely case that CPUID lists valid mtc periods,
		 * but not the mtc capability, drop out here.
		 *
		 * Spec says that setting mtc period bits while mtc bit in
		 * CPUID is 0 will #GP, so better safe than sorry.
		 */
		if (!pt_cap_get(PT_CAP_mtc))
			return false;

		allowed = pt_cap_get(PT_CAP_mtc_periods);
		if (!allowed)
			return false;

		requested = (config & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET;

		if (!(allowed & BIT(requested)))
			return false;
	}

	return true;
}
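
/*
 * Example of the validation above (illustrative): a config requesting
 * psb_period == 3 (RTIT_CTL bits 27:24) needs BIT(3) set in the
 * psb_periods capability bitmask; a CPU advertising 0x3f (periods 0-5)
 * accepts it, while one advertising 0x1 (period 0 only) makes
 * pt_event_valid() return false and the event fails with -EINVAL.
 */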

/*
 * Address range filtering: each of the four ADDRn_A/ADDRn_B MSR pairs
 * below describes one address range; its behavior (filter or stop) is
 * selected by the corresponding ADDRn_CFG field in RTIT_CTL, located at
 * reg_off.
 */

static const struct pt_address_range {
	unsigned long	msr_a;
	unsigned long	msr_b;
	unsigned int	reg_off;
} pt_address_ranges[] = {
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR0_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR0_B,
		.reg_off = RTIT_CTL_ADDR0_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR1_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR1_B,
		.reg_off = RTIT_CTL_ADDR1_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR2_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR2_B,
		.reg_off = RTIT_CTL_ADDR2_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR3_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR3_B,
		.reg_off = RTIT_CTL_ADDR3_OFFSET,
	}
};

static u64 pt_config_filters(struct perf_event *event)
{
	struct pt_filters *filters = event->hw.addr_filters;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	unsigned int range = 0;
	u64 rtit_ctl = 0;

	if (!filters)
		return 0;

	perf_event_addr_filters_sync(event);

	for (range = 0; range < filters->nr_filters; range++) {
		struct pt_filter *filter = &filters->filter[range];

		/*
		 * Note, if the range has zero start/end addresses due
		 * to its dynamic object not being loaded yet, we just
		 * go ahead and program zeroed range, which will simply
		 * produce no data. Note^2: if executable code at 0x0
		 * is a concern, we can set up an "invalid" configuration
		 * such as msr_b < msr_a.
		 */

		/* avoid redundant msr writes */
		if (pt->filters.filter[range].msr_a != filter->msr_a) {
			wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
			pt->filters.filter[range].msr_a = filter->msr_a;
		}

		if (pt->filters.filter[range].msr_b != filter->msr_b) {
			wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
			pt->filters.filter[range].msr_b = filter->msr_b;
		}

		rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
	}

	return rtit_ctl;
}

static void pt_config(struct perf_event *event)
{
	u64 reg;

	if (!event->hw.itrace_started) {
		event->hw.itrace_started = 1;
		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
	}

	reg = pt_config_filters(event);
	reg |= RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;

	if (!event->attr.exclude_kernel)
		reg |= RTIT_CTL_OS;
	if (!event->attr.exclude_user)
		reg |= RTIT_CTL_USR;

	reg |= (event->attr.config & PT_CONFIG_MASK);

	event->hw.config = reg;
	wrmsrl(MSR_IA32_RTIT_CTL, reg);
}

static void pt_config_stop(struct perf_event *event)
{
	u64 ctl = READ_ONCE(event->hw.config);

	/* may be already stopped by a PMI */
	if (!(ctl & RTIT_CTL_TRACEEN))
		return;

	ctl &= ~RTIT_CTL_TRACEEN;
	wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	WRITE_ONCE(event->hw.config, ctl);

	/*
	 * A wrmsr that disables trace generation serializes other PT
	 * registers and causes all data packets to be written to memory,
	 * but a fence is required for the data to become globally visible.
	 *
	 * The below WMB, separating data store and aux_head store matches
	 * the consumer's RMB that separates aux_head load and data load.
	 */
	wmb();
}

static void pt_config_buffer(void *buf, unsigned int topa_idx,
			     unsigned int output_off)
{
	u64 reg;

	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf));

	reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32);

	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
}
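
/*
 * Layout of the MSR_IA32_RTIT_OUTPUT_MASK value written above, as this
 * driver uses it: bits 6:0 are all set (0x7f), bits 31:7 carry the index
 * of the current ToPA table entry and bits 63:32 the byte offset within
 * the current output region. pt_read_offset() performs the inverse
 * translation when reading the MSR back.
 */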

/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top.
 */

#define TENTS_PER_PAGE (((PAGE_SIZE - 40) / sizeof(struct topa_entry)) - 1)
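
/*
 * For example, with 4k pages and 8-byte ToPA entries this works out to
 * (4096 - 40) / 8 - 1 = 506 entries per table, the last of which is
 * used for the END link that chains to the next table.
 */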

/**
 * struct topa - page-sized ToPA table with metadata at the top
 * @table:	actual ToPA table entries, as understood by PT hardware
 * @list:	linkage to struct pt_buffer's list of tables
 * @phys:	physical address of this page
 * @offset:	offset of the first entry in this table in the buffer
 * @size:	total size of all entries in this table
 * @last:	index of the last initialized entry in this table
 */
struct topa {
	struct topa_entry	table[TENTS_PER_PAGE];
	struct list_head	list;
	u64			phys;
	u64			offset;
	size_t			size;
	int			last;
};

/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i) ((i) == -1 ? &(t)->table[(t)->last] : &(t)->table[(i)])

/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * Return:	On success, return the pointer to ToPA table page.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
	int node = cpu_to_node(cpu);
	struct topa *topa;
	struct page *p;

	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
	if (!p)
		return NULL;

	topa = page_address(p);
	topa->last = 0;
	topa->phys = page_to_phys(p);

	/*
	 * In case of single-entry ToPA, always put the self-referencing
	 * END link as the 2nd entry in the table
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
		TOPA_ENTRY(topa, 1)->end = 1;
	}

	return topa;
}

/**
 * topa_free() - free a page-sized ToPA table
 * @topa:	Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
	free_page((unsigned long)topa);
}

/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:	PT buffer that's being extended.
 * @topa:	New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry to the current "last"
 * table pointing to @topa and adjust the last table pointer to @topa.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
	struct topa *last = buf->last;

	list_add_tail(&topa->list, &buf->tables);

	if (!buf->first) {
		buf->first = buf->last = buf->cur = topa;
		return;
	}

	topa->offset = last->offset + last->size;
	buf->last = topa;

	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return;

	BUG_ON(last->last != TENTS_PER_PAGE - 1);

	TOPA_ENTRY(last, -1)->base = topa->phys >> TOPA_SHIFT;
	TOPA_ENTRY(last, -1)->end = 1;
}

/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:	ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
	/* single-entry ToPA is a special case */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return !!topa->last;

	return topa->last == TENTS_PER_PAGE - 1;
}

/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf:	PT buffer being initialized.
 * @gfp:	Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return:	0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
{
	struct topa *topa = buf->last;
	int order = 0;
	struct page *p;

	p = virt_to_page(buf->data_pages[buf->nr_pages]);
	if (PagePrivate(p))
		order = page_private(p);

	if (topa_table_full(topa)) {
		topa = topa_alloc(buf->cpu, gfp);
		if (!topa)
			return -ENOMEM;

		topa_insert_table(buf, topa);
	}

	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
	TOPA_ENTRY(topa, -1)->size = order;
	if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, -1)->intr = 1;
		TOPA_ENTRY(topa, -1)->stop = 1;
	}

	topa->last++;
	topa->size += sizes(order);

	buf->nr_pages += 1ul << order;

	return 0;
}
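
/*
 * A note on the PagePrivate() check above: for high-order AUX
 * allocations, rb_alloc_aux() marks the head page with PagePrivate and
 * stores the allocation order in page_private(), so a single ToPA entry
 * can cover the whole 2^order run of pages as one output region.
 */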

/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:	PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
	struct topa *topa;

	list_for_each_entry(topa, &buf->tables, list) {
		int i;

		pr_debug("# table @%p (%016Lx), off %llx size %zx\n", topa->table,
			 topa->phys, topa->offset, topa->size);
		for (i = 0; i < TENTS_PER_PAGE; i++) {
			pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
				 &topa->table[i],
				 (unsigned long)topa->table[i].base << TOPA_SHIFT,
				 sizes(topa->table[i].size),
				 topa->table[i].end ? 'E' : ' ',
				 topa->table[i].intr ? 'I' : ' ',
				 topa->table[i].stop ? 'S' : ' ',
				 *(u64 *)&topa->table[i]);
			if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
			     topa->table[i].stop) ||
			    topa->table[i].end)
				break;
		}
	}
}

/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:	PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
	buf->output_off = 0;
	buf->cur_idx++;

	if (buf->cur_idx == buf->cur->last) {
		if (buf->cur == buf->last)
			buf->cur = buf->first;
		else
			buf->cur = list_entry(buf->cur->list.next, struct topa,
					      list);
		buf->cur_idx = 0;
	}
}

/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:		Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 topa_idx, base, old;

	/* offset of the first region in this table from the beginning of buf */
	base = buf->cur->offset + buf->output_off;

	/* offset of the current output region within this table */
	for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
		base += sizes(buf->cur->table[topa_idx].size);

	if (buf->snapshot) {
		local_set(&buf->data_size, base);
	} else {
		old = (local64_xchg(&buf->head, base) &
		       ((buf->nr_pages << PAGE_SHIFT) - 1));
		if (base < old)
			base += buf->nr_pages << PAGE_SHIFT;

		local_add(base - old, &buf->data_size);
	}
}
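
/*
 * Wrap-around example for the !snapshot path above (illustrative): with
 * a 64k buffer (nr_pages << PAGE_SHIFT == 0x10000), an old masked head
 * of 0xf000 and a new base of 0x1000, base < old, so base is bumped to
 * 0x11000 and data_size grows by 0x2000, exactly the amount written
 * across the wrap.
 */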

/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:	PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
	return phys_to_virt(buf->cur->table[buf->cur_idx].base << TOPA_SHIFT);
}

/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:	PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
	return sizes(buf->cur->table[buf->cur_idx].size);
}

/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:		Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	int advance = 0;
	u64 status;

	rdmsrl(MSR_IA32_RTIT_STATUS, status);

	if (status & RTIT_STATUS_ERROR) {
		pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
		pt_topa_dump(buf);
		status &= ~RTIT_STATUS_ERROR;
	}

	if (status & RTIT_STATUS_STOPPED) {
		status &= ~RTIT_STATUS_STOPPED;

		/*
		 * On systems that only do single-entry ToPA, hitting STOP
		 * means we are already losing data; need to let the decoder
		 * know.
		 */
		if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
		    buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
			local_inc(&buf->lost);
			advance++;
		}
	}

	/*
	 * Also on single-entry ToPA implementations, the interrupt will
	 * come before the output reaches its output region's boundary.
	 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
	    pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
		void *head = pt_buffer_region(buf);

		/* everything within this margin needs to be zeroed out */
		memset(head + buf->output_off, 0,
		       pt_buffer_region_size(buf) -
		       buf->output_off);
		advance++;
	}

	if (advance)
		pt_buffer_advance(buf);

	wrmsrl(MSR_IA32_RTIT_STATUS, status);
}

/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:	PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
	u64 offset, base_topa;

	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa);
	buf->cur = phys_to_virt(base_topa);

	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset);
	/* offset within current output region */
	buf->output_off = offset >> 32;
	/* index of current output region within this table */
	buf->cur_idx = (offset & 0xffffff80) >> 7;
}

/**
 * pt_topa_next_entry() - obtain index of the first page in the next ToPA entry
 * @buf:	PT buffer.
 * @pg:		Page offset in the buffer.
 *
 * When advancing to the next output region (ToPA entry), given a page offset
 * into the buffer, we need to find the offset of the first page in the next
 * region.
 */
static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
{
	struct topa_entry *te = buf->topa_index[pg];

	/* one region */
	if (buf->first == buf->last && buf->first->last == 1)
		return pg;

	do {
		pg++;
		pg &= buf->nr_pages - 1;
	} while (buf->topa_index[pg] == te);

	return pg;
}

/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:	PT buffer.
 * @handle:	Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected and waking up the consumer after a certain page has
 * been written to.
 *
 * This obviously relies on buf::head to figure out buffer markers, so it has
 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
 * is enabled.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
				   struct perf_output_handle *handle)
{
	unsigned long head = local64_read(&buf->head);
	unsigned long idx, npages, wakeup;

	/* can't stop in the middle of an output region */
	if (buf->output_off + handle->size + 1 <
	    sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size))
		return -EINVAL;

	/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		return 0;

	/* clear STOP and INT from current entry */
	buf->topa_index[buf->stop_pos]->stop = 0;
	buf->topa_index[buf->stop_pos]->intr = 0;
	buf->topa_index[buf->intr_pos]->intr = 0;

	/* how many pages till the STOP marker */
	npages = handle->size >> PAGE_SHIFT;

	/* if it's on a page boundary, fill up one more page */
	if (!offset_in_page(head + handle->size + 1))
		npages++;

	idx = (head >> PAGE_SHIFT) + npages;
	idx &= buf->nr_pages - 1;
	buf->stop_pos = idx;

	wakeup = handle->wakeup >> PAGE_SHIFT;

	/* in the worst case, wake up the consumer one page before hard stop */
	idx = (head >> PAGE_SHIFT) + npages - 1;
	if (idx > wakeup)
		idx = wakeup;

	idx &= buf->nr_pages - 1;
	buf->intr_pos = idx;

	buf->topa_index[buf->stop_pos]->stop = 1;
	buf->topa_index[buf->stop_pos]->intr = 1;
	buf->topa_index[buf->intr_pos]->intr = 1;

	return 0;
}
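
/*
 * Placement example (illustrative, assuming 4k pages): with head ==
 * 0x1000 and handle->size == 0x2fff, npages is initially 2; head + size
 * + 1 == 0x4000 is page-aligned, so npages becomes 3 and the STOP bit
 * lands on the entry backing buffer page 4, with the INT bit no later
 * than one page before it (or earlier, if the wakeup point comes first).
 */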

/**
 * pt_buffer_setup_topa_index() - build topa_index for PT buffer
 * @buf:	PT buffer.
 *
 * topa_index[] references output regions indexed by offset into the
 * buffer for purposes of quick reverse lookup.
 */
static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
{
	struct topa *cur = buf->first, *prev = buf->last;
	struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
		*te_prev = TOPA_ENTRY(prev, prev->last - 1);
	int pg = 0, idx = 0;

	while (pg < buf->nr_pages) {
		int tidx;

		/* pages within one topa entry */
		for (tidx = 0; tidx < 1 << te_cur->size; tidx++, pg++)
			buf->topa_index[pg] = te_prev;

		te_prev = te_cur;

		if (idx == cur->last - 1) {
			/* advance to next topa table */
			idx = 0;
			cur = list_entry(cur->list.next, struct topa, list);
		} else {
			idx++;
		}
		te_cur = TOPA_ENTRY(cur, idx);
	}
}

/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:	PT buffer.
 * @head:	Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly. This is done after we have obtained the
 * current aux_head position from a successful call to perf_aux_output_begin()
 * to make sure the hardware is writing to the right place and nothing gets
 * lost or overwritten.
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
	int pg;

	if (buf->snapshot)
		head &= (buf->nr_pages << PAGE_SHIFT) - 1;

	pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
	pg = pt_topa_next_entry(buf, pg);

	buf->cur = (struct topa *)((unsigned long)buf->topa_index[pg] & PAGE_MASK);
	buf->cur_idx = ((unsigned long)buf->topa_index[pg] -
			(unsigned long)buf->cur) / sizeof(struct topa_entry);
	buf->output_off = head & (sizes(buf->cur->table[buf->cur_idx].size) - 1);

	local64_set(&buf->head, head);
	local_set(&buf->data_size, 0);
}

/**
 * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
 * @buf:	PT buffer.
 */
static void pt_buffer_fini_topa(struct pt_buffer *buf)
{
	struct topa *topa, *iter;

	list_for_each_entry_safe(topa, iter, &buf->tables, list) {
		/*
		 * right now, this is in free_aux() path only, so
		 * no need to unlink this table from the list
		 */
		topa_free(topa);
	}
}

/**
 * pt_buffer_init_topa() - initialize ToPA table for pt buffer
 * @buf:	PT buffer.
 * @nr_pages:	Number of pages in the buffer.
 * @gfp:	Allocation flags.
 */
static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
			       gfp_t gfp)
{
	struct topa *topa;
	int err;

	topa = topa_alloc(buf->cpu, gfp);
	if (!topa)
		return -ENOMEM;

	topa_insert_table(buf, topa);

	while (buf->nr_pages < nr_pages) {
		err = topa_insert_pages(buf, gfp);
		if (err) {
			pt_buffer_fini_topa(buf);
			return -ENOMEM;
		}
	}

	pt_buffer_setup_topa_index(buf);

	/* link last table to the first one, unless we're double buffering */
	if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
		TOPA_ENTRY(buf->last, -1)->end = 1;
	}

	pt_topa_dump(buf);
	return 0;
}

/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @cpu:	Cpu on which to allocate, -1 means current.
 * @pages:	Array of pointers to buffer pages passed from perf core.
 * @nr_pages:	Number of pages in the buffer.
 * @snapshot:	If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping related to the buffer. On failure, returns NULL.
 */
static void *
pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
{
	struct pt_buffer *buf;
	int node, ret;

	if (!nr_pages)
		return NULL;

	if (cpu == -1)
		cpu = raw_smp_processor_id();
	node = cpu_to_node(cpu);

	buf = kzalloc_node(offsetof(struct pt_buffer, topa_index[nr_pages]),
			   GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->cpu = cpu;
	buf->snapshot = snapshot;
	buf->data_pages = pages;

	INIT_LIST_HEAD(&buf->tables);

	ret = pt_buffer_init_topa(buf, nr_pages, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		return NULL;
	}

	return buf;
}

/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:	PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
	struct pt_buffer *buf = data;

	pt_buffer_fini_topa(buf);
	kfree(buf);
}
static int pt_addr_filters_init(struct perf_event *event)
{
	struct pt_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	if (!pt_cap_get(PT_CAP_num_address_ranges))
		return 0;

	filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void pt_addr_filters_fini(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static inline bool valid_kernel_ip(unsigned long ip)
{
	return virt_addr_valid(ip) && kernel_ip(ip);
}

static int pt_event_addr_filters_validate(struct list_head *filters)
{
	struct perf_addr_filter *filter;
	int range = 0;

	list_for_each_entry(filter, filters, entry) {
		/* PT doesn't support single-address triggers */
		if (!filter->range || !filter->size)
			return -EOPNOTSUPP;

		if (!filter->inode) {
			if (!valid_kernel_ip(filter->offset))
				return -EINVAL;

			if (!valid_kernel_ip(filter->offset + filter->size))
				return -EINVAL;
		}

		if (++range > pt_cap_get(PT_CAP_num_address_ranges))
			return -EOPNOTSUPP;
	}

	return 0;
}

static void pt_event_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long msr_a, msr_b, *offs = event->addr_filters_offs;
	struct pt_filters *filters = event->hw.addr_filters;
	struct perf_addr_filter *filter;
	int range = 0;

	if (!filters)
		return;

	list_for_each_entry(filter, &head->list, entry) {
		if (filter->inode && !offs[range]) {
			msr_a = msr_b = 0;
		} else {
			/* apply the offset */
			msr_a = filter->offset + offs[range];
			msr_b = filter->size + msr_a - 1;
		}

		filters->filter[range].msr_a  = msr_a;
		filters->filter[range].msr_b  = msr_b;
		filters->filter[range].config = filter->filter ? 1 : 2;
		range++;
	}

	filters->nr_filters = range;
}
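
/*
 * The config encoding above matches the RTIT_CTL ADDRn_CFG field
 * values: 1 programs the range as a trace filter (trace only while the
 * IP is inside it), 2 programs it as a TraceStop region.
 * pt_config_filters() later shifts it into place using
 * pt_address_ranges[].reg_off.
 */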

/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;
	struct perf_event *event = pt->handle.event;

	/*
	 * There may be a dangling PT bit in the interrupt status register
	 * after PT has been disabled by pt_event_stop(). Make sure we don't
	 * do anything (particularly, re-enable) for this event here.
	 */
	if (!READ_ONCE(pt->handle_nmi))
		return;

	/*
	 * If VMX is on and PT does not support it, don't touch anything.
	 */
	if (READ_ONCE(pt->vmx_on))
		return;

	if (!event)
		return;

	pt_config_stop(event);

	buf = perf_get_aux(&pt->handle);
	if (!buf)
		return;

	pt_read_offset(buf);

	pt_handle_status(pt);

	pt_update_head(pt);

	perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
			    local_xchg(&buf->lost, 0));

	if (!event->hw.state) {
		int ret;

		buf = perf_aux_output_begin(&pt->handle, event);
		if (!buf) {
			event->hw.state = PERF_HES_STOPPED;
			return;
		}

		pt_buffer_reset_offsets(buf, pt->handle.head);
		/* snapshot counters don't use PMI, so it's safe */
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret) {
			perf_aux_output_end(&pt->handle, 0, true);
			return;
		}

		pt_config_buffer(buf->cur->table, buf->cur_idx,
				 buf->output_off);
		pt_config(event);
	}
}

void intel_pt_handle_vmx(int on)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct perf_event *event;
	unsigned long flags;

	/* PT plays nice with VMX, do nothing */
	if (pt_pmu.vmx)
		return;

	/*
	 * VMXON will clear RTIT_CTL.TraceEn; we need to make
	 * sure to not try to set it while VMX is on. Disable
	 * interrupts to avoid racing with pmu callbacks;
	 * concurrent PMI should be handled fine.
	 */
	local_irq_save(flags);
	WRITE_ONCE(pt->vmx_on, on);

	if (on) {
		/* prevent pt_config_stop() from writing RTIT_CTL */
		event = pt->handle.event;
		if (event)
			event->hw.config = 0;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);

/*
 * PMU callbacks
 */

static void pt_event_start(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;

	if (READ_ONCE(pt->vmx_on))
		return;

	buf = perf_aux_output_begin(&pt->handle, event);
	if (!buf)
		goto fail_stop;

	pt_buffer_reset_offsets(buf, pt->handle.head);
	if (!buf->snapshot) {
		if (pt_buffer_reset_markers(buf, &pt->handle))
			goto fail_end_stop;
	}

	WRITE_ONCE(pt->handle_nmi, 1);
	hwc->state = 0;

	pt_config_buffer(buf->cur->table, buf->cur_idx,
			 buf->output_off);
	pt_config(event);

	return;

fail_end_stop:
	perf_aux_output_end(&pt->handle, 0, true);
fail_stop:
	hwc->state = PERF_HES_STOPPED;
}

static void pt_event_stop(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	/*
	 * Protect against the PMI racing with disabling wrmsr,
	 * see comment in intel_pt_interrupt().
	 */
	WRITE_ONCE(pt->handle_nmi, 0);

	pt_config_stop(event);

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		struct pt_buffer *buf = perf_get_aux(&pt->handle);

		if (!buf)
			return;

		if (WARN_ON_ONCE(pt->handle.event != event))
			return;

		pt_read_offset(buf);

		pt_handle_status(pt);

		pt_update_head(pt);

		if (buf->snapshot)
			pt->handle.head =
				local_xchg(&buf->data_size,
					   buf->nr_pages << PAGE_SHIFT);
		perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0),
				    local_xchg(&buf->lost, 0));
	}
}

static void pt_event_del(struct perf_event *event, int mode)
{
	pt_event_stop(event, PERF_EF_UPDATE);
}

static int pt_event_add(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	if (pt->handle.event)
		goto fail;

	if (mode & PERF_EF_START) {
		pt_event_start(event, 0);
		ret = -EINVAL;
		if (hwc->state == PERF_HES_STOPPED)
			goto fail;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	ret = 0;
fail:
	return ret;
}

static void pt_event_read(struct perf_event *event)
{
}

static void pt_event_destroy(struct perf_event *event)
{
	pt_addr_filters_fini(event);
	x86_del_exclusive(x86_lbr_exclusive_pt);
}

static int pt_event_init(struct perf_event *event)
{
	if (event->attr.type != pt_pmu.pmu.type)
		return -ENOENT;

	if (!pt_event_valid(event))
		return -EINVAL;

	if (x86_add_exclusive(x86_lbr_exclusive_pt))
		return -EBUSY;

	if (pt_addr_filters_init(event)) {
		x86_del_exclusive(x86_lbr_exclusive_pt);
		return -ENOMEM;
	}

	event->destroy = pt_event_destroy;

	return 0;
}

void cpu_emergency_stop_pt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	if (pt->handle.event)
		pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}

static __init int pt_init(void)
{
	int ret, cpu, prior_warn = 0;

	BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

	if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
		return -ENODEV;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		u64 ctl;

		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
		if (!ret && (ctl & RTIT_CTL_TRACEEN))
			prior_warn++;
	}
	put_online_cpus();

	if (prior_warn) {
		x86_add_exclusive(x86_lbr_exclusive_pt);
		pr_warn("PT is enabled at boot time, doing nothing\n");

		return -EBUSY;
	}

	ret = pt_pmu_hw_init();
	if (ret)
		return ret;

	if (!pt_cap_get(PT_CAP_topa_output)) {
		pr_warn("ToPA output is not supported on this CPU\n");
		return -ENODEV;
	}

	if (!pt_cap_get(PT_CAP_topa_multiple_entries))
		pt_pmu.pmu.capabilities =
			PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;

	pt_pmu.pmu.capabilities	|= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
	pt_pmu.pmu.attr_groups		 = pt_attr_groups;
	pt_pmu.pmu.task_ctx_nr		 = perf_sw_context;
	pt_pmu.pmu.event_init		 = pt_event_init;
	pt_pmu.pmu.add			 = pt_event_add;
	pt_pmu.pmu.del			 = pt_event_del;
	pt_pmu.pmu.start		 = pt_event_start;
	pt_pmu.pmu.stop			 = pt_event_stop;
	pt_pmu.pmu.read			 = pt_event_read;
	pt_pmu.pmu.setup_aux		 = pt_buffer_setup_aux;
	pt_pmu.pmu.free_aux		 = pt_buffer_free_aux;
	pt_pmu.pmu.addr_filters_sync	 = pt_event_addr_filters_sync;
	pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
	pt_pmu.pmu.nr_addr_filters	 =
		pt_cap_get(PT_CAP_num_address_ranges);

	ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

	return ret;
}
arch_initcall(pt_init);