1
2
3
4
5
6
7
8
9
10
11
12
13
14#define DRVNAME "arm_trbe"
15
16#define pr_fmt(fmt) DRVNAME ": " fmt
17
18#include <asm/barrier.h>
19#include <asm/cpufeature.h>
20
21#include "coresight-self-hosted-trace.h"
22#include "coresight-trbe.h"
23
/* Convert a free-running perf AUX index into an offset within the ring buffer */
#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
25
26
27
28
29
30
31
32
33
/*
 * Byte value used to pad unused regions of the trace buffer
 * (see __trbe_pad_buf()). 0x70 is treated as an "ignore" packet by the
 * ETE trace stream decoder, so padded areas decode to nothing.
 */
#define ETE_IGNORE_PACKET 0x70

/*
 * Minimum amount of buffer space (bytes) considered worth tracing into;
 * smaller regions are padded away in trbe_normal_offset().
 */
#define TRBE_TRACE_MIN_BUF_SIZE 64
45
/* How the driver should react to a TRBE stop/IRQ condition */
enum trbe_fault_action {
	TRBE_FAULT_ACT_WRAP,		/* buffer filled - collect and restart */
	TRBE_FAULT_ACT_SPURIOUS,	/* no real fault - just re-enable */
	TRBE_FAULT_ACT_FATAL,		/* unrecoverable - truncate the session */
};
51
/*
 * Per-session state for one perf AUX buffer mapped for the TRBE.
 * All trbe_* addresses are kernel virtual addresses into the vmap()'d
 * AUX pages (see arm_trbe_alloc_buffer()).
 */
struct trbe_buf {
	/* Start of the vmap()'d AUX buffer */
	unsigned long trbe_base;
	/* Base actually programmed into TRBBASER (may be > trbe_base for errata) */
	unsigned long trbe_hw_base;
	/* Limit programmed into TRBLIMITR; tracing stops when write hits this */
	unsigned long trbe_limit;
	/* Initial write pointer programmed into TRBPTR */
	unsigned long trbe_write;
	int nr_pages;			/* number of AUX pages mapped */
	void **pages;			/* the AUX page array from perf */
	bool snapshot;			/* perf snapshot mode session */
	struct trbe_cpudata *cpudata;	/* owning CPU's TRBE state */
};
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
/* Indices into trbe_errata_cpucaps[] / the cpudata->errata bitmap */
#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE 0
#define TRBE_WORKAROUND_WRITE_OUT_OF_RANGE 1

/* arm64 cpucap corresponding to each TRBE erratum, -1 terminated */
static int trbe_errata_cpucaps[] = {
	[TRBE_WORKAROUND_OVERWRITE_FILL_MODE] = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
	[TRBE_WORKAROUND_WRITE_OUT_OF_RANGE] = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
	-1,	/* sentinel, must always be the last entry */
};

/* Number of real errata entries, excluding the -1 sentinel */
#define TRBE_ERRATA_MAX (ARRAY_SIZE(trbe_errata_cpucaps) - 1)

/*
 * Bytes reserved (and later padded with ignore packets) at the start of
 * the buffer when the OVERWRITE_FILL_MODE erratum workaround is active;
 * see trbe_apply_work_around_before_enable() and trbe_get_trace_size().
 */
#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES 256
109
110
111
112
113
114
115
116
117
118
119
/* Per-CPU TRBE instance state, allocated per-cpu in arm_trbe_probe_coresight() */
struct trbe_cpudata {
	bool trbe_flag;			/* TRBIDR flag-update capability bit */
	u64 trbe_hw_align;		/* alignment required by the hardware (TRBIDR) */
	u64 trbe_align;			/* alignment the driver enforces (>= hw, errata) */
	int cpu;			/* CPU this instance belongs to */
	enum cs_mode mode;		/* CS_MODE_PERF while a session is active */
	struct trbe_buf *buf;		/* current session buffer, if any */
	struct trbe_drvdata *drvdata;	/* back-pointer to driver-wide state */
	DECLARE_BITMAP(errata, TRBE_ERRATA_MAX);	/* errata active on this CPU */
};
130
/* Driver-wide state shared by all per-CPU TRBE instances */
struct trbe_drvdata {
	struct trbe_cpudata __percpu *cpudata;
	/* per-CPU perf handle, also the devid passed to the percpu IRQ handler */
	struct perf_output_handle * __percpu *handle;
	struct hlist_node hotplug_node;		/* cpuhp multi-instance linkage */
	int irq;				/* the shared PPI */
	cpumask_t supported_cpus;		/* CPUs with a working TRBE */
	enum cpuhp_state trbe_online;		/* dynamic cpuhp state token */
	struct platform_device *pdev;
};
140
141static void trbe_check_errata(struct trbe_cpudata *cpudata)
142{
143 int i;
144
145 for (i = 0; i < TRBE_ERRATA_MAX; i++) {
146 int cap = trbe_errata_cpucaps[i];
147
148 if (WARN_ON_ONCE(cap < 0))
149 return;
150 if (this_cpu_has_cap(cap))
151 set_bit(i, cpudata->errata);
152 }
153}
154
155static inline bool trbe_has_erratum(struct trbe_cpudata *cpudata, int i)
156{
157 return (i < TRBE_ERRATA_MAX) && test_bit(i, cpudata->errata);
158}
159
/* CPU may corrupt the buffer start when it wraps in fill mode (erratum) */
static inline bool trbe_may_overwrite_in_fill_mode(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE);
}

/* CPU may write up to a page beyond the programmed limit (erratum) */
static inline bool trbe_may_write_out_of_range(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE);
}
169
170static int trbe_alloc_node(struct perf_event *event)
171{
172 if (event->cpu == -1)
173 return NUMA_NO_NODE;
174 return cpu_to_node(event->cpu);
175}
176
/*
 * Push all buffered trace out to memory: TSB CSYNC synchronizes the trace
 * buffer writes, then the DSB makes them visible to the local observers.
 */
static void trbe_drain_buffer(void)
{
	tsb_csync();
	dsb(nsh);
}
182
/*
 * Drain any pending trace to memory and then disable the TRBE on the
 * calling CPU by clearing TRBLIMITR.ENABLE. The ISB ensures the disable
 * takes effect before the caller proceeds.
 */
static void trbe_drain_and_disable_local(void)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	trbe_drain_buffer();

	trblimitr &= ~TRBLIMITR_ENABLE;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
	isb();
}
197
/*
 * Fully quiesce the local TRBE: drain + disable first, then clear every
 * programming register so no stale base/limit/status survives.
 */
static void trbe_reset_local(void)
{
	trbe_drain_and_disable_local();
	write_sysreg_s(0, SYS_TRBLIMITR_EL1);
	write_sysreg_s(0, SYS_TRBPTR_EL1);
	write_sysreg_s(0, SYS_TRBBASER_EL1);
	write_sysreg_s(0, SYS_TRBSR_EL1);
}
206
/*
 * Tell userspace that the TRBE wrapped. PERF_AUX_FLAG_COLLISION is
 * (re)used here as the wrap indicator for this sink, so the decoder
 * knows trace around the wrap point may be incomplete.
 */
static void trbe_report_wrap_event(struct perf_output_handle *handle)
{
	perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
}
225
/*
 * Unrecoverable error path: drain and disable the TRBE, mark the AUX
 * record truncated, close out the perf handle with zero new data, and
 * drop the per-CPU handle pointer so the IRQ handler ignores late IRQs.
 */
static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	trbe_drain_and_disable_local();
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
	*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
}
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286static void __trbe_pad_buf(struct trbe_buf *buf, u64 offset, int len)
287{
288 memset((void *)buf->trbe_base + offset, ETE_IGNORE_PACKET, len);
289}
290
291static void trbe_pad_buf(struct perf_output_handle *handle, int len)
292{
293 struct trbe_buf *buf = etm_perf_sink_config(handle);
294 u64 head = PERF_IDX2OFF(handle->head, buf);
295
296 __trbe_pad_buf(buf, head, len);
297 if (!buf->snapshot)
298 perf_aux_output_skip(handle, len);
299}
300
301static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
302{
303 struct trbe_buf *buf = etm_perf_sink_config(handle);
304
305
306
307
308
309
310 return buf->nr_pages * PAGE_SIZE;
311}
312
313static u64 trbe_min_trace_buf_size(struct perf_output_handle *handle)
314{
315 u64 size = TRBE_TRACE_MIN_BUF_SIZE;
316 struct trbe_buf *buf = etm_perf_sink_config(handle);
317 struct trbe_cpudata *cpudata = buf->cpudata;
318
319
320
321
322
323
324
325
326
327 if (trbe_may_write_out_of_range(cpudata))
328 size += PAGE_SIZE;
329 return size;
330}
331
332
333
334
335
336
337
338
339
340
341
/*
 * Compute the limit offset (relative to trbe_base) for a normal (non-
 * snapshot) perf session, constrained by the handle's free space and the
 * wakeup watermark. Returns 0 when there is no usable space, in which
 * case the remaining space has been padded away.
 */
static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;
	const u64 bufsize = buf->nr_pages * PAGE_SIZE;
	u64 limit = bufsize;
	u64 head, tail, wakeup;

	head = PERF_IDX2OFF(handle->head, buf);

	/*
	 * The head must respect the driver's alignment requirement; pad up
	 * to the next aligned position (bounded by the available space).
	 */
	if (!IS_ALIGNED(head, cpudata->trbe_align)) {
		unsigned long delta = roundup(head, cpudata->trbe_align) - head;

		delta = min(delta, handle->size);
		trbe_pad_buf(handle, delta);
		head = PERF_IDX2OFF(handle->head, buf);
	}

	/* Padding may have consumed all the free space */
	if (!handle->size)
		return 0;

	tail = PERF_IDX2OFF(handle->head + handle->size, buf);
	wakeup = PERF_IDX2OFF(handle->wakeup, buf);

	/*
	 * If the free area does not wrap around the end of the buffer,
	 * the limit is the tail rounded down to a page boundary (the TRBE
	 * limit pointer must be page aligned). If it does wrap, trace up
	 * to the end of the buffer instead.
	 */
	if (head < tail)
		limit = round_down(tail, PAGE_SIZE);

	/*
	 * Stop at the wakeup watermark when it falls within this span, so
	 * the consumer gets woken before we trace past it.
	 */
	if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
		limit = min(limit, round_up(wakeup, PAGE_SIZE));

	/* A limit beyond the head means there is room to trace into */
	if (limit > head)
		return limit;

	/* No usable room before the limit - pad it all out */
	trbe_pad_buf(handle, handle->size);
	return 0;
}
490
/*
 * Like __trbe_normal_offset(), but additionally rejects regions smaller
 * than the minimum useful trace size: such slivers are padded away and
 * the computation retried until a big-enough region (or none) is found.
 */
static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	u64 limit = __trbe_normal_offset(handle);
	u64 head = PERF_IDX2OFF(handle->head, buf);

	while (limit && ((limit - head) < trbe_min_trace_buf_size(handle))) {
		trbe_pad_buf(handle, limit - head);
		limit = __trbe_normal_offset(handle);
		head = PERF_IDX2OFF(handle->head, buf);
	}
	return limit;
}
512
513static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle)
514{
515 struct trbe_buf *buf = etm_perf_sink_config(handle);
516 unsigned long offset;
517
518 if (buf->snapshot)
519 offset = trbe_snapshot_offset(handle);
520 else
521 offset = trbe_normal_offset(handle);
522 return buf->trbe_base + offset;
523}
524
525static void clr_trbe_status(void)
526{
527 u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
528
529 WARN_ON(is_trbe_enabled());
530 trbsr &= ~TRBSR_IRQ;
531 trbsr &= ~TRBSR_TRG;
532 trbsr &= ~TRBSR_WRAP;
533 trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT);
534 trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT);
535 trbsr &= ~TRBSR_STOP;
536 write_sysreg_s(trbsr, SYS_TRBSR_EL1);
537}
538
/*
 * Program TRBLIMITR with the (page-aligned) limit address, fill mode,
 * ignore-trigger mode, and the enable bit, then ISB so tracing starts
 * with the new programming in effect.
 */
static void set_trbe_limit_pointer_enabled(unsigned long addr)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	/* The architecture requires the limit to be LIMIT-field aligned */
	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT)));
	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));

	/* Clear the fields we are about to (re)program */
	trblimitr &= ~TRBLIMITR_NVM;
	trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT);
	trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT);
	trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);

	/* Fill mode: stop collection (and raise the IRQ) when the limit is hit */
	trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT;

	/* Trigger events are not used by this driver - ignore them */
	trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) <<
		     TRBLIMITR_TRIG_MODE_SHIFT;
	trblimitr |= (addr & PAGE_MASK);

	trblimitr |= TRBLIMITR_ENABLE;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);

	/* Synchronize the write before any trace is generated */
	isb();
}
576
/*
 * Program and start the TRBE for the prepared session buffer. The
 * register programming order (disable, clear status, base, write pointer,
 * ISB, then limit+enable last) is deliberate: enable must be the final
 * step, after all other state is visible to the trace unit.
 */
static void trbe_enable_hw(struct trbe_buf *buf)
{
	/* Sanity: hw window must lie inside the buffer, write inside the window */
	WARN_ON(buf->trbe_hw_base < buf->trbe_base);
	WARN_ON(buf->trbe_write < buf->trbe_hw_base);
	WARN_ON(buf->trbe_write >= buf->trbe_limit);
	set_trbe_disabled();
	isb();
	clr_trbe_status();
	set_trbe_base_pointer(buf->trbe_hw_base);
	set_trbe_write_pointer(buf->trbe_write);

	/* Ensure base/write are committed before enabling via the limit register */
	isb();
	set_trbe_limit_pointer_enabled(buf->trbe_limit);
}
595
/*
 * Decode TRBSR into the action the driver must take. Trigger/abort
 * conditions and stage-1/stage-2 aborts are fatal. A clean wrap (buffer
 * filled) is handled as a wrap - but on CPUs with the OVERWRITE_FILL_MODE
 * erratum the write pointer cannot be trusted to equal the base, so the
 * pointer check is skipped there. Anything else is treated as spurious.
 */
static enum trbe_fault_action trbe_get_fault_act(struct perf_output_handle *handle,
						 u64 trbsr)
{
	int ec = get_trbe_ec(trbsr);
	int bsc = get_trbe_bsc(trbsr);
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;

	WARN_ON(is_trbe_running(trbsr));
	if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
		return TRBE_FAULT_ACT_FATAL;

	if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
		return TRBE_FAULT_ACT_FATAL;

	if ((is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) &&
	    (trbe_may_overwrite_in_fill_mode(cpudata) ||
	     get_trbe_write_pointer() == get_trbe_base_pointer()))
		return TRBE_FAULT_ACT_WRAP;

	return TRBE_FAULT_ACT_SPURIOUS;
}
623
/*
 * Compute how many bytes of trace were collected since the session head.
 * On a wrap the hardware filled up to the limit pointer; otherwise trace
 * ends at the current write pointer. For CPUs with the
 * OVERWRITE_FILL_MODE erratum, the first SKIP_BYTES of the region may
 * have been corrupted by the overwrite, so they are replaced with ignore
 * packets before the size is reported.
 */
static unsigned long trbe_get_trace_size(struct perf_output_handle *handle,
					 struct trbe_buf *buf, bool wrap)
{
	u64 write;
	u64 start_off, end_off;
	u64 size;
	u64 overwrite_skip = TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;

	if (wrap)
		write = get_trbe_limit_pointer();
	else
		write = get_trbe_write_pointer();

	/* Offsets are relative to the start of the vmap()'d buffer */
	end_off = write - buf->trbe_base;
	start_off = PERF_IDX2OFF(handle->head, buf);

	/* The write pointer going backwards would indicate driver/hw confusion */
	if (WARN_ON_ONCE(end_off < start_off))
		return 0;

	size = end_off - start_off;

	/* Pad out the possibly-corrupted prefix (see erratum description) */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE) &&
	    !WARN_ON(size < overwrite_skip))
		__trbe_pad_buf(buf, start_off, overwrite_skip);

	return size;
}
675
676static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
677 struct perf_event *event, void **pages,
678 int nr_pages, bool snapshot)
679{
680 struct trbe_buf *buf;
681 struct page **pglist;
682 int i;
683
684
685
686
687
688
689
690 if (nr_pages < 2)
691 return NULL;
692
693 buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
694 if (!buf)
695 return ERR_PTR(-ENOMEM);
696
697 pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
698 if (!pglist) {
699 kfree(buf);
700 return ERR_PTR(-ENOMEM);
701 }
702
703 for (i = 0; i < nr_pages; i++)
704 pglist[i] = virt_to_page(pages[i]);
705
706 buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
707 if (!buf->trbe_base) {
708 kfree(pglist);
709 kfree(buf);
710 return ERR_PTR(-ENOMEM);
711 }
712 buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
713 buf->trbe_write = buf->trbe_base;
714 buf->snapshot = snapshot;
715 buf->nr_pages = nr_pages;
716 buf->pages = pages;
717 kfree(pglist);
718 return buf;
719}
720
/* Undo arm_trbe_alloc_buffer(): unmap the VA range and free the trbe_buf */
static void arm_trbe_free_buffer(void *config)
{
	struct trbe_buf *buf = config;

	vunmap((void *)buf->trbe_base);
	kfree(buf);
}
728
/*
 * Called by the ETM/perf layer when the session is being stopped: report
 * how much trace the sink collected since the last update. Runs with
 * IRQs disabled to avoid racing with the TRBE IRQ handler on this CPU.
 * Returns the number of collected bytes (0 if nothing usable).
 */
static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
					    struct perf_output_handle *handle,
					    void *config)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct trbe_buf *buf = config;
	enum trbe_fault_action act;
	unsigned long size, status;
	unsigned long flags;
	bool wrap = false;

	WARN_ON(buf->cpudata != cpudata);
	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (cpudata->mode != CS_MODE_PERF)
		return 0;

	/* Serialize against the TRBE IRQ handler on this CPU */
	local_irq_save(flags);

	/*
	 * If the TRBE is already disabled, the IRQ handler (or an error
	 * path) has already consumed/closed the trace - nothing to report.
	 */
	if (!is_trbe_enabled()) {
		size = 0;
		goto done;
	}

	/* Flush outstanding trace and stop collection before reading state */
	trbe_drain_and_disable_local();

	status = read_sysreg_s(SYS_TRBSR_EL1);
	if (is_trbe_irq(status)) {
		/*
		 * An IRQ fired between disabling interrupts and the drain;
		 * acknowledge it here since the handler will not run.
		 */
		clr_trbe_irq();
		isb();

		act = trbe_get_fault_act(handle, status);

		/* Anything other than a clean wrap yields no trustworthy trace */
		if (act != TRBE_FAULT_ACT_WRAP) {
			size = 0;
			goto done;
		}

		trbe_report_wrap_event(handle);
		wrap = true;
	}

	size = trbe_get_trace_size(handle, buf, wrap);

done:
	local_irq_restore(flags);

	/* In snapshot mode the driver advances the head itself */
	if (buf->snapshot)
		handle->head += size;
	return size;
}
815
816
/*
 * Adjust the session's hw base / write pointer / limit before enabling
 * the TRBE, to dodge the known hardware errata:
 *
 *  - OVERWRITE_FILL_MODE: the CPU may overwrite bytes near the base when
 *    the buffer fills, so start the hardware window at the (page-aligned)
 *    write position and skip SKIP_BYTES of it; the skipped bytes are
 *    padded out when trace is collected (trbe_get_trace_size()).
 *
 *  - WRITE_OUT_OF_RANGE: the CPU may write up to a page beyond the
 *    programmed limit, so pull the limit in by one page and require at
 *    least a page of headroom.
 *
 * Returns 0 on success, -EINVAL if the buffer geometry cannot satisfy a
 * required workaround (callers then truncate the session).
 */
static int trbe_apply_work_around_before_enable(struct trbe_buf *buf)
{
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE)) {
		/* The workaround relies on a page-aligned start position */
		if (WARN_ON(!IS_ALIGNED(buf->trbe_write, PAGE_SIZE)))
			return -EINVAL;
		buf->trbe_hw_base = buf->trbe_write;
		buf->trbe_write += TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;
	}

	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE)) {
		s64 space = buf->trbe_limit - buf->trbe_write;

		/* Need strictly more than one page of space to give one up */
		if (WARN_ON(space <= PAGE_SIZE ||
			    !IS_ALIGNED(buf->trbe_limit, PAGE_SIZE)))
			return -EINVAL;
		buf->trbe_limit -= PAGE_SIZE;
	}

	return 0;
}
930
/*
 * Common enable path used both at session start and after a wrap:
 * compute the limit/write pointers for the current perf head, apply
 * errata adjustments, publish the handle for the IRQ handler, and start
 * the hardware. On failure the event is stopped and truncated.
 */
static int __arm_trbe_enable(struct trbe_buf *buf,
			     struct perf_output_handle *handle)
{
	int ret = 0;

	perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
	buf->trbe_limit = compute_trbe_buffer_limit(handle);
	buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
	/* limit == base means compute_trbe_buffer_limit() found no room */
	if (buf->trbe_limit == buf->trbe_base) {
		ret = -ENOSPC;
		goto err;
	}

	buf->trbe_hw_base = buf->trbe_base;

	ret = trbe_apply_work_around_before_enable(buf);
	if (ret)
		goto err;

	/* Make the handle visible to arm_trbe_irq_handler() before enabling */
	*this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
	trbe_enable_hw(buf);
	return 0;
err:
	trbe_stop_and_truncate_event(handle);
	return ret;
}
957
/*
 * Coresight sink enable callback. Only perf mode is supported; binds the
 * session buffer to this CPU's TRBE instance and starts tracing.
 */
static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct perf_output_handle *handle = data;
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (mode != CS_MODE_PERF)
		return -EINVAL;

	cpudata->buf = buf;
	cpudata->mode = mode;
	buf->cpudata = cpudata;

	return __arm_trbe_enable(buf, handle);
}
976
/*
 * Coresight sink disable callback: drain and stop the local TRBE and
 * unbind the session buffer from this CPU's instance.
 */
static int arm_trbe_disable(struct coresight_device *csdev)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct trbe_buf *buf = cpudata->buf;

	WARN_ON(buf->cpudata != cpudata);
	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (cpudata->mode != CS_MODE_PERF)
		return -EINVAL;

	trbe_drain_and_disable_local();
	buf->cpudata = NULL;
	cpudata->buf = NULL;
	cpudata->mode = CS_MODE_DISABLED;
	return 0;
}
995
/*
 * A stop condition fired that needs no buffer handling: simply turn the
 * TRBE back on with its existing programming (base/write/limit untouched)
 * and resume tracing where it left off.
 */
static void trbe_handle_spurious(struct perf_output_handle *handle)
{
	u64 limitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	limitr |= TRBLIMITR_ENABLE;
	write_sysreg_s(limitr, SYS_TRBLIMITR_EL1);
	isb();
}
1009
/*
 * The buffer filled (wrap): hand the collected trace to perf, close the
 * current AUX transaction, open a new one, and restart the TRBE for it.
 * Returns 0 on successful restart, negative error if the session had to
 * be stopped (no AUX space, or the new enable failed).
 */
static int trbe_handle_overflow(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	unsigned long size;
	struct etm_event_data *event_data;

	size = trbe_get_trace_size(handle, buf, true);
	/* In snapshot mode the driver maintains the head itself */
	if (buf->snapshot)
		handle->head += size;

	trbe_report_wrap_event(handle);
	perf_aux_output_end(handle, size);
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data) {
		/*
		 * perf could not give us a new transaction (e.g. ring
		 * buffer full and consumer behind) - stop the hardware and
		 * drop the handle so further IRQs are ignored.
		 */
		trbe_drain_and_disable_local();
		*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
		return -EINVAL;
	}

	return __arm_trbe_enable(buf, handle);
}
1038
1039static bool is_perf_trbe(struct perf_output_handle *handle)
1040{
1041 struct trbe_buf *buf = etm_perf_sink_config(handle);
1042 struct trbe_cpudata *cpudata = buf->cpudata;
1043 struct trbe_drvdata *drvdata = cpudata->drvdata;
1044 int cpu = smp_processor_id();
1045
1046 WARN_ON(buf->trbe_hw_base != get_trbe_base_pointer());
1047 WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());
1048
1049 if (cpudata->mode != CS_MODE_PERF)
1050 return false;
1051
1052 if (cpudata->cpu != cpu)
1053 return false;
1054
1055 if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1056 return false;
1057
1058 return true;
1059}
1060
/*
 * Percpu TRBE IRQ handler. Prohibits further trace generation, quiesces
 * the buffer, then dispatches on the decoded fault action. Trace is
 * re-allowed (trfcr restored) only when the session continues; on
 * truncation the pending perf irq_work is run instead so the event is
 * disabled before we return to the interrupted context.
 */
static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
{
	struct perf_output_handle **handle_ptr = dev;
	struct perf_output_handle *handle = *handle_ptr;
	enum trbe_fault_action act;
	u64 status;
	bool truncated = false;
	u64 trfcr;

	status = read_sysreg_s(SYS_TRBSR_EL1);

	/* Not our condition - let any other handler have a go */
	if (!is_trbe_irq(status))
		return IRQ_NONE;

	/* Stop new trace from being generated while we sort the buffer out */
	trfcr = cpu_prohibit_trace();

	/* Flush what the TRBE has buffered and acknowledge the IRQ */
	trbe_drain_and_disable_local();
	clr_trbe_irq();
	isb();

	/*
	 * NOTE(review): these early returns leave trace prohibited (trfcr
	 * not restored) - acceptable only because no session is active here.
	 */
	if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle))
		return IRQ_NONE;

	if (!is_perf_trbe(handle))
		return IRQ_NONE;

	act = trbe_get_fault_act(handle, status);
	switch (act) {
	case TRBE_FAULT_ACT_WRAP:
		truncated = !!trbe_handle_overflow(handle);
		break;
	case TRBE_FAULT_ACT_SPURIOUS:
		trbe_handle_spurious(handle);
		break;
	case TRBE_FAULT_ACT_FATAL:
		trbe_stop_and_truncate_event(handle);
		truncated = true;
		break;
	}

	if (truncated)
		irq_work_run();
	else
		write_trfcr(trfcr);

	return IRQ_HANDLED;
}
1123
/* Coresight sink callbacks exposed by each per-CPU TRBE device */
static const struct coresight_ops_sink arm_trbe_sink_ops = {
	.enable = arm_trbe_enable,
	.disable = arm_trbe_disable,
	.alloc_buffer = arm_trbe_alloc_buffer,
	.free_buffer = arm_trbe_free_buffer,
	.update_buffer = arm_trbe_update_buffer,
};

static const struct coresight_ops arm_trbe_cs_ops = {
	.sink_ops = &arm_trbe_sink_ops,
};
1135
1136static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
1137{
1138 struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
1139
1140 return sprintf(buf, "%llx\n", cpudata->trbe_hw_align);
1141}
1142static DEVICE_ATTR_RO(align);
1143
1144static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
1145{
1146 struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
1147
1148 return sprintf(buf, "%d\n", cpudata->trbe_flag);
1149}
1150static DEVICE_ATTR_RO(flag);
1151
/* sysfs attributes attached to each per-CPU TRBE coresight device */
static struct attribute *arm_trbe_attrs[] = {
	&dev_attr_align.attr,
	&dev_attr_flag.attr,
	NULL,
};

static const struct attribute_group arm_trbe_group = {
	.attrs = arm_trbe_attrs,
};

static const struct attribute_group *arm_trbe_groups[] = {
	&arm_trbe_group,
	NULL,
};
1166
/*
 * Bring the local TRBE into a known-clean state and enable its percpu
 * IRQ. Runs on the target CPU (via cross call or cpuhp callback).
 */
static void arm_trbe_enable_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;

	trbe_reset_local();
	enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
}
1174
/*
 * Register a coresight sink device ("trbeN") for @cpu. On any failure
 * the CPU is removed from supported_cpus so it is never used as a sink.
 */
static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
{
	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
	struct coresight_desc desc = { 0 };
	struct device *dev;

	/* Already registered - nothing to do (and a bug if it happens) */
	if (WARN_ON(trbe_csdev))
		return;

	/* Probe must have succeeded on this CPU first */
	if (WARN_ON(!cpudata->drvdata))
		return;

	dev = &cpudata->drvdata->pdev->dev;
	desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
	if (!desc.name)
		goto cpu_clear;

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
	desc.ops = &arm_trbe_cs_ops;
	desc.pdata = dev_get_platdata(dev);
	desc.groups = arm_trbe_groups;
	desc.dev = dev;
	trbe_csdev = coresight_register(&desc);
	if (IS_ERR(trbe_csdev))
		goto cpu_clear;

	dev_set_drvdata(&trbe_csdev->dev, cpudata);
	coresight_set_percpu_sink(cpu, trbe_csdev);
	return;
cpu_clear:
	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
1210
1211
1212
1213
/*
 * Probe the TRBE on the calling CPU: verify it exists and is programmable
 * from this exception level, read its alignment constraints, detect
 * errata, and initialize the per-CPU state. On failure the CPU is
 * dropped from supported_cpus. Runs on the target CPU (cross call or
 * hotplug callback) with preemption disabled.
 */
static void arm_trbe_probe_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;
	int cpu = smp_processor_id();
	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
	u64 trbidr;

	if (WARN_ON(!cpudata))
		goto cpu_clear;

	if (!is_trbe_available()) {
		pr_err("TRBE is not implemented on cpu %d\n", cpu);
		goto cpu_clear;
	}

	trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
	if (!is_trbe_programmable(trbidr)) {
		pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
		goto cpu_clear;
	}

	cpudata->trbe_hw_align = 1ULL << get_trbe_address_align(trbidr);
	/* The driver only supports alignments up to 2K */
	if (cpudata->trbe_hw_align > SZ_2K) {
		pr_err("Unsupported alignment on cpu %d\n", cpu);
		goto cpu_clear;
	}

	/* Must happen before trbe_align is derived below */
	trbe_check_errata(cpudata);

	/*
	 * With the OVERWRITE_FILL_MODE erratum the write pointer must be
	 * kept page aligned (see trbe_apply_work_around_before_enable()),
	 * so enforce page alignment driver-wide on such CPUs.
	 */
	if (trbe_may_overwrite_in_fill_mode(cpudata))
		cpudata->trbe_align = PAGE_SIZE;
	else
		cpudata->trbe_align = cpudata->trbe_hw_align;

	cpudata->trbe_flag = get_trbe_flag_update(trbidr);
	cpudata->cpu = cpu;
	cpudata->drvdata = drvdata;
	return;
cpu_clear:
	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
1270
1271static void arm_trbe_remove_coresight_cpu(void *info)
1272{
1273 int cpu = smp_processor_id();
1274 struct trbe_drvdata *drvdata = info;
1275 struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
1276 struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
1277
1278 disable_percpu_irq(drvdata->irq);
1279 trbe_reset_local();
1280 if (trbe_csdev) {
1281 coresight_unregister(trbe_csdev);
1282 cpudata->drvdata = NULL;
1283 coresight_set_percpu_sink(cpu, NULL);
1284 }
1285}
1286
1287static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
1288{
1289 int cpu;
1290
1291 drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
1292 if (!drvdata->cpudata)
1293 return -ENOMEM;
1294
1295 for_each_cpu(cpu, &drvdata->supported_cpus) {
1296
1297 if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1))
1298 continue;
1299 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1300 arm_trbe_register_coresight_cpu(drvdata, cpu);
1301 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1302 smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
1303 }
1304 return 0;
1305}
1306
1307static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
1308{
1309 int cpu;
1310
1311 for_each_cpu(cpu, &drvdata->supported_cpus)
1312 smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1);
1313 free_percpu(drvdata->cpudata);
1314 return 0;
1315}
1316
/*
 * Probe the TRBE on the current CPU from the hotplug path. Preemption is
 * disabled so arm_trbe_probe_cpu()'s smp_processor_id()/this-CPU sysreg
 * accesses stay on one CPU.
 */
static void arm_trbe_probe_hotplugged_cpu(struct trbe_drvdata *drvdata)
{
	preempt_disable();
	arm_trbe_probe_cpu(drvdata);
	preempt_enable();
}
1323
/*
 * cpuhp online callback. A CPU coming up for the first time (no percpu
 * sink yet) is probed and registered here; a CPU that was seen before
 * just has its TRBE re-enabled.
 */
static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);

	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
		if (!coresight_get_percpu_sink(cpu)) {
			arm_trbe_probe_hotplugged_cpu(drvdata);
			/* Probe/register may drop the CPU from supported_cpus */
			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
				arm_trbe_register_coresight_cpu(drvdata, cpu);
			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
				arm_trbe_enable_cpu(drvdata);
		} else {
			arm_trbe_enable_cpu(drvdata);
		}
	}
	return 0;
}
1346
1347static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
1348{
1349 struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
1350
1351 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
1352 disable_percpu_irq(drvdata->irq);
1353 trbe_reset_local();
1354 }
1355 return 0;
1356}
1357
/*
 * Install a dynamic multi-instance cpuhp state so TRBEs on hotplugged
 * CPUs are (re)initialized and quiesced as CPUs come and go.
 */
static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
{
	enum cpuhp_state trbe_online;
	int ret;

	trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
					      arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
	if (trbe_online < 0)
		return trbe_online;

	ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
	if (ret) {
		cpuhp_remove_multi_state(trbe_online);
		return ret;
	}
	drvdata->trbe_online = trbe_online;
	return 0;
}
1376
/* Remove the dynamic cpuhp state installed by arm_trbe_probe_cpuhp() */
static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
{
	cpuhp_remove_multi_state(drvdata->trbe_online);
}
1381
1382static int arm_trbe_probe_irq(struct platform_device *pdev,
1383 struct trbe_drvdata *drvdata)
1384{
1385 int ret;
1386
1387 drvdata->irq = platform_get_irq(pdev, 0);
1388 if (drvdata->irq < 0) {
1389 pr_err("IRQ not found for the platform device\n");
1390 return drvdata->irq;
1391 }
1392
1393 if (!irq_is_percpu(drvdata->irq)) {
1394 pr_err("IRQ is not a PPI\n");
1395 return -EINVAL;
1396 }
1397
1398 if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
1399 return -EINVAL;
1400
1401 drvdata->handle = alloc_percpu(struct perf_output_handle *);
1402 if (!drvdata->handle)
1403 return -ENOMEM;
1404
1405 ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
1406 if (ret) {
1407 free_percpu(drvdata->handle);
1408 return ret;
1409 }
1410 return 0;
1411}
1412
/* Release the PPI and the per-CPU handle slots from arm_trbe_probe_irq() */
static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
{
	free_percpu_irq(drvdata->irq, drvdata->handle);
	free_percpu(drvdata->handle);
}
1418
/*
 * Platform probe: wire up platform data, claim the PPI, probe/register
 * the per-CPU sinks, and hook into CPU hotplug. Unwinds in reverse
 * order on failure.
 */
static int arm_trbe_device_probe(struct platform_device *pdev)
{
	struct coresight_platform_data *pdata;
	struct trbe_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int ret;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	dev_set_drvdata(dev, drvdata);
	dev->platform_data = pdata;
	drvdata->pdev = pdev;
	ret = arm_trbe_probe_irq(pdev, drvdata);
	if (ret)
		return ret;

	ret = arm_trbe_probe_coresight(drvdata);
	if (ret)
		goto probe_failed;

	ret = arm_trbe_probe_cpuhp(drvdata);
	if (ret)
		goto cpuhp_failed;

	return 0;
cpuhp_failed:
	arm_trbe_remove_coresight(drvdata);
probe_failed:
	arm_trbe_remove_irq(drvdata);
	return ret;
}
1456
/* Platform remove: tear down in reverse order of arm_trbe_device_probe() */
static int arm_trbe_device_remove(struct platform_device *pdev)
{
	struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);

	arm_trbe_remove_cpuhp(drvdata);
	arm_trbe_remove_coresight(drvdata);
	arm_trbe_remove_irq(drvdata);
	return 0;
}
1466
/* Device tree match table for the TRBE platform device */
static const struct of_device_id arm_trbe_of_match[] = {
	{ .compatible = "arm,trace-buffer-extension"},
	{},
};
MODULE_DEVICE_TABLE(of, arm_trbe_of_match);

static struct platform_driver arm_trbe_driver = {
	.driver	= {
		.name = DRVNAME,
		.of_match_table = of_match_ptr(arm_trbe_of_match),
		/* manual bind/unbind would race the percpu sinks */
		.suppress_bind_attrs = true,
	},
	.probe	= arm_trbe_device_probe,
	.remove	= arm_trbe_device_remove,
};
1482
1483static int __init arm_trbe_init(void)
1484{
1485 int ret;
1486
1487 if (arm64_kernel_unmapped_at_el0()) {
1488 pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
1489 return -EOPNOTSUPP;
1490 }
1491
1492 ret = platform_driver_register(&arm_trbe_driver);
1493 if (!ret)
1494 return 0;
1495
1496 pr_err("Error registering %s platform driver\n", DRVNAME);
1497 return ret;
1498}
1499
/* Module exit: unregister the platform driver (remove path does the rest) */
static void __exit arm_trbe_exit(void)
{
	platform_driver_unregister(&arm_trbe_driver);
}
module_init(arm_trbe_init);
module_exit(arm_trbe_exit);

MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
MODULE_LICENSE("GPL v2");
1510