1
2
3
4
5
6
7
8
9
10
11
12
13
#define DRVNAME "arm_trbe"

#define pr_fmt(fmt) DRVNAME ": " fmt

#include <asm/barrier.h>
#include "coresight-trbe.h"

/* Convert a perf AUX ring-buffer index into a byte offset inside the buffer */
#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))

/*
 * Byte value used to pad unusable regions of the buffer: an ETE "ignore"
 * packet, which trace decoders are expected to skip over.
 */
#define ETE_IGNORE_PACKET 0x70

/*
 * Minimum usable space (bytes) worth starting a trace session for; anything
 * smaller is padded out with ignore packets instead (see trbe_normal_offset).
 */
#define TRBE_TRACE_MIN_BUF_SIZE 64
42
/* What to do after decoding the TRBE status on an IRQ or fault */
enum trbe_fault_action {
	TRBE_FAULT_ACT_WRAP,		/* buffer filled and wrapped; restart the session */
	TRBE_FAULT_ACT_SPURIOUS,	/* nothing actually wrong; re-enable tracing */
	TRBE_FAULT_ACT_FATAL,		/* unrecoverable; stop and truncate the event */
};
48
/*
 * Per-session buffer state for a TRBE sink.
 *
 * trbe_base/trbe_write/trbe_limit are kernel virtual addresses into the
 * vmap()'ed perf AUX pages: the hardware starts writing at trbe_write and
 * may fill up to (but not including) trbe_limit.
 */
struct trbe_buf {
	unsigned long trbe_base;	/* vmap()'ed start of the AUX buffer */
	unsigned long trbe_limit;	/* end of the usable region (LIMIT pointer) */
	unsigned long trbe_write;	/* where the hardware begins writing */
	int nr_pages;			/* number of AUX pages mapped */
	void **pages;			/* underlying AUX page pointers */
	bool snapshot;			/* perf snapshot mode session? */
	struct trbe_cpudata *cpudata;	/* owning CPU's TRBE state */
};
66
/* Per-CPU TRBE instance state */
struct trbe_cpudata {
	bool trbe_flag;			/* flag-update capability, read from TRBIDR */
	u64 trbe_align;			/* required buffer address alignment (bytes) */
	int cpu;			/* CPU this TRBE instance belongs to */
	enum cs_mode mode;		/* CS_MODE_PERF while a session is active */
	struct trbe_buf *buf;		/* active session buffer, if any */
	struct trbe_drvdata *drvdata;	/* back-pointer to driver-wide data */
};
75
/* Driver-wide state shared by all per-CPU TRBE instances */
struct trbe_drvdata {
	struct trbe_cpudata __percpu *cpudata;
	struct perf_output_handle * __percpu *handle;	/* live perf handle per CPU, read by the ISR */
	struct hlist_node hotplug_node;	/* cpuhp multi-instance linkage */
	int irq;			/* the per-CPU (PPI) TRBE interrupt */
	cpumask_t supported_cpus;	/* CPUs with a usable TRBE */
	enum cpuhp_state trbe_online;	/* dynamic cpuhp state for this driver */
	struct platform_device *pdev;
};
85
86static int trbe_alloc_node(struct perf_event *event)
87{
88 if (event->cpu == -1)
89 return NUMA_NO_NODE;
90 return cpu_to_node(event->cpu);
91}
92
/*
 * Push any trace data still buffered inside the TRBE out to memory:
 * TSB CSYNC completes the trace writes, the DSB makes them observable.
 */
static void trbe_drain_buffer(void)
{
	tsb_csync();
	dsb(nsh);
}
98
/* Drain the local TRBE and clear only its enable bit, keeping the rest of
 * the LIMIT register configuration intact.
 */
static void trbe_drain_and_disable_local(void)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	trbe_drain_buffer();

	/*
	 * Disable, then ISB so the disable takes effect before any
	 * subsequent register accesses.
	 */
	trblimitr &= ~TRBLIMITR_ENABLE;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
	isb();
}
113
/* Drain, disable and fully clear this CPU's TRBE register state. */
static void trbe_reset_local(void)
{
	trbe_drain_and_disable_local();
	write_sysreg_s(0, SYS_TRBLIMITR_EL1);
	write_sysreg_s(0, SYS_TRBPTR_EL1);
	write_sysreg_s(0, SYS_TRBBASER_EL1);
	write_sysreg_s(0, SYS_TRBSR_EL1);
}
122
static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	/*
	 * We cannot proceed with the buffer collection and have no data
	 * for the current session.  The etm_perf driver expects to close
	 * out the AUX buffer at event_stop(), so just disable the TRBE
	 * here, flag the record truncated, and clear the per-CPU handle
	 * so the IRQ handler knows no session is active any more;
	 * update_buffer() will then report a 0 size.
	 */
	trbe_drain_and_disable_local();
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
}
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181static void trbe_pad_buf(struct perf_output_handle *handle, int len)
182{
183 struct trbe_buf *buf = etm_perf_sink_config(handle);
184 u64 head = PERF_IDX2OFF(handle->head, buf);
185
186 memset((void *)buf->trbe_base + head, ETE_IGNORE_PACKET, len);
187 if (!buf->snapshot)
188 perf_aux_output_skip(handle, len);
189}
190
191static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
192{
193 struct trbe_buf *buf = etm_perf_sink_config(handle);
194
195
196
197
198
199
200 return buf->nr_pages * PAGE_SIZE;
201}
202
203
204
205
206
207
208
209
210
211
212
/*
 * Work out how far into the buffer (from trbe_base) the hardware may
 * write for this transaction, given the perf handle's head, available
 * size and wakeup point.  Returns 0 (and truncates the record) when no
 * usable region exists.
 */
static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;
	const u64 bufsize = buf->nr_pages * PAGE_SIZE;
	u64 limit = bufsize;
	u64 head, tail, wakeup;

	head = PERF_IDX2OFF(handle->head, buf);

	/*
	 * The write pointer must satisfy the hardware's alignment
	 * requirement (cpudata->trbe_align).  If the current head is not
	 * aligned, pad up to the next aligned position with ignore
	 * packets - capped at handle->size - and re-derive head, since
	 * perf_aux_output_skip() advances handle->head.
	 */
	if (!IS_ALIGNED(head, cpudata->trbe_align)) {
		unsigned long delta = roundup(head, cpudata->trbe_align) - head;

		delta = min(delta, handle->size);
		trbe_pad_buf(handle, delta);
		head = PERF_IDX2OFF(handle->head, buf);
	}

	/* Padding may have consumed all remaining space */
	if (!handle->size) {
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
		return 0;
	}

	/* Compute the tail and wakeup indices now that head is aligned */
	tail = PERF_IDX2OFF(handle->head + handle->size, buf);
	wakeup = PERF_IDX2OFF(handle->wakeup, buf);

	/*
	 * If the free run [head, tail) does not wrap around the end of
	 * the buffer, tracing must stop before tail.  The LIMIT pointer
	 * has to be page aligned, so round it down; otherwise the limit
	 * stays at the end of the buffer (bufsize).
	 */
	if (head < tail)
		limit = round_down(tail, PAGE_SIZE);

	/*
	 * If a wakeup is due within this transaction and lies ahead of
	 * head, stop at (or just past) the wakeup point so the consumer
	 * gets woken promptly; round up since the limit must be page
	 * aligned and the wakeup offset may not be.
	 */
	if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
		limit = min(limit, round_up(wakeup, PAGE_SIZE));

	/*
	 * A limit beyond head means there is a usable region; otherwise
	 * pad out whatever remains and report the record truncated.
	 */
	if (limit > head)
		return limit;

	trbe_pad_buf(handle, handle->size);
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	return 0;
}
366
/*
 * Wrapper around __trbe_normal_offset() that rejects regions too small
 * to hold a useful amount of trace.
 */
static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = perf_get_aux(handle);
	u64 limit = __trbe_normal_offset(handle);
	u64 head = PERF_IDX2OFF(handle->head, buf);

	/*
	 * If the usable region is smaller than TRBE_TRACE_MIN_BUF_SIZE,
	 * pad it out entirely and recompute - the second pass will either
	 * find a larger region past the wrap or return 0 (truncated).
	 */
	if (limit && (limit - head < TRBE_TRACE_MIN_BUF_SIZE)) {
		trbe_pad_buf(handle, limit - head);
		limit = __trbe_normal_offset(handle);
	}
	return limit;
}
384
385static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle)
386{
387 struct trbe_buf *buf = etm_perf_sink_config(handle);
388 unsigned long offset;
389
390 if (buf->snapshot)
391 offset = trbe_snapshot_offset(handle);
392 else
393 offset = trbe_normal_offset(handle);
394 return buf->trbe_base + offset;
395}
396
397static void clr_trbe_status(void)
398{
399 u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
400
401 WARN_ON(is_trbe_enabled());
402 trbsr &= ~TRBSR_IRQ;
403 trbsr &= ~TRBSR_TRG;
404 trbsr &= ~TRBSR_WRAP;
405 trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT);
406 trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT);
407 trbsr &= ~TRBSR_STOP;
408 write_sysreg_s(trbsr, SYS_TRBSR_EL1);
409}
410
/*
 * Program the LIMIT pointer and enable the TRBE.  The session runs in
 * FILL mode with trigger events ignored: collection stops (raising the
 * TRBE IRQ) once the write pointer reaches the limit.
 */
static void set_trbe_limit_pointer_enabled(unsigned long addr)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	/* The LIMIT pointer must be page aligned */
	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT)));
	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));

	trblimitr &= ~TRBLIMITR_NVM;
	trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT);
	trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT);
	trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);

	/*
	 * FILL mode: stop collection when the buffer fills instead of
	 * wrapping or discarding, so no trace is silently lost.
	 */
	trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT;

	/*
	 * Trigger events are ignored - perf start/stop drives the session,
	 * not hardware triggers.
	 */
	trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) <<
		     TRBLIMITR_TRIG_MODE_SHIFT;
	trblimitr |= (addr & PAGE_MASK);

	trblimitr |= TRBLIMITR_ENABLE;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);

	/* Synchronize the enable before tracing can begin */
	isb();
}
448
/* Program and start the TRBE on the local CPU for the given buffer. */
static void trbe_enable_hw(struct trbe_buf *buf)
{
	/* The write pointer must lie inside [base, limit) */
	WARN_ON(buf->trbe_write < buf->trbe_base);
	WARN_ON(buf->trbe_write >= buf->trbe_limit);
	set_trbe_disabled();
	isb();
	clr_trbe_status();
	set_trbe_base_pointer(buf->trbe_base);
	set_trbe_write_pointer(buf->trbe_write);

	/*
	 * The base and write pointers must be programmed and synchronized
	 * (ISB) before the limit pointer write enables tracing.
	 */
	isb();
	set_trbe_limit_pointer_enabled(buf->trbe_limit);
}
466
467static enum trbe_fault_action trbe_get_fault_act(u64 trbsr)
468{
469 int ec = get_trbe_ec(trbsr);
470 int bsc = get_trbe_bsc(trbsr);
471
472 WARN_ON(is_trbe_running(trbsr));
473 if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
474 return TRBE_FAULT_ACT_FATAL;
475
476 if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
477 return TRBE_FAULT_ACT_FATAL;
478
479 if (is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) {
480 if (get_trbe_write_pointer() == get_trbe_base_pointer())
481 return TRBE_FAULT_ACT_WRAP;
482 }
483 return TRBE_FAULT_ACT_SPURIOUS;
484}
485
486static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
487 struct perf_event *event, void **pages,
488 int nr_pages, bool snapshot)
489{
490 struct trbe_buf *buf;
491 struct page **pglist;
492 int i;
493
494
495
496
497
498
499
500 if (nr_pages < 2)
501 return NULL;
502
503 buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
504 if (!buf)
505 return ERR_PTR(-ENOMEM);
506
507 pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
508 if (!pglist) {
509 kfree(buf);
510 return ERR_PTR(-ENOMEM);
511 }
512
513 for (i = 0; i < nr_pages; i++)
514 pglist[i] = virt_to_page(pages[i]);
515
516 buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
517 if (!buf->trbe_base) {
518 kfree(pglist);
519 kfree(buf);
520 return ERR_PTR(-ENOMEM);
521 }
522 buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
523 buf->trbe_write = buf->trbe_base;
524 buf->snapshot = snapshot;
525 buf->nr_pages = nr_pages;
526 buf->pages = pages;
527 kfree(pglist);
528 return buf;
529}
530
/* Undo arm_trbe_alloc_buffer(): unmap the AUX range and free the state. */
static void arm_trbe_free_buffer(void *config)
{
	struct trbe_buf *buf = config;

	vunmap((void *)buf->trbe_base);
	kfree(buf);
}
538
/*
 * Stop the current collection and report how many new bytes of trace
 * were produced since handle->head.  Called on the CPU owning the TRBE.
 */
static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
					    struct perf_output_handle *handle,
					    void *config)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct trbe_buf *buf = config;
	enum trbe_fault_action act;
	unsigned long size, offset;
	unsigned long write, base, status;
	unsigned long flags;

	WARN_ON(buf->cpudata != cpudata);
	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (cpudata->mode != CS_MODE_PERF)
		return 0;

	/* TRBE output is raw (unformatted) CoreSight trace */
	perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);

	/*
	 * Keep the TRBE IRQ handler from racing with this update while we
	 * sample the write pointer and status register.
	 */
	local_irq_save(flags);

	/*
	 * If the TRBE is already disabled, a fault or truncation has
	 * stopped the session and there is no new data to report.
	 */
	if (!is_trbe_enabled()) {
		size = 0;
		goto done;
	}

	/*
	 * Otherwise stop and drain the TRBE so the write pointer and
	 * status reflect everything it has produced so far.
	 */
	trbe_drain_and_disable_local();
	write = get_trbe_write_pointer();
	base = get_trbe_base_pointer();

	/* Check for a TRBE interrupt that fired but has not been handled */
	status = read_sysreg_s(SYS_TRBSR_EL1);
	if (is_trbe_irq(status)) {
		/*
		 * IRQs are off, so the handler cannot run - acknowledge
		 * the interrupt here and resolve the condition directly.
		 */
		clr_trbe_irq();
		isb();

		act = trbe_get_fault_act(status);
		/*
		 * Anything other than a clean wrap means the collected
		 * data cannot be trusted - report nothing.
		 */
		if (act != TRBE_FAULT_ACT_WRAP) {
			size = 0;
			goto done;
		}

		/*
		 * The buffer wrapped: everything up to the limit pointer
		 * is valid trace, but the session stopped early, so flag
		 * the record truncated.
		 */
		write = get_trbe_limit_pointer();
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	}

	offset = write - base;
	/* The write pointer should never be behind the recorded head */
	if (WARN_ON_ONCE(offset < PERF_IDX2OFF(handle->head, buf)))
		size = 0;
	else
		size = offset - PERF_IDX2OFF(handle->head, buf);

done:
	local_irq_restore(flags);

	/* In snapshot mode the driver advances the head itself */
	if (buf->snapshot)
		handle->head += size;
	return size;
}
638
/*
 * Sink enable callback: bind the perf session to this CPU's TRBE,
 * compute the usable buffer window and start tracing.
 */
static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct perf_output_handle *handle = data;
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	/* This sink only supports perf sessions */
	if (mode != CS_MODE_PERF)
		return -EINVAL;

	/* Publish the handle so the IRQ handler can find the session */
	*this_cpu_ptr(drvdata->handle) = handle;
	cpudata->buf = buf;
	cpudata->mode = mode;
	buf->cpudata = cpudata;
	buf->trbe_limit = compute_trbe_buffer_limit(handle);
	buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
	if (buf->trbe_limit == buf->trbe_base) {
		/* No usable space: truncate now; update_buffer() reports 0 */
		trbe_stop_and_truncate_event(handle);
		return 0;
	}
	trbe_enable_hw(buf);
	return 0;
}
664
/* Sink disable callback: stop tracing and unbind the session state. */
static int arm_trbe_disable(struct coresight_device *csdev)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct trbe_buf *buf = cpudata->buf;

	WARN_ON(buf->cpudata != cpudata);
	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (cpudata->mode != CS_MODE_PERF)
		return -EINVAL;

	trbe_drain_and_disable_local();
	buf->cpudata = NULL;
	cpudata->buf = NULL;
	cpudata->mode = CS_MODE_DISABLED;
	return 0;
}
683
/*
 * Nothing actually went wrong: recompute the buffer window and restart
 * tracing where it left off.
 */
static void trbe_handle_spurious(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	buf->trbe_limit = compute_trbe_buffer_limit(handle);
	buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
	if (buf->trbe_limit == buf->trbe_base) {
		/* No usable space - leave the TRBE disabled */
		trbe_drain_and_disable_local();
		return;
	}
	trbe_enable_hw(buf);
}
696
/*
 * The buffer filled up and collection stopped: close out the current
 * AUX transaction and try to start a new one so tracing can continue.
 */
static void trbe_handle_overflow(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	unsigned long offset, size;
	struct etm_event_data *event_data;

	/* New trace spans from the recorded head up to the limit pointer */
	offset = get_trbe_limit_pointer() - get_trbe_base_pointer();
	size = offset - PERF_IDX2OFF(handle->head, buf);
	if (buf->snapshot)
		handle->head += size;

	/*
	 * The session stopped on buffer full, so the record is truncated;
	 * the payload itself is raw (unformatted) CoreSight trace.
	 */
	perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW |
				     PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, size);
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data) {
		/*
		 * Could not start a new transaction (e.g. no space left):
		 * leave the TRBE disabled and clear the per-CPU handle so
		 * the IRQ handler knows no session is active.
		 */
		trbe_drain_and_disable_local();
		*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
		return;
	}
	buf->trbe_limit = compute_trbe_buffer_limit(handle);
	buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
	if (buf->trbe_limit == buf->trbe_base) {
		/* No usable space in the new transaction either */
		trbe_stop_and_truncate_event(handle);
		return;
	}
	*this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
	trbe_enable_hw(buf);
}
737
738static bool is_perf_trbe(struct perf_output_handle *handle)
739{
740 struct trbe_buf *buf = etm_perf_sink_config(handle);
741 struct trbe_cpudata *cpudata = buf->cpudata;
742 struct trbe_drvdata *drvdata = cpudata->drvdata;
743 int cpu = smp_processor_id();
744
745 WARN_ON(buf->trbe_base != get_trbe_base_pointer());
746 WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());
747
748 if (cpudata->mode != CS_MODE_PERF)
749 return false;
750
751 if (cpudata->cpu != cpu)
752 return false;
753
754 if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
755 return false;
756
757 return true;
758}
759
/* TRBE per-CPU interrupt handler: classify the stop reason and react. */
static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
{
	struct perf_output_handle **handle_ptr = dev;
	struct perf_output_handle *handle = *handle_ptr;
	enum trbe_fault_action act;
	u64 status;

	/*
	 * Stop and drain first: TRBSR is only meaningful once the TRBE
	 * has been disabled.
	 */
	trbe_drain_and_disable_local();

	status = read_sysreg_s(SYS_TRBSR_EL1);

	/* Not our interrupt if the TRBE IRQ status bit is not set */
	if (!is_trbe_irq(status))
		return IRQ_NONE;

	clr_trbe_irq();
	isb();

	/* No active session (handle already torn down) - nothing to do */
	if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle))
		return IRQ_NONE;

	if (!is_perf_trbe(handle))
		return IRQ_NONE;

	/*
	 * Flush pending perf irq_work so wakeup bookkeeping is current
	 * before deciding how (or whether) to restart the session.
	 */
	irq_work_run();

	act = trbe_get_fault_act(status);
	switch (act) {
	case TRBE_FAULT_ACT_WRAP:
		trbe_handle_overflow(handle);
		break;
	case TRBE_FAULT_ACT_SPURIOUS:
		trbe_handle_spurious(handle);
		break;
	case TRBE_FAULT_ACT_FATAL:
		trbe_stop_and_truncate_event(handle);
		break;
	}
	return IRQ_HANDLED;
}
810
/* CoreSight sink callbacks implemented by this driver */
static const struct coresight_ops_sink arm_trbe_sink_ops = {
	.enable = arm_trbe_enable,
	.disable = arm_trbe_disable,
	.alloc_buffer = arm_trbe_alloc_buffer,
	.free_buffer = arm_trbe_free_buffer,
	.update_buffer = arm_trbe_update_buffer,
};

static const struct coresight_ops arm_trbe_cs_ops = {
	.sink_ops = &arm_trbe_sink_ops,
};
822
823static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
824{
825 struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
826
827 return sprintf(buf, "%llx\n", cpudata->trbe_align);
828}
829static DEVICE_ATTR_RO(align);
830
831static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
832{
833 struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
834
835 return sprintf(buf, "%d\n", cpudata->trbe_flag);
836}
837static DEVICE_ATTR_RO(flag);
838
/* sysfs attributes published on each per-CPU TRBE sink device */
static struct attribute *arm_trbe_attrs[] = {
	&dev_attr_align.attr,
	&dev_attr_flag.attr,
	NULL,
};

static const struct attribute_group arm_trbe_group = {
	.attrs = arm_trbe_attrs,
};

static const struct attribute_group *arm_trbe_groups[] = {
	&arm_trbe_group,
	NULL,
};
853
/*
 * Runs on the target CPU: reset the local TRBE to a clean state and
 * enable its per-CPU interrupt.
 */
static void arm_trbe_enable_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;

	trbe_reset_local();
	enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
}
861
/*
 * Create and register the per-CPU CoreSight sink device ("trbe<N>") for
 * @cpu; on any failure the CPU is dropped from the supported mask.
 */
static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
{
	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
	struct coresight_desc desc = { 0 };
	struct device *dev;

	/* A sink must not already be registered for this CPU */
	if (WARN_ON(trbe_csdev))
		return;

	dev = &cpudata->drvdata->pdev->dev;
	desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
	if (!desc.name)
		goto cpu_clear;

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
	desc.ops = &arm_trbe_cs_ops;
	desc.pdata = dev_get_platdata(dev);
	desc.groups = arm_trbe_groups;
	desc.dev = dev;
	trbe_csdev = coresight_register(&desc);
	if (IS_ERR(trbe_csdev))
		goto cpu_clear;

	dev_set_drvdata(&trbe_csdev->dev, cpudata);
	coresight_set_percpu_sink(cpu, trbe_csdev);
	return;
cpu_clear:
	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
893
/*
 * Runs on the target CPU (cross-call or cpuhp): validate the local TRBE
 * and cache its properties, clearing the CPU from the supported mask if
 * it cannot be used.
 */
static void arm_trbe_probe_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;
	int cpu = smp_processor_id();
	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
	u64 trbidr;

	if (WARN_ON(!cpudata))
		goto cpu_clear;

	if (!is_trbe_available()) {
		pr_err("TRBE is not implemented on cpu %d\n", cpu);
		goto cpu_clear;
	}

	trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
	if (!is_trbe_programmable(trbidr)) {
		pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
		goto cpu_clear;
	}

	/* Required buffer alignment, as advertised by TRBIDR */
	cpudata->trbe_align = 1ULL << get_trbe_address_align(trbidr);
	if (cpudata->trbe_align > SZ_2K) {
		/* The driver only handles alignments up to 2K */
		pr_err("Unsupported alignment on cpu %d\n", cpu);
		goto cpu_clear;
	}
	cpudata->trbe_flag = get_trbe_flag_update(trbidr);
	cpudata->cpu = cpu;
	cpudata->drvdata = drvdata;
	return;
cpu_clear:
	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
927
928static void arm_trbe_remove_coresight_cpu(void *info)
929{
930 int cpu = smp_processor_id();
931 struct trbe_drvdata *drvdata = info;
932 struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
933 struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
934
935 disable_percpu_irq(drvdata->irq);
936 trbe_reset_local();
937 if (trbe_csdev) {
938 coresight_unregister(trbe_csdev);
939 cpudata->drvdata = NULL;
940 coresight_set_percpu_sink(cpu, NULL);
941 }
942}
943
944static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
945{
946 int cpu;
947
948 drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
949 if (!drvdata->cpudata)
950 return -ENOMEM;
951
952 for_each_cpu(cpu, &drvdata->supported_cpus) {
953 smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1);
954 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
955 arm_trbe_register_coresight_cpu(drvdata, cpu);
956 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
957 smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
958 }
959 return 0;
960}
961
962static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
963{
964 int cpu;
965
966 for_each_cpu(cpu, &drvdata->supported_cpus)
967 smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1);
968 free_percpu(drvdata->cpudata);
969 return 0;
970}
971
/*
 * cpuhp online callback: (re)initialize the TRBE on @cpu.  A CPU seen
 * for the first time still needs probing and sink registration; one
 * that was online before only needs the TRBE re-enabled.
 */
static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);

	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
		/*
		 * No sink registered yet: this CPU was not available when
		 * the driver probed, so probe and register it now.
		 */
		if (!coresight_get_percpu_sink(cpu)) {
			arm_trbe_probe_cpu(drvdata);
			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
				arm_trbe_register_coresight_cpu(drvdata, cpu);
			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
				arm_trbe_enable_cpu(drvdata);
		} else {
			arm_trbe_enable_cpu(drvdata);
		}
	}
	return 0;
}
994
/* cpuhp offline callback: quiesce the TRBE on the outgoing CPU. */
static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);

	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
		disable_percpu_irq(drvdata->irq);
		trbe_reset_local();
	}
	return 0;
}
1005
/*
 * Register a dynamic cpuhp state so CPUs that come and go are set up
 * and torn down automatically.
 */
static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
{
	enum cpuhp_state trbe_online;
	int ret;

	trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
					      arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
	if (trbe_online < 0)
		return trbe_online;

	ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
	if (ret) {
		/* Roll back the state registration on failure */
		cpuhp_remove_multi_state(trbe_online);
		return ret;
	}
	drvdata->trbe_online = trbe_online;
	return 0;
}
1024
1025static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
1026{
1027 cpuhp_remove_multi_state(drvdata->trbe_online);
1028}
1029
/*
 * Look up and request the TRBE PPI.  The set of CPUs covered by the PPI
 * partition defines which CPUs this driver can support.
 */
static int arm_trbe_probe_irq(struct platform_device *pdev,
			      struct trbe_drvdata *drvdata)
{
	int ret;

	drvdata->irq = platform_get_irq(pdev, 0);
	if (drvdata->irq < 0) {
		pr_err("IRQ not found for the platform device\n");
		return drvdata->irq;
	}

	/* The TRBE interrupt must be a per-CPU interrupt (PPI) */
	if (!irq_is_percpu(drvdata->irq)) {
		pr_err("IRQ is not a PPI\n");
		return -EINVAL;
	}

	if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
		return -EINVAL;

	/* Per-CPU slot for the active perf handle, shared with the ISR */
	drvdata->handle = alloc_percpu(struct perf_output_handle *);
	if (!drvdata->handle)
		return -ENOMEM;

	ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
	if (ret) {
		free_percpu(drvdata->handle);
		return ret;
	}
	return 0;
}
1060
/* Release the PPI and the per-CPU handle slots from arm_trbe_probe_irq(). */
static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
{
	free_percpu_irq(drvdata->irq, drvdata->handle);
	free_percpu(drvdata->handle);
}
1066
/* Platform probe: wire up IRQ, per-CPU sinks and CPU hotplug support. */
static int arm_trbe_device_probe(struct platform_device *pdev)
{
	struct coresight_platform_data *pdata;
	struct trbe_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int ret;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	dev_set_drvdata(dev, drvdata);
	dev->platform_data = pdata;
	drvdata->pdev = pdev;
	/* IRQ first: it also discovers the supported CPU mask */
	ret = arm_trbe_probe_irq(pdev, drvdata);
	if (ret)
		return ret;

	ret = arm_trbe_probe_coresight(drvdata);
	if (ret)
		goto probe_failed;

	ret = arm_trbe_probe_cpuhp(drvdata);
	if (ret)
		goto cpuhp_failed;

	return 0;
cpuhp_failed:
	arm_trbe_remove_coresight(drvdata);
probe_failed:
	arm_trbe_remove_irq(drvdata);
	return ret;
}
1104
/* Platform remove: undo probe in reverse order. */
static int arm_trbe_device_remove(struct platform_device *pdev)
{
	struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);

	arm_trbe_remove_cpuhp(drvdata);
	arm_trbe_remove_coresight(drvdata);
	arm_trbe_remove_irq(drvdata);
	return 0;
}
1114
/* Device tree match table */
static const struct of_device_id arm_trbe_of_match[] = {
	{ .compatible = "arm,trace-buffer-extension"},
	{},
};
MODULE_DEVICE_TABLE(of, arm_trbe_of_match);
1120
static struct platform_driver arm_trbe_driver = {
	.driver	= {
		.name = DRVNAME,
		.of_match_table = of_match_ptr(arm_trbe_of_match),
		/* Manual unbind would leave per-CPU state inconsistent */
		.suppress_bind_attrs = true,
	},
	.probe	= arm_trbe_device_probe,
	.remove	= arm_trbe_device_remove,
};
1130
1131static int __init arm_trbe_init(void)
1132{
1133 int ret;
1134
1135 if (arm64_kernel_unmapped_at_el0()) {
1136 pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
1137 return -EOPNOTSUPP;
1138 }
1139
1140 ret = platform_driver_register(&arm_trbe_driver);
1141 if (!ret)
1142 return 0;
1143
1144 pr_err("Error registering %s platform driver\n", DRVNAME);
1145 return ret;
1146}
1147
/* Module exit: unregister the platform driver (remove handles teardown). */
static void __exit arm_trbe_exit(void)
{
	platform_driver_unregister(&arm_trbe_driver);
}
module_init(arm_trbe_init);
module_exit(arm_trbe_exit);

MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
MODULE_LICENSE("GPL v2");
1158