1
2
3
4
5
6#include <linux/kernel.h>
7#include <linux/moduleparam.h>
8#include <linux/init.h>
9#include <linux/types.h>
10#include <linux/device.h>
11#include <linux/io.h>
12#include <linux/err.h>
13#include <linux/fs.h>
14#include <linux/slab.h>
15#include <linux/delay.h>
16#include <linux/smp.h>
17#include <linux/sysfs.h>
18#include <linux/stat.h>
19#include <linux/clk.h>
20#include <linux/cpu.h>
21#include <linux/coresight.h>
22#include <linux/coresight-pmu.h>
23#include <linux/pm_wakeup.h>
24#include <linux/amba/bus.h>
25#include <linux/seq_file.h>
26#include <linux/uaccess.h>
27#include <linux/perf_event.h>
28#include <linux/pm_runtime.h>
29#include <asm/sections.h>
30#include <asm/local.h>
31#include <asm/virt.h>
32
33#include "coresight-etm4x.h"
34#include "coresight-etm-perf.h"
35
/* Start tracing at boot time if set (kernel command line / module param). */
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

/* The number of ETM/PTM currently registered. */
static int etm4_count;
/* Per-CPU device state, indexed by logical CPU number. */
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
static void etm4_set_default_config(struct etmv4_config *config);
static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
				  struct perf_event *event);

/* Dynamic CPU hotplug state slot returned for the "online" callback. */
static enum cpuhp_state hp_online;
47
/*
 * etm4_os_unlock - clear the OS Lock so the trace registers can be
 * accessed.  Must execute on the CPU owning the tracer.
 */
static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
	/* Writing 0 to TRCOSLAR unlocks the trace registers */
	writel_relaxed(0x0, drvdata->base + TRCOSLAR);
	drvdata->os_unlock = true;
	isb();
}
55
56static bool etm4_arch_supported(u8 arch)
57{
58
59 switch (arch & 0xf0) {
60 case ETM_ARCH_V4:
61 break;
62 default:
63 return false;
64 }
65 return true;
66}
67
68static int etm4_cpu_id(struct coresight_device *csdev)
69{
70 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
71
72 return drvdata->cpu;
73}
74
75static int etm4_trace_id(struct coresight_device *csdev)
76{
77 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
78
79 return drvdata->trcid;
80}
81
/*
 * Argument bundle for etm4_enable_hw_smp_call(): carries the device in
 * and the enable result code back out of the cross-call.
 */
struct etm4_enable_arg {
	struct etmv4_drvdata *drvdata;
	int rc;
};
86
/*
 * etm4_enable_hw - push the cached software configuration into the
 * trace registers and turn the trace unit on.
 *
 * Must run on the CPU the tracer is affine to: called directly from
 * perf context or via smp cross-call for the sysfs interface.
 *
 * Returns 0 on success, or the error from claiming the device.
 */
static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
	int i, rc;
	struct etmv4_config *config = &drvdata->config;
	struct device *etm_dev = &drvdata->csdev->dev;

	CS_UNLOCK(drvdata->base);

	etm4_os_unlock(drvdata);

	rc = coresight_claim_device_unlocked(drvdata->base);
	if (rc)
		goto done;

	/* Disable the trace unit before programming trace registers */
	writel_relaxed(0, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go up */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
		dev_err(etm_dev,
			"timeout while waiting for Idle Trace Status\n");

	writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
	writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
	/* nothing specific implemented */
	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
	writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
	writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
	writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
	writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
	writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
	writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
	writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
	writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
	writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
	writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
	writel_relaxed(config->vissctlr,
		       drvdata->base + TRCVISSCTLR);
	writel_relaxed(config->vipcssctlr,
		       drvdata->base + TRCVIPCSSCTLR);
	/* one control register per sequencer state transition */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		writel_relaxed(config->seq_ctrl[i],
			       drvdata->base + TRCSEQEVRn(i));
	writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
	writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
	writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
	for (i = 0; i < drvdata->nr_cntr; i++) {
		writel_relaxed(config->cntrldvr[i],
			       drvdata->base + TRCCNTRLDVRn(i));
		writel_relaxed(config->cntr_ctrl[i],
			       drvdata->base + TRCCNTCTLRn(i));
		writel_relaxed(config->cntr_val[i],
			       drvdata->base + TRCCNTVRn(i));
	}

	/*
	 * Resource selector pair 0 is always implemented and reserved.  As
	 * such start at 2.
	 */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		writel_relaxed(config->res_ctrl[i],
			       drvdata->base + TRCRSCTLRn(i));

	/* single-shot comparator controls */
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		writel_relaxed(config->ss_ctrl[i],
			       drvdata->base + TRCSSCCRn(i));
		writel_relaxed(config->ss_status[i],
			       drvdata->base + TRCSSCSRn(i));
		writel_relaxed(config->ss_pe_cmp[i],
			       drvdata->base + TRCSSPCICRn(i));
	}
	/* address comparator value/access-type pairs */
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		writeq_relaxed(config->addr_val[i],
			       drvdata->base + TRCACVRn(i));
		writeq_relaxed(config->addr_acc[i],
			       drvdata->base + TRCACATRn(i));
	}
	/* context ID comparators and masks */
	for (i = 0; i < drvdata->numcidc; i++)
		writeq_relaxed(config->ctxid_pid[i],
			       drvdata->base + TRCCIDCVRn(i));
	writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
	writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);

	/* VMID comparators and masks */
	for (i = 0; i < drvdata->numvmidc; i++)
		writeq_relaxed(config->vmid_val[i],
			       drvdata->base + TRCVMIDCVRn(i));
	writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
	writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);

	/*
	 * Request to keep the trace unit powered and also
	 * emulation of powerdown
	 */
	writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) | TRCPDCR_PU,
		       drvdata->base + TRCPDCR);

	/* Enable the trace unit */
	writel_relaxed(1, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go back down to '0' */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
		dev_err(etm_dev,
			"timeout while waiting for Idle Trace Status\n");

done:
	CS_LOCK(drvdata->base);

	dev_dbg(etm_dev, "cpu: %d enable smp call done: %d\n",
		drvdata->cpu, rc);
	return rc;
}
198
199static void etm4_enable_hw_smp_call(void *info)
200{
201 struct etm4_enable_arg *arg = info;
202
203 if (WARN_ON(!arg))
204 return;
205 arg->rc = etm4_enable_hw(arg->drvdata);
206}
207
208
209
210
211
212
213
214
215
216
217
218
219
220
/*
 * etm4_config_timestamp_event - program a counter to periodically
 * insert timestamp packets in the trace stream.
 *
 * A free counter is set up to self-reload and a free resource selector
 * is pointed at it; the timestamp event (TRCTSCTLR) then fires each
 * time the counter reaches zero, giving regular timestamps that allow
 * correlating trace from different CPUs.
 *
 * Returns 0 on success, -ENOSPC if no counter/selector is free,
 * -EINVAL if the implementation has no counters at all.
 */
static int etm4_config_timestamp_event(struct etmv4_drvdata *drvdata)
{
	int ctridx, ret = -EINVAL;
	int counter, rselector;
	u32 val = 0;
	struct etmv4_config *config = &drvdata->config;

	/* No point in trying if we don't have at least one counter */
	if (!drvdata->nr_cntr)
		goto out;

	/* Find a counter that hasn't been initialised */
	for (ctridx = 0; ctridx < drvdata->nr_cntr; ctridx++)
		if (config->cntr_val[ctridx] == 0)
			break;

	/* All the counters have been configured already, bail out */
	if (ctridx == drvdata->nr_cntr) {
		pr_debug("%s: no available counter found\n", __func__);
		ret = -ENOSPC;
		goto out;
	}

	/*
	 * Search for an available resource selector.  Start at '2' since
	 * selector pair 0 is reserved, and drvdata->nr_resource counts
	 * selector *pairs* - hence the multiplication by 2.
	 */
	for (rselector = 2; rselector < drvdata->nr_resource * 2; rselector++)
		if (!config->res_ctrl[rselector])
			break;

	if (rselector == drvdata->nr_resource * 2) {
		pr_debug("%s: no available resource selector found\n",
			 __func__);
		ret = -ENOSPC;
		goto out;
	}

	/* Remember what counter we used */
	counter = 1 << ctridx;

	/*
	 * Initialise original and reload counter value to the smallest
	 * possible value in order to get as much precision as we can.
	 */
	config->cntr_val[ctridx] = 1;
	config->cntrldvr[ctridx] = 1;

	/* Set the trace counter control register */
	val = 0x1 << 16 |	/* bit[16]: reload counter automatically */
		0x0 << 7 |	/* bits[7]: select single resource */
		0x1;		/* resource selector 1, i.e. always true */

	config->cntr_ctrl[ctridx] = val;

	val = 0x2 << 16 |	/* group 0b0010: counters and sequencers */
		counter << 0;	/* counter to use */

	config->res_ctrl[rselector] = val;

	val = 0x0 << 7 |	/* select single resource */
		rselector;	/* resource selector driving the TS event */

	config->ts_ctrl = val;

	ret = 0;
out:
	return ret;
}
292
/*
 * etm4_parse_event_config - translate a perf event's attributes into
 * an ETMv4 register configuration cached in drvdata->config.
 *
 * Returns 0 on success or a negative error code.
 */
static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
				   struct perf_event *event)
{
	int ret = 0;
	struct etmv4_config *config = &drvdata->config;
	struct perf_event_attr *attr = &event->attr;

	if (!attr) {
		ret = -EINVAL;
		goto out;
	}

	/* Clear configuration from previous run */
	memset(config, 0, sizeof(struct etmv4_config));

	/*
	 * NOTE(review): these are plain assignments, not OR - if both
	 * exclude flags are set only EXCL_USER survives.  Confirm this
	 * is intended before relying on both flags together.
	 */
	if (attr->exclude_kernel)
		config->mode = ETM_MODE_EXCL_KERN;

	if (attr->exclude_user)
		config->mode = ETM_MODE_EXCL_USER;

	/* Always start from the default config */
	etm4_set_default_config(config);

	/* Configure filters specified on the perf cmd line, if any. */
	ret = etm4_set_event_filters(drvdata, event);
	if (ret)
		goto out;

	/* Go from generic option to ETMv4 specifics */
	if (attr->config & BIT(ETM_OPT_CYCACC)) {
		/* TRCCONFIGR.CCI, bit[4]: cycle counting instruction trace */
		config->cfg |= BIT(4);
		/* TRM: Must program this for cycacc to work */
		config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
	}
	if (attr->config & BIT(ETM_OPT_TS)) {
		/*
		 * Configure timestamps to be emitted at regular intervals in
		 * order to correlate instructions executed on different CPUs
		 * (CPU-wide trace scenarios).
		 */
		ret = etm4_config_timestamp_event(drvdata);

		/*
		 * No need to go further if timestamp intervals can't
		 * be configured.
		 */
		if (ret)
			goto out;

		/* TRCCONFIGR.TS, bit[11]: global timestamp tracing */
		config->cfg |= BIT(11);
	}

	if (attr->config & BIT(ETM_OPT_CTXTID))
		/* bit[6], Context ID tracing bit */
		config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);

	/* return stack - enable if selected and supported */
	if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
		/* TRCCONFIGR.RS, bit[12]: return stack enable */
		config->cfg |= BIT(12);

out:
	return ret;
}
359
360static int etm4_enable_perf(struct coresight_device *csdev,
361 struct perf_event *event)
362{
363 int ret = 0;
364 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
365
366 if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
367 ret = -EINVAL;
368 goto out;
369 }
370
371
372 ret = etm4_parse_event_config(drvdata, event);
373 if (ret)
374 goto out;
375
376 ret = etm4_enable_hw(drvdata);
377
378out:
379 return ret;
380}
381
/*
 * etm4_enable_sysfs - start tracing on behalf of the sysfs interface.
 *
 * Returns 0 on success (also the case where the cross-called
 * etm4_enable_hw() succeeded), a negative error code otherwise.
 */
static int etm4_enable_sysfs(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etm4_enable_arg arg = { 0 };
	int ret;

	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
	 * ensures that register writes occur when cpu is powered.
	 */
	arg.drvdata = drvdata;
	ret = smp_call_function_single(drvdata->cpu,
				       etm4_enable_hw_smp_call, &arg, 1);
	if (!ret)
		ret = arg.rc;	/* cross-call worked; report the HW result */
	if (!ret)
		drvdata->sticky_enable = true;
	spin_unlock(&drvdata->spinlock);

	if (!ret)
		dev_dbg(&csdev->dev, "ETM tracing enabled\n");
	return ret;
}
407
408static int etm4_enable(struct coresight_device *csdev,
409 struct perf_event *event, u32 mode)
410{
411 int ret;
412 u32 val;
413 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
414
415 val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
416
417
418 if (val)
419 return -EBUSY;
420
421 switch (mode) {
422 case CS_MODE_SYSFS:
423 ret = etm4_enable_sysfs(csdev);
424 break;
425 case CS_MODE_PERF:
426 ret = etm4_enable_perf(csdev, event);
427 break;
428 default:
429 ret = -EINVAL;
430 }
431
432
433 if (ret)
434 local_set(&drvdata->mode, CS_MODE_DISABLED);
435
436 return ret;
437}
438
/*
 * etm4_disable_hw - turn the trace unit off.
 *
 * Runs on the CPU the tracer is affine to, directly or by smp
 * cross-call.  Mirrors etm4_enable_hw().
 */
static void etm4_disable_hw(void *info)
{
	u32 control;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* power can be removed from the trace unit now */
	control = readl_relaxed(drvdata->base + TRCPDCR);
	control &= ~TRCPDCR_PU;
	writel_relaxed(control, drvdata->base + TRCPDCR);

	control = readl_relaxed(drvdata->base + TRCPRGCTLR);

	/* EN, bit[0] Trace unit enable bit */
	control &= ~0x1;

	/* make sure everything completes before disabling */
	mb();
	isb();
	writel_relaxed(control, drvdata->base + TRCPRGCTLR);

	coresight_disclaim_device_unlocked(drvdata->base);

	CS_LOCK(drvdata->base);

	dev_dbg(&drvdata->csdev->dev,
		"cpu: %d disable smp call done\n", drvdata->cpu);
}
468
/*
 * etm4_disable_perf - stop tracing for a perf session and record the
 * start/stop logic status so it can be restored when the process is
 * scheduled back in.
 */
static int etm4_disable_perf(struct coresight_device *csdev,
			     struct perf_event *event)
{
	u32 control;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
		return -EINVAL;

	etm4_disable_hw(drvdata);

	/*
	 * Check if the start/stop logic was active when the unit was stopped.
	 * That way we can re-enable the start/stop logic when the process is
	 * scheduled again.  Configuration of the start/stop logic happens in
	 * function etm4_set_event_filters().
	 */
	control = readl_relaxed(drvdata->base + TRCVICTLR);
	/* TRCVICTLR::SSSTATUS, bit[9] */
	filters->ssstatus = (control & BIT(9));

	return 0;
}
493
/* etm4_disable_sysfs - stop tracing on behalf of the sysfs interface. */
static void etm4_disable_sysfs(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	cpus_read_lock();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);

	spin_unlock(&drvdata->spinlock);
	cpus_read_unlock();

	dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}
518
519static void etm4_disable(struct coresight_device *csdev,
520 struct perf_event *event)
521{
522 u32 mode;
523 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
524
525
526
527
528
529
530 mode = local_read(&drvdata->mode);
531
532 switch (mode) {
533 case CS_MODE_DISABLED:
534 break;
535 case CS_MODE_SYSFS:
536 etm4_disable_sysfs(csdev);
537 break;
538 case CS_MODE_PERF:
539 etm4_disable_perf(csdev, event);
540 break;
541 }
542
543 if (mode)
544 local_set(&drvdata->mode, CS_MODE_DISABLED);
545}
546
/* Operations the coresight core uses to drive this trace source. */
static const struct coresight_ops_source etm4_source_ops = {
	.cpu_id = etm4_cpu_id,
	.trace_id = etm4_trace_id,
	.enable = etm4_enable,
	.disable = etm4_disable,
};

static const struct coresight_ops etm4_cs_ops = {
	.source_ops = &etm4_source_ops,
};
557
/*
 * etm4_init_arch_data - discover the trace unit's capabilities.
 *
 * Reads the TRCIDRx ID registers and caches the implemented features
 * in @info (a struct etmv4_drvdata *).  Must execute on the CPU the
 * tracer is affine to, hence the smp-call-friendly void * signature.
 */
static void etm4_init_arch_data(void *info)
{
	u32 etmidr0;
	u32 etmidr1;
	u32 etmidr2;
	u32 etmidr3;
	u32 etmidr4;
	u32 etmidr5;
	struct etmv4_drvdata *drvdata = info;

	/* Make sure all registers are accessible */
	etm4_os_unlock(drvdata);

	CS_UNLOCK(drvdata->base);

	/* find all capabilities of the tracing unit */
	etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);

	/* INSTP0, bits[2:1] P0 tracing support field */
	if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
		drvdata->instrp0 = true;
	else
		drvdata->instrp0 = false;

	/* TRCBB, bit[5] Branch broadcast tracing support bit */
	if (BMVAL(etmidr0, 5, 5))
		drvdata->trcbb = true;
	else
		drvdata->trcbb = false;

	/* TRCCOND, bit[6] Conditional instruction tracing support bit */
	if (BMVAL(etmidr0, 6, 6))
		drvdata->trccond = true;
	else
		drvdata->trccond = false;

	/* TRCCCI, bit[7] Cycle counting instruction bit */
	if (BMVAL(etmidr0, 7, 7))
		drvdata->trccci = true;
	else
		drvdata->trccci = false;

	/* RETSTACK, bit[9] Return stack bit */
	if (BMVAL(etmidr0, 9, 9))
		drvdata->retstack = true;
	else
		drvdata->retstack = false;

	/* NUMEVENT, bits[11:10] Number of events field */
	drvdata->nr_event = BMVAL(etmidr0, 10, 11);
	/* QSUPP, bits[16:15] Q element support field */
	drvdata->q_support = BMVAL(etmidr0, 15, 16);
	/* TSSIZE, bits[28:24] Global timestamp size field */
	drvdata->ts_size = BMVAL(etmidr0, 24, 28);

	/* base architecture of trace unit */
	etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
	/*
	 * TRCARCHMIN, bits[7:4] architecture minor version number
	 * TRCARCHMAJ, bits[11:8] architecture major version number
	 */
	drvdata->arch = BMVAL(etmidr1, 4, 11);

	/* maximum size of resources */
	etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
	/* CIDSIZE, bits[9:5] Indicates the Context ID size */
	drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
	/* VMIDSIZE, bits[14:10] Indicates the VMID size */
	drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
	/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
	drvdata->ccsize = BMVAL(etmidr2, 25, 28);

	etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
	/* CCITMIN, bits[11:0] minimum cycle threshold that can be programmed */
	drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
	/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
	drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
	/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
	drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);

	/*
	 * TRCERR, bit[24] whether a trace unit can trace a
	 * system error exception.
	 */
	if (BMVAL(etmidr3, 24, 24))
		drvdata->trc_error = true;
	else
		drvdata->trc_error = false;

	/* SYNCPR, bit[25] fixed synchronization period? */
	if (BMVAL(etmidr3, 25, 25))
		drvdata->syncpr = true;
	else
		drvdata->syncpr = false;

	/* STALLCTL, bit[26] is stall control implemented? */
	if (BMVAL(etmidr3, 26, 26))
		drvdata->stallctl = true;
	else
		drvdata->stallctl = false;

	/* SYSSTALL, bit[27] can the system be forced to stall the PE? */
	if (BMVAL(etmidr3, 27, 27))
		drvdata->sysstall = true;
	else
		drvdata->sysstall = false;

	/* NUMPROC, bits[30:28] the number of PEs available for tracing */
	drvdata->nr_pe = BMVAL(etmidr3, 28, 30);

	/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
	if (BMVAL(etmidr3, 31, 31))
		drvdata->nooverflow = true;
	else
		drvdata->nooverflow = false;

	/* number of resources trace unit supports */
	etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
	/* NUMACPAIRS, bits[3:0] number of address comparator pairs */
	drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
	/* NUMPC, bits[15:12] number of PE comparator inputs */
	drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
	/*
	 * NUMRSPAIR, bits[19:16]
	 * The number of resource pairs conveyed by the HW starts at 0, i.e a
	 * value of 0x0 indicate 1 resource pair, 0x1 indicate 2 and so on.
	 * As such add 1 to the value of NUMRSPAIR for a better representation.
	 */
	drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
	/*
	 * NUMSSCC, bits[23:20] the number of single-shot
	 * comparator controls for tracing
	 */
	drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
	/* NUMCIDC, bits[27:24] number of Context ID comparators */
	drvdata->numcidc = BMVAL(etmidr4, 24, 27);
	/* NUMVMIDC, bits[31:28] number of VMID comparators */
	drvdata->numvmidc = BMVAL(etmidr4, 28, 31);

	etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
	/* NUMEXTIN, bits[8:0] number of external inputs implemented */
	drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
	/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
	drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
	/* ATBTRIG, bit[22] implementation can support ATB triggers? */
	if (BMVAL(etmidr5, 22, 22))
		drvdata->atbtrig = true;
	else
		drvdata->atbtrig = false;
	/*
	 * LPOVERRIDE, bit[23] implementation supports
	 * low-power state override
	 */
	if (BMVAL(etmidr5, 23, 23))
		drvdata->lpoverride = true;
	else
		drvdata->lpoverride = false;
	/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
	drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
	/* NUMCNTR, bits[30:28] number of counters available for tracing */
	drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
	CS_LOCK(drvdata->base);
}
721
/* etm4_set_default_config - baseline register values for a new session. */
static void etm4_set_default_config(struct etmv4_config *config)
{
	/* disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* disable stalling */
	config->stall_ctrl = 0x0;

	/* enable trace synchronization every 4096 bytes, if available */
	config->syncfreq = 0xC;

	/* disable timestamp event */
	config->ts_ctrl = 0x0;

	/* TRCVICTLR::EVENT = 0x01, select the always on logic */
	config->vinst_ctrl |= BIT(0);
}
740
/*
 * etm4_get_ns_access_type - build the non-secure exception-level
 * filter bits for an address comparator, honouring the session's
 * kernel/user exclusion flags.
 */
static u64 etm4_get_ns_access_type(struct etmv4_config *config)
{
	u64 access_type = 0;

	/*
	 * EXLEVEL_NS, bits[15:12]
	 * The Exception levels are:
	 *   Bit[12] Exception level 0 - Application
	 *   Bit[13] Exception level 1 - OS
	 *   Bit[14] Exception level 2 - Hypervisor
	 *   Bit[15] Never implemented
	 *
	 * Always stay away from hypervisor mode.
	 */
	if (!is_kernel_in_hyp_mode()) {
		/* Kernel at EL1: exclude EL2, and EL1 if kernel is excluded */
		access_type = ETM_EXLEVEL_NS_HYP;
		if (config->mode & ETM_MODE_EXCL_KERN)
			access_type |= ETM_EXLEVEL_NS_OS;
	} else if (config->mode & ETM_MODE_EXCL_KERN) {
		/* VHE: the kernel runs at EL2, so exclude that level instead */
		access_type = ETM_EXLEVEL_NS_HYP;
	}

	if (config->mode & ETM_MODE_EXCL_USER)
		access_type |= ETM_EXLEVEL_NS_APP;

	return access_type;
}
767
768static u64 etm4_get_access_type(struct etmv4_config *config)
769{
770 u64 access_type = etm4_get_ns_access_type(config);
771
772
773
774
775
776 access_type |= (ETM_EXLEVEL_S_APP |
777 ETM_EXLEVEL_S_OS |
778 ETM_EXLEVEL_S_HYP);
779
780 return access_type;
781}
782
/*
 * etm4_set_comparator_filter - program an address *range* filter on
 * comparator pair starting at index @comparator (always even).
 */
static void etm4_set_comparator_filter(struct etmv4_config *config,
				       u64 start, u64 stop, int comparator)
{
	u64 access_type = etm4_get_access_type(config);

	/* First half of the pair holds the range start */
	config->addr_val[comparator] = start;
	config->addr_acc[comparator] = access_type;
	config->addr_type[comparator] = ETM_ADDR_TYPE_RANGE;

	/* Second half of the pair holds the range end */
	config->addr_val[comparator + 1] = stop;
	config->addr_acc[comparator + 1] = access_type;
	config->addr_type[comparator + 1] = ETM_ADDR_TYPE_RANGE;

	/*
	 * Configure the ViewInst include/exclude function to include this
	 * address range comparator.
	 *
	 * @comparator is halved because it indexes the flat addr_val[]
	 * array while TRCVIIECTLR selects comparator *pairs*:
	 *
	 *   addr_val[0]/addr_val[1] -> pair 0
	 *   addr_val[2]/addr_val[3] -> pair 1
	 *   ...
	 */
	config->viiectlr |= BIT(comparator / 2);
}
815
816static void etm4_set_start_stop_filter(struct etmv4_config *config,
817 u64 address, int comparator,
818 enum etm_addr_type type)
819{
820 int shift;
821 u64 access_type = etm4_get_access_type(config);
822
823
824 config->addr_val[comparator] = address;
825 config->addr_acc[comparator] = access_type;
826 config->addr_type[comparator] = type;
827
828
829
830
831
832
833 shift = (type == ETM_ADDR_TYPE_START ? 0 : 16);
834 config->vissctlr |= BIT(shift + comparator);
835}
836
/* etm4_set_default_filter - trace the entire address space. */
static void etm4_set_default_filter(struct etmv4_config *config)
{
	u64 start, stop;

	/*
	 * Configure address range comparator '0' to encompass all
	 * possible addresses.
	 */
	start = 0x0;
	stop = ~0x0;

	etm4_set_comparator_filter(config, start, stop,
				   ETM_DEFAULT_ADDR_COMP);

	/*
	 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
	 * in the started state
	 */
	config->vinst_ctrl |= BIT(9);

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;
}
860
/* etm4_set_default - default configuration: trace everything. */
static void etm4_set_default(struct etmv4_config *config)
{
	if (WARN_ON_ONCE(!config))
		return;

	/*
	 * Make default initialisation trace everything
	 *
	 * Select the "always true" resource selector on the
	 * "Enabling Event" line and configure address range comparator
	 * '0' to trace all the possible address range.  From there
	 * configure the "include/exclude" engine to include address
	 * range comparator '0'.
	 */
	etm4_set_default_config(config);
	etm4_set_default_filter(config);
}
878
879static int etm4_get_next_comparator(struct etmv4_drvdata *drvdata, u32 type)
880{
881 int nr_comparator, index = 0;
882 struct etmv4_config *config = &drvdata->config;
883
884
885
886
887
888 nr_comparator = drvdata->nr_addr_cmp * 2;
889
890
891 while (index < nr_comparator) {
892 switch (type) {
893 case ETM_ADDR_TYPE_RANGE:
894 if (config->addr_type[index] == ETM_ADDR_TYPE_NONE &&
895 config->addr_type[index + 1] == ETM_ADDR_TYPE_NONE)
896 return index;
897
898
899 index += 2;
900 break;
901 case ETM_ADDR_TYPE_START:
902 case ETM_ADDR_TYPE_STOP:
903 if (config->addr_type[index] == ETM_ADDR_TYPE_NONE)
904 return index;
905
906
907 index += 1;
908 break;
909 default:
910 return -EINVAL;
911 }
912 }
913
914
915 return -ENOSPC;
916}
917
/*
 * etm4_set_event_filters - translate the perf address filters attached
 * to @event into comparator programming in drvdata->config.  Falls back
 * to the default "trace everything" filter when no filters are given.
 *
 * Returns 0 on success, -ENOSPC/-EINVAL on comparator exhaustion or an
 * unknown filter type.
 */
static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
				  struct perf_event *event)
{
	int i, comparator, ret = 0;
	u64 address;
	struct etmv4_config *config = &drvdata->config;
	struct etm_filters *filters = event->hw.addr_filters;

	if (!filters)
		goto default_filter;

	/* Sync events with what Perf got */
	perf_event_addr_filters_sync(event);

	/*
	 * If there are no filters to deal with simply go ahead with
	 * the default filter, i.e the entire address range.
	 */
	if (!filters->nr_filters)
		goto default_filter;

	for (i = 0; i < filters->nr_filters; i++) {
		struct etm_filter *filter = &filters->etm_filter[i];
		enum etm_addr_type type = filter->type;

		/* See if a comparator is free. */
		comparator = etm4_get_next_comparator(drvdata, type);
		if (comparator < 0) {
			ret = comparator;
			goto out;
		}

		switch (type) {
		case ETM_ADDR_TYPE_RANGE:
			etm4_set_comparator_filter(config,
						   filter->start_addr,
						   filter->stop_addr,
						   comparator);

			/*
			 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
			 * in the started state
			 */
			config->vinst_ctrl |= BIT(9);

			/* No start-stop filtering for ViewInst */
			config->vissctlr = 0x0;
			break;
		case ETM_ADDR_TYPE_START:
		case ETM_ADDR_TYPE_STOP:
			/* Get the right start or stop address */
			address = (type == ETM_ADDR_TYPE_START ?
				   filter->start_addr :
				   filter->stop_addr);

			/* Configure comparator */
			etm4_set_start_stop_filter(config, address,
						   comparator, type);

			/*
			 * If filters::ssstatus == 1, trace acquisition was
			 * started but the process was yanked away before the
			 * stop address was hit.  As such the start/stop
			 * logic needs to be re-started so that tracing can
			 * resume where it left.
			 *
			 * The start/stop logic status when a process is
			 * scheduled out is checked in function
			 * etm4_disable_perf().
			 */
			if (filters->ssstatus)
				config->vinst_ctrl |= BIT(9);

			/* No include/exclude filtering for ViewInst */
			config->viiectlr = 0x0;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

	goto out;


default_filter:
	/* Trace everything */
	etm4_set_default_filter(config);

out:
	return ret;
}
1008
/*
 * etm4_config_trace_mode - re-apply kernel/user exclusion to the
 * default address comparator pair after config->mode changed.
 */
void etm4_config_trace_mode(struct etmv4_config *config)
{
	u32 addr_acc, mode;

	mode = config->mode;
	mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);

	/* excluding kernel AND user space doesn't make sense */
	WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER));

	/* nothing to do if neither flag is set */
	if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
		return;

	addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
	/* clear the non-secure exception-level bits of the default config */
	addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
		      ETM_EXLEVEL_NS_HYP);

	addr_acc |= etm4_get_ns_access_type(config);

	/* both halves of the default range comparator get the same value */
	config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
	config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
}
1033
1034static int etm4_online_cpu(unsigned int cpu)
1035{
1036 if (!etmdrvdata[cpu])
1037 return 0;
1038
1039 if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
1040 coresight_enable(etmdrvdata[cpu]->csdev);
1041 return 0;
1042}
1043
1044static int etm4_starting_cpu(unsigned int cpu)
1045{
1046 if (!etmdrvdata[cpu])
1047 return 0;
1048
1049 spin_lock(&etmdrvdata[cpu]->spinlock);
1050 if (!etmdrvdata[cpu]->os_unlock) {
1051 etm4_os_unlock(etmdrvdata[cpu]);
1052 etmdrvdata[cpu]->os_unlock = true;
1053 }
1054
1055 if (local_read(&etmdrvdata[cpu]->mode))
1056 etm4_enable_hw(etmdrvdata[cpu]);
1057 spin_unlock(&etmdrvdata[cpu]->spinlock);
1058 return 0;
1059}
1060
1061static int etm4_dying_cpu(unsigned int cpu)
1062{
1063 if (!etmdrvdata[cpu])
1064 return 0;
1065
1066 spin_lock(&etmdrvdata[cpu]->spinlock);
1067 if (local_read(&etmdrvdata[cpu]->mode))
1068 etm4_disable_hw(etmdrvdata[cpu]);
1069 spin_unlock(&etmdrvdata[cpu]->spinlock);
1070 return 0;
1071}
1072
/* Derive this tracer's trace stream ID from the CPU it is affine to. */
static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
{
	drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}
1077
1078static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
1079{
1080 int ret;
1081 void __iomem *base;
1082 struct device *dev = &adev->dev;
1083 struct coresight_platform_data *pdata = NULL;
1084 struct etmv4_drvdata *drvdata;
1085 struct resource *res = &adev->res;
1086 struct coresight_desc desc = { 0 };
1087
1088 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
1089 if (!drvdata)
1090 return -ENOMEM;
1091
1092 dev_set_drvdata(dev, drvdata);
1093
1094
1095 base = devm_ioremap_resource(dev, res);
1096 if (IS_ERR(base))
1097 return PTR_ERR(base);
1098
1099 drvdata->base = base;
1100
1101 spin_lock_init(&drvdata->spinlock);
1102
1103 drvdata->cpu = coresight_get_cpu(dev);
1104 if (drvdata->cpu < 0)
1105 return drvdata->cpu;
1106
1107 desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
1108 if (!desc.name)
1109 return -ENOMEM;
1110
1111 cpus_read_lock();
1112 etmdrvdata[drvdata->cpu] = drvdata;
1113
1114 if (smp_call_function_single(drvdata->cpu,
1115 etm4_init_arch_data, drvdata, 1))
1116 dev_err(dev, "ETM arch init failed\n");
1117
1118 if (!etm4_count++) {
1119 cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
1120 "arm/coresight4:starting",
1121 etm4_starting_cpu, etm4_dying_cpu);
1122 ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
1123 "arm/coresight4:online",
1124 etm4_online_cpu, NULL);
1125 if (ret < 0)
1126 goto err_arch_supported;
1127 hp_online = ret;
1128 }
1129
1130 cpus_read_unlock();
1131
1132 if (etm4_arch_supported(drvdata->arch) == false) {
1133 ret = -EINVAL;
1134 goto err_arch_supported;
1135 }
1136
1137 etm4_init_trace_id(drvdata);
1138 etm4_set_default(&drvdata->config);
1139
1140 pdata = coresight_get_platform_data(dev);
1141 if (IS_ERR(pdata)) {
1142 ret = PTR_ERR(pdata);
1143 goto err_arch_supported;
1144 }
1145 adev->dev.platform_data = pdata;
1146
1147 desc.type = CORESIGHT_DEV_TYPE_SOURCE;
1148 desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
1149 desc.ops = &etm4_cs_ops;
1150 desc.pdata = pdata;
1151 desc.dev = dev;
1152 desc.groups = coresight_etmv4_groups;
1153 drvdata->csdev = coresight_register(&desc);
1154 if (IS_ERR(drvdata->csdev)) {
1155 ret = PTR_ERR(drvdata->csdev);
1156 goto err_arch_supported;
1157 }
1158
1159 ret = etm_perf_symlink(drvdata->csdev, true);
1160 if (ret) {
1161 coresight_unregister(drvdata->csdev);
1162 goto err_arch_supported;
1163 }
1164
1165 pm_runtime_put(&adev->dev);
1166 dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n",
1167 drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
1168
1169 if (boot_enable) {
1170 coresight_enable(drvdata->csdev);
1171 drvdata->boot_enable = true;
1172 }
1173
1174 return 0;
1175
1176err_arch_supported:
1177 if (--etm4_count == 0) {
1178 cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
1179 if (hp_online)
1180 cpuhp_remove_state_nocalls(hp_online);
1181 }
1182 return ret;
1183}
1184
/* UCI (Unique Component Identification) match data for ETMv4 parts. */
static struct amba_cs_uci_id uci_id_etm4[] = {
	{
		/* ETMv4 UCI data: DEVARCH major version masked out */
		.devarch = 0x47704a13,
		.devarch_mask = 0xfff0ffff,
		.devtype = 0x00000013,
	}
};
1193
/* AMBA peripheral IDs of the supported ETMv4 implementations. */
static const struct amba_id etm4_ids[] = {
	CS_AMBA_ID(0x000bb95d),
	CS_AMBA_ID(0x000bb95e),
	CS_AMBA_ID(0x000bb95a),
	CS_AMBA_ID(0x000bb959),
	CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),
	{},
};
1202
/* Built-in AMBA driver; bind/unbind via sysfs is suppressed. */
static struct amba_driver etm4x_driver = {
	.drv = {
		.name = "coresight-etm4x",
		.suppress_bind_attrs = true,
	},
	.probe = etm4_probe,
	.id_table = etm4_ids,
};
builtin_amba_driver(etm4x_driver);
1212