#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

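	/* Wait for TMCSReady bit to be set before programming the TMC */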
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	bool lost = false;
	char *bufp;
	const u32 *barrier;
	u32 read_data, status;
	int i;

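	/*
	 * Check if the buffer wrapped around (i.e. the TMC filled up and
	 * trace data was lost).  If so, barrier packets are inserted at
	 * the start of the dump so that a decoder can re-synchronise.
	 */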
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL)
		lost = true;

	bufp = drvdata->buf;
	drvdata->len = 0;
	barrier = barrier_pkt;
	while (1) {
		for (i = 0; i < drvdata->memwidth; i++) {
			read_data = readl_relaxed(drvdata->base + TMC_RRD);
			if (read_data == 0xFFFFFFFF)
				return;

			if (lost && *barrier) {
				read_data = *barrier;
				barrier++;
			}

			memcpy(bufp, &read_data, 4);
			bufp += 4;
			drvdata->len += 4;
		}
	}
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
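	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */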
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

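	/* Wait for TMCSReady bit to be set before programming the TMC */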
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

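	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */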
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

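		/* Allocate the memory while outside of the spinlock */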
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

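	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */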
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

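	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed once users have read the buffer; see
	 * tmc_read_unprepare_etb() for details.
	 */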
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

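	/* Free memory outside the spinlock if need be */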
	if (!used)
		kfree(buf);

	return ret;
}

static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EINVAL;
		goto out;
	}

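	/*
	 * In Perf mode there can be only one writer per sink.  There is also
	 * no need to continue if the ETB/ETF is already operated from sysFS.
	 */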
	if (drvdata->mode != CS_MODE_DISABLED) {
		ret = -EINVAL;
		goto out;
	}

	drvdata->mode = CS_MODE_PERF;
	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
	int ret;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

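	/* Disable the TMC only if it needs to */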
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etb_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	tmc_etf_enable_hw(drvdata);
	drvdata->mode = CS_MODE_SYSFS;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF enabled\n");
	return 0;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	tmc_etf_disable_hw(drvdata);
	drvdata->mode = CS_MODE_DISABLED;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF disabled\n");
}

static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
				  void **pages, int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	if (cpu == -1)
		cpu = smp_processor_id();
	node = cpu_to_node(cpu);

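	/* Allocate memory structure for interaction with Perf */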
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle,
			      void *sink_config)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = sink_config;

	/* Wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* Find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* And the offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
					  struct perf_output_handle *handle,
					  void *sink_config)
{
	long size = 0;
	struct cs_buffers *buf = sink_config;

	if (buf) {
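		/*
		 * In snapshot mode ->data_size holds the new address of the
		 * ring buffer's head.  The size itself is the whole address
		 * range since we want the latest information.
		 */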
		if (buf->snapshot)
			handle->head = local_xchg(&buf->data_size,
						  buf->nr_pages << PAGE_SHIFT);
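		/*
		 * Tell the tracer PMU how much we got in this run and if
		 * something went wrong along the way.  Nobody else can use
		 * this cs_buffers instance until we are done, so resetting
		 * the counter here is safe.
		 */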
		size = local_xchg(&buf->data_size, 0);
	}

	return size;
}

static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen: updates only occur when operated from Perf */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

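	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */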
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

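	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */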
	if (to_read > handle->size) {
		u32 mask = 0;

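		/*
		 * The value written to RRP must be aligned to the width of
		 * the trace memory databus and to the 16 byte frame
		 * boundary.  Build a mask accordingly: the lower 5 bits of
		 * RRP stay zero for 32, 64 and 128-bit wide trace memory,
		 * the lower 6 bits for 256-bit wide trace memory.
		 */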
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 5);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 6);
			break;
		}

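		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */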
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW about the new read pointer */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	if (lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = barrier_pkt;

	/* Read the trace data out of the TMC, one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && *barrier) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* Wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

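	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head.  In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */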
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.set_buffer = tmc_set_etf_buffer,
	.reset_buffer = tmc_reset_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
};

int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

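	/* The config type is set at probe time and never changes */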
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

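	/* There is no point in reading a TMC in HW FIFO mode */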
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

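	/* The config type is set at probe time and never changes */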
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if it was operated from sysFS */
	if (drvdata->mode == CS_MODE_SYSFS) {
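		/*
		 * The trace run will continue with the same allocated trace
		 * buffer.  Zero-out the buffer so that we don't end up with
		 * stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf can't be
		 * NULL.
		 */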
		memset(drvdata->buf, 0, drvdata->size);
		tmc_etb_enable_hw(drvdata);
	} else {
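		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */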
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to check the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}