1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/circ_buf.h>
19#include <linux/coresight.h>
20#include <linux/perf_event.h>
21#include <linux/slab.h>
22#include "coresight-priv.h"
23#include "coresight-tmc.h"
24
/*
 * Program the TMC as an ETB sink (circular-buffer mode) and start capture.
 * Performs only the register sequence; locking is the caller's job.
 */
static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for the TMC to report ready before reprogramming it */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/*
	 * Formatter/trigger-insertion on, plus flush and stop conditions
	 * tied to the trigger event (per the TMC_FFCR bits set below).
	 */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	/* Program the trigger counter configured via sysFS */
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
43
44static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
45{
46 char *bufp;
47 u32 read_data;
48 int i;
49
50 bufp = drvdata->buf;
51 drvdata->len = 0;
52 while (1) {
53 for (i = 0; i < drvdata->memwidth; i++) {
54 read_data = readl_relaxed(drvdata->base + TMC_RRD);
55 if (read_data == 0xFFFFFFFF)
56 return;
57 memcpy(bufp, &read_data, 4);
58 bufp += 4;
59 drvdata->len += 4;
60 }
61 }
62}
63
/* Stop ETB capture, harvesting the buffer first when driven from sysFS. */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to
	 * be read before the TMC is disabled.  (Perf mode drains via
	 * tmc_update_etf_buffer() instead.)
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
79
/*
 * Program the TMC as an ETF link (hardware FIFO mode) and start it.
 * In FIFO mode trace flows through the device rather than being stored.
 */
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for the TMC to report ready before reprogramming it */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	/* Formatter and trigger insertion only — no flush/stop conditions */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	/* NOTE(review): buffer watermark cleared to 0 — presumably to
	 * drain data as soon as it arrives; confirm against the TMC TRM.
	 */
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
95
/* Flush and stop the ETF FIFO, then disable capture. */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
105
/*
 * Enable the TMC as a sink for a sysFS-driven session.
 *
 * A drvdata->size byte backing buffer is allocated lazily on first
 * enable.  kzalloc(GFP_KERNEL) may sleep, so the spinlock is dropped
 * around the allocation and re-taken afterwards; "used" records whether
 * the fresh allocation was actually installed so the spare copy can be
 * freed outside the lock.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY if a
 * buffer read is in progress.
 */
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * sysFS mode can have multiple writers per sink.  If the sink is
	 * already enabled there is nothing to do: keep ret == 0 so the
	 * caller sees success.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata->buf isn't NULL here, the buffer was allocated by a
	 * previous run (or installed by another CPU while we dropped the
	 * lock) but never read: simply zero it out.  Otherwise install
	 * the buffer allocated above.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");

	return ret;
}
174
175static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
176{
177 int ret = 0;
178 unsigned long flags;
179 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
180
181 spin_lock_irqsave(&drvdata->spinlock, flags);
182 if (drvdata->reading) {
183 ret = -EINVAL;
184 goto out;
185 }
186
187
188
189
190
191
192 if (drvdata->mode != CS_MODE_DISABLED) {
193 ret = -EINVAL;
194 goto out;
195 }
196
197 drvdata->mode = CS_MODE_PERF;
198 tmc_etb_enable_hw(drvdata);
199out:
200 spin_unlock_irqrestore(&drvdata->spinlock, flags);
201
202 return ret;
203}
204
205static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
206{
207 switch (mode) {
208 case CS_MODE_SYSFS:
209 return tmc_enable_etf_sink_sysfs(csdev);
210 case CS_MODE_PERF:
211 return tmc_enable_etf_sink_perf(csdev);
212 }
213
214
215 return -EINVAL;
216}
217
/*
 * Disable the TMC sink.  If a buffer read is in progress the reader owns
 * the device, so leave the hardware and mode untouched and bail out.
 */
static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etb_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}
239
/*
 * Enable the ETF as a link so trace flows through it in FIFO mode.
 * The inport/outport arguments are unused here — a TMC has a single
 * path through it.  Returns -EBUSY while a buffer read is in progress.
 */
static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	tmc_etf_enable_hw(drvdata);
	/* Link usage is only ever driven from sysFS */
	drvdata->mode = CS_MODE_SYSFS;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF enabled\n");
	return 0;
}
259
/*
 * Disable the ETF link.  As with the sink path, a read in progress
 * means the device is owned by the reader, so do nothing in that case.
 */
static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	tmc_etf_disable_hw(drvdata);
	drvdata->mode = CS_MODE_DISABLED;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC disabled\n");
}
278
/*
 * Allocate a cs_buffers descriptor for a perf session, placed on the
 * NUMA node of the traced CPU (cpu == -1 means the current CPU).  The
 * trace pages themselves belong to perf and are only referenced here.
 * Returns the descriptor, or NULL on allocation failure.
 */
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
				  void **pages, int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	if (cpu == -1)
		cpu = smp_processor_id();
	node = cpu_to_node(cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	/* overwrite == true selects perf's snapshot mode */
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}
300
/* Release the cs_buffers descriptor; kfree() tolerates NULL. */
static void tmc_free_etf_buffer(void *config)
{
	kfree(config);
}
307
308static int tmc_set_etf_buffer(struct coresight_device *csdev,
309 struct perf_output_handle *handle,
310 void *sink_config)
311{
312 int ret = 0;
313 unsigned long head;
314 struct cs_buffers *buf = sink_config;
315
316
317 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
318
319
320 buf->cur = head / PAGE_SIZE;
321
322
323 buf->offset = head % PAGE_SIZE;
324
325 local_set(&buf->data_size, 0);
326
327 return ret;
328}
329
330static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
331 struct perf_output_handle *handle,
332 void *sink_config, bool *lost)
333{
334 long size = 0;
335 struct cs_buffers *buf = sink_config;
336
337 if (buf) {
338
339
340
341
342
343 if (buf->snapshot)
344 handle->head = local_xchg(&buf->data_size,
345 buf->nr_pages << PAGE_SHIFT);
346
347
348
349
350
351
352
353 *lost = !!local_xchg(&buf->lost, 0);
354 size = local_xchg(&buf->data_size, 0);
355 }
356
357 return size;
358}
359
/*
 * Drain the TMC's internal RAM into the perf AUX pages at the end of a
 * perf session.  Computes how much data is available from RRP/RWP (or
 * the whole buffer on wrap-around), trims and re-aligns the read pointer
 * when more data exists than perf has room for, then copies word by
 * word through the RRD register.
 */
static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	int i, cur;
	u32 *buf_ptr;
	u32 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen — only perf sessions reach this path */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
	write_ptr = readl_relaxed(drvdata->base + TMC_RWP);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so, the entire buffer's worth of data is
	 * available and older trace was overwritten (counted as lost).
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		local_inc(&buf->lost);
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * More data in the TMC than space in the perf AUX buffer: keep
	 * only the most recent handle->size bytes by advancing RRP.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * Pick a mask so the value written back to RRP stays
		 * suitably aligned for this memory interface width:
		 * GENMASK(31, 5) clears the five LSBs (32-byte aligned)
		 * for 32/64/128-bit wide trace memory, GENMASK(31, 6)
		 * clears six LSBs (64-byte aligned) for 256-bit.
		 * NOTE(review): exact alignment requirement comes from
		 * the TMC TRM — confirm these widths against it.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 5);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 6);
			break;
		}

		/* Trim the read size down to an aligned amount */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up to the newest data ... */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* ... wrapping within the circular buffer if needed */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW where to resume reading */
		writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
		/* The skipped-over bytes count as lost trace */
		local_inc(&buf->lost);
	}

	cur = buf->cur;
	offset = buf->offset;

	/* Copy the trace out one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode record the final write position (consumed by
	 * tmc_reset_etf_buffer() as the new head); otherwise accumulate
	 * the number of bytes just added.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}
470
/* Sink callbacks shared by the ETB and ETF configurations */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.set_buffer = tmc_set_etf_buffer,
	.reset_buffer = tmc_reset_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};

/* Link callbacks — only meaningful for the ETF configuration */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};

/* An ETB can only act as a sink ... */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};

/* ... while an ETF can act as both a sink and a link */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
};
494
/*
 * Prepare the ETB/ETF for a buffer read from sysFS.
 *
 * Stops the hardware if a sysFS session is running (which also dumps
 * the internal RAM into drvdata->buf) and marks the device as being
 * read.  Returns -EBUSY if a read is already in progress, -EINVAL if
 * the TMC is not in circular-buffer mode, is driven by perf, or has no
 * buffer to hand out.
 */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
542
/*
 * End a sysFS buffer read and return the device to its previous state.
 *
 * If a sysFS session was active the hardware is restarted with a
 * zeroed buffer; otherwise the buffer has served its purpose and is
 * released (outside the spinlock).  Returns -EINVAL if the TMC is not
 * in circular-buffer mode, 0 otherwise.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if it was stopped for the read */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated
		 * trace buffer.  Zero it out first so we don't end up
		 * with stale data from the previous run.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just
		 * read: detach it so it can be freed below.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory outside of the spinlock */
	kfree(buf);

	return 0;
}
595