// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Embedded Trace Buffer driver
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>
#include <linux/mm.h>
#include <linux/perf_event.h>

#include "coresight-priv.h"
#include "coresight-etm-perf.h"

#define ETB_RAM_DEPTH_REG	0x004
#define ETB_STATUS_REG		0x00c
#define ETB_RAM_READ_DATA_REG	0x010
#define ETB_RAM_READ_POINTER	0x014
#define ETB_RAM_WRITE_POINTER	0x018
#define ETB_TRG			0x01c
#define ETB_CTL_REG		0x020
#define ETB_RWD_REG		0x024
#define ETB_FFSR		0x300
#define ETB_FFCR		0x304
#define ETB_ITMISCOP0		0xee0
#define ETB_ITTRFLINACK		0xee4
#define ETB_ITTRFLIN		0xee8
#define ETB_ITATBDATA0		0xeeC
#define ETB_ITATBCTR2		0xef0
#define ETB_ITATBCTR1		0xef4
#define ETB_ITATBCTR0		0xef8

/* register description */
/* STS - 0x00C */
#define ETB_STATUS_RAM_FULL	BIT(0)
/* CTL - 0x020 */
#define ETB_CTL_CAPT_EN		BIT(0)
/* FFCR - 0x304 */
#define ETB_FFCR_EN_FTC		BIT(0)
#define ETB_FFCR_FON_MAN	BIT(6)
#define ETB_FFCR_STOP_FI	BIT(12)
#define ETB_FFCR_STOP_TRIGGER	BIT(13)

#define ETB_FFCR_BIT		6
#define ETB_FFSR_BIT		1
#define ETB_FRAME_SIZE_WORDS	4

DEFINE_CORESIGHT_DEVLIST(etb_devs, "etb");

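/**
 * struct etb_drvdata - specifics associated to an ETB component
 * @base:	memory mapped base address for this component.
 * @atclk:	optional clock for the core parts of the ETB.
 * @csdev:	component vitals needed by the framework.
 * @miscdev:	specifics to handle the "/dev/<name>" entry.
 * @spinlock:	serialises accesses to the hardware and to @mode.
 * @reading:	synchronises user space access to the etb buffer.
 * @pid:	Process ID of the process being monitored by the
 *		session that is using this component, -1 if none.
 * @buf:	area of memory where the ETB buffer content gets copied.
 * @mode:	how this ETB is being used, perf mode or sysfs mode.
 * @buffer_depth: size of @buf in 32-bit words.
 * @trigger_cntr: amount of words to store after a trigger.
 */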
struct etb_drvdata {
	void __iomem		*base;
	struct clk		*atclk;
	struct coresight_device	*csdev;
	struct miscdevice	miscdev;
	spinlock_t		spinlock;
	local_t			reading;
	pid_t			pid;
	u8			*buf;
	u32			mode;
	u32			buffer_depth;
	u32			trigger_cntr;
};

static int etb_set_buffer(struct coresight_device *csdev,
			  struct perf_output_handle *handle);

static inline unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
	return readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
}

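/*
 * __etb_enable_hw - program the ETB for a new capture session: clear the
 * trace RAM, reset both RAM pointers, set the trigger counter, enable
 * formatting with stop-on-trigger and finally set the capture enable bit.
 * Callers hold drvdata->spinlock.
 */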
static void __etb_enable_hw(struct etb_drvdata *drvdata)
{
	int i;
	u32 depth;

	CS_UNLOCK(drvdata->base);

	depth = drvdata->buffer_depth;
	/* reset write RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
	/* clear entire RAM buffer */
	for (i = 0; i < depth; i++)
		writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);

	/* reset write RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
	/* reset read RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
	writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
		       drvdata->base + ETB_FFCR);
	/* ETB trace capture enable */
	writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);

	CS_LOCK(drvdata->base);
}

static int etb_enable_hw(struct etb_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	__etb_enable_hw(drvdata);
	return 0;
}

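/*
 * etb_enable_sysfs - enable the sink from the sysfs interface.  The ETB is
 * exclusive: if a perf session already owns it the call fails with -EBUSY.
 * The hardware is only programmed on the first enable; later calls simply
 * take an extra reference.
 */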
static int etb_enable_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't mess up with perf sessions. */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	if (drvdata->mode == CS_MODE_DISABLED) {
		ret = etb_enable_hw(drvdata);
		if (ret)
			goto out;

		drvdata->mode = CS_MODE_SYSFS;
	}

	atomic_inc(csdev->refcnt);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

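/*
 * etb_enable_perf - enable the sink for a perf session.  A sink can only
 * serve one trace session at a time, identified by the pid of the process
 * that owns the perf event.  If the ETB is free it is claimed for this
 * session, the perf ring buffer geometry is recorded and the hardware is
 * started.
 */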
static int etb_enable_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* No need to continue if the component is already in use by sysfs. */
	if (drvdata->mode == CS_MODE_SYSFS) {
		ret = -EBUSY;
		goto out;
	}

	/* Get a handle on the pid of the process to monitor. */
	pid = buf->pid;

	if (drvdata->pid != -1 && drvdata->pid != pid) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * No HW configuration is needed if the sink is already in
	 * use for this session.
	 */
	if (drvdata->pid == pid) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * We don't have an internal state to clean up if we fail to setup
	 * the perf buffer. So we can perform the step before we turn the
	 * ETB on and leave without cleaning up.
	 */
	ret = etb_set_buffer(csdev, handle);
	if (ret)
		goto out;

	ret = etb_enable_hw(drvdata);
	if (!ret) {
		/* Associate with monitored process. */
		drvdata->pid = pid;
		drvdata->mode = CS_MODE_PERF;
		atomic_inc(csdev->refcnt);
	}

out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = etb_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = etb_enable_perf(csdev, data);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "ETB enabled\n");
	return 0;
}

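/*
 * __etb_disable_hw - stop trace capture: tell the formatter to stop once a
 * flush completes, trigger a manual flush so in-flight trace is pushed to
 * RAM, then clear the capture enable bit and wait for the formatter to
 * report that it has stopped.
 */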
static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
	u32 ffcr;
	struct device *dev = &drvdata->csdev->dev;
	struct csdev_access *csa = &drvdata->csdev->access;

	CS_UNLOCK(drvdata->base);

	ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
	/* stop formatter when a stop has completed */
	ffcr |= ETB_FFCR_STOP_FI;
	writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
	/* manually generate a flush of the system */
	ffcr |= ETB_FFCR_FON_MAN;
	writel_relaxed(ffcr, drvdata->base + ETB_FFCR);

	if (coresight_timeout(csa, ETB_FFCR, ETB_FFCR_BIT, 0)) {
		dev_err(dev,
			"timeout while waiting for completion of Manual Flush\n");
	}

	/* disable trace capture */
	writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);

	if (coresight_timeout(csa, ETB_FFSR, ETB_FFSR_BIT, 1)) {
		dev_err(dev,
			"timeout while waiting for Formatter to Stop\n");
	}

	CS_LOCK(drvdata->base);
}

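/*
 * etb_dump_hw - copy the content of the ETB trace RAM into drvdata->buf.
 * If the RAM has wrapped, reading starts at the write pointer (the oldest
 * data) and a barrier packet is inserted so that decoders can resync.  The
 * unwritten words of a partially filled formatter frame are zeroed.
 */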
static void etb_dump_hw(struct etb_drvdata *drvdata)
{
	bool lost = false;
	int i;
	u8 *buf_ptr;
	u32 read_data, depth;
	u32 read_ptr, write_ptr;
	u32 frame_off, frame_endoff;
	struct device *dev = &drvdata->csdev->dev;

	CS_UNLOCK(drvdata->base);

	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

	frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
	frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
	if (frame_off) {
		dev_err(dev,
			"write_ptr: %lu not aligned to formatter frame size\n",
			(unsigned long)write_ptr);
		dev_err(dev, "frameoff: %lu, frame_endoff: %lu\n",
			(unsigned long)frame_off, (unsigned long)frame_endoff);
		write_ptr += frame_endoff;
	}

	if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
		      & ETB_STATUS_RAM_FULL) == 0) {
		writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
	} else {
		writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
		lost = true;
	}

	depth = drvdata->buffer_depth;
	buf_ptr = drvdata->buf;
	for (i = 0; i < depth; i++) {
		read_data = readl_relaxed(drvdata->base +
					  ETB_RAM_READ_DATA_REG);
		*(u32 *)buf_ptr = read_data;
		buf_ptr += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);

	if (frame_off) {
		buf_ptr -= (frame_endoff * 4);
		for (i = 0; i < frame_endoff; i++) {
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
		}
	}

	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

	CS_LOCK(drvdata->base);
}

static void etb_disable_hw(struct etb_drvdata *drvdata)
{
	__etb_disable_hw(drvdata);
	etb_dump_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}

static int etb_disable(struct coresight_device *csdev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "ETB disabled\n");
	return 0;
}

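/*
 * Perf AUX buffer handling: etb_alloc_buffer() and etb_free_buffer() manage
 * the cs_buffers descriptor that records where in the perf ring buffer the
 * trace data should land.  The descriptor is allocated on the NUMA node of
 * the CPU the event runs on.
 */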
static void *etb_alloc_buffer(struct coresight_device *csdev,
			      struct perf_event *event, void **pages,
			      int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void etb_free_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

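/*
 * etb_set_buffer - prepare the perf sink buffer before the ETB is turned
 * on.  perf hands out space in the AUX ring buffer starting at
 * handle->head; record where that position falls so etb_update_buffer()
 * knows which page and offset to start writing to.
 */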
static int etb_set_buffer(struct coresight_device *csdev,
			  struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

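/*
 * etb_update_buffer - drain the ETB trace RAM into the perf AUX buffer.
 * The sink is stopped, the amount of new trace is derived from the read
 * and write pointers (the whole capacity if the RAM wrapped), clamped to
 * what the perf handle can accept in non-snapshot mode, and copied out one
 * word at a time.  A barrier packet replaces the first words whenever data
 * was lost.  The ETB is then re-armed and the number of bytes copied is
 * returned.
 */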
static unsigned long etb_update_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle,
			      void *sink_config)
{
	bool lost = false;
	int i, cur;
	u8 *buf_ptr;
	const u32 *barrier;
	u32 read_ptr, write_ptr, capacity;
	u32 status, read_data;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	__etb_disable_hw(drvdata);
	CS_UNLOCK(drvdata->base);

	/* unit is in words, not bytes */
	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

	/*
	 * Entries should be aligned to the frame size.  If they are not
	 * go back to the last alignment point to give decoding tools a
	 * chance to fix things.
	 */
	if (write_ptr % ETB_FRAME_SIZE_WORDS) {
		dev_err(&csdev->dev,
			"write_ptr: %lu not aligned to formatter frame size\n",
			(unsigned long)write_ptr);

		write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
		lost = true;
	}

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly, otherwise
	 * start at the beginning and go until the write pointer has
	 * been reached.
	 */
	status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
	if (status & ETB_STATUS_RAM_FULL) {
		lost = true;
		to_read = capacity;
		read_ptr = write_ptr;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
		to_read *= ETB_FRAME_SIZE_WORDS;
	}

	/*
	 * Make sure we don't overwrite data that hasn't been consumed yet.
	 * It is entirely possible that the HW buffer has more data than the
	 * ring buffer can currently handle.  If so adjust the start address
	 * to take only the last traces.
	 *
	 * In snapshot mode we are looking to get the latest traces only and as
	 * such, we don't care about not overwriting data that hasn't been
	 * processed by user space.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);

		/* The new read pointer must be frame size aligned */
		to_read = handle->size & mask;
		/*
		 * Move the RAM read pointer up, keeping in mind that
		 * everything is in frame size units.
		 */
		read_ptr = (write_ptr + drvdata->buffer_depth) -
					to_read / ETB_FRAME_SIZE_WORDS;
		/* Wrap around if need be */
		if (read_ptr > (drvdata->buffer_depth - 1))
			read_ptr -= drvdata->buffer_depth;
		/* let the decoder know we've skipped ahead */
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	/* finally tell HW where we want to start reading from */
	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		read_data = readl_relaxed(drvdata->base +
					  ETB_RAM_READ_DATA_REG);
		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			read_data = *barrier;
			barrier++;
		}

		*(u32 *)buf_ptr = read_data;
		buf_ptr += 4;

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/* reset ETB buffer for next run */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);

	/*
	 * In snapshot mode simply advance the head by the number of bytes
	 * that were copied; user space works out how much to read from the
	 * AUX buffer based on the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	__etb_enable_hw(drvdata);
	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static const struct coresight_ops_sink etb_sink_ops = {
	.enable		= etb_enable,
	.disable	= etb_disable,
	.alloc_buffer	= etb_alloc_buffer,
	.free_buffer	= etb_free_buffer,
	.update_buffer	= etb_update_buffer,
};

static const struct coresight_ops etb_cs_ops = {
	.sink_ops	= &etb_sink_ops,
};

static void etb_dump(struct etb_drvdata *drvdata)
{
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->mode == CS_MODE_SYSFS) {
		__etb_disable_hw(drvdata);
		etb_dump_hw(drvdata);
		__etb_enable_hw(drvdata);
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&drvdata->csdev->dev, "ETB dumped\n");
}

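/*
 * Character device interface: when the sink is driven from sysfs, the
 * buffer captured by etb_dump() can be read from user space through the
 * misc device registered at probe time.  For example (the device name
 * depends on how the platform enumerates, e.g. etb0):
 *
 *   dd if=/dev/etb0 of=trace.bin
 *
 * Only one reader at a time is allowed, enforced with drvdata->reading.
 */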
static int etb_open(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);

	if (local_cmpxchg(&drvdata->reading, 0, 1))
		return -EBUSY;

	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
	return 0;
}

static ssize_t etb_read(struct file *file, char __user *data,
			size_t len, loff_t *ppos)
{
	u32 depth;
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);
	struct device *dev = &drvdata->csdev->dev;

	etb_dump(drvdata);

	depth = drvdata->buffer_depth;
	if (*ppos + len > depth * 4)
		len = depth * 4 - *ppos;

	if (copy_to_user(data, drvdata->buf + *ppos, len)) {
		dev_dbg(dev, "%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += len;

	dev_dbg(dev, "%s: %zu bytes copied, %d bytes left\n",
		__func__, len, (int)(depth * 4 - *ppos));
	return len;
}

static int etb_release(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);
	local_set(&drvdata->reading, 0);

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}

static const struct file_operations etb_fops = {
	.owner		= THIS_MODULE,
	.open		= etb_open,
	.read		= etb_read,
	.release	= etb_release,
	.llseek		= no_llseek,
};

#define coresight_etb10_reg(name, offset)		\
	coresight_simple_reg32(struct etb_drvdata, name, offset)

coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
coresight_etb10_reg(sts, ETB_STATUS_REG);
coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
coresight_etb10_reg(trg, ETB_TRG);
coresight_etb10_reg(ctl, ETB_CTL_REG);
coresight_etb10_reg(ffsr, ETB_FFSR);
coresight_etb10_reg(ffcr, ETB_FFCR);

static struct attribute *coresight_etb_mgmt_attrs[] = {
	&dev_attr_rdp.attr,
	&dev_attr_sts.attr,
	&dev_attr_rrp.attr,
	&dev_attr_rwp.attr,
	&dev_attr_trg.attr,
	&dev_attr_ctl.attr,
	&dev_attr_ffsr.attr,
	&dev_attr_ffcr.attr,
	NULL,
};

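/*
 * trigger_cntr (sysfs, RW): number of 32-bit words the ETB keeps capturing
 * after a trigger event; the value is programmed into ETB_TRG when the sink
 * is enabled.  It is parsed as hexadecimal, for example (the sysfs path
 * depends on the device name):
 *
 *   echo 0x80 > /sys/bus/coresight/devices/etb0/trigger_cntr
 */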
static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static struct attribute *coresight_etb_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	NULL,
};

static const struct attribute_group coresight_etb_group = {
	.attrs = coresight_etb_attrs,
};

static const struct attribute_group coresight_etb_mgmt_group = {
	.attrs = coresight_etb_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group *coresight_etb_groups[] = {
	&coresight_etb_group,
	&coresight_etb_mgmt_group,
	NULL,
};

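/*
 * etb_probe - map the device registers, read the RAM depth to size the
 * dump buffer, register the device with the coresight framework and expose
 * the captured buffer to user space through a misc character device.
 */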
static int etb_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etb_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };

	desc.name = coresight_alloc_device_name(&etb_devs, dev);
	if (!desc.name)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}
	dev_set_drvdata(dev, drvdata);

	/* validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	spin_lock_init(&drvdata->spinlock);

	drvdata->buffer_depth = etb_get_buffer_depth(drvdata);

	if (drvdata->buffer_depth & 0x80000000)
		return -EINVAL;

	drvdata->buf = devm_kcalloc(dev,
				    drvdata->buffer_depth, 4, GFP_KERNEL);
	if (!drvdata->buf)
		return -ENOMEM;

	/* This device is not associated with a session */
	drvdata->pid = -1;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);
	adev->dev.platform_data = pdata;

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
	desc.ops = &etb_cs_ops;
	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = coresight_etb_groups;
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &etb_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		goto err_misc_register;

	pm_runtime_put(&adev->dev);
	return 0;

err_misc_register:
	coresight_unregister(drvdata->csdev);
	return ret;
}

static void etb_remove(struct amba_device *adev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	/*
	 * Since misc_open() holds a refcount on the f_ops, which is
	 * etb fops in this case, device is there until last file
	 * handler to this device is closed.
	 */
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);
}

#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);

	return 0;
}

static int etb_runtime_resume(struct device *dev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_prepare_enable(drvdata->atclk);

	return 0;
}
#endif

static const struct dev_pm_ops etb_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
};

static const struct amba_id etb_ids[] = {
	{
		.id	= 0x000bb907,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, etb_ids);

static struct amba_driver etb_driver = {
	.drv = {
		.name	= "coresight-etb10",
		.owner	= THIS_MODULE,
		.pm	= &etb_dev_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe		= etb_probe,
	.remove		= etb_remove,
	.id_table	= etb_ids,
};

module_amba_driver(etb_driver);

MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
MODULE_DESCRIPTION("Arm CoreSight Embedded Trace Buffer driver");
MODULE_LICENSE("GPL v2");