/* Qualcomm ADM DMA engine driver */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* ADM registers, computed from channel number and security domain (EE) */
#define ADM_CHAN_MULTI 0x4
#define ADM_CI_MULTI 0x4
#define ADM_CRCI_MULTI 0x4
#define ADM_EE_MULTI 0x800
#define ADM_CHAN_OFFS(chan) (ADM_CHAN_MULTI * (chan))
#define ADM_EE_OFFS(ee) (ADM_EE_MULTI * (ee))
#define ADM_CHAN_EE_OFFS(chan, ee) (ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
#define ADM_CI_OFFS(ci) (ADM_CHAN_OFFS(ci))
#define ADM_CH_CMD_PTR(chan, ee) (ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_RSLT(chan, ee) (0x40 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_FLUSH_STATE0(chan, ee) (0x80 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_STATUS_SD(chan, ee) (0x200 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_CONF(chan) (0x240 + ADM_CHAN_OFFS(chan))
#define ADM_CH_RSLT_CONF(chan, ee) (0x300 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_SEC_DOMAIN_IRQ_STATUS(ee) (0x380 + ADM_EE_OFFS(ee))
#define ADM_CI_CONF(ci) (0x390 + (ci) * ADM_CI_MULTI)
#define ADM_GP_CTL 0x3d8
#define ADM_CRCI_CTL(crci, ee) (0x400 + (crci) * ADM_CRCI_MULTI + \
                                ADM_EE_OFFS(ee))

/* channel status */
#define ADM_CH_STATUS_VALID BIT(1)

/* channel result */
#define ADM_CH_RSLT_VALID BIT(31)
#define ADM_CH_RSLT_ERR BIT(3)
#define ADM_CH_RSLT_FLUSH BIT(2)
#define ADM_CH_RSLT_TPD BIT(1)

/* channel configuration */
#define ADM_CH_CONF_SHADOW_EN BIT(12)
#define ADM_CH_CONF_MPU_DISABLE BIT(11)
#define ADM_CH_CONF_PERM_MPU_CONF BIT(9)
#define ADM_CH_CONF_FORCE_RSLT_EN BIT(7)
#define ADM_CH_CONF_SEC_DOMAIN(ee) ((((ee) & 0x3) << 4) | (((ee) & 0x4) << 11))

/* channel result configuration */
#define ADM_CH_RSLT_CONF_FLUSH_EN BIT(1)
#define ADM_CH_RSLT_CONF_IRQ_EN BIT(0)

/* CRCI control */
#define ADM_CRCI_CTL_MUX_SEL BIT(18)
#define ADM_CRCI_CTL_RST BIT(17)

/* client interface (CI) configuration */
#define ADM_CI_RANGE_END(x) ((x) << 24)
#define ADM_CI_RANGE_START(x) ((x) << 16)
#define ADM_CI_BURST_4_WORDS BIT(2)
#define ADM_CI_BURST_8_WORDS BIT(3)

/* GP control */
#define ADM_GP_CTL_LP_EN BIT(12)
#define ADM_GP_CTL_LP_CNT(x) ((x) << 8)

/* command pointer list entry */
#define ADM_CPLE_LP BIT(31)
#define ADM_CPLE_CMD_PTR_LIST BIT(29)

/* command list entry */
#define ADM_CMD_LC BIT(31)
#define ADM_CMD_DST_CRCI(n) (((n) & 0xf) << 7)
#define ADM_CMD_SRC_CRCI(n) (((n) & 0xf) << 3)

#define ADM_CMD_TYPE_SINGLE 0x0
#define ADM_CMD_TYPE_BOX 0x3

#define ADM_CRCI_MUX_SEL BIT(4)
#define ADM_DESC_ALIGN 8
#define ADM_MAX_XFER (SZ_64K - 1)
#define ADM_MAX_ROWS (SZ_64K - 1)
#define ADM_MAX_CHANNELS 16

/* ADM hardware descriptor for a box-mode (flow controlled) transfer */
struct adm_desc_hw_box {
        u32 cmd;
        u32 src_addr;
        u32 dst_addr;
        u32 row_len;
        u32 num_rows;
        u32 row_offset;
};

/* ADM hardware descriptor for a single-mode transfer */
struct adm_desc_hw_single {
        u32 cmd;
        u32 src_addr;
        u32 dst_addr;
        u32 len;
};

struct adm_async_desc {
        struct virt_dma_desc vd;
        struct adm_device *adev;

        size_t length;
        enum dma_transfer_direction dir;
        dma_addr_t dma_addr;
        size_t dma_len;

        void *cpl;
        dma_addr_t cp_addr;
        u32 crci;
        u32 mux;
        u32 blk_size;
};

struct adm_chan {
        struct virt_dma_chan vc;
        struct adm_device *adev;

        u32 id;

        struct adm_async_desc *curr_txd;
        struct dma_slave_config slave;
        struct list_head node;

        int error;
        int initialized;
};

static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
{
        return container_of(common, struct adm_chan, vc.chan);
}

struct adm_device {
        void __iomem *regs;
        struct device *dev;
        struct dma_device common;
        struct device_dma_parameters dma_parms;
        struct adm_chan *channels;

        u32 ee;

        struct clk *core_clk;
        struct clk *iface_clk;

        struct reset_control *clk_reset;
        struct reset_control *c0_reset;
        struct reset_control *c1_reset;
        struct reset_control *c2_reset;
        int irq;
};
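
/**
 * adm_free_chan - Frees dma resources associated with the specific channel
 * @chan: dma channel
 *
 * Free all queued descriptors associated with this channel.
 */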
static void adm_free_chan(struct dma_chan *chan)
{
        /* free all queued descriptors */
        vchan_free_chan_resources(to_virt_chan(chan));
}
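
/**
 * adm_get_blksize - Get block size encoding from burst value
 * @burst: burst size of transaction
 *
 * Returns the CRCI block size encoding for the given burst length, or a
 * negative errno for unsupported bursts.
 */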
static int adm_get_blksize(unsigned int burst)
{
        int ret;

        switch (burst) {
        case 16:
        case 32:
        case 64:
        case 128:
                ret = ffs(burst >> 4) - 1;
                break;
        case 192:
                ret = 4;
                break;
        case 256:
                ret = 5;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
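
/**
 * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
 * @achan: ADM channel
 * @desc: descriptor memory to fill in
 * @sg: scatterlist entry being processed
 * @crci: CRCI value
 * @burst: burst size of transaction
 * @direction: DMA transfer direction
 *
 * Builds box descriptors (plus a trailing single descriptor for any
 * remainder) for one scatterlist entry and returns the next free location
 * in the descriptor memory.
 */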
static void *adm_process_fc_descriptors(struct adm_chan *achan, void *desc,
                                        struct scatterlist *sg, u32 crci,
                                        u32 burst,
                                        enum dma_transfer_direction direction)
{
        struct adm_desc_hw_box *box_desc = NULL;
        struct adm_desc_hw_single *single_desc;
        u32 remainder = sg_dma_len(sg);
        u32 rows, row_offset, crci_cmd;
        u32 mem_addr = sg_dma_address(sg);
        u32 *incr_addr = &mem_addr;
        u32 *src, *dst;

        if (direction == DMA_DEV_TO_MEM) {
                crci_cmd = ADM_CMD_SRC_CRCI(crci);
                row_offset = burst;
                src = &achan->slave.src_addr;
                dst = &mem_addr;
        } else {
                crci_cmd = ADM_CMD_DST_CRCI(crci);
                row_offset = burst << 16;
                src = &mem_addr;
                dst = &achan->slave.dst_addr;
        }

        while (remainder >= burst) {
                box_desc = desc;
                box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
                box_desc->row_offset = row_offset;
                box_desc->src_addr = *src;
                box_desc->dst_addr = *dst;

                rows = remainder / burst;
                rows = min_t(u32, rows, ADM_MAX_ROWS);
                box_desc->num_rows = rows << 16 | rows;
                box_desc->row_len = burst << 16 | burst;

                *incr_addr += burst * rows;
                remainder -= burst * rows;
                desc += sizeof(*box_desc);
        }

        /* if leftover exists, queue a single descriptor for the remainder */
        if (remainder) {
                single_desc = desc;
                single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
                single_desc->len = remainder;
                single_desc->src_addr = *src;
                single_desc->dst_addr = *dst;
                desc += sizeof(*single_desc);

                if (sg_is_last(sg))
                        single_desc->cmd |= ADM_CMD_LC;
        } else {
                if (box_desc && sg_is_last(sg))
                        box_desc->cmd |= ADM_CMD_LC;
        }

        return desc;
}
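
/**
 * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
 * @achan: ADM channel
 * @desc: descriptor memory to fill in
 * @sg: scatterlist entry being processed
 * @direction: DMA transfer direction
 *
 * Builds single-mode descriptors for one scatterlist entry and returns the
 * next free location in the descriptor memory.
 */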
static void *adm_process_non_fc_descriptors(struct adm_chan *achan, void *desc,
                                            struct scatterlist *sg,
                                            enum dma_transfer_direction direction)
{
        struct adm_desc_hw_single *single_desc;
        u32 remainder = sg_dma_len(sg);
        u32 mem_addr = sg_dma_address(sg);
        u32 *incr_addr = &mem_addr;
        u32 *src, *dst;

        if (direction == DMA_DEV_TO_MEM) {
                src = &achan->slave.src_addr;
                dst = &mem_addr;
        } else {
                src = &mem_addr;
                dst = &achan->slave.dst_addr;
        }

        do {
                single_desc = desc;
                single_desc->cmd = ADM_CMD_TYPE_SINGLE;
                single_desc->src_addr = *src;
                single_desc->dst_addr = *dst;
                single_desc->len = (remainder > ADM_MAX_XFER) ?
                                ADM_MAX_XFER : remainder;

                remainder -= single_desc->len;
                *incr_addr += single_desc->len;
                desc += sizeof(*single_desc);
        } while (remainder);

        /* set last-command flag if this is the last scatterlist entry */
        if (sg_is_last(sg))
                single_desc->cmd |= ADM_CMD_LC;

        return desc;
}
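
/**
 * adm_prep_slave_sg - Prep slave sg transaction
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */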
static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
                                                         struct scatterlist *sgl,
                                                         unsigned int sg_len,
                                                         enum dma_transfer_direction direction,
                                                         unsigned long flags,
                                                         void *context)
{
        struct adm_chan *achan = to_adm_chan(chan);
        struct adm_device *adev = achan->adev;
        struct adm_async_desc *async_desc;
        struct scatterlist *sg;
        dma_addr_t cple_addr;
        u32 i, burst;
        u32 single_count = 0, box_count = 0, crci = 0;
        void *desc;
        u32 *cple;
        int blk_size = 0;

        if (!is_slave_direction(direction)) {
                dev_err(adev->dev, "invalid dma direction\n");
                return NULL;
        }

        /* get burst value from slave configuration */
        burst = (direction == DMA_MEM_TO_DEV) ?
                achan->slave.dst_maxburst :
                achan->slave.src_maxburst;

        /* if using flow control, validate burst and crci values */
        if (achan->slave.device_fc) {
                blk_size = adm_get_blksize(burst);
                if (blk_size < 0) {
                        dev_err(adev->dev, "invalid burst value: %d\n",
                                burst);
                        return ERR_PTR(-EINVAL);
                }

                crci = achan->slave.slave_id & 0xf;
                if (!crci || achan->slave.slave_id > 0x1f) {
                        dev_err(adev->dev, "invalid crci value\n");
                        return ERR_PTR(-EINVAL);
                }
        }

        /* iterate through sgs and compute allocation size of structures */
        for_each_sg(sgl, sg, sg_len, i) {
                if (achan->slave.device_fc) {
                        box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
                                                  ADM_MAX_ROWS);
                        if (sg_dma_len(sg) % burst)
                                single_count++;
                } else {
                        single_count += DIV_ROUND_UP(sg_dma_len(sg),
                                                     ADM_MAX_XFER);
                }
        }

        async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
        if (!async_desc)
                return ERR_PTR(-ENOMEM);

        if (crci)
                async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
                                        ADM_CRCI_CTL_MUX_SEL : 0;
        async_desc->crci = crci;
        async_desc->blk_size = blk_size;
        async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
                              box_count * sizeof(struct adm_desc_hw_box) +
                              sizeof(*cple) + 2 * ADM_DESC_ALIGN;

        async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
        if (!async_desc->cpl)
                goto free;

        async_desc->adev = adev;

        /* both command list entry and descriptors must be 8 byte aligned */
        cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
        desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);

        for_each_sg(sgl, sg, sg_len, i) {
                async_desc->length += sg_dma_len(sg);

                if (achan->slave.device_fc)
                        desc = adm_process_fc_descriptors(achan, desc, sg, crci,
                                                          burst, direction);
                else
                        desc = adm_process_non_fc_descriptors(achan, desc, sg,
                                                              direction);
        }

        async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
                                              async_desc->dma_len,
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(adev->dev, async_desc->dma_addr))
                goto free;

        cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);

        /* init command list entry to point at the first descriptor */
        dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple),
                                DMA_TO_DEVICE);
        *cple = ADM_CPLE_LP;
        *cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;
        dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple),
                                   DMA_TO_DEVICE);

        return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);

free:
        kfree(async_desc->cpl);
        kfree(async_desc);
        return ERR_PTR(-ENOMEM);
}
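
/**
 * adm_terminate_all - terminate all transactions on a channel
 * @chan: dma channel
 *
 * Dequeues and frees all transactions and aborts the current transaction.
 * No callbacks are done on the freed descriptors.
 */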
static int adm_terminate_all(struct dma_chan *chan)
{
        struct adm_chan *achan = to_adm_chan(chan);
        struct adm_device *adev = achan->adev;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&achan->vc.lock, flags);
        vchan_get_all_descriptors(&achan->vc, &head);

        /* send flush command to terminate the current transaction */
        writel_relaxed(0x0,
                       adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));

        spin_unlock_irqrestore(&achan->vc.lock, flags);

        vchan_dma_desc_free_list(&achan->vc, &head);

        return 0;
}

static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
        struct adm_chan *achan = to_adm_chan(chan);
        unsigned long flag;

        spin_lock_irqsave(&achan->vc.lock, flag);
        memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
        spin_unlock_irqrestore(&achan->vc.lock, flag);

        return 0;
}
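
/**
 * adm_start_dma - start the next queued transaction
 * @achan: ADM dma channel
 */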
static void adm_start_dma(struct adm_chan *achan)
{
        struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
        struct adm_device *adev = achan->adev;
        struct adm_async_desc *async_desc;

        lockdep_assert_held(&achan->vc.lock);

        if (!vd)
                return;

        list_del(&vd->node);

        async_desc = container_of(vd, struct adm_async_desc, vd);
        achan->curr_txd = async_desc;

        /* reset channel error */
        achan->error = 0;

        if (!achan->initialized) {
                /* configure the channel and enable completion interrupts */
                writel(ADM_CH_CONF_SHADOW_EN |
                       ADM_CH_CONF_PERM_MPU_CONF |
                       ADM_CH_CONF_MPU_DISABLE |
                       ADM_CH_CONF_SEC_DOMAIN(adev->ee),
                       adev->regs + ADM_CH_CONF(achan->id));

                writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
                       adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

                achan->initialized = 1;
        }

        /* set the crci block size if this transaction requires CRCI */
        if (async_desc->crci) {
                writel(async_desc->mux | async_desc->blk_size,
                       adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
        }

        /* make sure descriptor writes complete before starting the transfer */
        wmb();

        /* write next command list out to the CMD FIFO */
        writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
               adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
}
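
/**
 * adm_dma_irq - irq handler for ADM controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * Checks each channel's interrupt status, completes the finished
 * descriptor, and kicks off any pending transaction on that channel.
 */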
static irqreturn_t adm_dma_irq(int irq, void *data)
{
        struct adm_device *adev = data;
        u32 srcs, i;
        struct adm_async_desc *async_desc;
        unsigned long flags;

        srcs = readl_relaxed(adev->regs +
                        ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));

        for (i = 0; i < ADM_MAX_CHANNELS; i++) {
                struct adm_chan *achan = &adev->channels[i];
                u32 status, result;

                if (srcs & BIT(i)) {
                        status = readl_relaxed(adev->regs +
                                               ADM_CH_STATUS_SD(i, adev->ee));

                        /* if no result present, skip */
                        if (!(status & ADM_CH_STATUS_VALID))
                                continue;

                        result = readl_relaxed(adev->regs +
                                               ADM_CH_RSLT(i, adev->ee));

                        /* no valid results, skip */
                        if (!(result & ADM_CH_RSLT_VALID))
                                continue;

                        /* flag an error if the transaction failed or was flushed */
                        if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
                                achan->error = 1;

                        spin_lock_irqsave(&achan->vc.lock, flags);
                        async_desc = achan->curr_txd;

                        achan->curr_txd = NULL;

                        if (async_desc) {
                                vchan_cookie_complete(&async_desc->vd);

                                /* kick off next DMA */
                                adm_start_dma(achan);
                        }

                        spin_unlock_irqrestore(&achan->vc.lock, flags);
                }
        }

        return IRQ_HANDLED;
}
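
/**
 * adm_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction.
 */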
static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)
{
        struct adm_chan *achan = to_adm_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;
        size_t residue = 0;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&achan->vc.lock, flags);

        vd = vchan_find_desc(&achan->vc, cookie);
        if (vd)
                residue = container_of(vd, struct adm_async_desc, vd)->length;

        spin_unlock_irqrestore(&achan->vc.lock, flags);

        /*
         * residue is the full descriptor length if the transaction is still
         * in the issued list, or 0 otherwise; partial progress is not
         * reported (descriptor granularity only).
         */
        dma_set_residue(txstate, residue);

        if (achan->error)
                return DMA_ERROR;

        return ret;
}
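
/**
 * adm_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Issues all pending transactions and starts DMA if the channel is idle.
 */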
static void adm_issue_pending(struct dma_chan *chan)
{
        struct adm_chan *achan = to_adm_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&achan->vc.lock, flags);

        if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
                adm_start_dma(achan);
        spin_unlock_irqrestore(&achan->vc.lock, flags);
}
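
/**
 * adm_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor to free
 *
 * Unmaps and frees the command pointer list / descriptor memory.
 */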
static void adm_dma_free_desc(struct virt_dma_desc *vd)
{
        struct adm_async_desc *async_desc = container_of(vd,
                        struct adm_async_desc, vd);

        dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
                         async_desc->dma_len, DMA_TO_DEVICE);
        kfree(async_desc->cpl);
        kfree(async_desc);
}

static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
                             u32 index)
{
        achan->id = index;
        achan->adev = adev;

        vchan_init(&achan->vc, &adev->common);
        achan->vc.desc_free = adm_dma_free_desc;
}

static int adm_dma_probe(struct platform_device *pdev)
{
        struct adm_device *adev;
        int ret;
        u32 i;

        adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
        if (!adev)
                return -ENOMEM;

        adev->dev = &pdev->dev;

        adev->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(adev->regs))
                return PTR_ERR(adev->regs);

        adev->irq = platform_get_irq(pdev, 0);
        if (adev->irq < 0)
                return adev->irq;

        ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
        if (ret) {
                dev_err(adev->dev, "Execution environment unspecified\n");
                return ret;
        }

        adev->core_clk = devm_clk_get(adev->dev, "core");
        if (IS_ERR(adev->core_clk))
                return PTR_ERR(adev->core_clk);

        adev->iface_clk = devm_clk_get(adev->dev, "iface");
        if (IS_ERR(adev->iface_clk))
                return PTR_ERR(adev->iface_clk);

        adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk");
        if (IS_ERR(adev->clk_reset)) {
                dev_err(adev->dev, "failed to get ADM0 reset\n");
                return PTR_ERR(adev->clk_reset);
        }

        adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0");
        if (IS_ERR(adev->c0_reset)) {
                dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
                return PTR_ERR(adev->c0_reset);
        }

        adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1");
        if (IS_ERR(adev->c1_reset)) {
                dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
                return PTR_ERR(adev->c1_reset);
        }

        adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2");
        if (IS_ERR(adev->c2_reset)) {
                dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
                return PTR_ERR(adev->c2_reset);
        }

        ret = clk_prepare_enable(adev->core_clk);
        if (ret) {
                dev_err(adev->dev, "failed to prepare/enable core clock\n");
                return ret;
        }

        ret = clk_prepare_enable(adev->iface_clk);
        if (ret) {
                dev_err(adev->dev, "failed to prepare/enable iface clock\n");
                goto err_disable_core_clk;
        }

        reset_control_assert(adev->clk_reset);
        reset_control_assert(adev->c0_reset);
        reset_control_assert(adev->c1_reset);
        reset_control_assert(adev->c2_reset);

        udelay(2);

        reset_control_deassert(adev->clk_reset);
        reset_control_deassert(adev->c0_reset);
        reset_control_deassert(adev->c1_reset);
        reset_control_deassert(adev->c2_reset);

        adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
                                      sizeof(*adev->channels), GFP_KERNEL);

        if (!adev->channels) {
                ret = -ENOMEM;
                goto err_disable_clks;
        }

        INIT_LIST_HEAD(&adev->common.channels);

        for (i = 0; i < ADM_MAX_CHANNELS; i++)
                adm_channel_init(adev, &adev->channels[i], i);

        /* reset CRCIs */
        for (i = 0; i < 16; i++)
                writel(ADM_CRCI_CTL_RST, adev->regs +
                        ADM_CRCI_CTL(i, adev->ee));

        /* configure client interfaces */
        writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
               ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
        writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
               ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
        writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
               ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
        writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
               adev->regs + ADM_GP_CTL);

        ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
                               0, "adm_dma", adev);
        if (ret)
                goto err_disable_clks;

        platform_set_drvdata(pdev, adev);

        adev->common.dev = adev->dev;
        adev->common.dev->dma_parms = &adev->dma_parms;

        /* set capabilities */
        dma_cap_zero(adev->common.cap_mask);
        dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
        dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);

        /* initialize dmaengine apis */
        adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        adev->common.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        adev->common.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        adev->common.device_free_chan_resources = adm_free_chan;
        adev->common.device_prep_slave_sg = adm_prep_slave_sg;
        adev->common.device_issue_pending = adm_issue_pending;
        adev->common.device_tx_status = adm_tx_status;
        adev->common.device_terminate_all = adm_terminate_all;
        adev->common.device_config = adm_slave_config;

        ret = dma_async_device_register(&adev->common);
        if (ret) {
                dev_err(adev->dev, "failed to register dma async device\n");
                goto err_disable_clks;
        }

        ret = of_dma_controller_register(pdev->dev.of_node,
                                         of_dma_xlate_by_chan_id,
                                         &adev->common);
        if (ret)
                goto err_unregister_dma;

        return 0;

err_unregister_dma:
        dma_async_device_unregister(&adev->common);
err_disable_clks:
        clk_disable_unprepare(adev->iface_clk);
err_disable_core_clk:
        clk_disable_unprepare(adev->core_clk);

        return ret;
}

static int adm_dma_remove(struct platform_device *pdev)
{
        struct adm_device *adev = platform_get_drvdata(pdev);
        struct adm_chan *achan;
        u32 i;

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&adev->common);

        for (i = 0; i < ADM_MAX_CHANNELS; i++) {
                achan = &adev->channels[i];

                /* mask IRQs for this channel/EE pair */
                writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

                tasklet_kill(&adev->channels[i].vc.task);
                adm_terminate_all(&adev->channels[i].vc.chan);
        }

        devm_free_irq(adev->dev, adev->irq, adev);

        clk_disable_unprepare(adev->core_clk);
        clk_disable_unprepare(adev->iface_clk);

        return 0;
}

static const struct of_device_id adm_of_match[] = {
        { .compatible = "qcom,adm", },
        {}
};
MODULE_DEVICE_TABLE(of, adm_of_match);

static struct platform_driver adm_dma_driver = {
        .probe = adm_dma_probe,
        .remove = adm_dma_remove,
        .driver = {
                .name = "adm-dma-engine",
                .of_match_table = adm_of_match,
        },
};

module_platform_driver(adm_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
MODULE_LICENSE("GPL v2");