// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

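/*
 * Allocate a burst descriptor and link it into the chunk's burst list.
 * The first burst allocated for a chunk becomes the list head; later
 * bursts are appended and counted in chunk->bursts_alloc.
 */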
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

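/*
 * Allocate a chunk and attach it to the descriptor. Each chunk owns the
 * linked-list (LL) memory region that matches the channel direction,
 * plus its own burst list.
 */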
static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma *dw = chan->chip->dw;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/*
	 * Toggle the change bit (CB) on each new chunk, so that the eDMA
	 * hardware can tell consecutive chunks apart as it walks the
	 * linked list.
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	if (chan->dir == EDMA_DIR_WRITE) {
		chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
		chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
	} else {
		chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
		chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
	}

	if (desc->chunk) {
		/* Create and add new element into the list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

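/*
 * Free every burst attached to a chunk. The loop releases all elements
 * linked after the head; once it terminates, "child" points at the head
 * element (chunk->burst) itself, which is freed separately afterwards.
 */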
static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

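/*
 * Kick off the next chunk of the descriptor at the head of the virtual
 * channel's queue: program it into the hardware, account for the bytes
 * it will transfer, and release the chunk's bookkeeping structures.
 */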
static void dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return;

	/* A zero xfer_sz means this is the first chunk of the transfer */
	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

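/*
 * Tear down the channel state. Idle and paused channels are released
 * immediately; a channel that the hardware already reports complete is
 * treated as idle; otherwise a stop request is queued for the interrupt
 * handler to act on.
 */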
static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;
	LIST_HEAD(head);

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state, probably didn't
		 * receive or lost an interrupt
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (chan->configured && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}

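/*
 * Common preparation path for all transfer types. Validates the
 * requested direction against the channel configuration, then builds
 * the chunk/burst lists that describe the transfer before handing the
 * descriptor to the virt-dma layer.
 */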
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt = 0;
	int i;

	if (!chan->configured)
		return NULL;

	switch (chan->config.direction) {
	case DMA_DEV_TO_MEM: /* local DMA */
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
			break;
		return NULL;
	case DMA_MEM_TO_DEV: /* local DMA */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	default: /* remote DMA */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
			break;
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (!xfer->xfer.il->numf)
			return NULL;
		if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
			return NULL;
	} else {
		return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	if (xfer->type == EDMA_XFER_INTERLEAVED) {
		src_addr = xfer->xfer.il->src_start;
		dst_addr = xfer->xfer.il->dst_start;
	} else {
		src_addr = chan->config.src_addr;
		dst_addr = chan->config.dst_addr;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		cnt = xfer->xfer.cyclic.cnt;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (xfer->xfer.il->numf > 0)
			cnt = xfer->xfer.il->numf;
		else
			cnt = xfer->xfer.il->frame_size;
	}

	for (i = 0; i < cnt; i++) {
		if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->type == EDMA_XFER_CYCLIC)
			burst->sz = xfer->xfer.cyclic.len;
		else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
			burst->sz = sg_dma_len(sg);
		else if (xfer->type == EDMA_XFER_INTERLEAVED)
			burst->sz = xfer->xfer.il->sgl[i].size;

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (chan->dir == EDMA_DIR_WRITE) {
			burst->sar = src_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				src_addr += sg_dma_len(sg);
				burst->dar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO memory; in this case it's linear
				 * memory, which is why the source and
				 * destination addresses are swapped.
				 */
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				dst_addr += sg_dma_len(sg);
				burst->sar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO memory; in this case it's linear
				 * memory, which is why the source and
				 * destination addresses are swapped.
				 */
			}
		}

		if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
			sg = sg_next(sg);
		} else if (xfer->type == EDMA_XFER_INTERLEAVED &&
			   xfer->xfer.il->frame_size > 0) {
			struct dma_interleaved_template *il = xfer->xfer.il;
			struct data_chunk *dc = &il->sgl[i];

			if (il->src_sgl) {
				src_addr += burst->sz;
				src_addr += dmaengine_get_src_icg(il, dc);
			}

			if (il->dst_sgl) {
				dst_addr += burst->sz;
				dst_addr += dmaengine_get_dst_icg(il, dc);
			}
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_SCATTER_GATHER;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_CYCLIC;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
				    struct dma_interleaved_template *ilt,
				    unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = ilt->dir;
	xfer.xfer.il = ilt;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_INTERLEAVED;

	return dw_edma_device_transfer(&xfer);
}

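/*
 * Handle a "done" interrupt: either start the next chunk of the current
 * descriptor, complete the descriptor, or honour a pending stop/pause
 * request.
 */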
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

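/*
 * Shared bottom half for write and read interrupts: fetch the done and
 * abort status bits for the requested direction, mask them against the
 * channels owned by this IRQ, and dispatch the per-channel handlers.
 */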
static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
						  EDMA_DIR_WRITE :
						  EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
						   EDMA_DIR_WRITE :
						   EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

	/* Keep retrying termination until the channel drains or we time out */
	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->chip->dev);
}

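/*
 * Initialize the dma_device and every channel of one direction (write
 * or read): linked-list sizing, IRQ mask assignment, virt-dma channel
 * registration, and the dmaengine capability/callback setup.
 */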
static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	struct dw_edma_chan *chan;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 alloc, off_alloc;
	u32 i, j, cnt;
	int err = 0;
	u32 pos;

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->chip = chip;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		if (write)
			chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
		else
			chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
		chan->ll_max -= 1;

		dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
			 write ? "write" : "read", j, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		if (write) {
			dt_region->paddr = dw->dt_region_wr[j].paddr;
			dt_region->vaddr = dw->dt_region_wr[j].vaddr;
			dt_region->sz = dw->dt_region_wr[j].sz;
		} else {
			dt_region->paddr = dw->dt_region_rd[j].paddr;
			dt_region->vaddr = dw->dt_region_rd[j].vaddr;
			dt_region->sz = dw->dt_region_rd[j].sz;
		}

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma->chancnt = cnt;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
	dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}

static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}

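/*
 * Request the platform IRQ vectors. A single vector is shared by all
 * channels; with multiple vectors, the write and read channels are
 * spread across them as evenly as possible.
 */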
static int dw_edma_irq_request(struct dw_edma_chip *chip,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (dw->nr_irqs < 1)
		return -EINVAL;

	if (dw->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = dw->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = dw->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = dw->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				dw->nr_irqs = i;
				return err;
			}

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}

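/*
 * Probe entry point for the controller glue drivers: validates the chip
 * description, sizes the channel arrays, requests IRQs, and registers
 * one dma_device per direction (write and read).
 */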
int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev;
	struct dw_edma *dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!chip)
		return -EINVAL;

	dev = chip->dev;
	if (!dev)
		return -EINVAL;

	dw = chip->dw;
	if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
		return -EINVAL;

	raw_spin_lock_init(&dw->lock);

	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);

	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);

	if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write channels */
	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Setup read channels */
	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Power management */
	pm_runtime_enable(dev);

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(chip);

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	dw->nr_irqs = 0;

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);

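/*
 * Undo everything done by dw_edma_probe(): quiesce the hardware, free
 * the IRQs, disable runtime PM, and unregister both dma_devices.
 */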
int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Power management */
	pm_runtime_disable(dev);

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	dma_async_device_unregister(&dw->rd_edma);
	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");