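/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */
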
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
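/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */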
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};
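/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */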
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));
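/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */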
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)
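/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */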
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))
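/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @src_xfer_size: size (in bytes) of hardware transfers on the source side
 * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
 * @src_slave_addr: slave source memory address
 * @dst_slave_addr: slave destination memory address
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of descriptors submitted but not yet issued
 * @desc.active: list of descriptors issued for execution
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */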
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;

	unsigned int src_xfer_size;
	unsigned int dst_xfer_size;
	dma_addr_t src_slave_addr;
	dma_addr_t dst_slave_addr;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)
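/*
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @modules: bitmask of client modules in use
 */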
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;

	unsigned long modules[256 / BITS_PER_LONG];
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)
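/* -----------------------------------------------------------------------------
 * Registers
 */
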
#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060
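/* Hardcode the MEMCPY transfer size to 4 bytes. */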
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4
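/* -----------------------------------------------------------------------------
 * Device access
 */
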
/*
 * DMAOR (and, on the channel side, DMARS) are 16-bit registers; all other
 * registers are 32-bit wide.
 */
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}
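/* -----------------------------------------------------------------------------
 * Initialization
 */
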
static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
}

static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	if (desc->hwdescs.use) {
		struct rcar_dmac_xfer_chunk *chunk;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at beginning of transfer by the DMAC like
		 * it should. Initialize it manually with the destination
		 * address of the first chunk.
		 */
		chunk = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the
		 * end of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback enable the
		 * descriptor stage end interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
}

static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}
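/* -----------------------------------------------------------------------------
 * Descriptors submission
 */
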
static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
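/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */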
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irq(&chan->lock);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irq(&chan->lock);

	return 0;
}
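/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptors lists. The descriptor's chunks list will be reinitialized.
 *
 * The descriptor must have been removed from the channel's lists before
 * calling this function.
 */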
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add_tail(&desc->node, &chan->desc.free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * We have to temporarily move all descriptors from the wait list to a
	 * local list as iterating over the wait list, even with
	 * list_for_each_entry_safe, isn't safe if we release the channel lock
	 * around the rcar_dmac_desc_put() call.
	 */
	spin_lock_irq(&chan->lock);
	list_splice_init(&chan->desc.wait, &list);
	spin_unlock_irq(&chan->lock);

	list_for_each_entry_safe(desc, _desc, &list, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}

	if (list_empty(&list))
		return;

	/* Put the remaining descriptors back in the wait list. */
	spin_lock_irq(&chan->lock);
	list_splice(&list, &chan->desc.wait);
	spin_unlock_irq(&chan->lock);
}
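/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */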
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	int ret;

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_lock_irq(&chan->lock);

	while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irq(&chan->lock);
		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irq(&chan->lock);
	}

	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
	list_del(&desc->node);

	spin_unlock_irq(&chan->lock);

	return desc;
}
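/*
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */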
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

		list_add_tail(&chunk->node, &list);
	}

	spin_lock_irq(&chan->lock);
	list_splice_tail(&list, &chan->desc.chunks_free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irq(&chan->lock);

	return 0;
}
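/*
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */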
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_xfer_chunk *chunk;
	int ret;

	spin_lock_irq(&chan->lock);

	while (list_empty(&chan->desc.chunks_free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irq(&chan->lock);
		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irq(&chan->lock);
	}

	chunk = list_first_entry(&chan->desc.chunks_free,
				 struct rcar_dmac_xfer_chunk, node);
	list_del(&chunk->node);

	spin_unlock_irq(&chan->lock);

	return chunk;
}

static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
				     struct rcar_dmac_desc *desc, size_t size)
{
	/*
	 * dma_alloc_coherent() allocates memory in page size increments. To
	 * avoid reallocating the hardware descriptors when the allocated size
	 * wouldn't change align the requested size to a multiple of the page
	 * size.
	 */
	size = PAGE_ALIGN(size);

	if (desc->hwdescs.size == size)
		return;

	if (desc->hwdescs.mem) {
		dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
				  desc->hwdescs.mem, desc->hwdescs.dma);
		desc->hwdescs.mem = NULL;
		desc->hwdescs.size = 0;
	}

	if (!size)
		return;

	desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
					       &desc->hwdescs.dma, GFP_NOWAIT);
	if (!desc->hwdescs.mem)
		return;

	desc->hwdescs.size = size;
}

static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
				 struct rcar_dmac_desc *desc)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_hw_desc *hwdesc;

	rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));

	hwdesc = desc->hwdescs.mem;
	if (!hwdesc)
		return -ENOMEM;

	list_for_each_entry(chunk, &desc->chunks, node) {
		hwdesc->sar = chunk->src_addr;
		hwdesc->dar = chunk->dst_addr;
		hwdesc->tcr = chunk->size >> desc->xfer_shift;
		hwdesc++;
	}

	return 0;
}
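/* -----------------------------------------------------------------------------
 * Stop and reset
 */
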
static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
		  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
}

static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&chan->lock, flags);

	/* Move all non-free descriptors to the local lists. */
	list_splice_init(&chan->desc.pending, &descs);
	list_splice_init(&chan->desc.active, &descs);
	list_splice_init(&chan->desc.done, &descs);
	list_splice_init(&chan->desc.wait, &descs);

	chan->desc.running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &descs, node) {
		list_del(&desc->node);
		rcar_dmac_desc_put(chan, desc);
	}
}

static void rcar_dmac_stop(struct rcar_dmac *dmac)
{
	rcar_dmac_write(dmac, RCAR_DMAOR, 0);
}

static void rcar_dmac_abort(struct rcar_dmac *dmac)
{
	unsigned int i;

	/* Stop all channels. */
	for (i = 0; i < dmac->n_channels; ++i) {
		struct rcar_dmac_chan *chan = &dmac->channels[i];

		/* Stop and reinitialize the channel. */
		spin_lock(&chan->lock);
		rcar_dmac_chan_halt(chan);
		spin_unlock(&chan->lock);

		rcar_dmac_chan_reinit(chan);
	}
}
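/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */
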
static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
					  struct rcar_dmac_desc *desc)
{
	static const u32 chcr_ts[] = {
		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
		RCAR_DMACHCR_TS_64B,
	};

	unsigned int xfer_size;
	u32 chcr;

	switch (desc->direction) {
	case DMA_DEV_TO_MEM:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->src_xfer_size;
		break;

	case DMA_MEM_TO_DEV:
		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->dst_xfer_size;
		break;

	case DMA_MEM_TO_MEM:
	default:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_AUTO;
		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
		break;
	}

	desc->xfer_shift = ilog2(xfer_size);
	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
}
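/*
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 * @chan: the DMA channel
 * @sgl: the scatterlist describing the memory side of the transfer
 * @sg_len: number of entries in @sgl
 * @dev_addr: device address (destination address for memory-to-memory)
 * @dir: transfer direction
 * @dma_flags: DMA engine transfer flags
 * @cyclic: whether the transfer is cyclic
 *
 * Common routine used by the memcpy, slave and cyclic transfer preparation
 * handlers to build a transfer descriptor from a scatterlist.
 */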
static struct dma_async_tx_descriptor *
rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, dma_addr_t dev_addr,
		       enum dma_transfer_direction dir, unsigned long dma_flags,
		       bool cyclic)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int nchunks = 0;
	unsigned int max_chunk_size;
	unsigned int full_size = 0;
	bool highmem = false;
	unsigned int i;

	desc = rcar_dmac_desc_get(chan);
	if (!desc)
		return NULL;

	desc->async_tx.flags = dma_flags;
	desc->async_tx.cookie = -EBUSY;

	desc->cyclic = cyclic;
	desc->direction = dir;

	rcar_dmac_chan_configure_desc(chan, desc);

	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;

	/*
	 * Allocate and fill the transfer chunk descriptors. We own the only
	 * reference to the DMA descriptor, there's no need for locking.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t mem_addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		full_size += len;

		while (len) {
			unsigned int size = min(len, max_chunk_size);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			/*
			 * Prevent individual transfers from crossing 4GB
			 * boundaries.
			 */
			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;

			/*
			 * Check if either of the source or destination address
			 * can't be expressed in 32 bits. If so we can't use
			 * hardware descriptor lists.
			 */
			if (dev_addr >> 32 || mem_addr >> 32)
				highmem = true;
#endif

			chunk = rcar_dmac_xfer_chunk_get(chan);
			if (!chunk) {
				rcar_dmac_desc_put(chan, desc);
				return NULL;
			}

			if (dir == DMA_DEV_TO_MEM) {
				chunk->src_addr = dev_addr;
				chunk->dst_addr = mem_addr;
			} else {
				chunk->src_addr = mem_addr;
				chunk->dst_addr = dev_addr;
			}

			chunk->size = size;

			dev_dbg(chan->chan.device->dev,
				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
				chan->index, chunk, desc, i, sg, size, len,
				&chunk->src_addr, &chunk->dst_addr);

			mem_addr += size;
			if (dir == DMA_MEM_TO_MEM)
				dev_addr += size;

			len -= size;

			list_add_tail(&chunk->node, &desc->chunks);
			nchunks++;
		}
	}

	desc->nchunks = nchunks;
	desc->size = full_size;

	/*
	 * Use hardware descriptor lists if possible when more than one chunk
	 * needs to be transferred (otherwise they don't make much sense).
	 *
	 * The highmem check currently covers the whole transfer. As an
	 * optimization we could use descriptor lists for consecutive lowmem
	 * chunks and direct manual mode for highmem chunks. Whether the
	 * performance improvement would be significant enough compared to
	 * the additional complexity remains to be investigated.
	 */
	desc->hwdescs.use = !highmem && nchunks > 1;
	if (desc->hwdescs.use) {
		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
			desc->hwdescs.use = false;
	}

	return &desc->async_tx;
}
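/* -----------------------------------------------------------------------------
 * DMA engine operations
 */
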
static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	int ret;

	INIT_LIST_HEAD(&rchan->desc.chunks_free);
	INIT_LIST_HEAD(&rchan->desc.pages);

	/* Preallocate descriptors. */
	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	return pm_runtime_get_sync(chan->device->dev);
}

static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct rcar_dmac_desc_page *page, *_page;
	struct rcar_dmac_desc *desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&rchan->lock);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irq(&rchan->lock);

	/* Now no new interrupts will occur */

	if (rchan->mid_rid >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(rchan->mid_rid, dmac->modules);
		rchan->mid_rid = -EINVAL;
	}

	list_splice_init(&rchan->desc.free, &list);
	list_splice_init(&rchan->desc.pending, &list);
	list_splice_init(&rchan->desc.active, &list);
	list_splice_init(&rchan->desc.done, &list);
	list_splice_init(&rchan->desc.wait, &list);

	list_for_each_entry(desc, &list, node)
		rcar_dmac_realloc_hwdesc(rchan, desc, 0);

	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
		list_del(&page->node);
		free_page((unsigned long)page);
	}

	pm_runtime_put(chan->device->dev);
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct scatterlist sgl;

	if (!len)
		return NULL;

	sg_init_table(&sgl, 1);
	sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sgl) = dma_src;
	sg_dma_len(&sgl) = len;

	return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
				      DMA_MEM_TO_MEM, flags, false);
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	dma_addr_t dev_addr;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || !sg_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, rchan->mid_rid);
		return NULL;
	}

	dev_addr = dir == DMA_DEV_TO_MEM
		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
				      dir, flags, false);
}

#define RCAR_DMAC_MAX_SG_LEN	32

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			  size_t buf_len, size_t period_len,
			  enum dma_transfer_direction dir, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	dma_addr_t dev_addr;
	unsigned int sg_len;
	unsigned int i;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || buf_len < period_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, rchan->mid_rid);
		return NULL;
	}

	sg_len = buf_len / period_len;
	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
		dev_err(chan->device->dev,
			"chan%u: sg length %u exceeds limit %u\n",
			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
		return NULL;
	}

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; ++i) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	dev_addr = dir == DMA_DEV_TO_MEM
		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
				      dir, flags, true);

	kfree(sgl);
	return desc;
}

static int rcar_dmac_device_config(struct dma_chan *chan,
				   struct dma_slave_config *cfg)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel, while using it...
	 */
	rchan->src_slave_addr = cfg->src_addr;
	rchan->dst_slave_addr = cfg->dst_addr;
	rchan->src_xfer_size = cfg->src_addr_width;
	rchan->dst_xfer_size = cfg->dst_addr_width;

	return 0;
}

static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/*
	 * FIXME: No new interrupt can occur now, but the IRQ thread might
	 * still be running.
	 */
	rcar_dmac_chan_reinit(rchan);

	return 0;
}

static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
					       dma_cookie_t cookie)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *running = NULL;
	struct rcar_dmac_xfer_chunk *chunk;
	unsigned int residue = 0;
	unsigned int dptr = 0;

	if (!desc)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently running transfer
	 * then the descriptor hasn't been processed yet, and the residue is
	 * equal to the full descriptor size.
	 */
	if (cookie != desc->async_tx.cookie)
		return desc->size;

	/*
	 * In descriptor mode the descriptor running pointer is not maintained
	 * by the interrupt handler, find the running descriptor from the
	 * descriptor pointer field in the CHCRB register. In non-descriptor
	 * mode just use the running descriptor pointer.
	 */
	if (desc->hwdescs.use) {
		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
		WARN_ON(dptr >= desc->nchunks);
	} else {
		running = desc->running;
	}

	/* Compute the size of all chunks still to be transferred. */
	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
		if (chunk == running || ++dptr == desc->nchunks)
			break;

		residue += chunk->size;
	}

	/* Add the residue for the current chunk. */
	residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;

	return residue;
}

static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	enum dma_status status;
	unsigned long flags;
	unsigned int residue;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&rchan->lock, flags);
	residue = rcar_dmac_chan_get_residue(rchan, cookie);
	spin_unlock_irqrestore(&rchan->lock, flags);

	dma_set_residue(txstate, residue);

	return status;
}

static void rcar_dmac_issue_pending(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);

	if (list_empty(&rchan->desc.pending))
		goto done;

	/* Append the pending list to the active list. */
	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);

	/*
	 * If no transfer is running pick the first descriptor from the active
	 * list and start the transfer.
	 */
	if (!rchan->desc.running) {
		struct rcar_dmac_desc *desc;

		desc = list_first_entry(&rchan->desc.active,
					struct rcar_dmac_desc, node);
		rchan->desc.running = desc;

		rcar_dmac_chan_start_xfer(rchan);
	}

done:
	spin_unlock_irqrestore(&rchan->lock, flags);
}
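/* -----------------------------------------------------------------------------
 * IRQ handling
 */
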
static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	unsigned int stage;

	if (WARN_ON(!desc || !desc->cyclic)) {
		/*
		 * This should never happen, there should always be a running
		 * cyclic descriptor when a descriptor stage end interrupt is
		 * triggered. Warn and return.
		 */
		return IRQ_NONE;
	}

	/* Program the interrupt pointer to the next stage. */
	stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
		 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
	rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (WARN_ON_ONCE(!desc)) {
		/*
		 * This should never happen, there should always be a running
		 * descriptor when a transfer end interrupt is triggered. Warn
		 * and return.
		 */
		return IRQ_NONE;
	}

	/*
	 * The transfer end interrupt isn't generated for each chunk when using
	 * descriptor mode. Only update the running chunk pointer in
	 * non-descriptor mode.
	 */
	if (!desc->hwdescs.use) {
		/*
		 * If we haven't completed the last transfer chunk simply move
		 * to the next one. Only wake the IRQ thread if the transfer is
		 * cyclic.
		 */
		if (!list_is_last(&desc->running->node, &desc->chunks)) {
			desc->running = list_next_entry(desc->running, node);
			if (!desc->cyclic)
				ret = IRQ_HANDLED;
			goto done;
		}

		/*
		 * We've completed the last transfer chunk. If the transfer is
		 * cyclic, move back to the first one.
		 */
		if (desc->cyclic) {
			desc->running =
				list_first_entry(&desc->chunks,
						 struct rcar_dmac_xfer_chunk,
						 node);
			goto done;
		}
	}

	/* The descriptor is complete, move it to the done list. */
	list_move_tail(&desc->node, &chan->desc.done);

	/* Queue the next descriptor, if any. */
	if (!list_empty(&chan->desc.active))
		chan->desc.running = list_first_entry(&chan->desc.active,
						      struct rcar_dmac_desc,
						      node);
	else
		chan->desc.running = NULL;

done:
	if (chan->desc.running)
		rcar_dmac_chan_start_xfer(chan);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
{
	u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
	struct rcar_dmac_chan *chan = dev;
	irqreturn_t ret = IRQ_NONE;
	u32 chcr;

	spin_lock(&chan->lock);

	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
	if (chcr & RCAR_DMACHCR_TE)
		mask |= RCAR_DMACHCR_DE;
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);

	if (chcr & RCAR_DMACHCR_DSE)
		ret |= rcar_dmac_isr_desc_stage_end(chan);

	if (chcr & RCAR_DMACHCR_TE)
		ret |= rcar_dmac_isr_transfer_end(chan);

	spin_unlock(&chan->lock);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
	struct rcar_dmac_chan *chan = dev;
	struct rcar_dmac_desc *desc;

	spin_lock_irq(&chan->lock);

	/* For cyclic transfers notify the user after every chunk. */
	if (chan->desc.running && chan->desc.running->cyclic) {
		dma_async_tx_callback callback;
		void *callback_param;

		desc = chan->desc.running;
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		if (callback) {
			spin_unlock_irq(&chan->lock);
			callback(callback_param);
			spin_lock_irq(&chan->lock);
		}
	}

	/*
	 * Call the callback function for all descriptors on the done list and
	 * move them to the ack wait list.
	 */
	while (!list_empty(&chan->desc.done)) {
		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
					node);
		dma_cookie_complete(&desc->async_tx);
		list_del(&desc->node);

		if (desc->async_tx.callback) {
			spin_unlock_irq(&chan->lock);
			/*
			 * We own the only reference to this descriptor, we can
			 * safely dereference it without holding the channel
			 * lock.
			 */
			desc->async_tx.callback(desc->async_tx.callback_param);
			spin_lock_irq(&chan->lock);
		}

		list_add_tail(&desc->node, &chan->desc.wait);
	}

	spin_unlock_irq(&chan->lock);

	/* Recycle all acked descriptors. */
	rcar_dmac_desc_recycle_acked(chan);

	return IRQ_HANDLED;
}

static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
{
	struct rcar_dmac *dmac = data;

	if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
		return IRQ_NONE;

	/*
	 * An unrecoverable error occurred on an unknown channel. Halt the
	 * DMAC, abort all channels and reinitialize the DMAC.
	 */
	rcar_dmac_stop(dmac);
	rcar_dmac_abort(dmac);
	rcar_dmac_init(dmac);

	return IRQ_HANDLED;
}
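/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */
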
static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;

	/*
	 * FIXME: Using a filter on OF platforms is a nonsense. The OF xlate
	 * function knows from which device it wants to allocate a channel
	 * from, and would be perfectly capable of selecting the channel it
	 * wants. Forcing it to call dma_request_channel() and iterate through
	 * all channels from all controllers is just a waste of time.
	 */
	if (chan->device->device_config != rcar_dmac_device_config ||
	    dma_spec->np != chan->device->dev->of_node)
		return false;

	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}

static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct rcar_dmac_chan *rchan;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
	if (!chan)
		return NULL;

	rchan = to_rcar_dmac_chan(chan);
	rchan->mid_rid = dma_spec->args[0];

	return chan;
}
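/* -----------------------------------------------------------------------------
 * Power management
 */
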
#ifdef CONFIG_PM_SLEEP
static int rcar_dmac_sleep_suspend(struct device *dev)
{
	return 0;
}

static int rcar_dmac_sleep_resume(struct device *dev)
{
	return 0;
}
#endif

#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
	return 0;
}

static int rcar_dmac_runtime_resume(struct device *dev)
{
	struct rcar_dmac *dmac = dev_get_drvdata(dev);

	return rcar_dmac_init(dmac);
}
#endif

static const struct dev_pm_ops rcar_dmac_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
			   NULL)
};
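/* -----------------------------------------------------------------------------
 * Probe and remove
 */
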
static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan,
				unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int irq;
	int ret;

	rchan->index = index;
	rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
	rchan->mid_rid = -EINVAL;

	spin_lock_init(&rchan->lock);

	INIT_LIST_HEAD(&rchan->desc.free);
	INIT_LIST_HEAD(&rchan->desc.pending);
	INIT_LIST_HEAD(&rchan->desc.active);
	INIT_LIST_HEAD(&rchan->desc.done);
	INIT_LIST_HEAD(&rchan->desc.wait);

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (irq < 0) {
		dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
					rcar_dmac_isr_channel_thread, 0,
					irqname, rchan);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
		return ret;
	}

	/*
	 * Initialize the DMA engine channel and add it to the DMA engine
	 * channels list.
	 */
	chan->device = &dmac->engine;
	dma_cookie_init(chan);

	list_add_tail(&chan->device_node, &dmac->engine.channels);

	return 0;
}

static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
		dev_err(dev, "invalid number of channels %u\n",
			dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}

static int rcar_dmac_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
	unsigned int channels_offset = 0;
	struct dma_device *engine;
	struct rcar_dmac *dmac;
	struct resource *mem;
	unsigned int i;
	char *irqname;
	int irq;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	/*
	 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to
	 * be flushed correctly, resulting in memory corruption. DMAC 0
	 * channel 0 is in the first region. Leave it disabled when an IOMMU
	 * is present and shift the channel numbering by one.
	 */
	if (pdev->dev.iommu_group) {
		dmac->n_channels--;
		channels_offset = 1;
	}

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources. */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dmac->iomem))
		return PTR_ERR(dmac->iomem);

	irq = platform_get_irq_byname(pdev, "error");
	if (irq < 0) {
		dev_err(&pdev->dev, "no error IRQ specified\n");
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
				 dev_name(dmac->dev));
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
			       irqname, dmac);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
		return ret;
	}

	ret = rcar_dmac_init(dmac);
	pm_runtime_put(&pdev->dev);

	if (ret) {
		dev_err(&pdev->dev, "failed to reset device\n");
		goto error;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	for (i = 0; i < dmac->n_channels; ++i) {
		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
					   i + channels_offset);
		if (ret < 0)
			goto error;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto error;

	/* Register the DMA engine device. */
	engine = &dmac->engine;
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	dma_cap_set(DMA_SLAVE, engine->cap_mask);

	engine->dev = &pdev->dev;
	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);

	engine->src_addr_widths = widths;
	engine->dst_addr_widths = widths;
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
	engine->device_config = rcar_dmac_device_config;
	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
	engine->device_tx_status = rcar_dmac_tx_status;
	engine->device_issue_pending = rcar_dmac_issue_pending;

	ret = dma_async_device_register(engine);
	if (ret < 0)
		goto error;

	return 0;

error:
	of_dma_controller_free(pdev->dev.of_node);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int rcar_dmac_remove(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

static void rcar_dmac_shutdown(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	rcar_dmac_stop(dmac);
}

static const struct of_device_id rcar_dmac_of_ids[] = {
	{ .compatible = "renesas,rcar-dmac", },
	{ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);

static struct platform_driver rcar_dmac_driver = {
	.driver		= {
		.pm	= &rcar_dmac_pm,
		.name	= "rcar-dmac",
		.of_match_table = rcar_dmac_of_ids,
	},
	.probe		= rcar_dmac_probe,
	.remove		= rcar_dmac_remove,
	.shutdown	= rcar_dmac_shutdown,
};

module_platform_driver(rcar_dmac_driver);

MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");