// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};

/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));
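
/*
 * Note: in descriptor mode the controller fetches these entries directly
 * from memory (the RCAR_DMADPBASE address mask below implies 16-byte
 * alignment), so the packed 16-byte layout above mirrors the SAR/DAR/TCR
 * register order.
 */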

/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)

/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunks
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))
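
/*
 * Descriptors and chunks are never freed individually: they are carved out
 * of whole pages kept on the channel's pages list and recycled through the
 * channel's free lists, which keeps allocation cheap in atomic context.
 */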

/*
 * struct rcar_dmac_chan_slave - Slave configuration
 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */
struct rcar_dmac_chan_slave {
	phys_addr_t slave_addr;
	unsigned int xfer_size;
};

/*
 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
 * @addr: slave dma address
 * @dir: direction of mapping
 * @slave: slave configuration that is mapped
 */
struct rcar_dmac_chan_map {
	dma_addr_t addr;
	enum dma_data_direction dir;
	struct rcar_dmac_chan_slave slave;
};

/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @map: currently mapped slave configuration
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of done descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;
	struct rcar_dmac_chan_map map;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)

/*
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @parms: device DMA parameters (maximum segment size)
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @channels_mask: bitfield of channels managed by this driver
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;
	struct device_dma_parameters parms;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;
	u32 channels_mask;

	DECLARE_BITMAP(modules, 256);
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)

/*
 * struct rcar_dmac_of_data - This driver's OF data
 * @chan_offset_base: DMAC channels base offset
 * @chan_offset_stride: DMAC channels offset stride
 */
struct rcar_dmac_of_data {
	u32 chan_offset_base;
	u32 chan_offset_stride;
};
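
/*
 * Each channel's registers live at chan_offset_base + index *
 * chan_offset_stride from the controller base; rcar_dmac_data at the bottom
 * of this file supplies 0x8000/0x80 for "renesas,rcar-dmac" devices.
 */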

/* -----------------------------------------------------------------------------
 * Registers
 */

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4

/* -----------------------------------------------------------------------------
 * Device access
 */
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}
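
/*
 * DMAOR (global) and DMARS (per-channel) are 16-bit registers; all other
 * registers are accessed as 32-bit quantities, hence the split accessors
 * above.
 */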

/* -----------------------------------------------------------------------------
 * Initialization
 */

static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}

static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	if (desc->hwdescs.use) {
		struct rcar_dmac_xfer_chunk *chunk =
			list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at beginning of transfer by the DMAC like
		 * it should. Initialize it manually with the destination
		 * address of the first chunk.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the
		 * end of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR,
			     chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
}
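
/*
 * Summary of the two start paths above: with hardware descriptor lists
 * (hwdescs.use) the controller walks the in-memory rcar_dmac_hw_desc array
 * on its own and the CPU only reprograms DMADPCR between stages; otherwise
 * each rcar_dmac_xfer_chunk is programmed into SAR/DAR/TCR one at a time
 * from the transfer-end interrupt handler.
 */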

static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
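
/*
 * Per the usual dmaengine contract, tx_submit only assigns a cookie and
 * queues the descriptor on the pending list; nothing touches the hardware
 * until rcar_dmac_issue_pending() moves pending descriptors to the active
 * list and starts the transfer.
 */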

/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptors lists. The descriptor's chunks list will be reinitialized.
 *
 * The descriptor must have been removed from the channel's lists before
 * calling this function.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add(&desc->node, &chan->desc.free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	/*
	 * We have to temporarily move all descriptors from the wait list to a
	 * local list as iterating over the wait list, even with
	 * list_for_each_entry_safe, isn't safe if we release the channel lock
	 * around the rcar_dmac_desc_put() call.
	 */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice_init(&chan->desc.wait, &list);
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}

	if (list_empty(&list))
		return;

	/* Put the remaining descriptors back in the wait list. */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice(&list, &chan->desc.wait);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	unsigned long flags;
	int ret;

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
	list_del(&desc->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return desc;
}

/*
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

		list_add_tail(&chunk->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.chunks_free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_xfer_chunk *chunk;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.chunks_free)) {
		/*
		 * No free chunks, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated chunks. If the allocation fails return an error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	chunk = list_first_entry(&chan->desc.chunks_free,
				 struct rcar_dmac_xfer_chunk, node);
	list_del(&chunk->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return chunk;
}

static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
				     struct rcar_dmac_desc *desc, size_t size)
{
	/*
	 * dma_alloc_coherent() allocates memory in page size increments. To
	 * avoid reallocating the hardware descriptors when the allocated size
	 * wouldn't change align the requested size to a multiple of the page
	 * size.
	 */
	size = PAGE_ALIGN(size);

	if (desc->hwdescs.size == size)
		return;

	if (desc->hwdescs.mem) {
		dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
				  desc->hwdescs.mem, desc->hwdescs.dma);
		desc->hwdescs.mem = NULL;
		desc->hwdescs.size = 0;
	}

	if (!size)
		return;

	desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
					       &desc->hwdescs.dma, GFP_NOWAIT);
	if (!desc->hwdescs.mem)
		return;

	desc->hwdescs.size = size;
}

static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
				 struct rcar_dmac_desc *desc)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_hw_desc *hwdesc;

	rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));

	hwdesc = desc->hwdescs.mem;
	if (!hwdesc)
		return -ENOMEM;

	list_for_each_entry(chunk, &desc->chunks, node) {
		hwdesc->sar = chunk->src_addr;
		hwdesc->dar = chunk->dst_addr;
		hwdesc->tcr = chunk->size >> desc->xfer_shift;
		hwdesc++;
	}

	return 0;
}
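
/*
 * Note that TCR holds a transfer count, not a byte count: a chunk of size
 * bytes is programmed as size >> xfer_shift transfers of 1 << xfer_shift
 * bytes each, so chunk sizes are expected to be multiples of the transfer
 * size.
 */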

/* -----------------------------------------------------------------------------
 * Stop and reset
 */
static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
{
	u32 chcr;
	unsigned int i;

	/*
	 * Ensure that the setting of the DE bit is actually 0 after
	 * clearing it.
	 */
	for (i = 0; i < 1024; i++) {
		chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
		if (!(chcr & RCAR_DMACHCR_DE))
			return;
		udelay(1);
	}

	dev_err(chan->chan.device->dev, "CHCR DE check error\n");
}

static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	/* set DE=0 and flush remaining data */
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));

	/* make sure all remaining data was flushed */
	rcar_dmac_chcr_de_barrier(chan);
}

static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
		  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
		  RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
	rcar_dmac_chcr_de_barrier(chan);
}

static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&chan->lock, flags);

	/* Move all non-free descriptors to the local lists. */
	list_splice_init(&chan->desc.pending, &descs);
	list_splice_init(&chan->desc.active, &descs);
	list_splice_init(&chan->desc.done, &descs);
	list_splice_init(&chan->desc.wait, &descs);

	chan->desc.running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &descs, node) {
		list_del(&desc->node);
		rcar_dmac_desc_put(chan, desc);
	}
}

static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
{
	unsigned int i;

	/* Stop all channels. */
	for (i = 0; i < dmac->n_channels; ++i) {
		struct rcar_dmac_chan *chan = &dmac->channels[i];

		if (!(dmac->channels_mask & BIT(i)))
			continue;

		/* Stop and reinitialize the channel. */
		spin_lock_irq(&chan->lock);
		rcar_dmac_chan_halt(chan);
		spin_unlock_irq(&chan->lock);
	}
}

static int rcar_dmac_chan_pause(struct dma_chan *chan)
{
	unsigned long flags;
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_clear_chcr_de(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
					  struct rcar_dmac_desc *desc)
{
	static const u32 chcr_ts[] = {
		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
		RCAR_DMACHCR_TS_64B,
	};

	unsigned int xfer_size;
	u32 chcr;

	switch (desc->direction) {
	case DMA_DEV_TO_MEM:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->src.xfer_size;
		break;

	case DMA_MEM_TO_DEV:
		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->dst.xfer_size;
		break;

	case DMA_MEM_TO_MEM:
	default:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_AUTO;
		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
		break;
	}

	desc->xfer_shift = ilog2(xfer_size);
	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
}
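
/*
 * chcr_ts[] is indexed by xfer_shift = ilog2(xfer_size), so the entries must
 * stay ordered by transfer size (1, 2, 4, 8, 16, 32, 64 bytes) even though
 * the hardware TS bit encodings themselves are not monotonic (note
 * RCAR_DMACHCR_TS_8B above).
 */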

/*
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr is the device.
 *
 * Returns the DMA descriptor or NULL in case of error.
 */
static struct dma_async_tx_descriptor *
rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, dma_addr_t dev_addr,
		       enum dma_transfer_direction dir, unsigned long dma_flags,
		       bool cyclic)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int nchunks = 0;
	unsigned int max_chunk_size;
	unsigned int full_size = 0;
	bool cross_boundary = false;
	unsigned int i;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	u32 high_dev_addr;
	u32 high_mem_addr;
#endif

	desc = rcar_dmac_desc_get(chan);
	if (!desc)
		return NULL;

	desc->async_tx.flags = dma_flags;
	desc->async_tx.cookie = -EBUSY;

	desc->cyclic = cyclic;
	desc->direction = dir;

	rcar_dmac_chan_configure_desc(chan, desc);

	max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;

	/*
	 * Allocate and fill the transfer chunk descriptors. We own the only
	 * reference to the DMA descriptor, there's no need for locking.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t mem_addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		full_size += len;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (i == 0) {
			high_dev_addr = dev_addr >> 32;
			high_mem_addr = mem_addr >> 32;
		}

		if ((dev_addr >> 32 != high_dev_addr) ||
		    (mem_addr >> 32 != high_mem_addr))
			cross_boundary = true;
#endif
		while (len) {
			unsigned int size = min(len, max_chunk_size);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			/*
			 * Prevent individual transfers from crossing 4GB
			 * boundaries.
			 */
			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
				cross_boundary = true;
			}
			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
				cross_boundary = true;
			}
#endif

			chunk = rcar_dmac_xfer_chunk_get(chan);
			if (!chunk) {
				rcar_dmac_desc_put(chan, desc);
				return NULL;
			}

			if (dir == DMA_DEV_TO_MEM) {
				chunk->src_addr = dev_addr;
				chunk->dst_addr = mem_addr;
			} else {
				chunk->src_addr = mem_addr;
				chunk->dst_addr = dev_addr;
			}

			chunk->size = size;

			dev_dbg(chan->chan.device->dev,
				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
				chan->index, chunk, desc, i, sg, size, len,
				&chunk->src_addr, &chunk->dst_addr);

			mem_addr += size;
			if (dir == DMA_MEM_TO_MEM)
				dev_addr += size;

			len -= size;

			list_add_tail(&chunk->node, &desc->chunks);
			nchunks++;
		}
	}

	desc->nchunks = nchunks;
	desc->size = full_size;

	/*
	 * Use hardware descriptor lists if possible when more than one chunk
	 * needs to be transferred (otherwise they don't make much sense).
	 *
	 * Source/Destination address should be located in the same 4GiB
	 * region in the 40-bit address space when using hardware descriptors;
	 * cross_boundary checks for this.
	 */
	desc->hwdescs.use = !cross_boundary && nchunks > 1;
	if (desc->hwdescs.use) {
		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
			desc->hwdescs.use = false;
	}

	return &desc->async_tx;
}
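
/*
 * Illustrative client usage (variable names are examples only): a peripheral
 * driver reaches rcar_dmac_prep_slave_sg() and thus rcar_dmac_chan_prep_sg()
 * above through the standard dmaengine calls:
 *
 *	chan = dma_request_chan(dev, "tx");
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */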

/* -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	int ret;

	INIT_LIST_HEAD(&rchan->desc.chunks_free);
	INIT_LIST_HEAD(&rchan->desc.pages);

	/* Preallocate descriptors. */
	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	return pm_runtime_get_sync(chan->device->dev);
}

static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct rcar_dmac_chan_map *map = &rchan->map;
	struct rcar_dmac_desc_page *page, *_page;
	struct rcar_dmac_desc *desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&rchan->lock);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irq(&rchan->lock);

	/*
	 * Now no new interrupts will occur, but one might already be
	 * running. Wait for it to finish before freeing resources.
	 */
	synchronize_irq(rchan->irq);

	if (rchan->mid_rid >= 0) {
		/* Release the channel's MID/RID module reservation. */
		clear_bit(rchan->mid_rid, dmac->modules);
		rchan->mid_rid = -EINVAL;
	}

	list_splice_init(&rchan->desc.free, &list);
	list_splice_init(&rchan->desc.pending, &list);
	list_splice_init(&rchan->desc.active, &list);
	list_splice_init(&rchan->desc.done, &list);
	list_splice_init(&rchan->desc.wait, &list);

	rchan->desc.running = NULL;

	list_for_each_entry(desc, &list, node)
		rcar_dmac_realloc_hwdesc(rchan, desc, 0);

	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
		list_del(&page->node);
		free_page((unsigned long)page);
	}

	/* Remove slave address map. */
	if (map->slave.xfer_size) {
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
		map->slave.xfer_size = 0;
	}

	pm_runtime_put(chan->device->dev);
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct scatterlist sgl;

	if (!len)
		return NULL;

	sg_init_table(&sgl, 1);
	sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sgl) = dma_src;
	sg_dma_len(&sgl) = len;

	return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
				      DMA_MEM_TO_MEM, flags, false);
}

static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
				    enum dma_transfer_direction dir)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac_chan_map *map = &rchan->map;
	phys_addr_t dev_addr;
	size_t dev_size;
	enum dma_data_direction dev_dir;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = rchan->src.slave_addr;
		dev_size = rchan->src.xfer_size;
		dev_dir = DMA_TO_DEVICE;
	} else {
		dev_addr = rchan->dst.slave_addr;
		dev_size = rchan->dst.xfer_size;
		dev_dir = DMA_FROM_DEVICE;
	}

	/* Reuse current map if possible. */
	if (dev_addr == map->slave.slave_addr &&
	    dev_size == map->slave.xfer_size &&
	    dev_dir == map->dir)
		return 0;

	/* Remove old slave address map. */
	if (map->slave.xfer_size)
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
	map->slave.xfer_size = 0;

	/* Create new slave address map. */
	map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
				     dev_dir, 0);

	if (dma_mapping_error(chan->device->dev, map->addr)) {
		dev_err(chan->device->dev,
			"chan%u: failed to map %zx@%pap", rchan->index,
			dev_size, &dev_addr);
		return -EIO;
	}

	dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
		rchan->index, dev_size, &dev_addr, &map->addr,
		dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");

	map->slave.slave_addr = dev_addr;
	map->slave.xfer_size = dev_size;
	map->dir = dev_dir;

	return 0;
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, rchan->mid_rid);
		return NULL;
	}

	if (rcar_dmac_map_slave_addr(chan, dir))
		return NULL;

	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
				      dir, flags, false);
}

#define RCAR_DMAC_MAX_SG_LEN	32

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			  size_t buf_len, size_t period_len,
			  enum dma_transfer_direction dir, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int i;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || buf_len < period_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, rchan->mid_rid);
		return NULL;
	}

	if (rcar_dmac_map_slave_addr(chan, dir))
		return NULL;

	sg_len = buf_len / period_len;
	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
		dev_err(chan->device->dev,
			"chan%u: sg length %d exceeds limit %d",
			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
		return NULL;
	}

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; ++i) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
				      dir, flags, true);

	kfree(sgl);
	return desc;
}

static int rcar_dmac_device_config(struct dma_chan *chan,
				   struct dma_slave_config *cfg)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel, while using it...
	 */
	rchan->src.slave_addr = cfg->src_addr;
	rchan->dst.slave_addr = cfg->dst_addr;
	rchan->src.xfer_size = cfg->src_addr_width;
	rchan->dst.xfer_size = cfg->dst_addr_width;

	return 0;
}
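
/*
 * The enum dma_slave_buswidth values used by clients are defined as byte
 * counts, so the addr_width fields can be stored directly as the hardware
 * transfer size in bytes; rcar_dmac_chan_configure_desc() later converts
 * them to a TS encoding via ilog2().
 */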

static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/*
	 * FIXME: No new interrupt can occur now, but the IRQ thread might
	 * still be running.
	 */
	rcar_dmac_chan_reinit(rchan);

	return 0;
}

static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
					       dma_cookie_t cookie)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *running = NULL;
	struct rcar_dmac_xfer_chunk *chunk;
	enum dma_status status;
	unsigned int residue = 0;
	unsigned int dptr = 0;
	unsigned int chcrb;
	unsigned int tcrb;
	unsigned int i;

	if (!desc)
		return 0;

	/*
	 * If the cookie corresponds to a descriptor that has been completed
	 * there is no residue. The same check has already been performed by
	 * the caller but without holding the channel lock, so the descriptor
	 * could now be complete.
	 */
	status = dma_cookie_status(&chan->chan, cookie, NULL);
	if (status == DMA_COMPLETE)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently running transfer
	 * then the descriptor hasn't been processed yet, and the residue is
	 * equal to the full descriptor size. A client may also call this
	 * function before rcar_dmac_isr_channel_thread() has run; in that
	 * case desc.running already points to the next descriptor and the
	 * completed one sits on the done list, so a cookie found there means
	 * a residue of zero.
	 */
	if (cookie != desc->async_tx.cookie) {
		list_for_each_entry(desc, &chan->desc.done, node) {
			if (cookie == desc->async_tx.cookie)
				return 0;
		}
		list_for_each_entry(desc, &chan->desc.pending, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}
		list_for_each_entry(desc, &chan->desc.active, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}

		/*
		 * No descriptor found for the cookie, there's thus no residue.
		 * This shouldn't happen if the calling driver passes a correct
		 * cookie value.
		 */
		WARN(1, "No descriptor for cookie!");
		return 0;
	}

	/*
	 * We need to read two registers. Make sure the control register does
	 * not skip to the next chunk while we read the counter: retry up to
	 * three times until two consecutive DPTR reads match.
	 */
	for (i = 0; i < 3; i++) {
		chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
			RCAR_DMACHCRB_DPTR_MASK;
		tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
		/* Still the same? */
		if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
			      RCAR_DMACHCRB_DPTR_MASK))
			break;
	}
	WARN_ONCE(i >= 3, "residue might be not continuous!");

	/*
	 * In descriptor mode the descriptor running pointer is not maintained
	 * by the interrupt handler, find the running descriptor from the
	 * descriptor pointer field in the CHCRB register. In non-descriptor
	 * mode just use the running descriptor pointer.
	 */
	if (desc->hwdescs.use) {
		dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
		if (dptr == 0)
			dptr = desc->nchunks;
		dptr--;
		WARN_ON(dptr >= desc->nchunks);
	} else {
		running = desc->running;
	}

	/* Compute the size of all chunks still to be transferred. */
	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
		if (chunk == running || ++dptr == desc->nchunks)
			break;

		residue += chunk->size;
	}

	/* Add the residue for the current chunk. */
	residue += tcrb << desc->xfer_shift;

	return residue;
}
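
/*
 * Worked example: for a transfer split into chunks of 0x1000 bytes with a
 * 4-byte transfer size (xfer_shift = 2), a TCRB value of 0x100 while the
 * second of three chunks is running yields a residue of 0x1000 (the
 * untouched third chunk) + (0x100 << 2) = 0x1400 bytes.
 */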

static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	enum dma_status status;
	unsigned long flags;
	unsigned int residue;
	bool cyclic;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&rchan->lock, flags);
	residue = rcar_dmac_chan_get_residue(rchan, cookie);
	cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
	spin_unlock_irqrestore(&rchan->lock, flags);

	/* if there's no residue, the cookie is complete */
	if (!residue && !cyclic)
		return DMA_COMPLETE;

	dma_set_residue(txstate, residue);

	return status;
}

static void rcar_dmac_issue_pending(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);

	if (list_empty(&rchan->desc.pending))
		goto done;

	/* Append the pending list to the active list. */
	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);

	/*
	 * If no transfer is running pick the first descriptor from the active
	 * list and start the transfer.
	 */
	if (!rchan->desc.running) {
		struct rcar_dmac_desc *desc;

		desc = list_first_entry(&rchan->desc.active,
					struct rcar_dmac_desc, node);
		rchan->desc.running = desc;

		rcar_dmac_chan_start_xfer(rchan);
	}

done:
	spin_unlock_irqrestore(&rchan->lock, flags);
}

static void rcar_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	synchronize_irq(rchan->irq);
}

/* -----------------------------------------------------------------------------
 * IRQ handling
 */

static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	unsigned int stage;

	if (WARN_ON(!desc || !desc->cyclic)) {
		/*
		 * This should never happen, there should always be a running
		 * cyclic descriptor when a descriptor stage end interrupt is
		 * triggered. Warn and return.
		 */
		return IRQ_NONE;
	}

	/* Program the interrupt pointer to the next stage. */
	stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
		 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
	rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (WARN_ON_ONCE(!desc)) {
		/*
		 * This should never happen, there should always be a running
		 * descriptor when a transfer end interrupt is triggered.
		 */
		return IRQ_NONE;
	}

	/*
	 * The transfer end interrupt isn't generated for each chunk when using
	 * descriptor mode. Only update the running chunk pointer in
	 * non-descriptor mode.
	 */
	if (!desc->hwdescs.use) {
		/*
		 * If we haven't completed the last transfer chunk simply move
		 * to the next one. Only wake the IRQ thread if the transfer is
		 * cyclic.
		 */
		if (!list_is_last(&desc->running->node, &desc->chunks)) {
			desc->running = list_next_entry(desc->running, node);
			if (!desc->cyclic)
				ret = IRQ_HANDLED;
			goto done;
		}

		/*
		 * We've completed the last transfer chunk. If the transfer is
		 * cyclic, move back to the first one.
		 */
		if (desc->cyclic) {
			desc->running =
				list_first_entry(&desc->chunks,
						 struct rcar_dmac_xfer_chunk,
						 node);
			goto done;
		}
	}

	/* The descriptor is complete, move it to the done list. */
	list_move_tail(&desc->node, &chan->desc.done);

	/* Queue the next descriptor, if any. */
	if (!list_empty(&chan->desc.active))
		chan->desc.running = list_first_entry(&chan->desc.active,
						      struct rcar_dmac_desc,
						      node);
	else
		chan->desc.running = NULL;

done:
	if (chan->desc.running)
		rcar_dmac_chan_start_xfer(chan);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
{
	u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
	struct rcar_dmac_chan *chan = dev;
	irqreturn_t ret = IRQ_NONE;
	bool reinit = false;
	u32 chcr;

	spin_lock(&chan->lock);

	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
	if (chcr & RCAR_DMACHCR_CAE) {
		struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);

		/*
		 * We don't need to call rcar_dmac_chan_halt()
		 * because channel is already stopped in error case.
		 * We need to clear register and check DE bit as recovery.
		 */
		rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
		rcar_dmac_chcr_de_barrier(chan);
		reinit = true;
		goto spin_lock_end;
	}

	if (chcr & RCAR_DMACHCR_TE)
		mask |= RCAR_DMACHCR_DE;
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
	if (mask & RCAR_DMACHCR_DE)
		rcar_dmac_chcr_de_barrier(chan);

	if (chcr & RCAR_DMACHCR_DSE)
		ret |= rcar_dmac_isr_desc_stage_end(chan);

	if (chcr & RCAR_DMACHCR_TE)
		ret |= rcar_dmac_isr_transfer_end(chan);

spin_lock_end:
	spin_unlock(&chan->lock);

	if (reinit) {
		dev_err(chan->chan.device->dev, "Channel Address Error\n");

		rcar_dmac_chan_reinit(chan);
		ret = IRQ_HANDLED;
	}

	return ret;
}
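
/*
 * The hard IRQ handler above only acknowledges the hardware and updates the
 * chunk bookkeeping; returning IRQ_WAKE_THREAD defers descriptor completion
 * and client callbacks to rcar_dmac_isr_channel_thread() below, where
 * sleeping and longer-running work are allowed.
 */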

static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
	struct rcar_dmac_chan *chan = dev;
	struct rcar_dmac_desc *desc;
	struct dmaengine_desc_callback cb;

	spin_lock_irq(&chan->lock);

	/* For cyclic transfers notify the user after every chunk. */
	if (chan->desc.running && chan->desc.running->cyclic) {
		desc = chan->desc.running;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}
	}

	/*
	 * Call the callback function for all descriptors on the done list and
	 * move them to the ack wait list.
	 */
	while (!list_empty(&chan->desc.done)) {
		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
					node);
		dma_cookie_complete(&desc->async_tx);
		list_del(&desc->node);

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			/*
			 * We own the only reference to this descriptor, we can
			 * safely dereference it without holding the channel
			 * lock.
			 */
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}

		list_add_tail(&desc->node, &chan->desc.wait);
	}

	spin_unlock_irq(&chan->lock);

	/* Recycle all acked descriptors. */
	rcar_dmac_desc_recycle_acked(chan);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;

	/*
	 * FIXME: Using a filter on OF platforms is a nonsense. The OF xlate
	 * function knows from which device it wants to allocate a channel
	 * from, and would be perfectly capable of selecting the channel it
	 * wants. Instead it has to disguise information as a filter or handle
	 * it through dma_request_channel anyway.
	 */
	if (chan->device->device_config != rcar_dmac_device_config)
		return false;

	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}

static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct rcar_dmac_chan *rchan;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec,
				     ofdma->of_node);
	if (!chan)
		return NULL;

	rchan = to_rcar_dmac_chan(chan);
	rchan->mid_rid = dma_spec->args[0];

	return chan;
}
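
/*
 * The single DT cell checked above (args_count == 1) carries the MID/RID of
 * the requesting peripheral, so a consumer node would reference a channel
 * as, for example (illustrative values only):
 *
 *	dmas = <&dmac0 0x21>, <&dmac0 0x22>;
 *	dma-names = "tx", "rx";
 */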

/* -----------------------------------------------------------------------------
 * Power management
 */

#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
	return 0;
}

static int rcar_dmac_runtime_resume(struct device *dev)
{
	struct rcar_dmac *dmac = dev_get_drvdata(dev);

	return rcar_dmac_init(dmac);
}
#endif

static const struct dev_pm_ops rcar_dmac_pm = {
	/*
	 * TODO for system sleep/resume:
	 *   - Wait for the current transfer to complete and stop the device,
	 *   - Resume transfers, if any.
	 */
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
			   NULL)
};

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan,
				const struct rcar_dmac_of_data *data,
				unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int ret;

	rchan->index = index;
	rchan->iomem = dmac->iomem + data->chan_offset_base +
		       data->chan_offset_stride * index;
	rchan->mid_rid = -EINVAL;

	spin_lock_init(&rchan->lock);

	INIT_LIST_HEAD(&rchan->desc.free);
	INIT_LIST_HEAD(&rchan->desc.pending);
	INIT_LIST_HEAD(&rchan->desc.active);
	INIT_LIST_HEAD(&rchan->desc.done);
	INIT_LIST_HEAD(&rchan->desc.wait);

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (rchan->irq < 0)
		return -ENODEV;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	/*
	 * Initialize the DMA engine channel and add it to the DMA engine
	 * channels list.
	 */
	chan->device = &dmac->engine;
	dma_cookie_init(chan);

	list_add_tail(&chan->device_node, &dmac->engine.channels);

	ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
					rcar_dmac_isr_channel,
					rcar_dmac_isr_channel_thread, 0,
					irqname, rchan);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			rchan->irq, ret);
		return ret;
	}

	return 0;
}

#define RCAR_DMAC_MAX_CHANNELS	32

static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	/* The driver supports at most RCAR_DMAC_MAX_CHANNELS channels. */
	if (dmac->n_channels <= 0 ||
	    dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n",
			dmac->n_channels);
		return -EINVAL;
	}

	/*
	 * If the driver is unable to read the dma-channel-mask property,
	 * it assumes that it can use all channels.
	 */
	dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
	of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask);

	/* If the property has an out-of-range mask, clear the excess bits. */
	dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0);

	return 0;
}
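
/*
 * Illustrative DT fragment consumed by rcar_dmac_parse_of() (values are
 * examples, not taken from a specific board):
 *
 *	dmac0: dma-controller@e6700000 {
 *		compatible = "renesas,rcar-dmac";
 *		dma-channels = <15>;
 *		dma-channel-mask = <0x7fff>;
 *		#dma-cells = <1>;
 *	};
 */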

static int rcar_dmac_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
	struct dma_device *engine;
	struct rcar_dmac *dmac;
	const struct rcar_dmac_of_data *data;
	unsigned int i;
	int ret;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);
	dmac->dev->dma_parms = &dmac->parms;
	dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
	dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));

	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	/*
	 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be
	 * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
	 * is connected to microTLB 0 on currently supported platforms, so we
	 * can't use it with the IPMMU. As the IOMMU API operates at the device
	 * level we can't disable it selectively, so ignore channel 0 for now
	 * if the device is part of an IOMMU group.
	 */
	if (device_iommu_mapped(&pdev->dev))
		dmac->channels_mask &= ~BIT(0);

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources. */
	dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->iomem))
		return PTR_ERR(dmac->iomem);

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
		return ret;
	}

	ret = rcar_dmac_init(dmac);
	pm_runtime_put(&pdev->dev);

	if (ret) {
		dev_err(&pdev->dev, "failed to reset device\n");
		goto error;
	}

	/* Initialize the DMA engine. */
	engine = &dmac->engine;

	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	dma_cap_set(DMA_SLAVE, engine->cap_mask);

	engine->dev = &pdev->dev;
	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);

	engine->src_addr_widths = widths;
	engine->dst_addr_widths = widths;
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
	engine->device_config = rcar_dmac_device_config;
	engine->device_pause = rcar_dmac_chan_pause;
	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
	engine->device_tx_status = rcar_dmac_tx_status;
	engine->device_issue_pending = rcar_dmac_issue_pending;
	engine->device_synchronize = rcar_dmac_device_synchronize;

	INIT_LIST_HEAD(&engine->channels);

	for (i = 0; i < dmac->n_channels; ++i) {
		if (!(dmac->channels_mask & BIT(i)))
			continue;

		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i);
		if (ret < 0)
			goto error;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto error;

	/*
	 * Register the DMA engine device.
	 *
	 * Default transfer size of 32 bytes requires 32-byte alignment.
	 */
	ret = dma_async_device_register(engine);
	if (ret < 0)
		goto error;

	return 0;

error:
	of_dma_controller_free(pdev->dev.of_node);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int rcar_dmac_remove(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

static void rcar_dmac_shutdown(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	rcar_dmac_stop_all_chan(dmac);
}

static const struct rcar_dmac_of_data rcar_dmac_data = {
	.chan_offset_base = 0x8000,
	.chan_offset_stride = 0x80,
};

static const struct of_device_id rcar_dmac_of_ids[] = {
	{
		.compatible = "renesas,rcar-dmac",
		.data = &rcar_dmac_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);

static struct platform_driver rcar_dmac_driver = {
	.driver		= {
		.pm	= &rcar_dmac_pm,
		.name	= "rcar-dmac",
		.of_match_table = rcar_dmac_of_ids,
	},
	.probe		= rcar_dmac_probe,
	.remove		= rcar_dmac_remove,
	.shutdown	= rcar_dmac_shutdown,
};

module_platform_driver(rcar_dmac_driver);

MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");