1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/clk.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/dmaengine.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27#include <linux/slab.h>
28
29#include <linux/platform_data/dma-ep93xx.h>
30
31#include "dmaengine.h"
32
33
34#define M2P_CONTROL 0x0000
35#define M2P_CONTROL_STALLINT BIT(0)
36#define M2P_CONTROL_NFBINT BIT(1)
37#define M2P_CONTROL_CH_ERROR_INT BIT(3)
38#define M2P_CONTROL_ENABLE BIT(4)
39#define M2P_CONTROL_ICE BIT(6)
40
41#define M2P_INTERRUPT 0x0004
42#define M2P_INTERRUPT_STALL BIT(0)
43#define M2P_INTERRUPT_NFB BIT(1)
44#define M2P_INTERRUPT_ERROR BIT(3)
45
46#define M2P_PPALLOC 0x0008
47#define M2P_STATUS 0x000c
48
49#define M2P_MAXCNT0 0x0020
50#define M2P_BASE0 0x0024
51#define M2P_MAXCNT1 0x0030
52#define M2P_BASE1 0x0034
53
54#define M2P_STATE_IDLE 0
55#define M2P_STATE_STALL 1
56#define M2P_STATE_ON 2
57#define M2P_STATE_NEXT 3
58
59
60#define M2M_CONTROL 0x0000
61#define M2M_CONTROL_DONEINT BIT(2)
62#define M2M_CONTROL_ENABLE BIT(3)
63#define M2M_CONTROL_START BIT(4)
64#define M2M_CONTROL_DAH BIT(11)
65#define M2M_CONTROL_SAH BIT(12)
66#define M2M_CONTROL_PW_SHIFT 9
67#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
68#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
69#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
70#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
71#define M2M_CONTROL_TM_SHIFT 13
72#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
73#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
74#define M2M_CONTROL_NFBINT BIT(21)
75#define M2M_CONTROL_RSS_SHIFT 22
76#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
77#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
78#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
79#define M2M_CONTROL_NO_HDSK BIT(24)
80#define M2M_CONTROL_PWSC_SHIFT 25
81
82#define M2M_INTERRUPT 0x0004
83#define M2M_INTERRUPT_MASK 6
84
85#define M2M_STATUS 0x000c
86#define M2M_STATUS_CTL_SHIFT 1
87#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
88#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
89#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
90#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
91#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
92#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
93#define M2M_STATUS_BUF_SHIFT 4
94#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
95#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
96#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
97#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
98#define M2M_STATUS_DONE BIT(6)
99
100#define M2M_BCR0 0x0010
101#define M2M_BCR1 0x0014
102#define M2M_SAR_BASE0 0x0018
103#define M2M_SAR_BASE1 0x001c
104#define M2M_DAR_BASE0 0x002c
105#define M2M_DAR_BASE1 0x0030
106
107#define DMA_MAX_CHAN_BYTES 0xffff
108#define DMA_MAX_CHAN_DESCRIPTORS 32
109
110struct ep93xx_dma_engine;
111
112
113
114
115
116
117
118
119
120
121
/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32 src_addr;
	u32 dst_addr;
	size_t size;
	bool complete;
	struct dma_async_tx_descriptor txd;
	struct list_head tx_list;
	struct list_head node;
};
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields listed below
 * @flags: flags for the channel (EP93XX_DMA_IS_CYCLIC)
 * @buffer: which buffer to use next (0/1) when filling hardware registers
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address set via dma_slave_config (M2M only)
 * @runtime_ctrl: M2M control register bits set via dma_slave_config
 */
struct ep93xx_dma_chan {
	struct dma_chan chan;
	const struct ep93xx_dma_engine *edma;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct tasklet_struct tasklet;
	/* protects the fields following */
	spinlock_t lock;
	unsigned long flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC 0
	int buffer;
	struct list_head active;
	struct list_head queue;
	struct list_head free_list;
	u32 runtime_addr;
	u32 runtime_ctrl;
};
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is there
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt; returns one of the INTERRUPT_*
 *		  codes defined below
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device dma_dev;
	bool m2m;
	int (*hw_setup)(struct ep93xx_dma_chan *);
	void (*hw_shutdown)(struct ep93xx_dma_chan *);
	void (*hw_submit)(struct ep93xx_dma_chan *);
	int (*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN 0
#define INTERRUPT_DONE 1
#define INTERRUPT_NEXT_BUFFER 2
	size_t num_channels;
	struct ep93xx_dma_chan channels[];
};
214
215static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
216{
217 return &edmac->chan.dev->device;
218}
219
220static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
221{
222 return container_of(chan, struct ep93xx_dma_chan, chan);
223}
224
225
226
227
228
229
230
231
232
233
234
235
/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}
260
261
262static struct ep93xx_dma_desc *
263ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
264{
265 if (list_empty(&edmac->active))
266 return NULL;
267
268 return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
269}
270
271
272
273
274
275
276
277
278
279
280
281
282
/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances active descriptor to the next in the @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with the channel lock held.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	/* Cyclic transfers never terminate on their own */
	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}
302
303
304
305
306
/* Write the M2P control register and make sure the write lands. */
static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register. The read back flushes the posted
	 * write.
	 */
	readl(edmac->regs + M2P_CONTROL);
}
316
/*
 * Enable an M2P channel: program the peripheral port and turn the channel
 * on with error interrupts enabled.
 *
 * NOTE(review): @edmac->chan.private must be a valid ep93xx_dma_data here;
 * this is guaranteed by ep93xx_dma_alloc_chan_resources() which rejects M2P
 * channels without private data.
 */
static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	/* Select which peripheral port this channel services */
	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}
330
/* Extract the channel control FSM state (bits [5:4], one of M2P_STATE_*). */
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}
335
/*
 * Disable an M2P channel: mask its interrupts, wait for it to leave the
 * ON/NEXT states, then disable it completely and wait for the stall to
 * clear.
 *
 * NOTE(review): both waits poll the hardware without a timeout; a wedged
 * channel would hang here.
 */
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}
352
/*
 * Program the current active descriptor into the next free M2P buffer
 * register pair (MAXCNT/BASE 0 or 1) and flip @edmac->buffer for next time.
 * Called with the channel lock held.
 */
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	/* M2P moves data between memory and a peripheral FIFO; only the
	 * memory side address is programmed here. */
	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	/* Alternate between the two hardware buffers */
	edmac->buffer ^= 1;
}
379
/*
 * Push the active descriptor chain to the M2P hardware. Fills one buffer
 * and enables the stall interrupt; if a second descriptor is available the
 * other buffer is primed too and the next-frame-buffer interrupt enabled.
 * Called with the channel lock held.
 */
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}
394
/*
 * M2P channel interrupt service. Returns INTERRUPT_DONE when the whole
 * chain has stalled (finished), INTERRUPT_NEXT_BUFFER when the next buffer
 * was loaded, and INTERRUPT_UNKNOWN otherwise.
 */
static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to client so we just report the error here and continue as
		 * usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie : %d\n"
			"\tsrc_addr : 0x%08x\n"
			"\tdst_addr : 0x%08x\n"
			"\tsize : %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}
442
443
444
445
446
/*
 * Configure an M2M channel for operation.
 *
 * Without private data the channel is used for plain memcpy and only the
 * control register is cleared. With private data the channel is put into
 * hardware-handshaked slave mode for either SSP or IDE; any other port is
 * rejected with -EINVAL.
 */
static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx Users's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}
507
/* Shut down an M2M channel by clearing its control register entirely. */
static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}
513
/*
 * Program the current active descriptor into the next free M2M buffer
 * register set (SAR/DAR/BCR 0 or 1) and flip @edmac->buffer for next time.
 * Called with the channel lock held.
 */
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	/* Alternate between the two hardware buffers */
	edmac->buffer ^= 1;
}
536
/*
 * Push the active descriptor chain to the M2M hardware and enable the
 * channel. For pure memcpy channels (no private data) the transfer is also
 * software-triggered here. Called with the channel lock held.
 */
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	/* A second descriptor is ready: prime the other buffer too */
	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}
574
575
576
577
578
579
580
581
582
583
584
/*
 * M2M channel interrupt service.
 *
 * According to EP93xx User's Guide, we should receive DONE interrupt when
 * all M2M DMA controller transactions complete normally. This is not always
 * the case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA
 * channel status register stores the DONE status bit but the channel
 * Control FSM is still running. Therefore the buffer and control FSM states
 * read from the status register are used to decide what to do, rather than
 * the DONE/NFB interrupt bits alone.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
	 * DMA channel. Using DONE and NFB bits from channel status register
	 * or bits from channel interrupt register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
	 * and Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}
659
660
661
662
663
664static struct ep93xx_dma_desc *
665ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
666{
667 struct ep93xx_dma_desc *desc, *_desc;
668 struct ep93xx_dma_desc *ret = NULL;
669 unsigned long flags;
670
671 spin_lock_irqsave(&edmac->lock, flags);
672 list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
673 if (async_tx_test_ack(&desc->txd)) {
674 list_del_init(&desc->node);
675
676
677 desc->src_addr = 0;
678 desc->dst_addr = 0;
679 desc->size = 0;
680 desc->complete = false;
681 desc->txd.cookie = 0;
682 desc->txd.callback = NULL;
683 desc->txd.callback_param = NULL;
684
685 ret = desc;
686 break;
687 }
688 }
689 spin_unlock_irqrestore(&edmac->lock, flags);
690 return ret;
691}
692
693static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
694 struct ep93xx_dma_desc *desc)
695{
696 if (desc) {
697 unsigned long flags;
698
699 spin_lock_irqsave(&edmac->lock, flags);
700 list_splice_init(&desc->tx_list, &edmac->free_list);
701 list_add(&desc->node, &edmac->free_list);
702 spin_unlock_irqrestore(&edmac->lock, flags);
703 }
704}
705
706
707
708
709
710
711
712
713
/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	/* Nothing to do if busy or if the queue is empty */
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}
735
/*
 * Unmap the DMA buffers of a completed memcpy descriptor, honouring the
 * DMA_COMPL_* flags the client set on the transaction. Only called for
 * channels without private data (memcpy), where the core mapped the
 * buffers on the client's behalf.
 */
static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
	struct device *dev = desc->txd.chan->device->dev;

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src_addr, desc->size,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src_addr, desc->size,
				       DMA_TO_DEVICE);
	}
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->dst_addr, desc->size,
					 DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, desc->dst_addr, desc->size,
				       DMA_FROM_DEVICE);
	}
}
757
/*
 * Channel tasklet: completes finished descriptors, advances to the next
 * queued transaction, recycles the finished descriptors, and finally runs
 * the client callback outside the channel lock.
 */
static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}
803
/*
 * Per-channel interrupt handler. Dispatches to the M2P/M2M specific
 * hw_interrupt() method and schedules the tasklet for completion work.
 */
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		/* Cyclic transfers get a callback per completed period */
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}
840
841
842
843
844
845
846
847
848
/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute given descriptor on the hardware or if the hardware
 * is busy, queue the descriptor to be executed later on. Returns cookie which
 * can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}
876
877
878
879
880
881
882
883
884
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		/* M2P channels require private data with a valid port */
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		/* M2M: private data is optional (memcpy), but when given it
		 * must describe a supported slave port and direction */
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	/* Pre-allocate a pool of reusable descriptors for this channel */
	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}
961
962
963
964
965
966
967
968
/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	/* Reset runtime configuration back to defaults */
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}
993
994
995
996
997
998
999
1000
1001
1002
1003
1004static struct dma_async_tx_descriptor *
1005ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
1006 dma_addr_t src, size_t len, unsigned long flags)
1007{
1008 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1009 struct ep93xx_dma_desc *desc, *first;
1010 size_t bytes, offset;
1011
1012 first = NULL;
1013 for (offset = 0; offset < len; offset += bytes) {
1014 desc = ep93xx_dma_desc_get(edmac);
1015 if (!desc) {
1016 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
1017 goto fail;
1018 }
1019
1020 bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
1021
1022 desc->src_addr = src + offset;
1023 desc->dst_addr = dest + offset;
1024 desc->size = bytes;
1025
1026 if (!first)
1027 first = desc;
1028 else
1029 list_add_tail(&desc->node, &first->tx_list);
1030 }
1031
1032 first->txd.cookie = -EBUSY;
1033 first->txd.flags = flags;
1034
1035 return &first->txd;
1036fail:
1037 ep93xx_dma_desc_put(edmac, first);
1038 return NULL;
1039}
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052static struct dma_async_tx_descriptor *
1053ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1054 unsigned int sg_len, enum dma_transfer_direction dir,
1055 unsigned long flags, void *context)
1056{
1057 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1058 struct ep93xx_dma_desc *desc, *first;
1059 struct scatterlist *sg;
1060 int i;
1061
1062 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1063 dev_warn(chan2dev(edmac),
1064 "channel was configured with different direction\n");
1065 return NULL;
1066 }
1067
1068 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1069 dev_warn(chan2dev(edmac),
1070 "channel is already used for cyclic transfers\n");
1071 return NULL;
1072 }
1073
1074 first = NULL;
1075 for_each_sg(sgl, sg, sg_len, i) {
1076 size_t sg_len = sg_dma_len(sg);
1077
1078 if (sg_len > DMA_MAX_CHAN_BYTES) {
1079 dev_warn(chan2dev(edmac), "too big transfer size %d\n",
1080 sg_len);
1081 goto fail;
1082 }
1083
1084 desc = ep93xx_dma_desc_get(edmac);
1085 if (!desc) {
1086 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
1087 goto fail;
1088 }
1089
1090 if (dir == DMA_MEM_TO_DEV) {
1091 desc->src_addr = sg_dma_address(sg);
1092 desc->dst_addr = edmac->runtime_addr;
1093 } else {
1094 desc->src_addr = edmac->runtime_addr;
1095 desc->dst_addr = sg_dma_address(sg);
1096 }
1097 desc->size = sg_len;
1098
1099 if (!first)
1100 first = desc;
1101 else
1102 list_add_tail(&desc->node, &first->tx_list);
1103 }
1104
1105 first->txd.cookie = -EBUSY;
1106 first->txd.flags = flags;
1107
1108 return &first->txd;
1109
1110fail:
1111 ep93xx_dma_desc_put(edmac, first);
1112 return NULL;
1113}
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133static struct dma_async_tx_descriptor *
1134ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1135 size_t buf_len, size_t period_len,
1136 enum dma_transfer_direction dir, unsigned long flags,
1137 void *context)
1138{
1139 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1140 struct ep93xx_dma_desc *desc, *first;
1141 size_t offset = 0;
1142
1143 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1144 dev_warn(chan2dev(edmac),
1145 "channel was configured with different direction\n");
1146 return NULL;
1147 }
1148
1149 if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1150 dev_warn(chan2dev(edmac),
1151 "channel is already used for cyclic transfers\n");
1152 return NULL;
1153 }
1154
1155 if (period_len > DMA_MAX_CHAN_BYTES) {
1156 dev_warn(chan2dev(edmac), "too big period length %d\n",
1157 period_len);
1158 return NULL;
1159 }
1160
1161
1162 first = NULL;
1163 for (offset = 0; offset < buf_len; offset += period_len) {
1164 desc = ep93xx_dma_desc_get(edmac);
1165 if (!desc) {
1166 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
1167 goto fail;
1168 }
1169
1170 if (dir == DMA_MEM_TO_DEV) {
1171 desc->src_addr = dma_addr + offset;
1172 desc->dst_addr = edmac->runtime_addr;
1173 } else {
1174 desc->src_addr = edmac->runtime_addr;
1175 desc->dst_addr = dma_addr + offset;
1176 }
1177
1178 desc->size = period_len;
1179
1180 if (!first)
1181 first = desc;
1182 else
1183 list_add_tail(&desc->node, &first->tx_list);
1184 }
1185
1186 first->txd.cookie = -EBUSY;
1187
1188 return &first->txd;
1189
1190fail:
1191 ep93xx_dma_desc_put(edmac, first);
1192 return NULL;
1193}
1194
1195
1196
1197
1198
1199
1200
1201
/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}
1226
1227static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
1228 struct dma_slave_config *config)
1229{
1230 enum dma_slave_buswidth width;
1231 unsigned long flags;
1232 u32 addr, ctrl;
1233
1234 if (!edmac->edma->m2m)
1235 return -EINVAL;
1236
1237 switch (config->direction) {
1238 case DMA_DEV_TO_MEM:
1239 width = config->src_addr_width;
1240 addr = config->src_addr;
1241 break;
1242
1243 case DMA_MEM_TO_DEV:
1244 width = config->dst_addr_width;
1245 addr = config->dst_addr;
1246 break;
1247
1248 default:
1249 return -EINVAL;
1250 }
1251
1252 switch (width) {
1253 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1254 ctrl = 0;
1255 break;
1256 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1257 ctrl = M2M_CONTROL_PW_16;
1258 break;
1259 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1260 ctrl = M2M_CONTROL_PW_32;
1261 break;
1262 default:
1263 return -EINVAL;
1264 }
1265
1266 spin_lock_irqsave(&edmac->lock, flags);
1267 edmac->runtime_addr = addr;
1268 edmac->runtime_ctrl = ctrl;
1269 spin_unlock_irqrestore(&edmac->lock, flags);
1270
1271 return 0;
1272}
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1284 unsigned long arg)
1285{
1286 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1287 struct dma_slave_config *config;
1288
1289 switch (cmd) {
1290 case DMA_TERMINATE_ALL:
1291 return ep93xx_dma_terminate_all(edmac);
1292
1293 case DMA_SLAVE_CONFIG:
1294 config = (struct dma_slave_config *)arg;
1295 return ep93xx_dma_slave_config(edmac, config);
1296
1297 default:
1298 break;
1299 }
1300
1301 return -ENOSYS;
1302}
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1313 dma_cookie_t cookie,
1314 struct dma_tx_state *state)
1315{
1316 return dma_cookie_status(chan, cookie, state);
1317}
1318
1319
1320
1321
1322
1323
1324
1325
/* Push next queued transaction to the hardware if the channel is idle. */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	ep93xx_dma_advance_work(edmac);
}
1330
1331static int __init ep93xx_dma_probe(struct platform_device *pdev)
1332{
1333 struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1334 struct ep93xx_dma_engine *edma;
1335 struct dma_device *dma_dev;
1336 size_t edma_size;
1337 int ret, i;
1338
1339 edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
1340 edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
1341 if (!edma)
1342 return -ENOMEM;
1343
1344 dma_dev = &edma->dma_dev;
1345 edma->m2m = platform_get_device_id(pdev)->driver_data;
1346 edma->num_channels = pdata->num_channels;
1347
1348 INIT_LIST_HEAD(&dma_dev->channels);
1349 for (i = 0; i < pdata->num_channels; i++) {
1350 const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
1351 struct ep93xx_dma_chan *edmac = &edma->channels[i];
1352
1353 edmac->chan.device = dma_dev;
1354 edmac->regs = cdata->base;
1355 edmac->irq = cdata->irq;
1356 edmac->edma = edma;
1357
1358 edmac->clk = clk_get(NULL, cdata->name);
1359 if (IS_ERR(edmac->clk)) {
1360 dev_warn(&pdev->dev, "failed to get clock for %s\n",
1361 cdata->name);
1362 continue;
1363 }
1364
1365 spin_lock_init(&edmac->lock);
1366 INIT_LIST_HEAD(&edmac->active);
1367 INIT_LIST_HEAD(&edmac->queue);
1368 INIT_LIST_HEAD(&edmac->free_list);
1369 tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
1370 (unsigned long)edmac);
1371
1372 list_add_tail(&edmac->chan.device_node,
1373 &dma_dev->channels);
1374 }
1375
1376 dma_cap_zero(dma_dev->cap_mask);
1377 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
1378 dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
1379
1380 dma_dev->dev = &pdev->dev;
1381 dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
1382 dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
1383 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1384 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1385 dma_dev->device_control = ep93xx_dma_control;
1386 dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1387 dma_dev->device_tx_status = ep93xx_dma_tx_status;
1388
1389 dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
1390
1391 if (edma->m2m) {
1392 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1393 dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
1394
1395 edma->hw_setup = m2m_hw_setup;
1396 edma->hw_shutdown = m2m_hw_shutdown;
1397 edma->hw_submit = m2m_hw_submit;
1398 edma->hw_interrupt = m2m_hw_interrupt;
1399 } else {
1400 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
1401
1402 edma->hw_setup = m2p_hw_setup;
1403 edma->hw_shutdown = m2p_hw_shutdown;
1404 edma->hw_submit = m2p_hw_submit;
1405 edma->hw_interrupt = m2p_hw_interrupt;
1406 }
1407
1408 ret = dma_async_device_register(dma_dev);
1409 if (unlikely(ret)) {
1410 for (i = 0; i < edma->num_channels; i++) {
1411 struct ep93xx_dma_chan *edmac = &edma->channels[i];
1412 if (!IS_ERR_OR_NULL(edmac->clk))
1413 clk_put(edmac->clk);
1414 }
1415 kfree(edma);
1416 } else {
1417 dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
1418 edma->m2m ? "M" : "P");
1419 }
1420
1421 return ret;
1422}
1423
/* driver_data selects the engine flavor: 0 = M2P, 1 = M2M */
static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};
1429
/* No .probe here: probed once via platform_driver_probe() below */
static struct platform_driver ep93xx_dma_driver = {
	.driver = {
		.name = "ep93xx-dma",
	},
	.id_table = ep93xx_dma_driver_ids,
};
1436
static int __init ep93xx_dma_module_init(void)
{
	/* probe() is __init, so use the one-shot platform_driver_probe() */
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
/* DMA must be available before its client subsystems initialize */
subsys_initcall(ep93xx_dma_module_init);
1442
1443MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
1444MODULE_DESCRIPTION("EP93xx DMA driver");
1445MODULE_LICENSE("GPL");
1446