/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg <mika.westerberg@iki.fi>
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;
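
/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors (used in scatter-gather transfers)
 * @node: link used for putting this into a channel queue
 */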
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};
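
/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via ep93xx_dma_slave_config() before a slave
 *                operation is prepared
 * @runtime_ctrl: M2M runtime values for the control register
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * @active holds the flattened list of descriptors currently being processed
 * and @queue holds pending transactions; a queued transaction is moved to
 * @active once the channel goes idle.
 */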
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};
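
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. The hw_xxx() methods perform the operations which differ
 * between M2M and M2P channels. They are called with the channel lock held
 * and interrupts disabled, so they cannot sleep.
 */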
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}
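
/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before
 * calling this function.
 *
 * Called with the channel lock held and interrupts disabled.
 */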
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	if (list_empty(&edmac->active))
		return NULL;

	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}
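
/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next in @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with the channel lock held and interrupts disabled.
 */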
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * Dummy read back to make sure the posted write to the control
	 * register has reached the hardware before we continue.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client so we just report the error here and continue
		 * as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize	: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * The hardware sometimes asserts the STALL interrupt instead of NFB,
	 * so treat them the same way: just try to advance to the next buffer
	 * if there is one.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * Values for the IDE case below follow the EP93xx User's
		 * Guide and might not be correct for all hardware.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear the PW bits here and then set them according to what is given
	 * in the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channels this must
	 * be done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger is used to start
		 * the transfer.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}
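
/*
 * We should receive the DONE interrupt when all M2M DMA controller
 * transactions complete normally, but this is not always the case - the
 * controller has been observed to assert DONE while the channel is still
 * running (Buffer FSM in DMA_BUF_ON state and Control FSM in DMA_MEM_RD
 * state, at least in IDE-DMA operation). Disabling the channel when only
 * the DONE bit is set could therefore stop a transfer that is still in
 * flight, so we use the Buffer FSM and Control FSM states instead to
 * determine what the channel is actually doing.
 */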
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with the DMA channel state, determines the action to take in the
	 * interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use the M2M Buffer FSM and Control FSM to check the current state
	 * of the DMA channel. Using the DONE and NFB bits from the status or
	 * interrupt registers is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when the Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the DMA channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when the Buffer FSM is in DMA_NO_BUF
	 * state and the Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

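
/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */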
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	/*
	 * If terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}
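
/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */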
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently being processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor to the
	 * pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}
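
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates the necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. A negative
 * errno is returned in case of failure.
 */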
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}
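
/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */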
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}
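
/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */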
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
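
/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */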
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
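
/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. Once the descriptor is
 * submitted, the transfer proceeds in @period_len sized chunks and the
 * callback is invoked after each elapsed period. The transfer terminates
 * only when the client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */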
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
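
/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */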
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First shut down the channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * new transactions to the channel without having to re-allocate
	 * channel resources.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
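
/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query the state of a given transaction.
 */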
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}
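
/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */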
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];

			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");