/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <mach/dma.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_DONEINT		BIT(1)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

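/*
 * The hardware buffer count registers (M2P_MAXCNTx / M2M_BCRx) take a
 * 16-bit byte count, so a single hardware buffer can cover at most 0xffff
 * bytes. Larger requests are split into a chain of descriptors no bigger
 * than this.
 */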
#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which hardware buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only).
 *                This is set via %DMA_SLAVE_CONFIG before a slave operation
 *                is prepared
 * @runtime_ctrl: M2M runtime values for the control register
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors
 * we use a slightly different scheme: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * the information necessary to use this channel.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * hardware specific to the channel type.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: descriptor to set
 *
 * Sets @desc to be the head of the active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before
 * calling this function.
 *
 * Called with the channel lock held.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor
		 * in the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	if (list_empty(&edmac->active))
		return NULL;

	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next in the @edmac->active
 * list and returns %true if we still have descriptors in the chain to
 * process. Otherwise returns %false.
 *
 * When the channel is in cyclic mode this always returns %true.
 *
 * Called with the channel lock held.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	/* In cyclic mode there is always something to process */
	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	/* The channel state is reported in bits 4..5 of the status register */
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

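/*
 * The channels are double buffered: while one hardware buffer (0 or 1) is
 * being transferred, the other can be programmed with the next descriptor.
 * edmac->buffer tracks which buffer to fill next, and the NFB (next frame
 * buffer) interrupt signals that the hardware has switched buffers.
 */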
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors
		 * back to the client, so we just report the error here and
		 * continue as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize	: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}

/*
 * M2M DMA implementation
 *
 * For the M2M transfers we don't use NFB at all: the code below only
 * enables and handles the DONE interrupt, and m2m_hw_interrupt() itself
 * re-submits the next buffer in the chain once the current one has
 * completed.
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	/*
	 * Now we can finally enable the channel. For M2M channel this must
	 * be done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
		return INTERRUPT_UNKNOWN;

	/* Clear the DONE bit */
	writel(0, edmac->regs + M2M_INTERRUPT);

	/* Disable interrupts and the channel */
	control = readl(edmac->regs + M2M_CONTROL);
	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
	writel(control, edmac->regs + M2M_CONTROL);

	/*
	 * Since we only get the DONE interrupt we have to find out ourselves
	 * whether there still is something to process. If there is, we
	 * re-submit and keep the channel running.
	 */
	if (ep93xx_dma_advance_active(edmac)) {
		edmac->edma->hw_submit(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_DONE;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}
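
/*
 * Note that descriptors are recycled rather than freed:
 * ep93xx_dma_desc_put() below returns a descriptor (with its whole tx_list
 * chain) to the channel's free_list, and ep93xx_dma_desc_get() above only
 * hands out entries that the client has already ACKed.
 */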

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
	struct device *dev = desc->txd.chan->device->dev;

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src_addr, desc->size,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src_addr, desc->size,
				       DMA_TO_DEVICE);
	}
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->dst_addr, desc->size,
					 DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, desc->dst_addr, desc->size,
				       DMA_FROM_DEVICE);
	}
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. In that case we only need to advance the
	 * queue; everything else has already been taken care of.
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}
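
/*
 * The hard interrupt handler below only marks channel state and schedules
 * the channel tasklet; descriptor completion, buffer unmapping and client
 * callbacks all run in ep93xx_dma_tasklet() above.
 */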

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns
 * a cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently being processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor to the
	 * pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates the necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. A negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (data->direction != DMA_MEM_TO_DEV &&
				    data->direction != DMA_DEV_TO_MEM)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - initiate a memory to memory DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - initiate a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - initiate a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @context: operation context (ignored)
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	/* Validate the period before claiming the channel for cyclic use */
	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * new transactions to the channel.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
				   struct dma_slave_config *config)
{
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
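
/*
 * A sketch of typical client-side configuration for an M2M slave channel
 * (hypothetical names, error handling omitted):
 *
 *	struct dma_slave_config cfg = { };
 *
 *	cfg.direction = DMA_MEM_TO_DEV;
 *	cfg.dst_addr = fifo_bus_addr;		(device FIFO bus address)
 *	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * This ends up in ep93xx_dma_slave_config() above via the DMA_SLAVE_CONFIG
 * command handled by ep93xx_dma_control() below.
 */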

/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Returns 0 for success or a negative errno in case
 * of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct dma_slave_config *config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return ep93xx_dma_terminate_all(edmac);

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return ep93xx_dma_slave_config(edmac, config);

	default:
		break;
	}

	return -ENOSYS;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query the state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	ret = dma_cookie_status(chan, cookie, state);
	spin_unlock_irqrestore(&edmac->lock, flags);

	return ret;
}

/**
 * ep93xx_dma_issue_pending - flush all pending descriptors to HW
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_control = ep93xx_dma_control;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}
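
/* driver_data selects the engine flavour: 0 = M2P, 1 = M2M (see probe) */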
static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");