/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Supports both flavours of DMA channel found on the EP93xx: M2P
 * (memory-to/from-peripheral) channels and M2M (memory-to-memory)
 * channels, the latter also serving the SSP and IDE peripherals.
 */
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <mach/dma.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL 0x0000
#define M2P_CONTROL_STALLINT BIT(0)
#define M2P_CONTROL_NFBINT BIT(1)
#define M2P_CONTROL_CH_ERROR_INT BIT(3)
#define M2P_CONTROL_ENABLE BIT(4)
#define M2P_CONTROL_ICE BIT(6)

#define M2P_INTERRUPT 0x0004
#define M2P_INTERRUPT_STALL BIT(0)
#define M2P_INTERRUPT_NFB BIT(1)
#define M2P_INTERRUPT_ERROR BIT(3)

#define M2P_PPALLOC 0x0008
#define M2P_STATUS 0x000c

#define M2P_MAXCNT0 0x0020
#define M2P_BASE0 0x0024
#define M2P_MAXCNT1 0x0030
#define M2P_BASE1 0x0034

#define M2P_STATE_IDLE 0
#define M2P_STATE_STALL 1
#define M2P_STATE_ON 2
#define M2P_STATE_NEXT 3

/* M2M registers */
#define M2M_CONTROL 0x0000
#define M2M_CONTROL_DONEINT BIT(2)
#define M2M_CONTROL_ENABLE BIT(3)
#define M2M_CONTROL_START BIT(4)
#define M2M_CONTROL_DAH BIT(11)
#define M2M_CONTROL_SAH BIT(12)
#define M2M_CONTROL_PW_SHIFT 9
#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT 13
#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT BIT(21)
#define M2M_CONTROL_RSS_SHIFT 22
#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK BIT(24)
#define M2M_CONTROL_PWSC_SHIFT 25

#define M2M_INTERRUPT 0x0004
#define M2M_INTERRUPT_MASK 6

#define M2M_STATUS 0x000c
#define M2M_STATUS_CTL_SHIFT 1
#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT 4
#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE BIT(6)

#define M2M_BCR0 0x0010
#define M2M_BCR1 0x0014
#define M2M_SAR_BASE0 0x0018
#define M2M_SAR_BASE1 0x001c
#define M2M_DAR_BASE0 0x002c
#define M2M_DAR_BASE1 0x0030

#define DMA_MAX_CHAN_BYTES 0xffff
#define DMA_MAX_CHAN_DESCRIPTORS 32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32 src_addr;
	u32 dst_addr;
	size_t size;
	bool complete;
	struct dma_async_tx_descriptor txd;
	struct list_head tx_list;
	struct list_head node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which hardware buffer (0 or 1) to fill next
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: device address used as destination/source (M2M slave only)
 * @runtime_ctrl: M2M runtime values for the control register
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * a transaction is split into smaller pieces: @active holds the descriptors
 * currently programmed into the hardware, @queue holds pending descriptors
 * which are moved to @active once the channel is able to process them, and
 * @free_list holds descriptors ready for reuse.
 */
struct ep93xx_dma_chan {
	struct dma_chan chan;
	const struct ep93xx_dma_engine *edma;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct tasklet_struct tasklet;
	/* protects the fields following */
	spinlock_t lock;
	unsigned long flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC 0

	int buffer;
	struct list_head active;
	struct list_head queue;
	struct list_head free_list;
	u32 runtime_addr;
	u32 runtime_ctrl;
};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is there
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. The hw_xxx() methods are used to perform operations which
 * differ between M2M and M2P channels. They are called with the channel lock
 * held and interrupts disabled, so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device dma_dev;
	bool m2m;
	int (*hw_setup)(struct ep93xx_dma_chan *);
	void (*hw_shutdown)(struct ep93xx_dma_chan *);
	void (*hw_submit)(struct ep93xx_dma_chan *);
	int (*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN 0
#define INTERRUPT_DONE 1
#define INTERRUPT_NEXT_BUFFER 2

	size_t num_channels;
	struct ep93xx_dma_chan channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	if (list_empty(&edmac->active))
		return NULL;

	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances active descriptor to the next in the @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with the channel lock held.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

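/*
 * Disable the channel: mask further interrupts, wait for the hardware to
 * leave the ON/NEXT states, then clear the control register and wait for
 * the final stall to pass.
 */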
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

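/*
 * Program the current active descriptor into whichever of the two M2P
 * buffer register sets (MAXCNT0/BASE0 or MAXCNT1/BASE1) is free next,
 * then flip the buffer index.
 */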
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

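/*
 * Fill the first hardware buffer and enable the stall interrupt. If more
 * descriptors are ready, fill the second buffer too and also enable the
 * next-frame-buffer interrupt.
 */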
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

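/*
 * M2P interrupt handler: report and clear channel errors, then decide
 * whether the whole chain is done (stall interrupt) or whether the next
 * buffer should be programmed (NFB interrupt).
 */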
static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to client so we just report the error here and continue as
		 * usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie : %d\n"
			"\tsrc_addr : 0x%08x\n"
			"\tdst_addr : 0x%08x\n"
			"\tsize : %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * A peripheral wait-state count below 5 causes the channel to
		 * perform only a partial transfer, after which no DONE
		 * interrupt is raised, so use 5 for the SSP.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * The IDE values are taken from the EP93xx User's Guide and
		 * have not been extensively tested.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case wait-state count from the User's Guide */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

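/*
 * Program the current active descriptor into the free M2M buffer register
 * set (SAR/DAR/BCR 0 or 1) and flip the buffer index.
 */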
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

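/*
 * Program up to two buffers, enable the DONE (and possibly NFB) interrupt
 * and enable the channel. Pure memcpy channels have no hardware request
 * line, so they are additionally kicked off by software with the START bit.
 */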
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * The M2M DMA controller transitions through several buffer and control FSM
 * states during a transfer. The handler below combines those FSM states
 * (read from the status register) with the driver's descriptor bookkeeping
 * to decide whether to program the next buffer or to finish and disable the
 * channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines the action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * If we are not done with the descriptors and the buffer FSM shows
	 * that a buffer slot is free (NO_BUF or ON), prepare the next buffer.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * To keep things simple we prepare only one buffer per
		 * interrupt, even if both could be free.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when the buffer FSM is in DMA_NO_BUF state
	 * and the control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-use the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

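/*
 * Return a descriptor (and any descriptors chained on its tx_list) back to
 * the channel's free list.
 */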
static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

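/*
 * Unmap the DMA buffers of a completed memcpy descriptor, honouring the
 * DMA_COMPL_SKIP_{SRC,DEST}_UNMAP and single/page unmap flags the client
 * set in txd.flags.
 */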
static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
	struct device *dev = desc->txd.chan->device->dev;

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src_addr, desc->size,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src_addr, desc->size,
				       DMA_TO_DEVICE);
	}
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->dst_addr, desc->size,
					 DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, desc->dst_addr, desc->size,
				       DMA_FROM_DEVICE);
	}
}

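/*
 * Channel tasklet: runs the client callback for the just-completed (or, for
 * cyclic transfers, the current) descriptor, recycles completed descriptors
 * and kicks off the next queued transaction.
 */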
static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);

	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}

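/*
 * Hard IRQ handler: asks the M2P/M2M specific hw_interrupt() callback what
 * happened and schedules the tasklet when a descriptor (or, for cyclic
 * transfers, a period) has completed.
 */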
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptors to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute given descriptor on the hardware or if the hardware
 * is busy, queue the descriptor to be executed later on. Returns cookie which
 * can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (data->direction != DMA_MEM_TO_DEV &&
				    data->direction != DMA_DEV_TO_MEM)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
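/*
 * For reference, a client would normally drive a slave-capable channel
 * through the generic dmaengine API rather than calling the prep functions
 * below directly. A minimal sketch, assuming an M2M slave channel (SSP/IDE)
 * obtained earlier, e.g. via dma_request_channel(); the device name and
 * FIFO address ("my_dev_fifo_phys") are hypothetical and not part of this
 * driver:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= my_dev_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_CTRL_ACK);
 *	txd->callback = my_dev_dma_done;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */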

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t sg_len = sg_dma_len(sg);

		if (sg_len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 sg_len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = sg_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @context: operation context (ignored)
 *
 * Prepares a descriptor for cyclic DMA operation. Once the descriptor is
 * submitted, @period_len sized chunks from @dma_addr are transferred
 * repeatedly until the channel is terminated.
 *
 * Callers must call ep93xx_dma_terminate_all() to stop the cyclic operation
 * before the channel can be used again.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * new transactions immediately after the channel is stopped.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

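/*
 * Record the runtime slave parameters (device address and bus width) for an
 * M2M channel; they are applied to the control register at submit time.
 * M2P channels take the peripheral address from the port allocation and
 * therefore reject a slave configuration.
 */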
static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
				   struct dma_slave_config *config)
{
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}

/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct dma_slave_config *config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return ep93xx_dma_terminate_all(edmac);

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return ep93xx_dma_slave_config(edmac, config);

	default:
		break;
	}

	return -ENOSYS;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	ret = dma_cookie_status(chan, cookie, state);
	spin_unlock_irqrestore(&edmac->lock, flags);

	return ret;
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

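/*
 * Probe: allocate one ep93xx_dma_engine (with its per-channel array) based
 * on the platform data, register the channels with the dmaengine core and
 * select the M2M or M2P hw_*() callbacks from the platform device id.
 */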
static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_control = ep93xx_dma_control;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver = {
		.name = "ep93xx-dma",
	},
	.id_table = ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");