1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/clk.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/dmaengine.h>
25#include <linux/module.h>
26#include <linux/platform_device.h>
27#include <linux/slab.h>
28
29#include <linux/platform_data/dma-ep93xx.h>
30
31#include "dmaengine.h"
32
33
/*
 * M2P (memory-to-peripheral / peripheral-to-memory) channel registers.
 * Offsets are relative to the channel register base.
 */
#define M2P_CONTROL 0x0000
#define M2P_CONTROL_STALLINT BIT(0)
#define M2P_CONTROL_NFBINT BIT(1)
#define M2P_CONTROL_CH_ERROR_INT BIT(3)
#define M2P_CONTROL_ENABLE BIT(4)
#define M2P_CONTROL_ICE BIT(6)

#define M2P_INTERRUPT 0x0004
#define M2P_INTERRUPT_STALL BIT(0)
#define M2P_INTERRUPT_NFB BIT(1)
#define M2P_INTERRUPT_ERROR BIT(3)

#define M2P_PPALLOC 0x0008
#define M2P_STATUS 0x000c

/* Double-buffered max byte count / base address register pairs. */
#define M2P_MAXCNT0 0x0020
#define M2P_BASE0 0x0024
#define M2P_MAXCNT1 0x0030
#define M2P_BASE1 0x0034

/* M2P channel state machine states, see m2p_channel_state(). */
#define M2P_STATE_IDLE 0
#define M2P_STATE_STALL 1
#define M2P_STATE_ON 2
#define M2P_STATE_NEXT 3

/*
 * M2M (memory-to-memory) channel registers. Offsets are relative to the
 * channel register base.
 */
#define M2M_CONTROL 0x0000
#define M2M_CONTROL_DONEINT BIT(2)
#define M2M_CONTROL_ENABLE BIT(3)
#define M2M_CONTROL_START BIT(4)	/* software trigger, see m2m_hw_submit() */
#define M2M_CONTROL_DAH BIT(11)
#define M2M_CONTROL_SAH BIT(12)
#define M2M_CONTROL_PW_SHIFT 9
#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT 13
#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT BIT(21)
#define M2M_CONTROL_RSS_SHIFT 22
#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK BIT(24)
#define M2M_CONTROL_PWSC_SHIFT 25

#define M2M_INTERRUPT 0x0004
#define M2M_INTERRUPT_MASK 6	/* interrupt status bits tested in m2m_hw_interrupt() */

#define M2M_STATUS 0x000c
/* Control state machine field of the status register. */
#define M2M_STATUS_CTL_SHIFT 1
#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
/* Buffer state machine field of the status register. */
#define M2M_STATUS_BUF_SHIFT 4
#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE BIT(6)

/* Double-buffered byte count / source / destination register pairs. */
#define M2M_BCR0 0x0010
#define M2M_BCR1 0x0014
#define M2M_SAR_BASE0 0x0018
#define M2M_SAR_BASE1 0x001c
#define M2M_DAR_BASE0 0x002c
#define M2M_DAR_BASE1 0x0030

/* Hardware limit of a single DMA buffer and size of the descriptor pool. */
#define DMA_MAX_CHAN_BYTES 0xffff
#define DMA_MAX_CHAN_DESCRIPTORS 32
109
110struct ep93xx_dma_engine;
111
112
113
114
115
116
117
118
119
120
121
/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32 src_addr;
	u32 dst_addr;
	size_t size;
	bool complete;
	struct dma_async_tx_descriptor txd;
	struct list_head tx_list;
	struct list_head node;
};
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel (EP93XX_DMA_IS_CYCLIC)
 * @buffer: which hardware buffer (0/1) to program next
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: bus address used as the device side of slave transfers
 *                (M2M only); set via device_config before the transfer is
 *                prepared
 * @runtime_ctrl: M2M runtime value OR'ed into the control register
 *                (peripheral width bits, see m2m_hw_submit())
 *
 * The controller does not support chained hardware descriptors, so @active
 * holds the head of a software-flattened descriptor chain: when a queued
 * transaction is activated, its @tx_list is flattened into @active (see
 * ep93xx_dma_set_active()).
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data with the
 * channel configuration; for memcpy (M2M) channels it is %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan chan;
	const struct ep93xx_dma_engine *edma;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct tasklet_struct tasklet;
	/* protects the fields following */
	spinlock_t lock;
	unsigned long flags;
/* Channel is used for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC 0

	int buffer;
	struct list_head active;
	struct list_head queue;
	struct list_head free_list;
	u32 runtime_addr;
	u32 runtime_ctrl;
};
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: method which synchronizes DMA channel termination
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. The hw_xxx() methods implement the operations that differ
 * between the two variants; apart from hw_synchronize they are called with
 * the channel lock held and interrupts disabled, so they must not sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device dma_dev;
	bool m2m;
	int (*hw_setup)(struct ep93xx_dma_chan *);
	void (*hw_synchronize)(struct ep93xx_dma_chan *);
	void (*hw_shutdown)(struct ep93xx_dma_chan *);
	void (*hw_submit)(struct ep93xx_dma_chan *);
	int (*hw_interrupt)(struct ep93xx_dma_chan *);
/* Return values for hw_interrupt() */
#define INTERRUPT_UNKNOWN 0
#define INTERRUPT_DONE 1
#define INTERRUPT_NEXT_BUFFER 2

	size_t num_channels;
	struct ep93xx_dma_chan channels[];
};
215
216static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
217{
218 return &edmac->chan.dev->device;
219}
220
221static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
222{
223 return container_of(chan, struct ep93xx_dma_chan, chan);
224}
225
226
227
228
229
230
231
232
233
234
235
236
/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}
261
262
263static struct ep93xx_dma_desc *
264ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
265{
266 return list_first_entry_or_null(&edmac->active,
267 struct ep93xx_dma_desc, node);
268}
269
270
271
272
273
274
275
276
277
278
279
280
281
/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances active descriptor to the next in the @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}
301
302
303
304
305
/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * Read the register back after the write: per the EP93xx data sheet
	 * the write does not take effect until the register is read.
	 */
	readl(edmac->regs + M2P_CONTROL);
}
315
/*
 * m2p_hw_setup - set an M2P channel up for operation
 *
 * Allocates the peripheral port given in the channel's private data and
 * enables the channel with error interrupt generation (ICE set so the
 * channel keeps running on peripheral errors).
 *
 * Called with the channel lock held and interrupts disabled.
 */
static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	/* The next descriptor goes to hardware buffer 0 (MAXCNT0/BASE0). */
	edmac->buffer = 0;

	return 0;
}
331
332static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
333{
334 return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
335}
336
/*
 * m2p_hw_synchronize - wait for the M2P channel to stop processing
 *
 * Disables the stall and next-frame-buffer interrupts and then waits,
 * yielding the CPU via schedule(), until the channel state machine has
 * left the ON/NEXT states. Must be called from sleepable context.
 */
static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		schedule();
}
351
/*
 * m2p_hw_shutdown - disable the M2P channel
 *
 * Clears the whole control register and polls until the channel state
 * machine reports IDLE, warning on each iteration it has not yet done so.
 */
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}
359
/*
 * m2p_fill_desc - program one hardware buffer from the active descriptor
 *
 * Writes the active descriptor's size and memory-side bus address into
 * the MAXCNT/BASE register pair selected by @edmac->buffer, then flips
 * the buffer index for the next fill. For MEM_TO_DEV the memory side is
 * the source address, otherwise the destination address.
 *
 * Called with the channel lock held and interrupts disabled.
 */
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	/* Alternate between the two hardware buffers. */
	edmac->buffer ^= 1;
}
386
/*
 * m2p_hw_submit - push the active descriptor chain to the M2P hardware
 *
 * Programs the first hardware buffer and enables the stall interrupt; if
 * the chain contains another descriptor, programs the second buffer as
 * well and additionally enables the next-frame-buffer interrupt.
 *
 * Called with the channel lock held and interrupts disabled.
 */
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}
401
/*
 * m2p_hw_interrupt - M2P channel interrupt handling
 *
 * Returns INTERRUPT_DONE when the whole chain has been transferred,
 * INTERRUPT_NEXT_BUFFER when the next buffer was programmed, or
 * INTERRUPT_UNKNOWN when none of the expected status bits were set.
 *
 * Called with the channel lock held and interrupts disabled.
 */
static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * There is no easy way of reporting errors back to the
		 * client, so just log the failure here and continue as
		 * usual (ICE is set, the channel keeps running).
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie : %d\n"
			"\tsrc_addr : 0x%08x\n"
			"\tdst_addr : 0x%08x\n"
			"\tsize : %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * STALL and NFB are treated the same way below: whether we program
	 * the next buffer or finish the chain depends only on how much data
	 * is still left to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Chain exhausted: disable the transfer interrupts. */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}
451
452
453
454
455
/*
 * M2M DMA implementation
 */

/*
 * m2m_hw_setup - program the M2M channel control register
 *
 * For memcpy channels (no private data) the control register is simply
 * cleared. For SSP and IDE slave channels the request source, transfer
 * mode, address-hold and wait-state fields are selected according to the
 * configured direction. Returns -EINVAL for an unknown port.
 *
 * Called with the channel lock held and interrupts disabled.
 */
static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * NOTE(review): PWSC of 5 looks experimentally tuned --
		 * presumably smaller values cause partial transfers with a
		 * missing DONE interrupt; confirm against hardware docs
		 * before changing.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * NOTE(review): the PWSC values below presumably come from
		 * the EP93xx User's Guide -- verify against the datasheet.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}
516
static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}
522
/*
 * m2m_fill_desc - program one hardware buffer from the active descriptor
 *
 * Writes the active descriptor's source address, destination address and
 * byte count into the SAR/DAR/BCR register set selected by @edmac->buffer
 * and flips the buffer index for the next fill.
 *
 * Called with the channel lock held and interrupts disabled.
 */
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	/* Alternate between the two hardware buffers. */
	edmac->buffer ^= 1;
}
545
/*
 * m2m_hw_submit - push the active descriptor chain to the M2M hardware
 *
 * Programs one or two hardware buffers, applies the runtime peripheral
 * width configuration, enables the channel and - for memcpy channels -
 * asserts the software trigger.
 *
 * Called with the channel lock held and interrupts disabled.
 */
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channels this must
	 * be done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}
583
584
585
586
587
588
589
590
591
592
593
/*
 * m2m_hw_interrupt - M2M channel interrupt handling
 *
 * The DONE status bit alone is not a reliable indication that the channel
 * has finished: this handler therefore also inspects the channel's Buffer
 * FSM and Control FSM states before deciding whether to program the next
 * buffer or to shut the channel down. Disabling the channel on a spurious
 * DONE while the FSMs are still busy could abort a running transfer.
 *
 * Called with the channel lock held and interrupts disabled.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines the action to take in the
	 * interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use the Buffer FSM and Control FSM to check the current state of
	 * the DMA channel; the DONE and NFB status/interrupt bits alone are
	 * not reliable (see the function comment above).
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when the Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when the Buffer FSM is in DMA_NO_BUF
	 * state and the Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}
668
669
670
671
672
/*
 * DMA engine API implementation
 */

/*
 * ep93xx_dma_desc_get - take a reusable descriptor from the free list
 *
 * Returns the first descriptor on the free list that the client has
 * already acknowledged (async_tx_test_ack), reset to a clean state, or
 * NULL when none is available.
 */
static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}
701
702static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
703 struct ep93xx_dma_desc *desc)
704{
705 if (desc) {
706 unsigned long flags;
707
708 spin_lock_irqsave(&edmac->lock, flags);
709 list_splice_init(&desc->tx_list, &edmac->free_list);
710 list_add(&desc->node, &edmac->free_list);
711 spin_unlock_irqrestore(&edmac->lock, flags);
712 }
713}
714
715
716
717
718
719
720
721
722
/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If the channel is currently idle and there are pending transactions
 * queued, this function takes the next queued transaction from
 * @edmac->queue, makes it active and pushes it to the hardware.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	/* Nothing to do if we are busy or there is nothing queued. */
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}
744
/*
 * ep93xx_dma_tasklet - bottom half completing finished descriptors
 *
 * Completes the cookie of a finished descriptor chain (non-cyclic only),
 * kicks off the next queued transaction, returns the finished chain to
 * the free list, and finally invokes the client callback with the channel
 * lock released.
 */
static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't jumping to the
	 * next descriptor.
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	/* Call the callback outside the lock. */
	dmaengine_desc_callback_invoke(&cb, NULL);
}
782
/*
 * ep93xx_dma_interrupt - per-channel interrupt handler
 *
 * Delegates the hardware-specific handling to the engine's
 * ->hw_interrupt() method and schedules the tasklet when a chain has
 * finished, or - for cyclic transfers - when a period has completed.
 */
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		/* In cyclic mode each completed period fires the callback. */
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}
819
820
821
822
823
824
825
826
827
/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Assigns a cookie and either pushes the descriptor straight to the
 * hardware (when the channel is idle) or appends it to the pending queue.
 * Returns the cookie which can be used to poll the descriptor status.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently being processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor on the
	 * pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}
855
856
857
858
859
860
861
862
863
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Validates the channel configuration, enables the clock, requests the
 * interrupt, runs the hardware setup and pre-allocates a pool of
 * descriptors. Returns the number of allocated descriptors, or a negative
 * errno in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		/* M2P channels require private data with a matching direction. */
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		/* M2M: private data is optional (memcpy); else SSP/IDE only. */
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	/* Pre-allocate the descriptor pool for this channel. */
	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}
940
941
942
943
944
945
946
947
/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Shuts the hardware down, clears the runtime configuration, frees all
 * pooled descriptors and releases the clock and interrupt. The channel
 * must be idle (no active or queued descriptors) when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	/* Free the descriptor pool outside the lock. */
	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}
972
973
974
975
976
977
978
979
980
981
982
983static struct dma_async_tx_descriptor *
984ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
985 dma_addr_t src, size_t len, unsigned long flags)
986{
987 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
988 struct ep93xx_dma_desc *desc, *first;
989 size_t bytes, offset;
990
991 first = NULL;
992 for (offset = 0; offset < len; offset += bytes) {
993 desc = ep93xx_dma_desc_get(edmac);
994 if (!desc) {
995 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
996 goto fail;
997 }
998
999 bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
1000
1001 desc->src_addr = src + offset;
1002 desc->dst_addr = dest + offset;
1003 desc->size = bytes;
1004
1005 if (!first)
1006 first = desc;
1007 else
1008 list_add_tail(&desc->node, &first->tx_list);
1009 }
1010
1011 first->txd.cookie = -EBUSY;
1012 first->txd.flags = flags;
1013
1014 return &first->txd;
1015fail:
1016 ep93xx_dma_desc_put(edmac, first);
1017 return NULL;
1018}
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031static struct dma_async_tx_descriptor *
1032ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1033 unsigned int sg_len, enum dma_transfer_direction dir,
1034 unsigned long flags, void *context)
1035{
1036 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1037 struct ep93xx_dma_desc *desc, *first;
1038 struct scatterlist *sg;
1039 int i;
1040
1041 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1042 dev_warn(chan2dev(edmac),
1043 "channel was configured with different direction\n");
1044 return NULL;
1045 }
1046
1047 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1048 dev_warn(chan2dev(edmac),
1049 "channel is already used for cyclic transfers\n");
1050 return NULL;
1051 }
1052
1053 first = NULL;
1054 for_each_sg(sgl, sg, sg_len, i) {
1055 size_t len = sg_dma_len(sg);
1056
1057 if (len > DMA_MAX_CHAN_BYTES) {
1058 dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
1059 len);
1060 goto fail;
1061 }
1062
1063 desc = ep93xx_dma_desc_get(edmac);
1064 if (!desc) {
1065 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
1066 goto fail;
1067 }
1068
1069 if (dir == DMA_MEM_TO_DEV) {
1070 desc->src_addr = sg_dma_address(sg);
1071 desc->dst_addr = edmac->runtime_addr;
1072 } else {
1073 desc->src_addr = edmac->runtime_addr;
1074 desc->dst_addr = sg_dma_address(sg);
1075 }
1076 desc->size = len;
1077
1078 if (!first)
1079 first = desc;
1080 else
1081 list_add_tail(&desc->node, &first->tx_list);
1082 }
1083
1084 first->txd.cookie = -EBUSY;
1085 first->txd.flags = flags;
1086
1087 return &first->txd;
1088
1089fail:
1090 ep93xx_dma_desc_put(edmac, first);
1091 return NULL;
1092}
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111static struct dma_async_tx_descriptor *
1112ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1113 size_t buf_len, size_t period_len,
1114 enum dma_transfer_direction dir, unsigned long flags)
1115{
1116 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1117 struct ep93xx_dma_desc *desc, *first;
1118 size_t offset = 0;
1119
1120 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1121 dev_warn(chan2dev(edmac),
1122 "channel was configured with different direction\n");
1123 return NULL;
1124 }
1125
1126 if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1127 dev_warn(chan2dev(edmac),
1128 "channel is already used for cyclic transfers\n");
1129 return NULL;
1130 }
1131
1132 if (period_len > DMA_MAX_CHAN_BYTES) {
1133 dev_warn(chan2dev(edmac), "too big period length %zu\n",
1134 period_len);
1135 return NULL;
1136 }
1137
1138
1139 first = NULL;
1140 for (offset = 0; offset < buf_len; offset += period_len) {
1141 desc = ep93xx_dma_desc_get(edmac);
1142 if (!desc) {
1143 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
1144 goto fail;
1145 }
1146
1147 if (dir == DMA_MEM_TO_DEV) {
1148 desc->src_addr = dma_addr + offset;
1149 desc->dst_addr = edmac->runtime_addr;
1150 } else {
1151 desc->src_addr = edmac->runtime_addr;
1152 desc->dst_addr = dma_addr + offset;
1153 }
1154
1155 desc->size = period_len;
1156
1157 if (!first)
1158 first = desc;
1159 else
1160 list_add_tail(&desc->node, &first->tx_list);
1161 }
1162
1163 first->txd.cookie = -EBUSY;
1164
1165 return &first->txd;
1166
1167fail:
1168 ep93xx_dma_desc_put(edmac, first);
1169 return NULL;
1170}
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
/**
 * ep93xx_dma_synchronize - synchronize DMA channel termination
 * @chan: channel
 *
 * Waits until the hardware has actually stopped processing previously
 * terminated descriptors. Only implemented for engine variants that
 * provide ->hw_synchronize() (the M2P engine); a no-op otherwise.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	if (edmac->edma->hw_synchronize)
		edmac->edma->hw_synchronize(edmac);
}
1191
1192
1193
1194
1195
1196
1197
1198
/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops hardware processing, moves all active and queued descriptors back
 * to the free list without invoking callbacks, and re-runs the hardware
 * setup so new descriptors can be submitted afterwards.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First shut the channel down and drop the cyclic flag. */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	/* Recycle the descriptors outside the lock; no callbacks are run. */
	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}
1224
1225static int ep93xx_dma_slave_config(struct dma_chan *chan,
1226 struct dma_slave_config *config)
1227{
1228 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1229 enum dma_slave_buswidth width;
1230 unsigned long flags;
1231 u32 addr, ctrl;
1232
1233 if (!edmac->edma->m2m)
1234 return -EINVAL;
1235
1236 switch (config->direction) {
1237 case DMA_DEV_TO_MEM:
1238 width = config->src_addr_width;
1239 addr = config->src_addr;
1240 break;
1241
1242 case DMA_MEM_TO_DEV:
1243 width = config->dst_addr_width;
1244 addr = config->dst_addr;
1245 break;
1246
1247 default:
1248 return -EINVAL;
1249 }
1250
1251 switch (width) {
1252 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1253 ctrl = 0;
1254 break;
1255 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1256 ctrl = M2M_CONTROL_PW_16;
1257 break;
1258 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1259 ctrl = M2M_CONTROL_PW_32;
1260 break;
1261 default:
1262 return -EINVAL;
1263 }
1264
1265 spin_lock_irqsave(&edmac->lock, flags);
1266 edmac->runtime_addr = addr;
1267 edmac->runtime_ctrl = ctrl;
1268 spin_unlock_irqrestore(&edmac->lock, flags);
1269
1270 return 0;
1271}
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1282 dma_cookie_t cookie,
1283 struct dma_tx_state *state)
1284{
1285 return dma_cookie_status(chan, cookie, state);
1286}
1287
1288
1289
1290
1291
1292
1293
1294
/* Push the next queued transaction to the hardware if the channel is idle. */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	ep93xx_dma_advance_work(edmac);
}
1299
/*
 * ep93xx_dma_probe - probe one M2P or M2M DMA engine instance
 *
 * Allocates the engine with its per-channel state, initializes every
 * channel (clock, lists, tasklet), fills in the dmaengine operations for
 * the matched variant and registers the DMA device.
 */
static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	/* driver_data of the matched id: 1 = M2M engine, 0 = M2P engine. */
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			/* Skip channels whose clock is unavailable. */
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_synchronize = ep93xx_dma_synchronize;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		/* Only the M2M variant can do memcpy. */
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_synchronize = m2p_hw_synchronize;
		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		/* Registration failed: release channel clocks and the engine. */
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}
1395
/* driver_data selects the engine variant: 0 = M2P, 1 = M2M (edma->m2m). */
static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
/* Registered early (subsys_initcall) so client drivers can find the engine. */
subsys_initcall(ep93xx_dma_module_init);
1414
1415MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
1416MODULE_DESCRIPTION("EP93xx DMA driver");
1417MODULE_LICENSE("GPL");
1418