/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Implements the dmaengine provider API for the EP93xx M2P
 * (memory-to-peripheral) and M2M (memory-to-memory) DMA controllers.
 *
 * NOTE(review): the original file header (copyright/licence block) appears
 * to have been stripped during extraction — restore it before redistribution.
 */
21#include <linux/clk.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/dmaengine.h>
25#include <linux/module.h>
26#include <linux/mod_devicetable.h>
27#include <linux/platform_device.h>
28#include <linux/slab.h>
29
30#include <linux/platform_data/dma-ep93xx.h>
31
32#include "dmaengine.h"
33
34
/* M2P (memory-to-peripheral) channel registers */
#define M2P_CONTROL 0x0000
#define M2P_CONTROL_STALLINT BIT(0)	/* interrupt when channel stalls */
#define M2P_CONTROL_NFBINT BIT(1)	/* interrupt when next frame buffer needed */
#define M2P_CONTROL_CH_ERROR_INT BIT(3)	/* interrupt on channel error */
#define M2P_CONTROL_ENABLE BIT(4)	/* enable the channel */
#define M2P_CONTROL_ICE BIT(6)		/* ignore channel error */

#define M2P_INTERRUPT 0x0004
#define M2P_INTERRUPT_STALL BIT(0)
#define M2P_INTERRUPT_NFB BIT(1)
#define M2P_INTERRUPT_ERROR BIT(3)

#define M2P_PPALLOC 0x0008		/* peripheral port allocation */
#define M2P_STATUS 0x000c

/* the two hardware frame buffers (double buffering) */
#define M2P_MAXCNT0 0x0020
#define M2P_BASE0 0x0024
#define M2P_MAXCNT1 0x0030
#define M2P_BASE1 0x0034

/* channel state as read from M2P_STATUS (see m2p_channel_state()) */
#define M2P_STATE_IDLE 0
#define M2P_STATE_STALL 1
#define M2P_STATE_ON 2
#define M2P_STATE_NEXT 3
59
60
/* M2M (memory-to-memory) channel registers */
#define M2M_CONTROL 0x0000
#define M2M_CONTROL_DONEINT BIT(2)	/* interrupt on done */
#define M2M_CONTROL_ENABLE BIT(3)
#define M2M_CONTROL_START BIT(4)	/* software trigger (memcpy channels) */
#define M2M_CONTROL_DAH BIT(11)		/* destination address hold */
#define M2M_CONTROL_SAH BIT(12)		/* source address hold */
#define M2M_CONTROL_PW_SHIFT 9		/* peripheral width */
#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT 13		/* transfer mode */
#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT BIT(21)	/* interrupt when next buffer needed */
#define M2M_CONTROL_RSS_SHIFT 22	/* request source selection */
#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK BIT(24)	/* no handshake */
#define M2M_CONTROL_PWSC_SHIFT 25	/* peripheral wait states count */

#define M2M_INTERRUPT 0x0004
#define M2M_INTERRUPT_MASK 6		/* DONE and NFB interrupt bits */

#define M2M_STATUS 0x000c
#define M2M_STATUS_CTL_SHIFT 1		/* control FSM state */
#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT 4		/* buffer FSM state */
#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE BIT(6)

/* M2M channels are double-buffered too */
#define M2M_BCR0 0x0010
#define M2M_BCR1 0x0014
#define M2M_SAR_BASE0 0x0018
#define M2M_SAR_BASE1 0x001c
#define M2M_DAR_BASE0 0x002c
#define M2M_DAR_BASE1 0x0030

#define DMA_MAX_CHAN_BYTES 0xffff	/* max size of a single HW transfer */
#define DMA_MAX_CHAN_DESCRIPTORS 32	/* descriptors preallocated per channel */
110
111struct ep93xx_dma_engine;
112
113
114
115
116
117
118
119
120
121
122
/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors (chained transfers)
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32 src_addr;
	u32 dst_addr;
	size_t size;
	bool complete;
	struct dma_async_tx_descriptor txd;
	struct list_head tx_list;
	struct list_head node;
};
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel (EP93XX_DMA_IS_CYCLIC)
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only)
 * @runtime_ctrl: M2M control register value currently used (M2M only)
 *
 * As EP93xx DMA controller doesn't support real chained DMA descriptors we
 * will have slightly different scheme here: @active points to the head of
 * the flattened DMA descriptor chain.
 */
struct ep93xx_dma_chan {
	struct dma_chan chan;
	const struct ep93xx_dma_engine *edma;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct tasklet_struct tasklet;
	/* protects the fields following */
	spinlock_t lock;
	unsigned long flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC 0

	int buffer;
	struct list_head active;
	struct list_head queue;
	struct list_head free_list;
	u32 runtime_addr;
	u32 runtime_ctrl;
};
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held.
 */
struct ep93xx_dma_engine {
	struct dma_device dma_dev;
	bool m2m;
	int (*hw_setup)(struct ep93xx_dma_chan *);
	void (*hw_synchronize)(struct ep93xx_dma_chan *);
	void (*hw_shutdown)(struct ep93xx_dma_chan *);
	void (*hw_submit)(struct ep93xx_dma_chan *);
	int (*hw_interrupt)(struct ep93xx_dma_chan *);
/* Return values for hw_interrupt() */
#define INTERRUPT_UNKNOWN 0
#define INTERRUPT_DONE 1
#define INTERRUPT_NEXT_BUFFER 2

	size_t num_channels;
	struct ep93xx_dma_chan channels[];
};
216
217static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
218{
219 return &edmac->chan.dev->device;
220}
221
222static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
223{
224 return container_of(chan, struct ep93xx_dma_chan, chan);
225}
226
227
228
229
230
231
232
233
234
235
236
237
/**
 * ep93xx_dma_set_active - set the active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with the channel lock held.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
 struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}
262
263
264static struct ep93xx_dma_desc *
265ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
266{
267 return list_first_entry_or_null(&edmac->active,
268 struct ep93xx_dma_desc, node);
269}
270
271
272
273
274
275
276
277
278
279
280
281
282
/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances active descriptor to the next in the @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with the channel lock held.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	/* Cyclic transfers never terminate on their own */
	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}
302
303
304
305
306
/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * write to the control register. Presumably this flushes the posted
	 * write before the channel acts on it.
	 */
	readl(edmac->regs + M2P_CONTROL);
}
316
/* Allocate the peripheral port and enable the channel with error reporting. */
static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	/* Select which peripheral port this channel services */
	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	/* Start filling from hardware buffer 0 */
	edmac->buffer = 0;

	return 0;
}
332
333static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
334{
335 return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
336}
337
/*
 * Mask the data interrupts and then busy-wait (yielding the CPU) until the
 * channel has drained below the "on" state. May sleep; called from the
 * dmaengine device_synchronize path, not from atomic context.
 */
static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		schedule();
}
352
/* Disable the channel and spin until the hardware reports IDLE. */
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}
360
/*
 * Program the current active descriptor into the next free hardware frame
 * buffer (M2P channels are double-buffered). Called with the channel lock
 * held.
 */
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	/* The peripheral side address is fixed; pick the memory side one */
	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	/* Alternate between the two hardware buffers */
	edmac->buffer ^= 1;
}
387
/*
 * Push the active descriptor chain to the hardware: program the first frame
 * buffer and, if more descriptors follow, prime the second buffer as well so
 * the channel can switch without stalling. Called with the channel lock held.
 */
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		/* Ask for an interrupt when the next buffer is needed */
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}
402
/*
 * M2P channel interrupt handler. Called with the channel lock held (the
 * top-level handler guarantees the active list is non-empty, so the error
 * path may dereference the active descriptor).
 */
static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to client so we just report the error here and continue as
		 * usual.
		 *
		 * Revisit this when there is a mechanism to report back
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie : %d\n"
			"\tsrc_addr : 0x%08x\n"
			"\tdst_addr : 0x%08x\n"
			"\tsize : %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * Even latest E2 silicon revision sometimes assert STALL interrupt
	 * instead of NFB. Therefore we treat them equally, basing on the
	 * amount of data we still have to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts - the whole transfer is now done */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}
452
453
454
455
456
/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx Users's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}
517
static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}
523
/*
 * Program the current active descriptor into the next free hardware buffer
 * pair (M2M channels are double-buffered like M2P). Called with the channel
 * lock held.
 */
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	/* Alternate between the two hardware buffers */
	edmac->buffer ^= 1;
}
546
/* Push the active descriptor chain to the M2M hardware and start it. */
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}
584
585
586
587
588
589
590
591
592
593
594
/*
 * According to EP93xx User's Guide, we should receive DONE interrupt when all
 * M2M DMA controller transactions complete normally. This is not always the
 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
 * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
 * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
 * In effect, disabling the channel when only DONE bit is set could stop
 * currently running DMA transfer. To avoid this, we use Buffer FSM and
 * Control FSM to check current state of DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
	 * DMA channel. Using DONE and NFB bits from channel status register
	 * or bits from channel interrupt register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
	 * and Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			| M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}
669
670
671
672
673
674static struct ep93xx_dma_desc *
675ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
676{
677 struct ep93xx_dma_desc *desc, *_desc;
678 struct ep93xx_dma_desc *ret = NULL;
679 unsigned long flags;
680
681 spin_lock_irqsave(&edmac->lock, flags);
682 list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
683 if (async_tx_test_ack(&desc->txd)) {
684 list_del_init(&desc->node);
685
686
687 desc->src_addr = 0;
688 desc->dst_addr = 0;
689 desc->size = 0;
690 desc->complete = false;
691 desc->txd.cookie = 0;
692 desc->txd.callback = NULL;
693 desc->txd.callback_param = NULL;
694
695 ret = desc;
696 break;
697 }
698 }
699 spin_unlock_irqrestore(&edmac->lock, flags);
700 return ret;
701}
702
703static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
704 struct ep93xx_dma_desc *desc)
705{
706 if (desc) {
707 unsigned long flags;
708
709 spin_lock_irqsave(&edmac->lock, flags);
710 list_splice_init(&desc->tx_list, &edmac->free_list);
711 list_add(&desc->node, &edmac->free_list);
712 spin_unlock_irqrestore(&edmac->lock, flags);
713 }
714}
715
716
717
718
719
720
721
722
723
/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	/* Do nothing while busy or when there is nothing queued */
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}
745
/*
 * Channel tasklet: completes finished descriptors, kicks the next pending
 * transaction and invokes the client callback outside the channel lock.
 */
static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	/* Client callback is invoked without holding the channel lock */
	dmaengine_desc_callback_invoke(&cb, NULL);
}
783
/*
 * Per-channel interrupt handler. Delegates the hardware specific handling to
 * the engine's hw_interrupt() method and schedules the tasklet when a
 * descriptor completed (or, for cyclic transfers, on every period).
 */
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			"got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		/* Cyclic transfers get a callback after every period */
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}
820
821
822
823
824
825
826
827
828
/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute given descriptor on the hardware or if the hardware
 * is busy, queue the descriptor to be executed later on. Returns cookie which
 * can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}
856
857
858
859
860
861
862
863
864
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		/* M2P requires per-channel data with a valid port/direction */
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		/* M2M data is optional (memcpy channels pass none) */
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	/* Preallocate the descriptor pool for this channel */
	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			/* Partial allocation is fine; report what we got */
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}
941
942
943
944
945
946
947
948
/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	/* Reset the runtime configuration for the next user */
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	/* Free the descriptors outside the lock */
	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}
973
974
975
976
977
978
979
980
981
982
983
984static struct dma_async_tx_descriptor *
985ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
986 dma_addr_t src, size_t len, unsigned long flags)
987{
988 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
989 struct ep93xx_dma_desc *desc, *first;
990 size_t bytes, offset;
991
992 first = NULL;
993 for (offset = 0; offset < len; offset += bytes) {
994 desc = ep93xx_dma_desc_get(edmac);
995 if (!desc) {
996 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
997 goto fail;
998 }
999
1000 bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
1001
1002 desc->src_addr = src + offset;
1003 desc->dst_addr = dest + offset;
1004 desc->size = bytes;
1005
1006 if (!first)
1007 first = desc;
1008 else
1009 list_add_tail(&desc->node, &first->tx_list);
1010 }
1011
1012 first->txd.cookie = -EBUSY;
1013 first->txd.flags = flags;
1014
1015 return &first->txd;
1016fail:
1017 ep93xx_dma_desc_put(edmac, first);
1018 return NULL;
1019}
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032static struct dma_async_tx_descriptor *
1033ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1034 unsigned int sg_len, enum dma_transfer_direction dir,
1035 unsigned long flags, void *context)
1036{
1037 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1038 struct ep93xx_dma_desc *desc, *first;
1039 struct scatterlist *sg;
1040 int i;
1041
1042 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1043 dev_warn(chan2dev(edmac),
1044 "channel was configured with different direction\n");
1045 return NULL;
1046 }
1047
1048 if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1049 dev_warn(chan2dev(edmac),
1050 "channel is already used for cyclic transfers\n");
1051 return NULL;
1052 }
1053
1054 first = NULL;
1055 for_each_sg(sgl, sg, sg_len, i) {
1056 size_t len = sg_dma_len(sg);
1057
1058 if (len > DMA_MAX_CHAN_BYTES) {
1059 dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
1060 len);
1061 goto fail;
1062 }
1063
1064 desc = ep93xx_dma_desc_get(edmac);
1065 if (!desc) {
1066 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
1067 goto fail;
1068 }
1069
1070 if (dir == DMA_MEM_TO_DEV) {
1071 desc->src_addr = sg_dma_address(sg);
1072 desc->dst_addr = edmac->runtime_addr;
1073 } else {
1074 desc->src_addr = edmac->runtime_addr;
1075 desc->dst_addr = sg_dma_address(sg);
1076 }
1077 desc->size = len;
1078
1079 if (!first)
1080 first = desc;
1081 else
1082 list_add_tail(&desc->node, &first->tx_list);
1083 }
1084
1085 first->txd.cookie = -EBUSY;
1086 first->txd.flags = flags;
1087
1088 return &first->txd;
1089
1090fail:
1091 ep93xx_dma_desc_put(edmac, first);
1092 return NULL;
1093}
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112static struct dma_async_tx_descriptor *
1113ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1114 size_t buf_len, size_t period_len,
1115 enum dma_transfer_direction dir, unsigned long flags)
1116{
1117 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1118 struct ep93xx_dma_desc *desc, *first;
1119 size_t offset = 0;
1120
1121 if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
1122 dev_warn(chan2dev(edmac),
1123 "channel was configured with different direction\n");
1124 return NULL;
1125 }
1126
1127 if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
1128 dev_warn(chan2dev(edmac),
1129 "channel is already used for cyclic transfers\n");
1130 return NULL;
1131 }
1132
1133 if (period_len > DMA_MAX_CHAN_BYTES) {
1134 dev_warn(chan2dev(edmac), "too big period length %zu\n",
1135 period_len);
1136 return NULL;
1137 }
1138
1139
1140 first = NULL;
1141 for (offset = 0; offset < buf_len; offset += period_len) {
1142 desc = ep93xx_dma_desc_get(edmac);
1143 if (!desc) {
1144 dev_warn(chan2dev(edmac), "couln't get descriptor\n");
1145 goto fail;
1146 }
1147
1148 if (dir == DMA_MEM_TO_DEV) {
1149 desc->src_addr = dma_addr + offset;
1150 desc->dst_addr = edmac->runtime_addr;
1151 } else {
1152 desc->src_addr = edmac->runtime_addr;
1153 desc->dst_addr = dma_addr + offset;
1154 }
1155
1156 desc->size = period_len;
1157
1158 if (!first)
1159 first = desc;
1160 else
1161 list_add_tail(&desc->node, &first->tx_list);
1162 }
1163
1164 first->txd.cookie = -EBUSY;
1165
1166 return &first->txd;
1167
1168fail:
1169 ep93xx_dma_desc_put(edmac, first);
1170 return NULL;
1171}
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185static void ep93xx_dma_synchronize(struct dma_chan *chan)
1186{
1187 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1188
1189 if (edmac->edma->hw_synchronize)
1190 edmac->edma->hw_synchronize(edmac);
1191}
1192
1193
1194
1195
1196
1197
1198
1199
/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * new transactions without having to re-allocate resources.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}
1225
1226static int ep93xx_dma_slave_config(struct dma_chan *chan,
1227 struct dma_slave_config *config)
1228{
1229 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1230 enum dma_slave_buswidth width;
1231 unsigned long flags;
1232 u32 addr, ctrl;
1233
1234 if (!edmac->edma->m2m)
1235 return -EINVAL;
1236
1237 switch (config->direction) {
1238 case DMA_DEV_TO_MEM:
1239 width = config->src_addr_width;
1240 addr = config->src_addr;
1241 break;
1242
1243 case DMA_MEM_TO_DEV:
1244 width = config->dst_addr_width;
1245 addr = config->dst_addr;
1246 break;
1247
1248 default:
1249 return -EINVAL;
1250 }
1251
1252 switch (width) {
1253 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1254 ctrl = 0;
1255 break;
1256 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1257 ctrl = M2M_CONTROL_PW_16;
1258 break;
1259 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1260 ctrl = M2M_CONTROL_PW_32;
1261 break;
1262 default:
1263 return -EINVAL;
1264 }
1265
1266 spin_lock_irqsave(&edmac->lock, flags);
1267 edmac->runtime_addr = addr;
1268 edmac->runtime_ctrl = ctrl;
1269 spin_unlock_irqrestore(&edmac->lock, flags);
1270
1271 return 0;
1272}
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1283 dma_cookie_t cookie,
1284 struct dma_tx_state *state)
1285{
1286 return dma_cookie_status(chan, cookie, state);
1287}
1288
1289
1290
1291
1292
1293
1294
1295
/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	ep93xx_dma_advance_work(edmac);
}
1300
/*
 * Probe one EP93xx DMA engine instance (either the M2P or the M2M device,
 * selected by the platform device id's driver_data) and register it with
 * the dmaengine core.
 */
static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	/* The engine struct has a flexible channel array at its end */
	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	/* driver_data: 0 = M2P device, 1 = M2M device */
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			/* Skip this channel but keep probing the rest */
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			(unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			&dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_synchronize = ep93xx_dma_synchronize;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		/* M2M engine additionally supports memory to memory copies */
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_synchronize = m2p_hw_synchronize;
		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		/* Undo the clk_get() done for each channel above */
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			edma->m2m ? "M" : "P");
	}

	return ret;
}
1396
/* driver_data selects the engine flavour: 0 = M2P, 1 = M2M */
static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};
1402
/* No .probe here - registered via platform_driver_probe() below */
static struct platform_driver ep93xx_dma_driver = {
	.driver = {
		.name = "ep93xx-dma",
	},
	.id_table = ep93xx_dma_driver_ids,
};
1409
static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
/* subsys_initcall: make DMA channels available before client drivers probe */
subsys_initcall(ep93xx_dma_module_init);
1415
1416MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
1417MODULE_DESCRIPTION("EP93xx DMA driver");
1418MODULE_LICENSE("GPL");
1419