// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg <mika.westerberg@iki.fi>
 *
 * This driver handles both the M2P (memory to peripheral) and M2M
 * (memory to memory) channels found on EP93xx SoCs and exposes them
 * through the dmaengine API.
 */
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL 0x0000
#define M2P_CONTROL_STALLINT BIT(0)
#define M2P_CONTROL_NFBINT BIT(1)
#define M2P_CONTROL_CH_ERROR_INT BIT(3)
#define M2P_CONTROL_ENABLE BIT(4)
#define M2P_CONTROL_ICE BIT(6)

#define M2P_INTERRUPT 0x0004
#define M2P_INTERRUPT_STALL BIT(0)
#define M2P_INTERRUPT_NFB BIT(1)
#define M2P_INTERRUPT_ERROR BIT(3)

#define M2P_PPALLOC 0x0008
#define M2P_STATUS 0x000c

#define M2P_MAXCNT0 0x0020
#define M2P_BASE0 0x0024
#define M2P_MAXCNT1 0x0030
#define M2P_BASE1 0x0034

#define M2P_STATE_IDLE 0
#define M2P_STATE_STALL 1
#define M2P_STATE_ON 2
#define M2P_STATE_NEXT 3

/* M2M registers */
#define M2M_CONTROL 0x0000
#define M2M_CONTROL_DONEINT BIT(2)
#define M2M_CONTROL_ENABLE BIT(3)
#define M2M_CONTROL_START BIT(4)
#define M2M_CONTROL_DAH BIT(11)
#define M2M_CONTROL_SAH BIT(12)
#define M2M_CONTROL_PW_SHIFT 9
#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT 13
#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT BIT(21)
#define M2M_CONTROL_RSS_SHIFT 22
#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK BIT(24)
#define M2M_CONTROL_PWSC_SHIFT 25

#define M2M_INTERRUPT 0x0004
#define M2M_INTERRUPT_MASK 6

#define M2M_STATUS 0x000c
#define M2M_STATUS_CTL_SHIFT 1
#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT 4
#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE BIT(6)

#define M2M_BCR0 0x0010
#define M2M_BCR1 0x0014
#define M2M_SAR_BASE0 0x0018
#define M2M_SAR_BASE1 0x001c
#define M2M_DAR_BASE0 0x002c
#define M2M_DAR_BASE1 0x0030

#define DMA_MAX_CHAN_BYTES 0xffff
#define DMA_MAX_CHAN_DESCRIPTORS 32

struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config);

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32 src_addr;
	u32 dst_addr;
	size_t size;
	bool complete;
	struct dma_async_tx_descriptor txd;
	struct list_head tx_list;
	struct list_head node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only).
 *                This is set via ep93xx_dma_slave_config_write() before a
 *                slave operation is prepared
 * @runtime_ctrl: M2M runtime values for the control register
 * @slave_config: slave configuration
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors
 * a slightly different scheme is used here: @active points to the head of a
 * flattened DMA descriptor chain. @queue holds pending transactions; when a
 * descriptor is moved to the @active list, its chained descriptors are
 * flattened into that single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * the necessary channel configuration information. For memcpy channels this
 * must be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan chan;
	const struct ep93xx_dma_engine *edma;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct tasklet_struct tasklet;
	/* protects the fields following */
	spinlock_t lock;
	unsigned long flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC 0

	int buffer;
	struct list_head active;
	struct list_head queue;
	struct list_head free_list;
	u32 runtime_addr;
	u32 runtime_ctrl;
	struct dma_slave_config slave_config;
};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. The hw_xxx() methods perform the operations which differ
 * between M2M and M2P channels. They are called with the channel lock held.
 */
struct ep93xx_dma_engine {
	struct dma_device dma_dev;
	bool m2m;
	int (*hw_setup)(struct ep93xx_dma_chan *);
	void (*hw_synchronize)(struct ep93xx_dma_chan *);
	void (*hw_shutdown)(struct ep93xx_dma_chan *);
	void (*hw_submit)(struct ep93xx_dma_chan *);
	int (*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN 0
#define INTERRUPT_DONE 1
#define INTERRUPT_NEXT_BUFFER 2

	size_t num_channels;
	struct ep93xx_dma_chan channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with the channel lock held.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * Copy the callback parameters of the head descriptor to each
		 * chained descriptor. This way the callback can be invoked
		 * without having to find the first descriptor in the chain,
		 * which is useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next in @edmac->active and
 * returns %true if there are still descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with the channel lock held.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * Read the register back so that the write has reached the hardware
	 * before we continue.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	edmac->buffer = 0;

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		schedule();
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * There is no easy way of reporting errors back to the
		 * client, so just report the error here and continue as
		 * usual.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie : %d\n"
			"\tsrc_addr : 0x%08x\n"
			"\tdst_addr : 0x%08x\n"
			"\tsize : %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * STALL and NFB interrupts are treated the same way: advance to the
	 * next buffer if there is one, otherwise disable the channel
	 * interrupts and report the transfer as done.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * Program a peripheral wait state count of 5 and disable
		 * hardware handshaking for the SSP port.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * Program the IDE specific peripheral wait state count: 3
		 * when writing towards the device and 2 when reading from
		 * it.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since clients are allowed to configure PW (peripheral width) the
	 * PW bits are always cleared here and then set according to the
	 * runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Enable the channel only after the buffer registers have been
	 * programmed above.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * Memcpy channels have no hardware handshaking, so the
		 * transfer is started with the software trigger.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * The DONE bit alone is not a reliable indication that the M2M channel has
 * finished: it can be asserted while the channel is still running.
 * Disabling the channel based on DONE only could therefore stop a transfer
 * that is still in progress, so the Buffer FSM and Control FSM states from
 * the status register are used to decide when the channel can be safely
 * disabled.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with the DMA channel state, determines the action to take.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use the Buffer FSM and Control FSM to check the current state of
	 * the DMA channel instead of relying on the DONE and NFB bits alone.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Only one buffer can be prepared here without disabling the
		 * channel or polling the DONE bit, so always prepare just one
		 * buffer per interrupt.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when the Buffer FSM reports no buffers in
	 * use and the Control FSM is stalled.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If there are pending transactions queued and the channel is currently
 * idling, this function takes the next queued transaction from @edmac->queue
 * and pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we got to run, the active
	 * list has become empty. In that case there is nothing to do beyond
	 * calling ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently being processed, push this descriptor
	 * directly to the hardware. Otherwise put it on the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates the necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. Negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, transfers are submitted in @period_len sized
 * chunks, restarting from the beginning once @buf_len is reached. Can be
 * terminated with a dmaengine_terminate_all() call.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_synchronize - synchronize transfer termination to the current
 *                          context
 * @chan: channel
 *
 * When this function returns it is guaranteed that all transfers for
 * previously issued descriptors have stopped and it is safe to free the
 * memory associated with them. It is also guaranteed that all complete
 * callbacks for previously submitted descriptors have finished running.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	if (edmac->edma->hw_synchronize)
		edmac->edma->hw_synchronize(edmac);
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * Re-enable the channel so that new transactions can be submitted
	 * immediately after terminate has been called.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	memcpy(&edmac->slave_config, config, sizeof(*config));

	return 0;
}

static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query the state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_synchronize = ep93xx_dma_synchronize;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_synchronize = m2p_hw_synchronize;
		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver = {
		.name = "ep93xx-dma",
	},
	.id_table = ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");