// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma	: ATmel DMA controller entity related
 * atc_ / atchan	: ATmel DMA Channel entity related
 */
#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))
#define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define ATC_MAX_DSCR_TRIALS	10

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/**
 * struct at_dma_platform_data - Controller configuration parameters
 * @nr_channels: Number of channels supported by hardware (max 8)
 * @cap_mask: dma_capability flags supported by the platform
 */
struct at_dma_platform_data {
	unsigned int	nr_channels;
	dma_cap_mask_t  cap_mask;
};

/**
 * struct at_dma_slave - Controller-specific information about a slave
 * @dma_dev: required DMA master device
 * @cfg: Platform-specific initializer for the CFG register
 */
struct at_dma_slave {
	struct device		*dma_dev;
	u32			cfg;
};

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);
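
/*
 * atc_get_xfer_width - get the variable width for a transaction
 * @src: DMA address of the source
 * @dst: DMA address of the destination
 * @len: transfer length
 *
 * Return the widest usable transfer width: 2 (32 bits) if @src, @dst and
 * @len are all word aligned, 1 (16 bits) if they are all halfword aligned,
 * 0 (8 bits) otherwise.
 */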
static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
					      size_t len)
{
	unsigned int width;

	if (!((src | dst | len) & 3))
		width = 2;
	else if (!((src | dst | len) & 1))
		width = 1;
	else
		width = 0;

	return width;
}

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}
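
/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 *       so that freshly allocated descriptors are immediately reusable.
 */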
121static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
122 gfp_t gfp_flags)
123{
124 struct at_desc *desc = NULL;
125 struct at_dma *atdma = to_at_dma(chan->device);
126 dma_addr_t phys;
127
128 desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
129 if (desc) {
130 INIT_LIST_HEAD(&desc->tx_list);
131 dma_async_tx_descriptor_init(&desc->txd, chan);
132
133 desc->txd.flags = DMA_CTRL_ACK;
134 desc->txd.tx_submit = atc_tx_submit;
135 desc->txd.phys = phys;
136 }
137
138 return desc;
139}
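
/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */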
145static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
146{
147 struct at_desc *desc, *_desc;
148 struct at_desc *ret = NULL;
149 unsigned long flags;
150 unsigned int i = 0;
151
152 spin_lock_irqsave(&atchan->lock, flags);
153 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
154 i++;
155 if (async_tx_test_ack(&desc->txd)) {
156 list_del(&desc->desc_node);
157 ret = desc;
158 break;
159 }
160 dev_dbg(chan2dev(&atchan->chan_common),
161 "desc %p not ACKed\n", desc);
162 }
163 spin_unlock_irqrestore(&atchan->lock, flags);
164 dev_vdbg(chan2dev(&atchan->chan_common),
165 "scanned %u descriptors on freelist\n", i);
166
167
168 if (!ret)
169 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT);
170
171 return ret;
172}
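
/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */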
179static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
180{
181 if (desc) {
182 struct at_desc *child;
183 unsigned long flags;
184
185 spin_lock_irqsave(&atchan->lock, flags);
186 list_for_each_entry(child, &desc->tx_list, desc_node)
187 dev_vdbg(chan2dev(&atchan->chan_common),
188 "moving child desc %p to freelist\n",
189 child);
190 list_splice_init(&desc->tx_list, &atchan->free_list);
191 dev_vdbg(chan2dev(&atchan->chan_common),
192 "moving desc %p to freelist\n", desc);
193 list_add(&desc->desc_node, &atchan->free_list);
194 spin_unlock_irqrestore(&atchan->lock, flags);
195 }
196}
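
/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */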
206static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
207 struct at_desc *desc)
208{
209 if (!(*first)) {
210 *first = desc;
211 } else {
212
213 (*prev)->lli.dscr = desc->txd.phys;
214
215 list_add_tail(&desc->desc_node,
216 &(*first)->tx_list);
217 }
218 *prev = desc;
219}
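
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with the channel lock held.
 */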
228static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
229{
230 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
231
232
233 if (atc_chan_is_enabled(atchan)) {
234 dev_err(chan2dev(&atchan->chan_common),
235 "BUG: Attempted to start non-idle channel\n");
236 dev_err(chan2dev(&atchan->chan_common),
237 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
238 channel_readl(atchan, SADDR),
239 channel_readl(atchan, DADDR),
240 channel_readl(atchan, CTRLA),
241 channel_readl(atchan, CTRLB),
242 channel_readl(atchan, DSCR));
243
244
245 return;
246 }
247
248 vdbg_dump_regs(atchan);
249
250 channel_writel(atchan, SADDR, 0);
251 channel_writel(atchan, DADDR, 0);
252 channel_writel(atchan, CTRLA, 0);
253 channel_writel(atchan, CTRLB, 0);
254 channel_writel(atchan, DSCR, first->txd.phys);
255 channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
256 ATC_SPIP_BOUNDARY(first->boundary));
257 channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
258 ATC_DPIP_BOUNDARY(first->boundary));
259 dma_writel(atdma, CHER, atchan->mask);
260
261 vdbg_dump_regs(atchan);
262}
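
/*
 * atc_get_desc_by_cookie - get the descriptor of a cookie
 * @atchan: the DMA channel
 * @cookie: the cookie to get the descriptor for
 */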
269static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
270 dma_cookie_t cookie)
271{
272 struct at_desc *desc, *_desc;
273
274 list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
275 if (desc->txd.cookie == cookie)
276 return desc;
277 }
278
279 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
280 if (desc->txd.cookie == cookie)
281 return desc;
282 }
283
284 return NULL;
285}
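
/**
 * atc_calc_bytes_left - calculates the number of bytes left according to
 * the value read from CTRLA.
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 *
 * The Buffer Transfer Size (btsize) field of CTRLA counts transfers done
 * on the source interface, so it is expressed in source-width units.
 */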
294static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
295{
296 u32 btsize = (ctrla & ATC_BTSIZE_MAX);
297 u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
298
305 return current_len - (btsize << src_width);
306}
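
/**
 * atc_get_bytes_left - get the number of bytes residue for a cookie
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 *
 * Must be called with the channel lock held. Returns the residue in bytes
 * or a negative error code.
 */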
313static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
314{
315 struct at_dma_chan *atchan = to_at_dma_chan(chan);
316 struct at_desc *desc_first = atc_first_active(atchan);
317 struct at_desc *desc;
318 int ret;
319 u32 ctrla, dscr, trials;
320
321
322
323
324
325
326 desc = atc_get_desc_by_cookie(atchan, cookie);
327 if (desc == NULL)
328 return -EINVAL;
329 else if (desc != desc_first)
330 return desc->total_len;
331
332
333 ret = desc_first->total_len;
334
335 if (desc_first->lli.dscr) {
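		/*
		 * Hardware linked-list transfer: DSCR points to the child
		 * descriptor (LLI) being processed and CTRLA holds the
		 * number of transfers still to be done for that LLI, but
		 * the two registers are not updated atomically by the
		 * controller. Read DSCR, then CTRLA, then DSCR again: if
		 * DSCR did not change in between, CTRLA is consistent with
		 * that LLI and can be used to refine the residue computed
		 * from the per-descriptor lengths. Otherwise retry, up to
		 * ATC_MAX_DSCR_TRIALS times. The rmb() barriers keep the
		 * register reads ordered.
		 */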
386 dscr = channel_readl(atchan, DSCR);
387 rmb();
388 ctrla = channel_readl(atchan, CTRLA);
389 for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
390 u32 new_dscr;
391
392 rmb();
393 new_dscr = channel_readl(atchan, DSCR);
394
395
396
397
398
399
400
401 if (likely(new_dscr == dscr))
402 break;
403
404
405
406
407
408
409
410
411 dscr = new_dscr;
412 rmb();
413 ctrla = channel_readl(atchan, CTRLA);
414 }
415 if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
416 return -ETIMEDOUT;
417
418
419 if (desc_first->lli.dscr == dscr)
420 return atc_calc_bytes_left(ret, ctrla);
421
422 ret -= desc_first->len;
423 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
424 if (desc->lli.dscr == dscr)
425 break;
426
427 ret -= desc->len;
428 }
429
430
431
432
433
434 ret = atc_calc_bytes_left(ret, ctrla);
435 } else {
436
437 ctrla = channel_readl(atchan, CTRLA);
438 ret = atc_calc_bytes_left(ret, ctrla);
439 }
440
441 return ret;
442}
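
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 */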
449static void
450atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
451{
452 struct dma_async_tx_descriptor *txd = &desc->txd;
453 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
454 unsigned long flags;
455
456 dev_vdbg(chan2dev(&atchan->chan_common),
457 "descriptor %u complete\n", txd->cookie);
458
459 spin_lock_irqsave(&atchan->lock, flags);
460
461
462 if (!atc_chan_is_cyclic(atchan))
463 dma_cookie_complete(txd);
464
465
466 if (desc->memset_buffer) {
467 dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
468 desc->memset_paddr);
469 desc->memset_buffer = false;
470 }
471
472
473 list_splice_init(&desc->tx_list, &atchan->free_list);
474
475 list_move(&desc->desc_node, &atchan->free_list);
476
477 spin_unlock_irqrestore(&atchan->lock, flags);
478
479 dma_descriptor_unmap(txd);
480
481
482 if (!atc_chan_is_cyclic(atchan))
483 dmaengine_desc_get_callback_invoke(txd, NULL);
484
485 dma_run_dependencies(txd);
486}
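
/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 */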
497static void atc_complete_all(struct at_dma_chan *atchan)
498{
499 struct at_desc *desc, *_desc;
500 LIST_HEAD(list);
501 unsigned long flags;
502
503 dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
504
505 spin_lock_irqsave(&atchan->lock, flags);
506
507
508
509
510
511 if (!list_empty(&atchan->queue))
512 atc_dostart(atchan, atc_first_queued(atchan));
513
514 list_splice_init(&atchan->active_list, &list);
515
516 list_splice_init(&atchan->queue, &atchan->active_list);
517
518 spin_unlock_irqrestore(&atchan->lock, flags);
519
520 list_for_each_entry_safe(desc, _desc, &list, desc_node)
521 atc_chain_complete(atchan, desc);
522}
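
/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 */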
528static void atc_advance_work(struct at_dma_chan *atchan)
529{
530 unsigned long flags;
531 int ret;
532
533 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
534
535 spin_lock_irqsave(&atchan->lock, flags);
536 ret = atc_chan_is_enabled(atchan);
537 spin_unlock_irqrestore(&atchan->lock, flags);
538 if (ret)
539 return;
540
541 if (list_empty(&atchan->active_list) ||
542 list_is_singular(&atchan->active_list))
543 return atc_complete_all(atchan);
544
545 atc_chain_complete(atchan, atc_first_active(atchan));
546
547
548 spin_lock_irqsave(&atchan->lock, flags);
549 atc_dostart(atchan, atc_first_active(atchan));
550 spin_unlock_irqrestore(&atchan->lock, flags);
551}
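
/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 */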
558static void atc_handle_error(struct at_dma_chan *atchan)
559{
560 struct at_desc *bad_desc;
561 struct at_desc *child;
562 unsigned long flags;
563
564 spin_lock_irqsave(&atchan->lock, flags);
565
566
567
568
569
570 bad_desc = atc_first_active(atchan);
571 list_del_init(&bad_desc->desc_node);
572
573
574
575 list_splice_init(&atchan->queue, atchan->active_list.prev);
576
577
578 if (!list_empty(&atchan->active_list))
579 atc_dostart(atchan, atc_first_active(atchan));
580
581
582
583
584
585
586
587
588 dev_crit(chan2dev(&atchan->chan_common),
589 "Bad descriptor submitted for DMA!\n");
590 dev_crit(chan2dev(&atchan->chan_common),
591 " cookie: %d\n", bad_desc->txd.cookie);
592 atc_dump_lli(atchan, &bad_desc->lli);
593 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
594 atc_dump_lli(atchan, &child->lli);
595
596 spin_unlock_irqrestore(&atchan->lock, flags);
597
598
599 atc_chain_complete(atchan, bad_desc);
600}
601
602
603
604
605
606static void atc_handle_cyclic(struct at_dma_chan *atchan)
607{
608 struct at_desc *first = atc_first_active(atchan);
609 struct dma_async_tx_descriptor *txd = &first->txd;
610
611 dev_vdbg(chan2dev(&atchan->chan_common),
612 "new cyclic period llp 0x%08x\n",
613 channel_readl(atchan, DSCR));
614
615 dmaengine_desc_get_callback_invoke(txd, NULL);
616}
617
618
619
620static void atc_tasklet(struct tasklet_struct *t)
621{
622 struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet);
623
624 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
625 return atc_handle_error(atchan);
626
627 if (atc_chan_is_cyclic(atchan))
628 return atc_handle_cyclic(atchan);
629
630 atc_advance_work(atchan);
631}
632
633static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
634{
635 struct at_dma *atdma = (struct at_dma *)dev_id;
636 struct at_dma_chan *atchan;
637 int i;
638 u32 status, pending, imr;
639 int ret = IRQ_NONE;
640
641 do {
642 imr = dma_readl(atdma, EBCIMR);
643 status = dma_readl(atdma, EBCISR);
644 pending = status & imr;
645
646 if (!pending)
647 break;
648
649 dev_vdbg(atdma->dma_common.dev,
650 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
651 status, imr, pending);
652
653 for (i = 0; i < atdma->dma_common.chancnt; i++) {
654 atchan = &atdma->chan[i];
655 if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
656 if (pending & AT_DMA_ERR(i)) {
657
658 dma_writel(atdma, CHDR,
659 AT_DMA_RES(i) | atchan->mask);
660
661 set_bit(ATC_IS_ERROR, &atchan->status);
662 }
663 tasklet_schedule(&atchan->tasklet);
664 ret = IRQ_HANDLED;
665 }
666 }
667
668 } while (pending);
669
670 return ret;
671}
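
/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working or start it right away otherwise.
 *
 * Cookie increment and adding to active_list or queue must be atomic.
 */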
684static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
685{
686 struct at_desc *desc = txd_to_at_desc(tx);
687 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
688 dma_cookie_t cookie;
689 unsigned long flags;
690
691 spin_lock_irqsave(&atchan->lock, flags);
692 cookie = dma_cookie_assign(tx);
693
694 if (list_empty(&atchan->active_list)) {
695 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
696 desc->txd.cookie);
697 atc_dostart(atchan, desc);
698 list_add_tail(&desc->desc_node, &atchan->active_list);
699 } else {
700 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
701 desc->txd.cookie);
702 list_add_tail(&desc->desc_node, &atchan->queue);
703 }
704
705 spin_unlock_irqrestore(&atchan->lock, flags);
706
707 return cookie;
708}
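
/**
 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 * @chan: the channel to prepare operation on
 * @xt: transfer template
 * @flags: tx descriptor status flags
 */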
716static struct dma_async_tx_descriptor *
717atc_prep_dma_interleaved(struct dma_chan *chan,
718 struct dma_interleaved_template *xt,
719 unsigned long flags)
720{
721 struct at_dma_chan *atchan = to_at_dma_chan(chan);
722 struct data_chunk *first;
723 struct at_desc *desc = NULL;
724 size_t xfer_count;
725 unsigned int dwidth;
726 u32 ctrla;
727 u32 ctrlb;
728 size_t len = 0;
729 int i;
730
731 if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
732 return NULL;
733
734 first = xt->sgl;
735
736 dev_info(chan2dev(chan),
737 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
738 __func__, &xt->src_start, &xt->dst_start, xt->numf,
739 xt->frame_size, flags);
740
741
742
743
744
745
746
747 for (i = 0; i < xt->frame_size; i++) {
748 struct data_chunk *chunk = xt->sgl + i;
749
750 if ((chunk->size != xt->sgl->size) ||
751 (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
752 (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
753 dev_err(chan2dev(chan),
754 "%s: the controller can transfer only identical chunks\n",
755 __func__);
756 return NULL;
757 }
758
759 len += chunk->size;
760 }
761
762 dwidth = atc_get_xfer_width(xt->src_start,
763 xt->dst_start, len);
764
765 xfer_count = len >> dwidth;
766 if (xfer_count > ATC_BTSIZE_MAX) {
767 dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
768 return NULL;
769 }
770
771 ctrla = ATC_SRC_WIDTH(dwidth) |
772 ATC_DST_WIDTH(dwidth);
773
774 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
775 | ATC_SRC_ADDR_MODE_INCR
776 | ATC_DST_ADDR_MODE_INCR
777 | ATC_SRC_PIP
778 | ATC_DST_PIP
779 | ATC_FC_MEM2MEM;
780
781
782 desc = atc_desc_get(atchan);
783 if (!desc) {
784 dev_err(chan2dev(chan),
785 "%s: couldn't allocate our descriptor\n", __func__);
786 return NULL;
787 }
788
789 desc->lli.saddr = xt->src_start;
790 desc->lli.daddr = xt->dst_start;
791 desc->lli.ctrla = ctrla | xfer_count;
792 desc->lli.ctrlb = ctrlb;
793
794 desc->boundary = first->size >> dwidth;
795 desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
796 desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
797
798 desc->txd.cookie = -EBUSY;
799 desc->total_len = desc->len = len;
800
801
802 set_desc_eol(desc);
803
804 desc->txd.flags = flags;
805
806 return &desc->txd;
807}
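
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */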
817static struct dma_async_tx_descriptor *
818atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
819 size_t len, unsigned long flags)
820{
821 struct at_dma_chan *atchan = to_at_dma_chan(chan);
822 struct at_desc *desc = NULL;
823 struct at_desc *first = NULL;
824 struct at_desc *prev = NULL;
825 size_t xfer_count;
826 size_t offset;
827 unsigned int src_width;
828 unsigned int dst_width;
829 u32 ctrla;
830 u32 ctrlb;
831
832 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
833 &dest, &src, len, flags);
834
835 if (unlikely(!len)) {
836 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
837 return NULL;
838 }
839
840 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
841 | ATC_SRC_ADDR_MODE_INCR
842 | ATC_DST_ADDR_MODE_INCR
843 | ATC_FC_MEM2MEM;
844
845
846
847
848
849 src_width = dst_width = atc_get_xfer_width(src, dest, len);
850
851 ctrla = ATC_SRC_WIDTH(src_width) |
852 ATC_DST_WIDTH(dst_width);
853
854 for (offset = 0; offset < len; offset += xfer_count << src_width) {
855 xfer_count = min_t(size_t, (len - offset) >> src_width,
856 ATC_BTSIZE_MAX);
857
858 desc = atc_desc_get(atchan);
859 if (!desc)
860 goto err_desc_get;
861
862 desc->lli.saddr = src + offset;
863 desc->lli.daddr = dest + offset;
864 desc->lli.ctrla = ctrla | xfer_count;
865 desc->lli.ctrlb = ctrlb;
866
867 desc->txd.cookie = 0;
868 desc->len = xfer_count << src_width;
869
870 atc_desc_chain(&first, &prev, desc);
871 }
872
873
874 first->txd.cookie = -EBUSY;
875 first->total_len = len;
876
877
878 set_desc_eol(desc);
879
880 first->txd.flags = flags;
881
882 return &first->txd;
883
884err_desc_get:
885 atc_desc_put(atchan, first);
886 return NULL;
887}
888
889static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
890 dma_addr_t psrc,
891 dma_addr_t pdst,
892 size_t len)
893{
894 struct at_dma_chan *atchan = to_at_dma_chan(chan);
895 struct at_desc *desc;
896 size_t xfer_count;
897
898 u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
899 u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
900 ATC_SRC_ADDR_MODE_FIXED |
901 ATC_DST_ADDR_MODE_INCR |
902 ATC_FC_MEM2MEM;
903
904 xfer_count = len >> 2;
905 if (xfer_count > ATC_BTSIZE_MAX) {
906 dev_err(chan2dev(chan), "%s: buffer is too big\n",
907 __func__);
908 return NULL;
909 }
910
911 desc = atc_desc_get(atchan);
912 if (!desc) {
913 dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
914 __func__);
915 return NULL;
916 }
917
918 desc->lli.saddr = psrc;
919 desc->lli.daddr = pdst;
920 desc->lli.ctrla = ctrla | xfer_count;
921 desc->lli.ctrlb = ctrlb;
922
923 desc->txd.cookie = 0;
924 desc->len = len;
925
926 return desc;
927}
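
/**
 * atc_prep_dma_memset - prepare a memset operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @value: value to set memory buffer to
 * @len: operation length
 * @flags: tx descriptor status flags
 */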
937static struct dma_async_tx_descriptor *
938atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
939 size_t len, unsigned long flags)
940{
941 struct at_dma *atdma = to_at_dma(chan->device);
942 struct at_desc *desc;
943 void __iomem *vaddr;
944 dma_addr_t paddr;
945
946 dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
947 &dest, value, len, flags);
948
949 if (unlikely(!len)) {
950 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
951 return NULL;
952 }
953
954 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
955 dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
956 __func__);
957 return NULL;
958 }
959
960 vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
961 if (!vaddr) {
962 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
963 __func__);
964 return NULL;
965 }
966 *(u32*)vaddr = value;
967
968 desc = atc_create_memset_desc(chan, paddr, dest, len);
969 if (!desc) {
970 dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
971 __func__);
972 goto err_free_buffer;
973 }
974
975 desc->memset_paddr = paddr;
976 desc->memset_vaddr = vaddr;
977 desc->memset_buffer = true;
978
979 desc->txd.cookie = -EBUSY;
980 desc->total_len = len;
981
982
983 set_desc_eol(desc);
984
985 desc->txd.flags = flags;
986
987 return &desc->txd;
988
989err_free_buffer:
990 dma_pool_free(atdma->memset_pool, vaddr, paddr);
991 return NULL;
992}
993
994static struct dma_async_tx_descriptor *
995atc_prep_dma_memset_sg(struct dma_chan *chan,
996 struct scatterlist *sgl,
997 unsigned int sg_len, int value,
998 unsigned long flags)
999{
1000 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1001 struct at_dma *atdma = to_at_dma(chan->device);
1002 struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
1003 struct scatterlist *sg;
1004 void __iomem *vaddr;
1005 dma_addr_t paddr;
1006 size_t total_len = 0;
1007 int i;
1008
1009 dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
1010 value, sg_len, flags);
1011
1012 if (unlikely(!sgl || !sg_len)) {
1013 dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
1014 __func__);
1015 return NULL;
1016 }
1017
1018 vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
1019 if (!vaddr) {
1020 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
1021 __func__);
1022 return NULL;
1023 }
1024 *(u32*)vaddr = value;
1025
1026 for_each_sg(sgl, sg, sg_len, i) {
1027 dma_addr_t dest = sg_dma_address(sg);
1028 size_t len = sg_dma_len(sg);
1029
1030 dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1031 __func__, &dest, len);
1032
1033 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1034 dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
1035 __func__);
1036 goto err_put_desc;
1037 }
1038
1039 desc = atc_create_memset_desc(chan, paddr, dest, len);
1040 if (!desc)
1041 goto err_put_desc;
1042
1043 atc_desc_chain(&first, &prev, desc);
1044
1045 total_len += len;
1046 }
1047
1048
1049
1050
1051
1052 desc->memset_paddr = paddr;
1053 desc->memset_vaddr = vaddr;
1054 desc->memset_buffer = true;
1055
1056 first->txd.cookie = -EBUSY;
1057 first->total_len = total_len;
1058
1059
1060 set_desc_eol(desc);
1061
1062 first->txd.flags = flags;
1063
1064 return &first->txd;
1065
1066err_put_desc:
1067 atc_desc_put(atchan, first);
1068 return NULL;
1069}
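
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */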
1080static struct dma_async_tx_descriptor *
1081atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1082 unsigned int sg_len, enum dma_transfer_direction direction,
1083 unsigned long flags, void *context)
1084{
1085 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1086 struct at_dma_slave *atslave = chan->private;
1087 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1088 struct at_desc *first = NULL;
1089 struct at_desc *prev = NULL;
1090 u32 ctrla;
1091 u32 ctrlb;
1092 dma_addr_t reg;
1093 unsigned int reg_width;
1094 unsigned int mem_width;
1095 unsigned int i;
1096 struct scatterlist *sg;
1097 size_t total_len = 0;
1098
1099 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
1100 sg_len,
1101 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1102 flags);
1103
1104 if (unlikely(!atslave || !sg_len)) {
1105 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
1106 return NULL;
1107 }
1108
1109 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
1110 | ATC_DCSIZE(sconfig->dst_maxburst);
1111 ctrlb = ATC_IEN;
1112
1113 switch (direction) {
1114 case DMA_MEM_TO_DEV:
1115 reg_width = convert_buswidth(sconfig->dst_addr_width);
1116 ctrla |= ATC_DST_WIDTH(reg_width);
1117 ctrlb |= ATC_DST_ADDR_MODE_FIXED
1118 | ATC_SRC_ADDR_MODE_INCR
1119 | ATC_FC_MEM2PER
1120 | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
1121 reg = sconfig->dst_addr;
1122 for_each_sg(sgl, sg, sg_len, i) {
1123 struct at_desc *desc;
1124 u32 len;
1125 u32 mem;
1126
1127 desc = atc_desc_get(atchan);
1128 if (!desc)
1129 goto err_desc_get;
1130
1131 mem = sg_dma_address(sg);
1132 len = sg_dma_len(sg);
1133 if (unlikely(!len)) {
1134 dev_dbg(chan2dev(chan),
1135 "prep_slave_sg: sg(%d) data length is zero\n", i);
1136 goto err;
1137 }
1138 mem_width = 2;
1139 if (unlikely(mem & 3 || len & 3))
1140 mem_width = 0;
1141
1142 desc->lli.saddr = mem;
1143 desc->lli.daddr = reg;
1144 desc->lli.ctrla = ctrla
1145 | ATC_SRC_WIDTH(mem_width)
1146 | len >> mem_width;
1147 desc->lli.ctrlb = ctrlb;
1148 desc->len = len;
1149
1150 atc_desc_chain(&first, &prev, desc);
1151 total_len += len;
1152 }
1153 break;
1154 case DMA_DEV_TO_MEM:
1155 reg_width = convert_buswidth(sconfig->src_addr_width);
1156 ctrla |= ATC_SRC_WIDTH(reg_width);
1157 ctrlb |= ATC_DST_ADDR_MODE_INCR
1158 | ATC_SRC_ADDR_MODE_FIXED
1159 | ATC_FC_PER2MEM
1160 | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
1161
1162 reg = sconfig->src_addr;
1163 for_each_sg(sgl, sg, sg_len, i) {
1164 struct at_desc *desc;
1165 u32 len;
1166 u32 mem;
1167
1168 desc = atc_desc_get(atchan);
1169 if (!desc)
1170 goto err_desc_get;
1171
1172 mem = sg_dma_address(sg);
1173 len = sg_dma_len(sg);
1174 if (unlikely(!len)) {
1175 dev_dbg(chan2dev(chan),
1176 "prep_slave_sg: sg(%d) data length is zero\n", i);
1177 goto err;
1178 }
1179 mem_width = 2;
1180 if (unlikely(mem & 3 || len & 3))
1181 mem_width = 0;
1182
1183 desc->lli.saddr = reg;
1184 desc->lli.daddr = mem;
1185 desc->lli.ctrla = ctrla
1186 | ATC_DST_WIDTH(mem_width)
1187 | len >> reg_width;
1188 desc->lli.ctrlb = ctrlb;
1189 desc->len = len;
1190
1191 atc_desc_chain(&first, &prev, desc);
1192 total_len += len;
1193 }
1194 break;
1195 default:
1196 return NULL;
1197 }
1198
1199
1200 set_desc_eol(prev);
1201
1202
1203 first->txd.cookie = -EBUSY;
1204 first->total_len = total_len;
1205
1206
1207 first->txd.flags = flags;
1208
1209 return &first->txd;
1210
1211err_desc_get:
1212 dev_err(chan2dev(chan), "not enough descriptors available\n");
1213err:
1214 atc_desc_put(atchan, first);
1215 return NULL;
1216}
1217
1218
1219
1220
1221
1222static int
1223atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
1224 size_t period_len)
1225{
1226 if (period_len > (ATC_BTSIZE_MAX << reg_width))
1227 goto err_out;
1228 if (unlikely(period_len & ((1 << reg_width) - 1)))
1229 goto err_out;
1230 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1231 goto err_out;
1232
1233 return 0;
1234
1235err_out:
1236 return -EINVAL;
1237}
1238
1239
1240
1241
1242static int
1243atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
1244 unsigned int period_index, dma_addr_t buf_addr,
1245 unsigned int reg_width, size_t period_len,
1246 enum dma_transfer_direction direction)
1247{
1248 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1249 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1250 u32 ctrla;
1251
1252
1253 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
1254 | ATC_DCSIZE(sconfig->dst_maxburst)
1255 | ATC_DST_WIDTH(reg_width)
1256 | ATC_SRC_WIDTH(reg_width)
1257 | period_len >> reg_width;
1258
1259 switch (direction) {
1260 case DMA_MEM_TO_DEV:
1261 desc->lli.saddr = buf_addr + (period_len * period_index);
1262 desc->lli.daddr = sconfig->dst_addr;
1263 desc->lli.ctrla = ctrla;
1264 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
1265 | ATC_SRC_ADDR_MODE_INCR
1266 | ATC_FC_MEM2PER
1267 | ATC_SIF(atchan->mem_if)
1268 | ATC_DIF(atchan->per_if);
1269 desc->len = period_len;
1270 break;
1271
1272 case DMA_DEV_TO_MEM:
1273 desc->lli.saddr = sconfig->src_addr;
1274 desc->lli.daddr = buf_addr + (period_len * period_index);
1275 desc->lli.ctrla = ctrla;
1276 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
1277 | ATC_SRC_ADDR_MODE_FIXED
1278 | ATC_FC_PER2MEM
1279 | ATC_SIF(atchan->per_if)
1280 | ATC_DIF(atchan->mem_if);
1281 desc->len = period_len;
1282 break;
1283
1284 default:
1285 return -EINVAL;
1286 }
1287
1288 return 0;
1289}
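
/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */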
1300static struct dma_async_tx_descriptor *
1301atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1302 size_t period_len, enum dma_transfer_direction direction,
1303 unsigned long flags)
1304{
1305 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1306 struct at_dma_slave *atslave = chan->private;
1307 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1308 struct at_desc *first = NULL;
1309 struct at_desc *prev = NULL;
1310 unsigned long was_cyclic;
1311 unsigned int reg_width;
1312 unsigned int periods = buf_len / period_len;
1313 unsigned int i;
1314
1315 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
1316 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1317 &buf_addr,
1318 periods, buf_len, period_len);
1319
1320 if (unlikely(!atslave || !buf_len || !period_len)) {
1321 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
1322 return NULL;
1323 }
1324
1325 was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
1326 if (was_cyclic) {
1327 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
1328 return NULL;
1329 }
1330
1331 if (unlikely(!is_slave_direction(direction)))
1332 goto err_out;
1333
1334 if (direction == DMA_MEM_TO_DEV)
1335 reg_width = convert_buswidth(sconfig->dst_addr_width);
1336 else
1337 reg_width = convert_buswidth(sconfig->src_addr_width);
1338
1339
1340 if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
1341 goto err_out;
1342
1343
1344 for (i = 0; i < periods; i++) {
1345 struct at_desc *desc;
1346
1347 desc = atc_desc_get(atchan);
1348 if (!desc)
1349 goto err_desc_get;
1350
1351 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
1352 reg_width, period_len, direction))
1353 goto err_desc_get;
1354
1355 atc_desc_chain(&first, &prev, desc);
1356 }
1357
1358
1359 prev->lli.dscr = first->txd.phys;
1360
1361
1362 first->txd.cookie = -EBUSY;
1363 first->total_len = buf_len;
1364
1365 return &first->txd;
1366
1367err_desc_get:
1368 dev_err(chan2dev(chan), "not enough descriptors available\n");
1369 atc_desc_put(atchan, first);
1370err_out:
1371 clear_bit(ATC_IS_CYCLIC, &atchan->status);
1372 return NULL;
1373}
1374
1375static int atc_config(struct dma_chan *chan,
1376 struct dma_slave_config *sconfig)
1377{
1378 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1379
1380 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1381
1382
1383 if (!chan->private)
1384 return -EINVAL;
1385
1386 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1387
1388 convert_burst(&atchan->dma_sconfig.src_maxburst);
1389 convert_burst(&atchan->dma_sconfig.dst_maxburst);
1390
1391 return 0;
1392}
1393
1394static int atc_pause(struct dma_chan *chan)
1395{
1396 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1397 struct at_dma *atdma = to_at_dma(chan->device);
1398 int chan_id = atchan->chan_common.chan_id;
1399 unsigned long flags;
1400
1401 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1402
1403 spin_lock_irqsave(&atchan->lock, flags);
1404
1405 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1406 set_bit(ATC_IS_PAUSED, &atchan->status);
1407
1408 spin_unlock_irqrestore(&atchan->lock, flags);
1409
1410 return 0;
1411}
1412
1413static int atc_resume(struct dma_chan *chan)
1414{
1415 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1416 struct at_dma *atdma = to_at_dma(chan->device);
1417 int chan_id = atchan->chan_common.chan_id;
1418 unsigned long flags;
1419
1420 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1421
1422 if (!atc_chan_is_paused(atchan))
1423 return 0;
1424
1425 spin_lock_irqsave(&atchan->lock, flags);
1426
1427 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1428 clear_bit(ATC_IS_PAUSED, &atchan->status);
1429
1430 spin_unlock_irqrestore(&atchan->lock, flags);
1431
1432 return 0;
1433}
1434
1435static int atc_terminate_all(struct dma_chan *chan)
1436{
1437 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1438 struct at_dma *atdma = to_at_dma(chan->device);
1439 int chan_id = atchan->chan_common.chan_id;
1440 struct at_desc *desc, *_desc;
1441 unsigned long flags;
1442
1443 LIST_HEAD(list);
1444
1445 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1446
1447
1448
1449
1450
1451
1452
1453 spin_lock_irqsave(&atchan->lock, flags);
1454
1455
1456 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1457
1458
1459 while (dma_readl(atdma, CHSR) & atchan->mask)
1460 cpu_relax();
1461
1462
1463 list_splice_init(&atchan->queue, &list);
1464 list_splice_init(&atchan->active_list, &list);
1465
1466 spin_unlock_irqrestore(&atchan->lock, flags);
1467
1468
1469 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1470 atc_chain_complete(atchan, desc);
1471
1472 clear_bit(ATC_IS_PAUSED, &atchan->status);
1473
1474 clear_bit(ATC_IS_CYCLIC, &atchan->status);
1475
1476 return 0;
1477}
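
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver internal
 * state and can be used with dma_async_is_complete() to check the status
 * of multiple cookies without re-checking hardware state.
 */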
1489static enum dma_status
1490atc_tx_status(struct dma_chan *chan,
1491 dma_cookie_t cookie,
1492 struct dma_tx_state *txstate)
1493{
1494 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1495 unsigned long flags;
1496 enum dma_status ret;
1497 int bytes = 0;
1498
1499 ret = dma_cookie_status(chan, cookie, txstate);
1500 if (ret == DMA_COMPLETE)
1501 return ret;
1502
1503
1504
1505
1506 if (!txstate)
1507 return DMA_ERROR;
1508
1509 spin_lock_irqsave(&atchan->lock, flags);
1510
1511
1512 bytes = atc_get_bytes_left(chan, cookie);
1513
1514 spin_unlock_irqrestore(&atchan->lock, flags);
1515
1516 if (unlikely(bytes < 0)) {
1517 dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1518 return DMA_ERROR;
1519 } else {
1520 dma_set_residue(txstate, bytes);
1521 }
1522
1523 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1524 ret, cookie, bytes);
1525
1526 return ret;
1527}
1528
1529
1530
1531
1532
1533static void atc_issue_pending(struct dma_chan *chan)
1534{
1535 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1536
1537 dev_vdbg(chan2dev(chan), "issue_pending\n");
1538
1539
1540 if (atc_chan_is_cyclic(atchan))
1541 return;
1542
1543 atc_advance_work(atchan);
1544}
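
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * Returns the number of allocated descriptors.
 */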
1552static int atc_alloc_chan_resources(struct dma_chan *chan)
1553{
1554 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1555 struct at_dma *atdma = to_at_dma(chan->device);
1556 struct at_desc *desc;
1557 struct at_dma_slave *atslave;
1558 int i;
1559 u32 cfg;
1560
1561 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1562
1563
1564 if (atc_chan_is_enabled(atchan)) {
1565 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
1566 return -EIO;
1567 }
1568
1569 if (!list_empty(&atchan->free_list)) {
1570 dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
1571 return -EIO;
1572 }
1573
1574 cfg = ATC_DEFAULT_CFG;
1575
1576 atslave = chan->private;
1577 if (atslave) {
1578
1579
1580
1581
1582 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1583
1584
1585 if (atslave->cfg)
1586 cfg = atslave->cfg;
1587 }
1588
1589
1590 for (i = 0; i < init_nr_desc_per_channel; i++) {
1591 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1592 if (!desc) {
1593 dev_err(atdma->dma_common.dev,
1594 "Only %d initial descriptors\n", i);
1595 break;
1596 }
1597 list_add_tail(&desc->desc_node, &atchan->free_list);
1598 }
1599
1600 dma_cookie_init(chan);
1601
1602
1603 channel_writel(atchan, CFG, cfg);
1604
1605 dev_dbg(chan2dev(chan),
1606 "alloc_chan_resources: allocated %d descriptors\n", i);
1607
1608 return i;
1609}
1610
1611
1612
1613
1614
1615static void atc_free_chan_resources(struct dma_chan *chan)
1616{
1617 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1618 struct at_dma *atdma = to_at_dma(chan->device);
1619 struct at_desc *desc, *_desc;
1620 LIST_HEAD(list);
1621
1622
1623 BUG_ON(!list_empty(&atchan->active_list));
1624 BUG_ON(!list_empty(&atchan->queue));
1625 BUG_ON(atc_chan_is_enabled(atchan));
1626
1627 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1628 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1629 list_del(&desc->desc_node);
1630
1631 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1632 }
1633 list_splice_init(&atchan->free_list, &list);
1634 atchan->status = 0;
1635
1636
1637
1638
1639 kfree(chan->private);
1640 chan->private = NULL;
1641
1642 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1643}
1644
1645#ifdef CONFIG_OF
1646static bool at_dma_filter(struct dma_chan *chan, void *slave)
1647{
1648 struct at_dma_slave *atslave = slave;
1649
1650 if (atslave->dma_dev == chan->device->dev) {
1651 chan->private = atslave;
1652 return true;
1653 } else {
1654 return false;
1655 }
1656}
1657
1658static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1659 struct of_dma *of_dma)
1660{
1661 struct dma_chan *chan;
1662 struct at_dma_chan *atchan;
1663 struct at_dma_slave *atslave;
1664 dma_cap_mask_t mask;
1665 unsigned int per_id;
1666 struct platform_device *dmac_pdev;
1667
1668 if (dma_spec->args_count != 2)
1669 return NULL;
1670
1671 dmac_pdev = of_find_device_by_node(dma_spec->np);
1672 if (!dmac_pdev)
1673 return NULL;
1674
1675 dma_cap_zero(mask);
1676 dma_cap_set(DMA_SLAVE, mask);
1677
1678 atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
1679 if (!atslave) {
1680 put_device(&dmac_pdev->dev);
1681 return NULL;
1682 }
1683
1684 atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1685
1686
1687
1688
1689 per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1690 atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1691 | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1692
1693
1694
1695
1696
1697 switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1698 case AT91_DMA_CFG_FIFOCFG_ALAP:
1699 atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1700 break;
1701 case AT91_DMA_CFG_FIFOCFG_ASAP:
1702 atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1703 break;
1704 case AT91_DMA_CFG_FIFOCFG_HALF:
1705 default:
1706 atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1707 }
1708 atslave->dma_dev = &dmac_pdev->dev;
1709
1710 chan = dma_request_channel(mask, at_dma_filter, atslave);
1711 if (!chan) {
1712 put_device(&dmac_pdev->dev);
1713 kfree(atslave);
1714 return NULL;
1715 }
1716
1717 atchan = to_at_dma_chan(chan);
1718 atchan->per_if = dma_spec->args[0] & 0xff;
1719 atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1720
1721 return chan;
1722}
1723#else
1724static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1725 struct of_dma *of_dma)
1726{
1727 return NULL;
1728}
1729#endif
1730
1731
1732
1733
1734static struct at_dma_platform_data at91sam9rl_config = {
1735 .nr_channels = 2,
1736};
1737static struct at_dma_platform_data at91sam9g45_config = {
1738 .nr_channels = 8,
1739};
1740
1741#if defined(CONFIG_OF)
1742static const struct of_device_id atmel_dma_dt_ids[] = {
1743 {
1744 .compatible = "atmel,at91sam9rl-dma",
1745 .data = &at91sam9rl_config,
1746 }, {
1747 .compatible = "atmel,at91sam9g45-dma",
1748 .data = &at91sam9g45_config,
1749 }, {
1750
1751 }
1752};
1753
1754MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1755#endif
1756
1757static const struct platform_device_id atdma_devtypes[] = {
1758 {
1759 .name = "at91sam9rl_dma",
1760 .driver_data = (unsigned long) &at91sam9rl_config,
1761 }, {
1762 .name = "at91sam9g45_dma",
1763 .driver_data = (unsigned long) &at91sam9g45_config,
1764 }, {
1765
1766 }
1767};
1768
1769static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1770 struct platform_device *pdev)
1771{
1772 if (pdev->dev.of_node) {
1773 const struct of_device_id *match;
1774 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1775 if (match == NULL)
1776 return NULL;
1777 return match->data;
1778 }
1779 return (struct at_dma_platform_data *)
1780 platform_get_device_id(pdev)->driver_data;
1781}
1782
1783
1784
1785
1786
1787static void at_dma_off(struct at_dma *atdma)
1788{
1789 dma_writel(atdma, EN, 0);
1790
1791
1792 dma_writel(atdma, EBCIDR, -1L);
1793
1794
1795 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1796 cpu_relax();
1797}
1798
1799static int __init at_dma_probe(struct platform_device *pdev)
1800{
1801 struct resource *io;
1802 struct at_dma *atdma;
1803 size_t size;
1804 int irq;
1805 int err;
1806 int i;
1807 const struct at_dma_platform_data *plat_dat;
1808
1809
1810 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1811 dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
1812 dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1813 dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
1814 dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
1815 dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
1816 dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1817
1818
1819 plat_dat = at_dma_get_driver_data(pdev);
1820 if (!plat_dat)
1821 return -ENODEV;
1822
1823 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1824 if (!io)
1825 return -EINVAL;
1826
1827 irq = platform_get_irq(pdev, 0);
1828 if (irq < 0)
1829 return irq;
1830
1831 size = sizeof(struct at_dma);
1832 size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1833 atdma = kzalloc(size, GFP_KERNEL);
1834 if (!atdma)
1835 return -ENOMEM;
1836
1837
1838 atdma->dma_common.cap_mask = plat_dat->cap_mask;
1839 atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1840
1841 size = resource_size(io);
1842 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1843 err = -EBUSY;
1844 goto err_kfree;
1845 }
1846
1847 atdma->regs = ioremap(io->start, size);
1848 if (!atdma->regs) {
1849 err = -ENOMEM;
1850 goto err_release_r;
1851 }
1852
1853 atdma->clk = clk_get(&pdev->dev, "dma_clk");
1854 if (IS_ERR(atdma->clk)) {
1855 err = PTR_ERR(atdma->clk);
1856 goto err_clk;
1857 }
1858 err = clk_prepare_enable(atdma->clk);
1859 if (err)
1860 goto err_clk_prepare;
1861
1862
1863 at_dma_off(atdma);
1864
1865 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1866 if (err)
1867 goto err_irq;
1868
1869 platform_set_drvdata(pdev, atdma);
1870
1871
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
1875 if (!atdma->dma_desc_pool) {
1876 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1877 err = -ENOMEM;
1878 goto err_desc_pool_create;
1879 }
1880
1881
1882 atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
1883 &pdev->dev, sizeof(int), 4, 0);
1884 if (!atdma->memset_pool) {
1885 dev_err(&pdev->dev, "No memory for memset dma pool\n");
1886 err = -ENOMEM;
1887 goto err_memset_pool_create;
1888 }
1889
1890
1891 while (dma_readl(atdma, EBCISR))
1892 cpu_relax();
1893
1894
1895 INIT_LIST_HEAD(&atdma->dma_common.channels);
1896 for (i = 0; i < plat_dat->nr_channels; i++) {
1897 struct at_dma_chan *atchan = &atdma->chan[i];
1898
1899 atchan->mem_if = AT_DMA_MEM_IF;
1900 atchan->per_if = AT_DMA_PER_IF;
1901 atchan->chan_common.device = &atdma->dma_common;
1902 dma_cookie_init(&atchan->chan_common);
1903 list_add_tail(&atchan->chan_common.device_node,
1904 &atdma->dma_common.channels);
1905
1906 atchan->ch_regs = atdma->regs + ch_regs(i);
1907 spin_lock_init(&atchan->lock);
1908 atchan->mask = 1 << i;
1909
1910 INIT_LIST_HEAD(&atchan->active_list);
1911 INIT_LIST_HEAD(&atchan->queue);
1912 INIT_LIST_HEAD(&atchan->free_list);
1913
1914 tasklet_setup(&atchan->tasklet, atc_tasklet);
1915 atc_enable_chan_irq(atdma, i);
1916 }
1917
1918
1919 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1920 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1921 atdma->dma_common.device_tx_status = atc_tx_status;
1922 atdma->dma_common.device_issue_pending = atc_issue_pending;
1923 atdma->dma_common.dev = &pdev->dev;
1924
1925
1926 if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
1927 atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
1928
1929 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1930 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1931
1932 if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
1933 atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
1934 atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
1935 atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
1936 }
1937
1938 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1939 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1940
1941 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1942 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1943 atdma->dma_common.device_config = atc_config;
1944 atdma->dma_common.device_pause = atc_pause;
1945 atdma->dma_common.device_resume = atc_resume;
1946 atdma->dma_common.device_terminate_all = atc_terminate_all;
1947 atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1948 atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1949 atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1950 atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1951 }
1952
1953 dma_writel(atdma, EN, AT_DMA_ENABLE);
1954
1955 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
1956 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1957 dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
1958 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1959 plat_dat->nr_channels);
1960
1961 dma_async_device_register(&atdma->dma_common);
1962
1963
1964
1965
1966
1967
1968 if (pdev->dev.of_node) {
1969 err = of_dma_controller_register(pdev->dev.of_node,
1970 at_dma_xlate, atdma);
1971 if (err) {
1972 dev_err(&pdev->dev, "could not register of_dma_controller\n");
1973 goto err_of_dma_controller_register;
1974 }
1975 }
1976
1977 return 0;
1978
1979err_of_dma_controller_register:
1980 dma_async_device_unregister(&atdma->dma_common);
1981 dma_pool_destroy(atdma->memset_pool);
1982err_memset_pool_create:
1983 dma_pool_destroy(atdma->dma_desc_pool);
1984err_desc_pool_create:
1985 free_irq(platform_get_irq(pdev, 0), atdma);
1986err_irq:
1987 clk_disable_unprepare(atdma->clk);
1988err_clk_prepare:
1989 clk_put(atdma->clk);
1990err_clk:
1991 iounmap(atdma->regs);
1992 atdma->regs = NULL;
1993err_release_r:
1994 release_mem_region(io->start, size);
1995err_kfree:
1996 kfree(atdma);
1997 return err;
1998}
1999
2000static int at_dma_remove(struct platform_device *pdev)
2001{
2002 struct at_dma *atdma = platform_get_drvdata(pdev);
2003 struct dma_chan *chan, *_chan;
2004 struct resource *io;
2005
2006 at_dma_off(atdma);
2007 if (pdev->dev.of_node)
2008 of_dma_controller_free(pdev->dev.of_node);
2009 dma_async_device_unregister(&atdma->dma_common);
2010
2011 dma_pool_destroy(atdma->memset_pool);
2012 dma_pool_destroy(atdma->dma_desc_pool);
2013 free_irq(platform_get_irq(pdev, 0), atdma);
2014
2015 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2016 device_node) {
2017 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2018
2019
2020 atc_disable_chan_irq(atdma, chan->chan_id);
2021
2022 tasklet_kill(&atchan->tasklet);
2023 list_del(&chan->device_node);
2024 }
2025
2026 clk_disable_unprepare(atdma->clk);
2027 clk_put(atdma->clk);
2028
2029 iounmap(atdma->regs);
2030 atdma->regs = NULL;
2031
2032 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2033 release_mem_region(io->start, resource_size(io));
2034
2035 kfree(atdma);
2036
2037 return 0;
2038}
2039
2040static void at_dma_shutdown(struct platform_device *pdev)
2041{
2042 struct at_dma *atdma = platform_get_drvdata(pdev);
2043
2044 at_dma_off(platform_get_drvdata(pdev));
2045 clk_disable_unprepare(atdma->clk);
2046}
2047
2048static int at_dma_prepare(struct device *dev)
2049{
2050 struct at_dma *atdma = dev_get_drvdata(dev);
2051 struct dma_chan *chan, *_chan;
2052
2053 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2054 device_node) {
2055 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2056
2057 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
2058 return -EAGAIN;
2059 }
2060 return 0;
2061}
2062
2063static void atc_suspend_cyclic(struct at_dma_chan *atchan)
2064{
2065 struct dma_chan *chan = &atchan->chan_common;
2066
2067
2068
2069 if (!atc_chan_is_paused(atchan)) {
2070 dev_warn(chan2dev(chan),
2071 "cyclic channel not paused, should be done by channel user\n");
2072 atc_pause(chan);
2073 }
2074
2075
2076
2077 atchan->save_dscr = channel_readl(atchan, DSCR);
2078
2079 vdbg_dump_regs(atchan);
2080}
2081
2082static int at_dma_suspend_noirq(struct device *dev)
2083{
2084 struct at_dma *atdma = dev_get_drvdata(dev);
2085 struct dma_chan *chan, *_chan;
2086
2087
2088 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2089 device_node) {
2090 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2091
2092 if (atc_chan_is_cyclic(atchan))
2093 atc_suspend_cyclic(atchan);
2094 atchan->save_cfg = channel_readl(atchan, CFG);
2095 }
2096 atdma->save_imr = dma_readl(atdma, EBCIMR);
2097
2098
2099 at_dma_off(atdma);
2100 clk_disable_unprepare(atdma->clk);
2101 return 0;
2102}
2103
2104static void atc_resume_cyclic(struct at_dma_chan *atchan)
2105{
2106 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
2107
2108
2109
2110 channel_writel(atchan, SADDR, 0);
2111 channel_writel(atchan, DADDR, 0);
2112 channel_writel(atchan, CTRLA, 0);
2113 channel_writel(atchan, CTRLB, 0);
2114 channel_writel(atchan, DSCR, atchan->save_dscr);
2115 dma_writel(atdma, CHER, atchan->mask);
2116
2117
2118
2119
2120 vdbg_dump_regs(atchan);
2121}
2122
2123static int at_dma_resume_noirq(struct device *dev)
2124{
2125 struct at_dma *atdma = dev_get_drvdata(dev);
2126 struct dma_chan *chan, *_chan;
2127
2128
2129 clk_prepare_enable(atdma->clk);
2130 dma_writel(atdma, EN, AT_DMA_ENABLE);
2131
2132
2133 while (dma_readl(atdma, EBCISR))
2134 cpu_relax();
2135
2136
2137 dma_writel(atdma, EBCIER, atdma->save_imr);
2138 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2139 device_node) {
2140 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2141
2142 channel_writel(atchan, CFG, atchan->save_cfg);
2143 if (atc_chan_is_cyclic(atchan))
2144 atc_resume_cyclic(atchan);
2145 }
2146 return 0;
2147}
2148
2149static const struct dev_pm_ops at_dma_dev_pm_ops = {
2150 .prepare = at_dma_prepare,
2151 .suspend_noirq = at_dma_suspend_noirq,
2152 .resume_noirq = at_dma_resume_noirq,
2153};
2154
2155static struct platform_driver at_dma_driver = {
2156 .remove = at_dma_remove,
2157 .shutdown = at_dma_shutdown,
2158 .id_table = atdma_devtypes,
2159 .driver = {
2160 .name = "at_hdmac",
2161 .pm = &at_dma_dev_pm_ops,
2162 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
2163 },
2164};
2165
2166static int __init at_dma_init(void)
2167{
2168 return platform_driver_probe(&at_dma_driver, at_dma_probe);
2169}
2170subsys_initcall(at_dma_init);
2171
2172static void __exit at_dma_exit(void)
2173{
2174 platform_driver_unregister(&at_dma_driver);
2175}
2176module_exit(at_dma_exit);
2177
2178MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
2179MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
2180MODULE_LICENSE("GPL");
2181MODULE_ALIAS("platform:at_hdmac");
2182