// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The driver has been tested with the Atmel AT91SAM9RL On-Chip AHB DMA
 * Controller and may be used with other Atmel SoCs with compatible
 * controllers.
 */

#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma	: ATmel DMA controller entity related
 * atc_ / atchan	: ATmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))
#define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define ATC_MAX_DSCR_TRIALS	10

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);


/*----------------------------------------------------------------------*/

/*
 * Pick the widest transfer width (as a power-of-two exponent) that source
 * address, destination address and length are all aligned to:
 * 2 => 32-bit, 1 => 16-bit, 0 => 8-bit transfers.
 */
static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
					      size_t len)
{
	unsigned int width;

	if (!((src | dst | len) & 3))
		width = 2;
	else if (!((src | dst | len) & 1))
		width = 1;
	else
		width = 0;

	return width;
}
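
/*
 * Worked example (illustrative values): src = 0x1004, dst = 0x2008 and
 * len = 64 give (src | dst | len) & 3 == 0, hence width 2, i.e. 32-bit
 * transfers. Moving src to 0x1002 sets bit 1 in the OR, so the 4-byte
 * test fails but the 2-byte test passes: width 1 (16-bit transfers).
 */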

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 *       to make initial allocation more convenient. This bit will be cleared
 *       and control will be given to client at usage time (during
 *       preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
			"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		 "scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
				"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
			      &(*first)->tx_list);
	}
	*prev = desc;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
		       ATC_SPIP_BOUNDARY(first->boundary));
	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
		       ATC_DPIP_BOUNDARY(first->boundary));
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/*
 * atc_get_desc_by_cookie - get the descriptor of a cookie
 * @atchan: the DMA channel
 * @cookie: the cookie to get the descriptor for
 */
static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
					      dma_cookie_t cookie)
{
	struct at_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	return NULL;
}

/**
 * atc_calc_bytes_left - calculates the number of bytes left according to the
 * value read from CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 */
static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
{
	u32 btsize = (ctrla & ATC_BTSIZE_MAX);
	u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);

	/*
	 * According to the datasheet, when reading the Control A Register
	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
	 * number of transfers completed on the Source Interface.
	 * So btsize is always a number of source width transfers.
	 */
	return current_len - (btsize << src_width);
}
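
/*
 * Worked example (illustrative values): if current_len = 4096 and CTRLA
 * reads back btsize = 256 with a source width field decoding to 2
 * (32-bit transfers), then 256 << 2 = 1024 bytes have already been read
 * from the source, leaving 4096 - 1024 = 3072 bytes.
 */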

/**
 * atc_get_bytes_left - get the number of bytes residue for a cookie
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 */
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc_first = atc_first_active(atchan);
	struct at_desc		*desc;
	int			ret;
	u32			ctrla, dscr, trials;

	/*
	 * If the cookie doesn't match to the currently running transfer then
	 * we can return the total length of the associated DMA transfer,
	 * because it is still queued.
	 */
	desc = atc_get_desc_by_cookie(atchan, cookie);
	if (desc == NULL)
		return -EINVAL;
	else if (desc != desc_first)
		return desc->total_len;

	/* cookie matches to the currently running transfer */
	ret = desc_first->total_len;

	if (desc_first->lli.dscr) {
		/* hardware linked list transfer */

		/*
		 * Calculate the residue by removing the length of the child
		 * descriptors already transferred from the total length.
		 * To get the current child descriptor, hardware linked lists
		 * are used.
		 *
		 * The CTRLA register provides us with the amount of data
		 * already read from the source for the current child
		 * descriptor. So we can compute a more accurate residue by
		 * also removing the number of bytes corresponding to this
		 * amount of data.
		 *
		 * However, the DSCR and CTRLA registers cannot be read both
		 * atomically. Hence a race condition may occur: the first
		 * read register may refer to one child descriptor whereas
		 * the second read may refer to a later child descriptor in
		 * the list because of the DMA transfer progression inbetween
		 * the two reads.
		 *
		 * One solution could have been to pause the DMA transfer,
		 * read the DSCR and CTRLA then resume the DMA transfer.
		 * Nonetheless, this approach presents some drawbacks:
		 * - If the DMA transfer is paused, RX overruns or TX underruns
		 *   are more likely to occur depending on the system latency.
		 *   Taking the USART driver as an example, it uses a cyclic
		 *   DMA transfer to read data from the Receive Holding
		 *   Register (RHR) to avoid RX overruns since the RHR is not
		 *   protected by any FIFO on most Atmel SoCs. So pausing the
		 *   DMA transfer to compute the residue could lead to a RX
		 *   overrun.
		 * - The atc_pause() function masks interrupts but we'd rather
		 *   avoid to pause the DMA transfer to read both DSCR and
		 *   CTRLA.
		 *
		 * Then we'd rather use another solution: the DSCR is read a
		 * first time, the CTRLA is read in turn, next the DSCR is
		 * read a second time. If the two consecutive read values of
		 * the DSCR are the same then we assume both refer to the
		 * very same child descriptor as well as the CTRLA value read
		 * inbetween does. For cyclic transfers, the assumption is
		 * that a full loop is "not so fast". If the two DSCR values
		 * are different, we read again the CTRLA then the DSCR till
		 * two consecutive read values from DSCR are equal or till
		 * the maximum number of trials is reached.
		 */

		dscr = channel_readl(atchan, DSCR);
		rmb(); /* ensure DSCR is read before CTRLA */
		ctrla = channel_readl(atchan, CTRLA);
		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
			u32 new_dscr;

			rmb(); /* ensure DSCR is read after CTRLA */
			new_dscr = channel_readl(atchan, DSCR);

			/*
			 * If the DSCR register value has not changed inside
			 * the DMA controller since the previous read, we
			 * assume that both the dscr and ctrla values refer
			 * to the very same descriptor.
			 */
			if (likely(new_dscr == dscr))
				break;

			/*
			 * DSCR has changed inside the DMA controller, so the
			 * previously read value of CTRLA may refer to an
			 * already processed descriptor hence could be
			 * outdated. We need to update ctrla to match the
			 * current descriptor.
			 */
			dscr = new_dscr;
			rmb(); /* ensure DSCR is read before CTRLA */
			ctrla = channel_readl(atchan, CTRLA);
		}
		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
			return -ETIMEDOUT;

		/* for the first descriptor we can be more accurate */
		if (desc_first->lli.dscr == dscr)
			return atc_calc_bytes_left(ret, ctrla);

		ret -= desc_first->len;
		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
			if (desc->lli.dscr == dscr)
				break;

			ret -= desc->len;
		}

		/*
		 * For the current descriptor in the transfer chain, we can
		 * compute a more accurate residue thanks to the CTRLA value.
		 */
		ret = atc_calc_bytes_left(ret, ctrla);
	} else {
		/* single transfer */
		ctrla = channel_readl(atchan, CTRLA);
		ret = atc_calc_bytes_left(ret, ctrla);
	}

	return ret;
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want do complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct at_dma			*atdma = to_at_dma(atchan->chan_common.device);

	dev_vdbg(chan2dev(&atchan->chan_common),
		 "descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* If the transfer was a memset, free our temporary buffer */
	if (desc->memset_buffer) {
		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
			      desc->memset_paddr);
		desc->memset_buffer = false;
	}

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	dma_descriptor_unmap(txd);
	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		dmaengine_desc_get_callback_invoke(txd, NULL);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}

/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
		 "Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
		 "  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		 "new cyclic period llp 0x%08x\n",
		 channel_readl(atchan, DSCR));

	dmaengine_desc_get_callback_invoke(txd, NULL);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transmission chain
 *
 * Queue chain if DMA engine is working or not
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
			 desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
			 desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 * @chan: the channel to prepare operation on
 * @xt: Interleaved transfer template
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_interleaved(struct dma_chan *chan,
			 struct dma_interleaved_template *xt,
			 unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct data_chunk	*first;
	struct at_desc		*desc = NULL;
	size_t			xfer_count;
	unsigned int		dwidth;
	u32			ctrla;
	u32			ctrlb;
	size_t			len = 0;
	int			i;

	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
		return NULL;

	first = xt->sgl;

	dev_info(chan2dev(chan),
		 "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
		 __func__, &xt->src_start, &xt->dst_start, xt->numf,
		 xt->frame_size, flags);

	/*
	 * The controller can only "skip" X bytes every Y bytes, so we
	 * need to make sure we are given a template that fit that
	 * description, ie a template with chunks that always have the
	 * same size, with the same ICGs.
	 */
	for (i = 0; i < xt->frame_size; i++) {
		struct data_chunk *chunk = xt->sgl + i;

		if ((chunk->size != xt->sgl->size) ||
		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
			dev_err(chan2dev(chan),
				"%s: the controller can transfer only identical chunks\n",
				__func__);
			return NULL;
		}

		len += chunk->size;
	}

	dwidth = atc_get_xfer_width(xt->src_start,
				    xt->dst_start, len);

	xfer_count = len >> dwidth;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
		return NULL;
	}

	ctrla = ATC_SRC_WIDTH(dwidth) |
		ATC_DST_WIDTH(dwidth);

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_SRC_PIP
		| ATC_DST_PIP
		| ATC_FC_MEM2MEM;

	/* Grab the descriptor */
	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan),
			"%s: couldn't allocate our descriptor\n", __func__);
		return NULL;
	}

	desc->lli.saddr = xt->src_start;
	desc->lli.daddr = xt->dst_start;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->boundary = first->size >> dwidth;
	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;

	desc->txd.cookie = -EBUSY;
	desc->total_len = desc->len = len;

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(desc);

	desc->txd.flags = flags; /* client is in control of this ack */

	return &desc->txd;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
		 &dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	src_width = dst_width = atc_get_xfer_width(src, dest, len);

	ctrla = ATC_SRC_WIDTH(src_width) |
		ATC_DST_WIDTH(dst_width);

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				   ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = xfer_count << src_width;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embedds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = len;

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
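
/*
 * A minimal sketch (not part of this driver) of how a client would drive
 * the memcpy path above through the generic dmaengine API; names and error
 * handling are illustrative only:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len, 0);
 *	cookie = dmaengine_submit(txd);		// lands in atc_tx_submit()
 *	dma_async_issue_pending(chan);		// kicks atc_issue_pending()
 *	// ... then poll dma_async_is_tx_complete() or wait for a callback
 */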

static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
					      dma_addr_t psrc,
					      dma_addr_t pdst,
					      size_t len)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc;
	size_t xfer_count;

	u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
		    ATC_SRC_ADDR_MODE_FIXED |
		    ATC_DST_ADDR_MODE_INCR |
		    ATC_FC_MEM2MEM;

	xfer_count = len >> 2;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n",
			__func__);
		return NULL;
	}

	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
			__func__);
		return NULL;
	}

	desc->lli.saddr = psrc;
	desc->lli.daddr = pdst;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->txd.cookie = 0;
	desc->len = len;

	return desc;
}

/**
 * atc_prep_dma_memset - prepare a memset operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @value: value to set memory buffer to
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		    size_t len, unsigned long flags)
{
	struct at_dma	*atdma = to_at_dma(chan->device);
	struct at_desc	*desc;
	void __iomem	*vaddr;
	dma_addr_t	paddr;

	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
		 &dest, value, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	desc = atc_create_memset_desc(chan, paddr, dest, len);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
			__func__);
		goto err_free_buffer;
	}

	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	desc->txd.cookie = -EBUSY;
	desc->total_len = len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	desc->txd.flags = flags;

	return &desc->txd;

err_free_buffer:
	dma_pool_free(atdma->memset_pool, vaddr, paddr);
	return NULL;
}

static struct dma_async_tx_descriptor *
atc_prep_dma_memset_sg(struct dma_chan *chan,
		       struct scatterlist *sgl,
		       unsigned int sg_len, int value,
		       unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
	struct scatterlist *sg;
	void __iomem *vaddr;
	dma_addr_t paddr;
	size_t total_len = 0;
	int i;

	dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%x f0x%lx\n", __func__,
		 value, sg_len, flags);

	if (unlikely(!sgl || !sg_len)) {
		dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t dest = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
			 __func__, &dest, len);

		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
				__func__);
			goto err_put_desc;
		}

		desc = atc_create_memset_desc(chan, paddr, dest, len);
		if (!desc)
			goto err_put_desc;

		atc_desc_chain(&first, &prev, desc);

		total_len += len;
	}

	/*
	 * Only set the buffer pointers on the last descriptor to
	 * avoid free'ing while we have our transfer still going
	 */
	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	first->txd.flags = flags;

	return &first->txd;

err_put_desc:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
		 sg_len,
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(prev);

	/* First descriptor of the chain embedds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}

/*
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
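
/*
 * Worked example (illustrative values): with reg_width = 2 (32-bit) the
 * checks above accept period_len = 4096 and buf_addr = 0x20001000, since
 * 4096 <= ATC_BTSIZE_MAX << 2 and neither value has the low two bits set;
 * an odd buf_addr such as 0x20001001 would be rejected.
 */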

/*
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		desc->len = period_len;
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		desc->len = period_len;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %u (%zu/%zu)\n",
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 &buf_addr,
		 periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* lets make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embedds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

static int atc_config(struct dma_chan *chan,
		      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}

static int atc_pause(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
	set_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int atc_resume(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (!atc_chan_is_paused(atchan))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
	clear_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	struct at_desc		*desc, *_desc;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_irqsave(&atchan->lock, flags);

	/* disabling channel: must also remove suspend state */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;
	enum dma_status		ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/*  Get number of bytes left in the active transactions */
	bytes = atc_get_bytes_left(chan, cookie);

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}

static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);
	if (!dmac_pdev)
		return NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;

	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
	/*
	 * We have to translate the value we get from the device tree since
	 * the half FIFO configuration may be used by default.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan)
		return NULL;

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif
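
/*
 * For reference, at_dma_xlate() above decodes a two-cell DT specifier:
 * cell 0 carries the AHB interfaces (memory interface in bits 16-23,
 * peripheral interface in bits 7-0) and cell 1 the peripheral ID plus
 * FIFO configuration. An illustrative consumer node sketch (the interface
 * value and peripheral ID are made up and SoC-specific):
 *
 *	uart0: serial@fff8c000 {
 *		...
 *		dmas = <&dma0 1 AT91_DMA_CFG_PER_ID(3)>;
 *		dma-names = "tx";
 *	};
 */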

/*--  Module Management  -----------------------------------------------*/

static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDAMC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	err = clk_prepare_enable(atdma->clk);
	if (err)
		goto err_clk_prepare;

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_desc_pool_create;
	}

	/* create a pool of consistent memory blocks for memset blocks */
	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
			&pdev->dev, sizeof(int), 4, 0);
	if (!atdma->memset_pool) {
		dev_err(&pdev->dev, "No memory for memset dma pool\n");
		err = -ENOMEM;
		goto err_memset_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
			      &atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
			     (unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;

	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
	}

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_config = atc_config;
		atdma->dma_common.device_pause = atc_pause;
		atdma->dma_common.device_resume = atc_resume;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
	dma_pool_destroy(atdma->dma_desc_pool);
err_desc_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
err_clk_prepare:
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->memset_pool);
	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
			 "cyclic channel not paused, should be done by channel user\n");
		atc_pause(chan);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");