/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the
 * one found on AT91SAM9263.
 */
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))
#define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define ATC_MAX_DSCR_TRIALS	10

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);


/*----------------------------------------------------------------------*/

/*
 * Return the widest transfer width that fits @src, @dst and @len:
 * 2 (32-bit transfers) when all three are word aligned, 1 (16-bit)
 * when halfword aligned, 0 (byte transfers) otherwise.
 */
static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
					      size_t len)
{
	unsigned int width;

	if (!((src | dst | len) & 3))
		width = 2;
	else if (!((src | dst | len) & 1))
		width = 1;
	else
		width = 0;

	return width;
}
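
/*
 * Worked example (addresses illustrative): src = 0x1000, dst = 0x2004,
 * len = 64 -> all three are multiples of 4, so width 2 (32-bit
 * transfers). With len = 6 instead, (src | dst | len) & 3 is non-zero
 * but the OR is still even, so width 1 (16-bit transfers).
 */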

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 *       to make initial allocation more convenient. This bit will be
 *       cleared at prepare time.
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc *desc = NULL;
	struct at_dma *atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);

		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
			"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		 "scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
				"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions; see the usage sketch below.
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor into the LD ring */
		list_add_tail(&desc->desc_node,
			      &(*first)->tx_list);
	}
	*prev = desc;
}
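
/*
 * Typical use from a prep function (sketch only; lli field values elided):
 *
 *	struct at_desc *first = NULL, *prev = NULL, *desc;
 *
 *	for each block to transfer:
 *		desc = atc_desc_get(atchan);
 *		fill desc->lli.saddr / daddr / ctrla / ctrlb;
 *		atc_desc_chain(&first, &prev, desc);
 *
 *	set_desc_eol(prev);	(terminate the hardware list)
 *	return &first->txd;
 */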

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
		       ATC_SPIP_BOUNDARY(first->boundary));
	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
		       ATC_DPIP_BOUNDARY(first->boundary));
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/*
 * atc_get_desc_by_cookie - get the descriptor of a cookie
 * @atchan: the DMA channel
 * @cookie: the cookie to get the descriptor for
 */
static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
					      dma_cookie_t cookie)
{
	struct at_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	return NULL;
}

/**
 * atc_calc_bytes_left - calculates the number of bytes left according to
 * the value read from CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 */
static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
{
	u32 btsize = (ctrla & ATC_BTSIZE_MAX);
	u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);

	/*
	 * According to the datasheet, when reading the Control A Register
	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
	 * number of transfers completed on the Source Interface.
	 * So btsize is always a number of source width transfers.
	 */
	return current_len - (btsize << src_width);
}
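
/*
 * Worked example (values illustrative): with current_len = 4096,
 * btsize = 0x100 source transfers completed and src_width = 1
 * (halfwords), 0x100 << 1 = 512 bytes have already been read, so
 * 4096 - 512 = 3584 bytes are left.
 */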

/**
 * atc_get_bytes_left - get the number of bytes residue for a cookie
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 */
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc_first = atc_first_active(atchan);
	struct at_desc *desc;
	int ret;
	u32 ctrla, dscr, trials;

	/*
	 * If the cookie doesn't match the currently running transfer then
	 * we can return the total length of the associated DMA transfer,
	 * because it is still queued.
	 */
	desc = atc_get_desc_by_cookie(atchan, cookie);
	if (desc == NULL)
		return -EINVAL;
	else if (desc != desc_first)
		return desc->total_len;

	/* cookie matches the currently running transfer */
	ret = desc_first->total_len;

	if (desc_first->lli.dscr) {
		/* hardware linked list transfer */

		/*
		 * Calculate the residue by removing the length of the child
		 * descriptors already transferred from the total length.
		 * The current child descriptor is found by comparing the
		 * channel's DSCR register against the DSCR value of each
		 * child descriptor; CTRLA then tells how much data has
		 * already been read from the source for that child.
		 *
		 * DSCR and CTRLA cannot be read atomically together, and
		 * the transfer may progress between the two reads. Pausing
		 * the channel to avoid the race is not an option: it would
		 * make RX overruns or TX underruns more likely (e.g. for
		 * cyclic USART transfers). Instead, read DSCR, then CTRLA,
		 * then DSCR again: if both DSCR reads match, assume they,
		 * and the CTRLA value read in between, refer to the same
		 * child descriptor. Otherwise retry, up to
		 * ATC_MAX_DSCR_TRIALS times.
		 */
		dscr = channel_readl(atchan, DSCR);
		rmb(); /* ensure DSCR is read before CTRLA */
		ctrla = channel_readl(atchan, CTRLA);
		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
			u32 new_dscr;

			rmb(); /* ensure DSCR is read after CTRLA */
			new_dscr = channel_readl(atchan, DSCR);

			/*
			 * If the DSCR register value has not changed inside
			 * the DMA controller since the previous read, we
			 * assume that both the dscr and ctrla values refer
			 * to the very same descriptor.
			 */
			if (likely(new_dscr == dscr))
				break;

			/*
			 * DSCR has changed inside the DMA controller, so
			 * the previously read value of CTRLA may refer to
			 * an already processed descriptor and could be
			 * outdated. We need to update ctrla to match the
			 * current descriptor.
			 */
			dscr = new_dscr;
			rmb(); /* ensure DSCR is read before CTRLA */
			ctrla = channel_readl(atchan, CTRLA);
		}
		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
			return -ETIMEDOUT;

		/* for the first descriptor we can be more accurate */
		if (desc_first->lli.dscr == dscr)
			return atc_calc_bytes_left(ret, ctrla);

		ret -= desc_first->len;
		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
			if (desc->lli.dscr == dscr)
				break;

			ret -= desc->len;
		}

		/*
		 * For the current descriptor in the chain we can calculate
		 * the remaining bytes using the channel's CTRLA register.
		 */
		ret = atc_calc_bytes_left(ret, ctrla);
	} else {
		/* single transfer */
		ctrla = channel_readl(atchan, CTRLA);
		ret = atc_calc_bytes_left(ret, ctrla);
	}

	return ret;
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	dev_vdbg(chan2dev(&atchan->chan_common),
		 "descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* If the transfer was a memset, free our temporary buffer */
	if (desc->memset_buffer) {
		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
			      desc->memset_paddr);
		desc->memset_buffer = false;
	}

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	dma_descriptor_unmap(txd);
	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback callback = txd->callback;
		void *param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}

/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
		 "Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
		 "  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc *first = atc_first_active(atchan);
	struct dma_async_tx_descriptor *txd = &first->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
		 "new cyclic period llp 0x%08x\n",
		 channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma *atdma = (struct at_dma *)dev_id;
	struct at_dma_chan *atchan;
	int i;
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						   AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}

/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working or queue chain and change descriptor
 * state otherwise
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc *desc = txd_to_at_desc(tx);
	struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
			 desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
			 desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

/**
 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 * @chan: the channel to prepare operation on
 * @xt: Interleaved transfer template
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_interleaved(struct dma_chan *chan,
			 struct dma_interleaved_template *xt,
			 unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct data_chunk *first = xt->sgl;
	struct at_desc *desc = NULL;
	size_t xfer_count;
	unsigned int dwidth;
	u32 ctrla;
	u32 ctrlb;
	size_t len = 0;
	int i;

	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
		return NULL;

	dev_info(chan2dev(chan),
		 "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
		 __func__, &xt->src_start, &xt->dst_start, xt->numf,
		 xt->frame_size, flags);

	/*
	 * The controller can only "skip" X bytes every Y bytes, so we
	 * need to make sure we are given a template that fits that
	 * description, i.e. a template with chunks that always have the
	 * same size, with the same ICGs.
	 */
	for (i = 0; i < xt->frame_size; i++) {
		struct data_chunk *chunk = xt->sgl + i;

		if ((chunk->size != xt->sgl->size) ||
		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
			dev_err(chan2dev(chan),
				"%s: the controller can transfer only identical chunks\n",
				__func__);
			return NULL;
		}

		len += chunk->size;
	}

	dwidth = atc_get_xfer_width(xt->src_start,
				    xt->dst_start, len);

	xfer_count = len >> dwidth;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
		return NULL;
	}

	ctrla = ATC_SRC_WIDTH(dwidth) |
		ATC_DST_WIDTH(dwidth);

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_SRC_PIP
		| ATC_DST_PIP
		| ATC_FC_MEM2MEM;

	/* Grab the descriptor */
	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan),
			"%s: couldn't allocate our descriptor\n", __func__);
		return NULL;
	}

	desc->lli.saddr = xt->src_start;
	desc->lli.daddr = xt->dst_start;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->boundary = first->size >> dwidth;
	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;

	desc->txd.cookie = -EBUSY;
	desc->total_len = desc->len = len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	desc->txd.flags = flags;

	return &desc->txd;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctrla;
	u32 ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
		 &dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	src_width = dst_width = atc_get_xfer_width(src, dest, len);

	ctrla = ATC_SRC_WIDTH(src_width) |
		ATC_DST_WIDTH(dst_width);

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				   ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = xfer_count << src_width;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags;

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
					      dma_addr_t psrc,
					      dma_addr_t pdst,
					      size_t len)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc;
	size_t xfer_count;

	u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
		    ATC_SRC_ADDR_MODE_FIXED |
		    ATC_DST_ADDR_MODE_INCR |
		    ATC_FC_MEM2MEM;

	xfer_count = len >> 2;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n",
			__func__);
		return NULL;
	}

	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
			__func__);
		return NULL;
	}

	desc->lli.saddr = psrc;
	desc->lli.daddr = pdst;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->txd.cookie = 0;
	desc->len = len;

	return desc;
}

/**
 * atc_prep_dma_memset - prepare a memset operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @value: value to set memory buffer to
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		    size_t len, unsigned long flags)
{
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	void *vaddr;
	dma_addr_t paddr;

	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
		 &dest, value, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	desc = atc_create_memset_desc(chan, paddr, dest, len);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
			__func__);
		goto err_free_buffer;
	}

	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	desc->txd.cookie = -EBUSY;
	desc->total_len = len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	desc->txd.flags = flags;

	return &desc->txd;

err_free_buffer:
	dma_pool_free(atdma->memset_pool, vaddr, paddr);
	return NULL;
}

static struct dma_async_tx_descriptor *
atc_prep_dma_memset_sg(struct dma_chan *chan,
		       struct scatterlist *sgl,
		       unsigned int sg_len, int value,
		       unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
	struct scatterlist *sg;
	void *vaddr;
	dma_addr_t paddr;
	size_t total_len = 0;
	int i;

	dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%x f0x%lx\n", __func__,
		 value, sg_len, flags);

	if (unlikely(!sgl || !sg_len)) {
		dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t dest = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
			 __func__, &dest, len);

		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
				__func__);
			goto err_put_desc;
		}

		desc = atc_create_memset_desc(chan, paddr, dest, len);
		if (!desc)
			goto err_put_desc;

		atc_desc_chain(&first, &prev, desc);

		total_len += len;
	}

	/*
	 * Only set the buffer pointers on the last descriptor to
	 * avoid free'ing while we have our transfer still going
	 */
	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link on the last descriptor of the list */
	set_desc_eol(desc);

	first->txd.flags = flags;

	return &first->txd;

err_put_desc:
	atc_desc_put(atchan, first);
	/* also release the pattern buffer, which would otherwise leak */
	dma_pool_free(atdma->memset_pool, vaddr, paddr);
	return NULL;
}

/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		  unsigned int sg_len, enum dma_transfer_direction direction,
		  unsigned long flags, void *context)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	u32 ctrla;
	u32 ctrlb;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
		 sg_len,
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |= ATC_DST_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |= ATC_SRC_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_prep_dma_sg - prepare memory to memory scatter-gather operation
 * @chan: the channel to prepare operation on
 * @dst_sg: destination scatterlist
 * @dst_nents: number of destination scatterlist entries
 * @src_sg: source scatterlist
 * @src_nents: number of source scatterlist entries
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_sg(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	unsigned int src_width;
	unsigned int dst_width;
	size_t xfer_count;
	u32 ctrla;
	u32 ctrlb;
	size_t dst_len = 0, src_len = 0;
	dma_addr_t dst = 0, src = 0;
	size_t len = 0, total_len = 0;

	if (unlikely(dst_nents == 0 || src_nents == 0))
		return NULL;

	if (unlikely(dst_sg == NULL || src_sg == NULL))
		return NULL;

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * loop until there is either no more source or no more destination
	 * scatterlist entry
	 */
	while (true) {

		/* prepare the next transfer */
		if (dst_len == 0) {

			/* no more destination scatterlist entries */
			if (!dst_sg || !dst_nents)
				break;

			dst = sg_dma_address(dst_sg);
			dst_len = sg_dma_len(dst_sg);

			dst_sg = sg_next(dst_sg);
			dst_nents--;
		}

		if (src_len == 0) {

			/* no more source scatterlist entries */
			if (!src_sg || !src_nents)
				break;

			src = sg_dma_address(src_sg);
			src_len = sg_dma_len(src_sg);

			src_sg = sg_next(src_sg);
			src_nents--;
		}

		len = min_t(size_t, src_len, dst_len);
		if (len == 0)
			continue;

		/* take care for the alignment */
		src_width = dst_width = atc_get_xfer_width(src, dst, len);

		ctrla = ATC_SRC_WIDTH(src_width) |
			ATC_DST_WIDTH(dst_width);

		/*
		 * The number of transfers per descriptor is limited:
		 * clamp to the maximum and let the loop emit as many
		 * descriptors as needed.
		 */
		xfer_count = len >> src_width;
		if (xfer_count > ATC_BTSIZE_MAX) {
			xfer_count = ATC_BTSIZE_MAX;
			len = ATC_BTSIZE_MAX << src_width;
		}

		/* create the transfer */
		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src;
		desc->lli.daddr = dst;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = len;

		atc_desc_chain(&first, &prev, desc);

		/* update the lengths and addresses for the next loop cycle */
		dst_len -= len;
		src_len -= len;
		dst += len;
		src += len;

		total_len += len;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags;

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
			    size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
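
/*
 * For example (numbers illustrative): with reg_width = 2 (32-bit
 * register accesses), buf_addr and period_len must both be multiples of
 * 4 and a period may span at most ATC_BTSIZE_MAX << 2 bytes, since a
 * single buffer transfer counts at most ATC_BTSIZE_MAX transfers of
 * 4 bytes each.
 */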

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
			 unsigned int period_index, dma_addr_t buf_addr,
			 unsigned int reg_width, size_t period_len,
			 enum dma_transfer_direction direction)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	u32 ctrla;

	/* prepare common CTRLA value */
	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		desc->len = period_len;
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		desc->len = period_len;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		    size_t period_len, enum dma_transfer_direction direction,
		    unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%zu/%zu)\n",
		 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
		 &buf_addr,
		 periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc *desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* lets make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

static int atc_config(struct dma_chan *chan,
		      struct dma_slave_config *sconfig)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}
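
/*
 * Minimal sketch of the client side (field values illustrative, not taken
 * from a real board): a peripheral driver describes its FIFO through the
 * generic dmaengine API before preparing slave transfers:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst	= 1,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */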

static int atc_pause(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
	set_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int atc_resume(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (!atc_chan_is_paused(atchan))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
	clear_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	struct at_desc *desc, *_desc;
	unsigned long flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_irqsave(&atchan->lock, flags);

	/* disabling channel: must also remove suspend state */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

/**
 * atc_tx_status - query status of a transaction
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/* Get number of bytes left in the active transactions */
	bytes = atc_get_bytes_left(chan, cookie);

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * Returns the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	struct at_dma_slave *atslave;
	unsigned long flags;
	int i;
	u32 cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}

static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;

	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
	/*
	 * We have to translate the value we get from the device tree since
	 * the half FIFO configuration value had to be 0 to keep backward
	 * compatibility.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan)
		return NULL;

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
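
/*
 * Illustrative device-tree consumer (sketch; peripheral ID and interface
 * numbers are made up). The first cell encodes the AHB interfaces (bits
 * 0-7: peripheral interface, bits 16-23: memory interface), the second
 * cell the peripheral ID and FIFO configuration, as decoded above:
 *
 *	dmas = <&dma0 2 (AT91_DMA_CFG_PER_ID(9)
 *			 | AT91_DMA_CFG_FIFOCFG_ASAP)>;
 *	dma-names = "tx";
 */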
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif

/*--  Module Management  -----------------------------------------------*/

/* cap_mask is filled in at probe time, so these cannot be const */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource *io;
	struct at_dma *atdma;
	size_t size;
	int irq;
	int err;
	int i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	err = clk_prepare_enable(atdma->clk);
	if (err)
		goto err_clk_prepare;

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
					       &pdev->dev, sizeof(struct at_desc),
					       4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_desc_pool_create;
	}

	/* create a pool of consistent memory blocks for memset blocks */
	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
					     &pdev->dev, sizeof(int), 4, 0);
	if (!atdma->memset_pool) {
		dev_err(&pdev->dev, "No memory for memset dma pool\n");
		err = -ENOMEM;
		goto err_memset_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan *atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
			      &atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
			     (unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;

	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
	}

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_config = atc_config;
		atdma->dma_common.device_pause = atc_pause;
		atdma->dma_common.device_resume = atc_resume;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
		 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
		 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
		 dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
		 plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
	dma_pool_destroy(atdma->dma_desc_pool);
err_desc_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
err_clk_prepare:
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;
	struct resource *io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->memset_pool);
	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan *chan = &atchan->chan_common;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
			 "cyclic channel not paused, should be done by channel user\n");
		atc_pause(chan);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
				 device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove = at_dma_remove,
	.shutdown = at_dma_shutdown,
	.id_table = atdma_devtypes,
	.driver = {
		.name = "at_hdmac",
		.pm = &at_dma_dev_pm_ops,
		.of_match_table = of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");