1
2
3
4
5
6
7
8
9
10
11
12#include <dt-bindings/dma/at91.h>
13#include <linux/clk.h>
14#include <linux/dmaengine.h>
15#include <linux/dma-mapping.h>
16#include <linux/dmapool.h>
17#include <linux/interrupt.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/slab.h>
21#include <linux/of.h>
22#include <linux/of_device.h>
23#include <linux/of_dma.h>
24
25#include "at_hdmac_regs.h"
26#include "dmaengine.h"
27
28
29
30
31
32
33
34
35
36
/* Default channel configuration: request FIFO at half-full threshold. */
#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
/* Default CTRLB: source and destination both on the memory interface. */
#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
	|ATC_DIF(AT_DMA_MEM_IF))
/* Slave bus widths supported by this controller: 1, 2 and 4 bytes. */
#define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

/* Max attempts at sampling a stable DSCR value in atc_get_bytes_left(). */
#define ATC_MAX_DSCR_TRIALS	10
47
48
49
50
51
/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);
61
62
63
64
65static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
66 size_t len)
67{
68 unsigned int width;
69
70 if (!((src | dst | len) & 3))
71 width = 2;
72 else if (!((src | dst | len) & 1))
73 width = 1;
74 else
75 width = 0;
76
77 return width;
78}
79
80static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
81{
82 return list_first_entry(&atchan->active_list,
83 struct at_desc, desc_node);
84}
85
86static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
87{
88 return list_first_entry(&atchan->queue,
89 struct at_desc, desc_node);
90}
91
92
93
94
95
96
97
98
99
100
101
102static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
103 gfp_t gfp_flags)
104{
105 struct at_desc *desc = NULL;
106 struct at_dma *atdma = to_at_dma(chan->device);
107 dma_addr_t phys;
108
109 desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
110 if (desc) {
111 INIT_LIST_HEAD(&desc->tx_list);
112 dma_async_tx_descriptor_init(&desc->txd, chan);
113
114 desc->txd.flags = DMA_CTRL_ACK;
115 desc->txd.tx_submit = atc_tx_submit;
116 desc->txd.phys = phys;
117 }
118
119 return desc;
120}
121
122
123
124
125
/*
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 *
 * Scans the free list under the channel lock for an ACKed descriptor;
 * falls back to allocating a fresh one (GFP_NOWAIT) when none is found.
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret)
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT);

	return ret;
}
154
155
156
157
158
159
160static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
161{
162 if (desc) {
163 struct at_desc *child;
164 unsigned long flags;
165
166 spin_lock_irqsave(&atchan->lock, flags);
167 list_for_each_entry(child, &desc->tx_list, desc_node)
168 dev_vdbg(chan2dev(&atchan->chan_common),
169 "moving child desc %p to freelist\n",
170 child);
171 list_splice_init(&desc->tx_list, &atchan->free_list);
172 dev_vdbg(chan2dev(&atchan->chan_common),
173 "moving desc %p to freelist\n", desc);
174 list_add(&desc->desc_node, &atchan->free_list);
175 spin_unlock_irqrestore(&atchan->lock, flags);
176 }
177}
178
179
180
181
182
183
184
185
186
187static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
188 struct at_desc *desc)
189{
190 if (!(*first)) {
191 *first = desc;
192 } else {
193
194 (*prev)->lli.dscr = desc->txd.phys;
195
196 list_add_tail(&desc->desc_node,
197 &(*first)->tx_list);
198 }
199 *prev = desc;
200}
201
202
203
204
205
206
207
208
/*
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * NOTE(review): called with atchan->lock held by every caller in this
 * file — confirm before adding new call sites.
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			" channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear the per-buffer registers: the transfer is fully described
	 * by the hardware linked list pointed to by DSCR */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	/* picture-in-picture hole/boundary setup for interleaved transfers */
	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
		       ATC_SPIP_BOUNDARY(first->boundary));
	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
		       ATC_DPIP_BOUNDARY(first->boundary));
	/* enable the channel: this kicks off the transfer */
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
244
245
246
247
248
249
250static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
251 dma_cookie_t cookie)
252{
253 struct at_desc *desc, *_desc;
254
255 list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
256 if (desc->txd.cookie == cookie)
257 return desc;
258 }
259
260 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
261 if (desc->txd.cookie == cookie)
262 return desc;
263 }
264
265 return NULL;
266}
267
268
269
270
271
272
273
274
275static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
276{
277 u32 btsize = (ctrla & ATC_BTSIZE_MAX);
278 u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
279
280
281
282
283
284
285
286 return current_len - (btsize << src_width);
287}
288
289
290
291
292
293
/*
 * atc_get_bytes_left - get the number of residue bytes for a cookie.
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 *
 * Returns the residue in bytes, or a negative errno (-EINVAL for an
 * unknown cookie, -ETIMEDOUT when no stable register snapshot could be
 * obtained). NOTE(review): callers hold atchan->lock — confirm before
 * adding new call sites.
 */
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc_first = atc_first_active(atchan);
	struct at_desc *desc;
	int ret;
	u32 ctrla, dscr, trials;

	/*
	 * If the cookie doesn't match the currently running transfer then
	 * we can return the total length of the associated DMA transfer,
	 * because it is still queued.
	 */
	desc = atc_get_desc_by_cookie(atchan, cookie);
	if (desc == NULL)
		return -EINVAL;
	else if (desc != desc_first)
		return desc->total_len;

	/* cookie matches the currently running transfer */
	ret = desc_first->total_len;

	if (desc_first->lli.dscr) {
		/* hardware linked list transfer */

		/*
		 * Calculate the residue by removing the length of the child
		 * descriptors already transferred from the total length.
		 * To get the current child descriptor we can use the value of
		 * the channel's DSCR register and compare it against the
		 * physical address stored in the hardware linked list item of
		 * each child descriptor.
		 *
		 * The CTRLA register provides us with the amount of data
		 * already read from the source for the current child
		 * descriptor. So we can compute a more accurate residue by
		 * also removing the number of bytes corresponding to this
		 * amount of data.
		 *
		 * However, the DSCR and CTRLA registers cannot both be read
		 * atomically. Hence a race condition may occur: the first
		 * read may refer to one child descriptor whereas the second
		 * read may refer to a later one because the transfer
		 * progressed in between the two reads.
		 *
		 * Strategy: read DSCR, then CTRLA, then DSCR again. If the
		 * two DSCR values are equal, assume the CTRLA read in between
		 * refers to the same child descriptor. Otherwise retry
		 * (re-reading CTRLA then DSCR) up to ATC_MAX_DSCR_TRIALS
		 * times until two consecutive DSCR reads agree.
		 */

		dscr = channel_readl(atchan, DSCR);
		rmb(); /* ensure DSCR is read before CTRLA */
		ctrla = channel_readl(atchan, CTRLA);
		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
			u32 new_dscr;

			rmb(); /* ensure CTRLA is read before this DSCR */
			new_dscr = channel_readl(atchan, DSCR);

			/*
			 * If the DSCR register value has not changed inside
			 * the DMA controller since the previous read, we
			 * assume that both the dscr and ctrla values refer
			 * to the very same descriptor.
			 */
			if (likely(new_dscr == dscr))
				break;

			/*
			 * DSCR has changed inside the DMA controller, so the
			 * previously read value of CTRLA may refer to an
			 * already processed descriptor, hence could be
			 * outdated. Re-read CTRLA to match the new
			 * descriptor and try again.
			 */
			dscr = new_dscr;
			rmb(); /* ensure DSCR is read before CTRLA */
			ctrla = channel_readl(atchan, CTRLA);
		}
		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
			return -ETIMEDOUT;

		/* for the first descriptor we can be more accurate */
		if (desc_first->lli.dscr == dscr)
			return atc_calc_bytes_left(ret, ctrla);

		/* remove the lengths of all children already completed */
		ret -= desc_first->len;
		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
			if (desc->lli.dscr == dscr)
				break;

			ret -= desc->len;
		}

		/*
		 * For the current descriptor in the chain refine the
		 * residue with the amount already moved (from CTRLA).
		 */
		ret = atc_calc_bytes_left(ret, ctrla);
	} else {
		/* single transfer: CTRLA alone tells how much was moved */
		ctrla = channel_readl(atchan, CTRLA);
		ret = atc_calc_bytes_left(ret, ctrla);
	}

	return ret;
}
424
425
426
427
428
429
/*
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
	unsigned long flags;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&atchan->lock, flags);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* If the transfer was a memset, free our temporary buffer */
	if (desc->memset_buffer) {
		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
			      desc->memset_paddr);
		desc->memset_buffer = false;
	}

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	spin_unlock_irqrestore(&atchan->lock, flags);

	dma_descriptor_unmap(txd);

	/* for cyclic transfers, no need to replay callback when stopping */
	if (!atc_chan_is_cyclic(atchan))
		dmaengine_desc_get_callback_invoke(txd, NULL);

	dma_run_dependencies(txd);
}
468
469
470
471
472
473
474
475
476
477
/*
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any.
 * NOTE(review): assumes the channel is idle — the only caller,
 * atc_advance_work(), checks atc_chan_is_enabled() first.
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	spin_lock_irqsave(&atchan->lock, flags);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	spin_unlock_irqrestore(&atchan->lock, flags);

	/* run completion handling for the snapshot taken above */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}
504
505
506
507
508
/*
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	unsigned long flags;
	int ret;

	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	/* do nothing if the channel is still running */
	spin_lock_irqsave(&atchan->lock, flags);
	ret = atc_chan_is_enabled(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (ret)
		return;

	/* with 0 or 1 active descriptors the complete-all path suffices
	 * (it also submits anything still queued) */
	if (list_empty(&atchan->active_list) ||
		list_is_singular(&atchan->active_list))
		return atc_complete_all(atchan);

	atc_chain_complete(atchan, atc_first_active(atchan));

	/* advance work: start the next active descriptor */
	spin_lock_irqsave(&atchan->lock, flags);
	atc_dostart(atchan, atc_first_active(atchan));
	spin_unlock_irqrestore(&atchan->lock, flags);
}
533
534
535
536
537
538
/*
 * atc_handle_error - handle an error reported for the running descriptor
 * @atchan: channel where error occurs
 *
 * Invoked from the tasklet after the IRQ handler disabled the channel
 * and set ATC_IS_ERROR.
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to continue with other
	 * descriptors queued (if any).
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* splice the queue right after the (removed) bad descriptor's slot */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens when
	 * someone submits a bad physical address in a descriptor, we
	 * should consider ourselves lucky that the controller flagged
	 * an error instead of scribbling over random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			" cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	spin_unlock_irqrestore(&atchan->lock, flags);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}
582
583
584
585
586
587static void atc_handle_cyclic(struct at_dma_chan *atchan)
588{
589 struct at_desc *first = atc_first_active(atchan);
590 struct dma_async_tx_descriptor *txd = &first->txd;
591
592 dev_vdbg(chan2dev(&atchan->chan_common),
593 "new cyclic period llp 0x%08x\n",
594 channel_readl(atchan, DSCR));
595
596 dmaengine_desc_get_callback_invoke(txd, NULL);
597}
598
599
600
601static void atc_tasklet(struct tasklet_struct *t)
602{
603 struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet);
604
605 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
606 return atc_handle_error(atchan);
607
608 if (atc_chan_is_cyclic(atchan))
609 return atc_handle_cyclic(atchan);
610
611 atc_advance_work(atchan);
612}
613
/*
 * at_dma_interrupt - shared IRQ handler for the whole controller.
 * Loops while masked interrupts are pending, scheduling each flagged
 * channel's tasklet; on an AHB error the channel is disabled and the
 * error is handed to the tasklet via ATC_IS_ERROR.
 */
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma *atdma = (struct at_dma *)dev_id;
	struct at_dma_chan *atchan;
	int i;
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		/* only consider sources that are actually enabled */
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}
653
654
655
656
657
658
659
660
661
662
663
664
/*
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already.
 *
 * Cookie increment and adding to active_list or queue must be atomic,
 * hence the channel lock around both.
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc *desc = txd_to_at_desc(tx);
	struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		/* channel idle: start the transfer right away */
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		/* channel busy: park the chain on the pending queue */
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}
690
691
692
693
694
695
696
/**
 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 * @chan: the channel to prepare operation on
 * @xt: Interleaved transfer template
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_interleaved(struct dma_chan *chan,
			 struct dma_interleaved_template *xt,
			 unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct data_chunk *first;
	struct at_desc *desc = NULL;
	size_t xfer_count;
	unsigned int dwidth;
	u32 ctrla;
	u32 ctrlb;
	size_t len = 0;
	int i;

	/* single frame only, and the template must describe something */
	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
		return NULL;

	first = xt->sgl;

	dev_info(chan2dev(chan),
		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	/*
	 * The controller can only "skip" X bytes every Y bytes, so we
	 * need to make sure we are given a template that fits that
	 * description, i.e. a template with chunks that always have the
	 * same size, with the same ICGs.
	 */
	for (i = 0; i < xt->frame_size; i++) {
		struct data_chunk *chunk = xt->sgl + i;

		if ((chunk->size != xt->sgl->size) ||
		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
			dev_err(chan2dev(chan),
				"%s: the controller can transfer only identical chunks\n",
				__func__);
			return NULL;
		}

		len += chunk->size;
	}

	dwidth = atc_get_xfer_width(xt->src_start,
				    xt->dst_start, len);

	/* everything must fit in a single descriptor */
	xfer_count = len >> dwidth;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
		return NULL;
	}

	ctrla = ATC_SRC_WIDTH(dwidth) |
		ATC_DST_WIDTH(dwidth);

	/* picture-in-picture mode on both sides to implement the ICGs */
	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_SRC_PIP
		| ATC_DST_PIP
		| ATC_FC_MEM2MEM;

	/* create the transfer */
	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan),
			"%s: couldn't allocate our descriptor\n", __func__);
		return NULL;
	}

	desc->lli.saddr = xt->src_start;
	desc->lli.daddr = xt->dst_start;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	/* hole/boundary values consumed by atc_dostart() for SPIP/DPIP */
	desc->boundary = first->size >> dwidth;
	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;

	desc->txd.cookie = -EBUSY;
	desc->total_len = desc->len = len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	desc->txd.flags = flags;

	return &desc->txd;
}
789
790
791
792
793
794
795
796
797
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctrla;
	u32 ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization: pick the widest bus width
	 * the alignment of src, dest and len allows.
	 */
	src_width = dst_width = atc_get_xfer_width(src, dest, len);

	ctrla = ATC_SRC_WIDTH(src_width) |
		ATC_DST_WIDTH(dst_width);

	/* split the copy into as many BTSIZE_MAX-sized links as needed */
	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = xfer_count << src_width;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags;

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
869
/*
 * atc_create_memset_desc - build one descriptor that fills @len bytes at
 * @pdst by repeatedly reading the 32-bit pattern stored at @psrc (source
 * address fixed, destination incrementing).
 *
 * Returns NULL when the length exceeds what one descriptor can carry or
 * when no descriptor is available.
 */
static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
					      dma_addr_t psrc,
					      dma_addr_t pdst,
					      size_t len)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc;
	size_t xfer_count;

	/* 32-bit wide accesses on both sides */
	u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
		ATC_SRC_ADDR_MODE_FIXED |
		ATC_DST_ADDR_MODE_INCR |
		ATC_FC_MEM2MEM;

	xfer_count = len >> 2;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n",
			__func__);
		return NULL;
	}

	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
			__func__);
		return NULL;
	}

	desc->lli.saddr = psrc;
	desc->lli.daddr = pdst;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->txd.cookie = 0;
	desc->len = len;

	return desc;
}
909
910
911
912
913
914
915
916
917
/**
 * atc_prep_dma_memset - prepare a memset operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @value: value to set memory buffer to
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		    size_t len, unsigned long flags)
{
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	void __iomem *vaddr;
	dma_addr_t paddr;

	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
		&dest, value, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
			__func__);
		return NULL;
	}

	/* the fill pattern lives in a small DMA-coherent pool buffer that
	 * the controller reads from a fixed source address */
	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32*)vaddr = value;

	desc = atc_create_memset_desc(chan, paddr, dest, len);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
			__func__);
		goto err_free_buffer;
	}

	/* record buffer ownership so atc_chain_complete() can free it */
	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	desc->txd.cookie = -EBUSY;
	desc->total_len = len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	desc->txd.flags = flags;

	return &desc->txd;

err_free_buffer:
	dma_pool_free(atdma->memset_pool, vaddr, paddr);
	return NULL;
}
974
975static struct dma_async_tx_descriptor *
976atc_prep_dma_memset_sg(struct dma_chan *chan,
977 struct scatterlist *sgl,
978 unsigned int sg_len, int value,
979 unsigned long flags)
980{
981 struct at_dma_chan *atchan = to_at_dma_chan(chan);
982 struct at_dma *atdma = to_at_dma(chan->device);
983 struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
984 struct scatterlist *sg;
985 void __iomem *vaddr;
986 dma_addr_t paddr;
987 size_t total_len = 0;
988 int i;
989
990 dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
991 value, sg_len, flags);
992
993 if (unlikely(!sgl || !sg_len)) {
994 dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
995 __func__);
996 return NULL;
997 }
998
999 vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
1000 if (!vaddr) {
1001 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
1002 __func__);
1003 return NULL;
1004 }
1005 *(u32*)vaddr = value;
1006
1007 for_each_sg(sgl, sg, sg_len, i) {
1008 dma_addr_t dest = sg_dma_address(sg);
1009 size_t len = sg_dma_len(sg);
1010
1011 dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1012 __func__, &dest, len);
1013
1014 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1015 dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
1016 __func__);
1017 goto err_put_desc;
1018 }
1019
1020 desc = atc_create_memset_desc(chan, paddr, dest, len);
1021 if (!desc)
1022 goto err_put_desc;
1023
1024 atc_desc_chain(&first, &prev, desc);
1025
1026 total_len += len;
1027 }
1028
1029
1030
1031
1032
1033 desc->memset_paddr = paddr;
1034 desc->memset_vaddr = vaddr;
1035 desc->memset_buffer = true;
1036
1037 first->txd.cookie = -EBUSY;
1038 first->total_len = total_len;
1039
1040
1041 set_desc_eol(desc);
1042
1043 first->txd.flags = flags;
1044
1045 return &first->txd;
1046
1047err_put_desc:
1048 atc_desc_put(atchan, first);
1049 return NULL;
1050}
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (unused here)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	u32 ctrla;
	u32 ctrlb;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	/* common CTRLA bits: burst sizes from the slave configuration */
	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* destination is the peripheral register: fixed address */
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |= ATC_DST_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			/* word-wide memory access unless address/len forbid it */
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		/* source is the peripheral register: fixed address */
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |= ATC_SRC_WIDTH(reg_width);
		ctrlb |= ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			/* word-wide memory access unless address/len forbid it */
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}
1198
1199
1200
1201
1202
1203static int
1204atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
1205 size_t period_len)
1206{
1207 if (period_len > (ATC_BTSIZE_MAX << reg_width))
1208 goto err_out;
1209 if (unlikely(period_len & ((1 << reg_width) - 1)))
1210 goto err_out;
1211 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1212 goto err_out;
1213
1214 return 0;
1215
1216err_out:
1217 return -EINVAL;
1218}
1219
1220
1221
1222
/*
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	u32 ctrla;

	/* prepare common CTRLA value: bursts, widths and period size */
	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* memory walks the buffer, peripheral register is fixed */
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		desc->len = period_len;
		break;

	case DMA_DEV_TO_MEM:
		/* peripheral register is fixed, memory walks the buffer */
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		desc->len = period_len;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			&buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	/* a channel can run only one cyclic transfer at a time */
	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc *desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* lets make a cyclic list: link the last descriptor to the first */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	/* roll back the cyclic reservation taken above */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
1355
1356static int atc_config(struct dma_chan *chan,
1357 struct dma_slave_config *sconfig)
1358{
1359 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1360
1361 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1362
1363
1364 if (!chan->private)
1365 return -EINVAL;
1366
1367 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1368
1369 convert_burst(&atchan->dma_sconfig.src_maxburst);
1370 convert_burst(&atchan->dma_sconfig.dst_maxburst);
1371
1372 return 0;
1373}
1374
/* atc_pause - suspend the channel; the hardware keeps its state. */
static int atc_pause(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);

	/* request hardware suspend, then remember it in the status bits */
	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
	set_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
1393
/* atc_resume - resume a channel previously suspended by atc_pause(). */
static int atc_resume(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* nothing to do if the channel was never paused */
	if (!atc_chan_is_paused(atchan))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);

	/* clear the hardware suspend, then the software status bit */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
	clear_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
1415
/*
 * atc_terminate_all - abort everything on the channel and recycle all
 * descriptors. Also clears the paused and cyclic status bits so the
 * channel can be reused.
 */
static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	struct at_desc *desc, *_desc;
	unsigned long flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the channel.
	 */
	spin_lock_irqsave(&atchan->lock, flags);

	/* disabling channel: also clears any pending resume request */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled before touching lists */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* gather everything still queued or active for recycling */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	spin_unlock_irqrestore(&atchan->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel was dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	return 0;
}
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
/*
 * atc_tx_status - poll transaction completion / residue
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL, updated with the residue of the transaction
 *
 * Returns the cookie status; on DMA_IN_PROGRESS/DMA_PAUSED the residue
 * (bytes left) is stored in @txstate. Returns DMA_ERROR if the residue
 * cannot be computed.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/* Get number of bytes left in the active transactions. */
	bytes = atc_get_bytes_left(chan, cookie);

	spin_unlock_irqrestore(&atchan->lock, flags);

	/* A negative count signals a hardware read-back failure. */
	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}
1509
1510
1511
1512
1513
/*
 * atc_issue_pending - kick off any queued work on the channel
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Cyclic channels are started by their own prep path, not here. */
	if (!atc_chan_is_cyclic(atchan))
		atc_advance_work(atchan);
}
1526
1527
1528
1529
1530
1531
1532
1533static int atc_alloc_chan_resources(struct dma_chan *chan)
1534{
1535 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1536 struct at_dma *atdma = to_at_dma(chan->device);
1537 struct at_desc *desc;
1538 struct at_dma_slave *atslave;
1539 int i;
1540 u32 cfg;
1541
1542 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1543
1544
1545 if (atc_chan_is_enabled(atchan)) {
1546 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
1547 return -EIO;
1548 }
1549
1550 if (!list_empty(&atchan->free_list)) {
1551 dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
1552 return -EIO;
1553 }
1554
1555 cfg = ATC_DEFAULT_CFG;
1556
1557 atslave = chan->private;
1558 if (atslave) {
1559
1560
1561
1562
1563 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1564
1565
1566 if (atslave->cfg)
1567 cfg = atslave->cfg;
1568 }
1569
1570
1571 for (i = 0; i < init_nr_desc_per_channel; i++) {
1572 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1573 if (!desc) {
1574 dev_err(atdma->dma_common.dev,
1575 "Only %d initial descriptors\n", i);
1576 break;
1577 }
1578 list_add_tail(&desc->desc_node, &atchan->free_list);
1579 }
1580
1581 dma_cookie_init(chan);
1582
1583
1584 channel_writel(atchan, CFG, cfg);
1585
1586 dev_dbg(chan2dev(chan),
1587 "alloc_chan_resources: allocated %d descriptors\n", i);
1588
1589 return i;
1590}
1591
1592
1593
1594
1595
1596static void atc_free_chan_resources(struct dma_chan *chan)
1597{
1598 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1599 struct at_dma *atdma = to_at_dma(chan->device);
1600 struct at_desc *desc, *_desc;
1601 LIST_HEAD(list);
1602
1603
1604 BUG_ON(!list_empty(&atchan->active_list));
1605 BUG_ON(!list_empty(&atchan->queue));
1606 BUG_ON(atc_chan_is_enabled(atchan));
1607
1608 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1609 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1610 list_del(&desc->desc_node);
1611
1612 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1613 }
1614 list_splice_init(&atchan->free_list, &list);
1615 atchan->status = 0;
1616
1617
1618
1619
1620 kfree(chan->private);
1621 chan->private = NULL;
1622
1623 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1624}
1625
1626#ifdef CONFIG_OF
1627static bool at_dma_filter(struct dma_chan *chan, void *slave)
1628{
1629 struct at_dma_slave *atslave = slave;
1630
1631 if (atslave->dma_dev == chan->device->dev) {
1632 chan->private = atslave;
1633 return true;
1634 } else {
1635 return false;
1636 }
1637}
1638
/*
 * at_dma_xlate - translate a 2-cell DT DMA specifier into a channel
 * @dma_spec: phandle arguments (args[0]: mem/per interface ids,
 *            args[1]: peripheral id and FIFO configuration)
 * @of_dma: OF DMA controller data (unused here)
 *
 * On success the returned channel owns @atslave (freed later in
 * atc_free_chan_resources() via chan->private) and the reference taken
 * on the DMAC platform device. Returns NULL on any failure.
 */
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);
	if (!dmac_pdev)
		return NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
	if (!atslave) {
		/* Drop the reference taken by of_find_device_by_node(). */
		put_device(&dmac_pdev->dev);
		return NULL;
	}

	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
	/*
	 * We can fill both SRC_PER and DST_PER: one of the two fields is
	 * ignored depending on the DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
		| ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
	/*
	 * Translate the FIFO configuration from the DT encoding: the
	 * half-FIFO value is 0 there so that older device trees that omit
	 * it keep working.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan) {
		/* Undo both the device reference and the slave allocation. */
		put_device(&dmac_pdev->dev);
		kfree(atslave);
		return NULL;
	}

	atchan = to_at_dma_chan(chan);
	/* args[0] packs the peripheral interface (low byte) and memory
	 * interface (bits 16-23) ids. */
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
1704#else
/* !CONFIG_OF stub: no device-tree translation available. */
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
1710#endif
1711
1712
1713
1714
/* Per-SoC configuration: number of physical DMA channels. The capability
 * masks are filled in at probe time (see at_dma_probe()). */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};
1721
#if defined(CONFIG_OF)
/* Device-tree match table; .data points at the per-SoC configuration. */
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif
1737
/* Non-DT platform device ids; driver_data carries the SoC configuration. */
static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};
1749
1750static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1751 struct platform_device *pdev)
1752{
1753 if (pdev->dev.of_node) {
1754 const struct of_device_id *match;
1755 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1756 if (match == NULL)
1757 return NULL;
1758 return match->data;
1759 }
1760 return (struct at_dma_platform_data *)
1761 platform_get_device_id(pdev)->driver_data;
1762}
1763
1764
1765
1766
1767
/*
 * at_dma_off - disable the DMA controller
 * @atdma: the Atmel HDMAC device
 *
 * Disables the controller, masks all interrupts and busy-waits until
 * every channel reports disabled.
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* Disable all interrupts. */
	dma_writel(atdma, EBCIDR, -1L);

	/* Confirm that all channels are disabled. */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}
1779
/*
 * at_dma_probe - set up the HDMAC controller
 * @pdev: platform device carrying MMIO resource, IRQ and (optionally) a
 *        device-tree node
 *
 * Maps registers, enables the clock, creates the descriptor/memset DMA
 * pools, initializes one at_dma_chan per hardware channel and registers
 * the dmaengine device (plus the OF DMA controller when probed from DT).
 * Errors unwind in strict reverse order through the goto ladder below.
 */
static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource *io;
	struct at_dma *atdma;
	size_t size;
	int irq;
	int err;
	int i;
	const struct at_dma_platform_data *plat_dat;

	/* Set up the capability masks of the static per-SoC configurations. */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* Get DMA parameters from controller type. */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* One allocation for the device plus its trailing channel array. */
	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* Discover transaction capabilities. */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	err = clk_prepare_enable(atdma->clk);
	if (err)
		goto err_clk_prepare;

	/* Force the DMA controller off while we initialize. */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* Create a pool of consistent memory blocks for hardware descriptors. */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_desc_pool_create;
	}

	/* Create a pool of consistent memory blocks for memset blocks. */
	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
			&pdev->dev, sizeof(int), 4, 0);
	if (!atdma->memset_pool) {
		dev_err(&pdev->dev, "No memory for memset dma pool\n");
		err = -ENOMEM;
		goto err_memset_pool_create;
	}

	/* Clear any pending interrupt. */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* Initialize channel-related values. */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan *atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_setup(&atchan->tasklet, atc_tasklet);
		atc_enable_chan_irq(atdma, i);
	}

	/* Set base routines. */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* Set prep routines based on capability. */
	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;

	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
	}

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* Controllers with slave support also support cyclic transfers. */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_config = atc_config;
		atdma->dma_common.device_pause = atc_pause;
		atdma->dma_common.device_resume = atc_resume;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting a channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
	dma_pool_destroy(atdma->dma_desc_pool);
err_desc_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
err_clk_prepare:
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}
1980
/*
 * at_dma_remove - tear down everything set up by at_dma_probe()
 * @pdev: the platform device being removed
 *
 * Undoes probe in reverse order. Channels are expected to be idle;
 * their resources were returned via atc_free_chan_resources().
 */
static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;
	struct resource *io;

	at_dma_off(atdma);
	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->memset_pool);
	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		/* Disable interrupts. */
		atc_disable_chan_irq(atdma, chan->chan_id);

		/* The tasklet must not run once its channel is unlinked. */
		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}
2020
2021static void at_dma_shutdown(struct platform_device *pdev)
2022{
2023 struct at_dma *atdma = platform_get_drvdata(pdev);
2024
2025 at_dma_off(platform_get_drvdata(pdev));
2026 clk_disable_unprepare(atdma->clk);
2027}
2028
2029static int at_dma_prepare(struct device *dev)
2030{
2031 struct at_dma *atdma = dev_get_drvdata(dev);
2032 struct dma_chan *chan, *_chan;
2033
2034 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2035 device_node) {
2036 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2037
2038 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
2039 return -EAGAIN;
2040 }
2041 return 0;
2042}
2043
/*
 * atc_suspend_cyclic - quiesce a cyclic channel before system suspend
 * @atchan: cyclic channel to suspend
 *
 * The channel user should already have paused the channel; do it here
 * anyway (with a warning) if that was forgotten, then save the hardware
 * state needed to restart the cycle on resume.
 */
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan *chan = &atchan->chan_common;

	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_pause(chan);
	}

	/* Preserve the next descriptor address in the cyclic list. */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}
2062
/*
 * at_dma_suspend_noirq - save controller state and power down
 * @dev: the DMA controller device
 *
 * Saves each channel's CFG register (and cyclic state where needed) plus
 * the interrupt mask, then disables the controller and its clock.
 */
static int at_dma_suspend_noirq(struct device *dev)
{
	struct at_dma *atdma = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	/* Preserve per-channel data. */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* Disable the DMA controller. */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}
2084
/*
 * atc_resume_cyclic - restart a cyclic channel after system resume
 * @atchan: cyclic channel to restart
 *
 * Restores the channel's descriptor chain pointer saved at suspend time
 * and re-enables the channel. The source/destination/control registers
 * are zeroed so the transfer parameters come from the descriptor chain.
 */
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* Restore channel status for the cyclic descriptor list. */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	/* Resume from the descriptor that was next at suspend time. */
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/*
	 * The channel pause state should be removed by the channel user;
	 * we cannot take the initiative to do it here.
	 */
	vdbg_dump_regs(atchan);
}
2103
/*
 * at_dma_resume_noirq - power the controller back up and restore state
 * @dev: the DMA controller device
 *
 * Re-enables the clock and controller, clears stale interrupt status,
 * restores the saved interrupt mask and per-channel CFG registers, and
 * restarts any cyclic channels.
 */
static int at_dma_resume_noirq(struct device *dev)
{
	struct at_dma *atdma = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	/* Bring back the DMA controller. */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* Clear any pending interrupt. */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* Restore saved data. */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}
2129
/* System PM hooks: veto suspend while busy, save/restore channel state. */
static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

/* No .probe: the driver is bound via platform_driver_probe() (see below). */
static struct platform_driver at_dma_driver = {
	.remove = at_dma_remove,
	.shutdown = at_dma_shutdown,
	.id_table = atdma_devtypes,
	.driver = {
		.name = "at_hdmac",
		.pm = &at_dma_dev_pm_ops,
		.of_match_table = of_match_ptr(atmel_dma_dt_ids),
	},
};
2146
static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
/* Registered at subsys_initcall so DMA channels exist before client drivers. */
subsys_initcall(at_dma_init);
2152
/* Module unload: unregister the platform driver. */
static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");
2163