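/*
 * Driver for the Atmel AHB DMA Controller (at_hdmac)
 *
 * Author: Nicolas Ferre <nicolas.ferre@atmel.com>
 * License: GPL (see MODULE_LICENSE at the bottom of this file)
 */
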
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

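/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the ATmel AHB DMA Controller
 * at_dma_ / atdma	: ATmel DMA controller entity related
 * atc_ / atchan	: ATmel DMA Channel entity related
 */
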
#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

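/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */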
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);
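/**
 * atc_first_active - return the first descriptor on the active list
 * @atchan: channel we are interested in
 *
 * Called with atchan->lock held; the list must not be empty.
 */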
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

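/**
 * atc_first_queued - return the first descriptor on the pending queue
 * @atchan: channel we are interested in
 *
 * Called with atchan->lock held; the queue must not be empty.
 */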
static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}
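/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 *       to make initial allocation more convenient. This bit will be
 *       cleared and control will be given to the client at usage time
 *       (during preparation functions).
 */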
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}
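/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */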
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}
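/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */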
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}
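/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */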
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}
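/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */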
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
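/**
 * atc_get_current_descriptors - locate the descriptor whose physical
 * address matches the DSCR register content
 * @atchan: the channel we look at
 * @dscr_addr: physical descriptor address read from DSCR
 */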
static struct at_desc *atc_get_current_descriptors(struct at_dma_chan *atchan,
		u32 dscr_addr)
{
	struct at_desc *desc, *_desc, *child, *desc_cur = NULL;

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (desc->lli.dscr == dscr_addr) {
			desc_cur = desc;
			break;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (child->lli.dscr == dscr_addr) {
				desc_cur = child;
				break;
			}
		}
	}

	return desc_cur;
}
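/**
 * atc_get_bytes_left - get the number of bytes residue in dma buffer
 * @chan: DMA channel
 *
 * Must be called with the channel lock held.
 */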
static int atc_get_bytes_left(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int	chan_id = atchan->chan_common.chan_id;
	struct at_desc *desc_first = atc_first_active(atchan);
	struct at_desc *desc_cur;
	int ret = 0, count = 0;

	/*
	 * Initialize necessary values in the first time.
	 * remain_desc records the remaining descriptor length.
	 */
	if (atchan->remain_desc == 0)
		/* First descriptor embeds the transaction length */
		atchan->remain_desc = desc_first->len;

	/*
	 * This happens when the current descriptor transfer is complete.
	 * The residual buffer size should reduce by the current
	 * descriptor length.
	 */
	if (unlikely(test_bit(ATC_IS_BTC, &atchan->status))) {
		clear_bit(ATC_IS_BTC, &atchan->status);
		desc_cur = atc_get_current_descriptors(atchan,
						channel_readl(atchan, DSCR));
		if (!desc_cur) {
			ret = -EINVAL;
			goto out;
		}
		atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
						<< (desc_first->tx_width);
		if (atchan->remain_desc < 0) {
			ret = -EINVAL;
			goto out;
		} else {
			ret = atchan->remain_desc;
		}
	} else {
		/*
		 * Get residual bytes when the current
		 * descriptor transfer is in progress.
		 */
		count = (channel_readl(atchan, CTRLA) & ATC_BTSIZE_MAX)
				<< (desc_first->tx_width);
		ret = atchan->remain_desc - count;
	}

	/* Check fifo empty */
	if (!(dma_readl(atdma, CHSR) & AT_DMA_EMPT(chan_id)))
		atc_issue_pending(chan);

out:
	return ret;
}
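/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want do complete
 *
 * Called with atchan->lock held and bh disabled
 */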
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	dma_descriptor_unmap(txd);

	/* for cyclic transfers, no need to replay callback function
	 * while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}
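/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */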
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}
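/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */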
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}
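/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */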
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to continue with other
	 * descriptors queued (if any).
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}
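/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */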
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}
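/*--  IRQ & Tasklet  ---------------------------------------------------*/
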
static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				if (pending & AT_DMA_BTC(i))
					set_bit(ATC_IS_BTC, &atchan->status);
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}
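/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working or queue it directly to active list.
 * Cookie increment and adding to active_list or queue must be atomic.
 */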
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}
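/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */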
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;
	first->tx_width = src_width;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
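/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in scatterlist
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */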
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;
	first->tx_width = reg_width;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}
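/**
 * atc_dma_cyclic_check_values - check sanity of cyclic transfer parameters:
 * reject too big or unaligned periods and unaligned DMA buffers
 */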
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
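/**
 * atc_dma_cyclic_fill_desc - fill one period descriptor of a cyclic transfer
 */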
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
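/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 * @context: transfer context (ignored)
 */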
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %u (%zu/%zu)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* lets make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;
	first->tx_width = reg_width;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}

static int set_runtime_config(struct dma_chan *chan,
			      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}

static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
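/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */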
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;
	enum dma_status		ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/* Get number of bytes left in the active transactions */
	bytes = atc_get_bytes_left(chan);

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}
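/**
 * atc_issue_pending - take the first transaction descriptor in the pending
 * queue and start the transfer
 * @chan: target DMA channel
 */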
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
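/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */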
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	atchan->remain_desc = 0;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}
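/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */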
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;
	atchan->remain_desc = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}

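/*
 * at_dma_xlate below translates a two-cell DT specifier into a channel:
 * args[0] packs the memory interface (bits 16-23) and the peripheral
 * interface (bits 0-7), while args[1] packs the peripheral ID and the
 * FIFO configuration. As an illustrative sketch only (the values and the
 * requesting node are hypothetical, not taken from a real board file),
 * a client could reference a controller handled by this driver like:
 *
 *	dmas = <&dma0 0x00010002
 *		(AT91_DMA_CFG_PER_ID(25) | AT91_DMA_CFG_FIFOCFG_HALF)>;
 *	dma-names = "tx";
 *
 * i.e. memory interface 1, peripheral interface 2, peripheral ID 25 and
 * half-FIFO configuration; the macros come from <dt-bindings/dma/at91.h>.
 */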
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);
	if (!dmac_pdev)
		return NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;

	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
	/*
	 * We have to translate the value we get from the device tree since
	 * the half FIFO configuration value had to be 0 to keep backward
	 * compatibility.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan)
		return NULL;

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif
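/*--  Module Management  -----------------------------------------------*/
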
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}
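/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */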
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	err = clk_prepare_enable(atdma->clk);
	if (err)
		goto err_clk_prepare;

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->dma_desc_pool);
err_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
err_clk_prepare:
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");