1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/io.h>

#include <asm/system.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/dma-mx1-mx2.h>
38
/* DMA controller global register offsets, relative to DMA_BASE */
#define DMA_DCR 0x00		/* Control Register */
#define DMA_DISR 0x04		/* Interrupt Status Register */
#define DMA_DIMR 0x08		/* Interrupt Mask Register */
#define DMA_DBTOSR 0x0c		/* Burst Time-Out Status Register */
#define DMA_DRTOSR 0x10		/* Request Time-Out Status Register */
#define DMA_DSESR 0x14		/* Transfer Error Status Register */
#define DMA_DBOSR 0x18		/* Buffer Overflow Status Register */
#define DMA_DBTOCR 0x1c		/* Burst Time-Out Control Register */
/* 2D memory size registers, sets A and B */
#define DMA_WSRA 0x40		/* W-Size Register A */
#define DMA_XSRA 0x44		/* X-Size Register A */
#define DMA_YSRA 0x48		/* Y-Size Register A */
#define DMA_WSRB 0x4c		/* W-Size Register B */
#define DMA_XSRB 0x50		/* X-Size Register B */
#define DMA_YSRB 0x54		/* Y-Size Register B */
/* Per-channel registers: each channel owns a 0x40-byte window ((x) << 6) */
#define DMA_SAR(x) (0x80 + ((x) << 6))		/* Source Address */
#define DMA_DAR(x) (0x84 + ((x) << 6))		/* Destination Address */
#define DMA_CNTR(x) (0x88 + ((x) << 6))		/* Count (transfer length) */
#define DMA_CCR(x) (0x8c + ((x) << 6))		/* Channel Control */
#define DMA_RSSR(x) (0x90 + ((x) << 6))		/* Request Source Select */
#define DMA_BLR(x) (0x94 + ((x) << 6))		/* Burst Length */
#define DMA_RTOR(x) (0x98 + ((x) << 6))		/* Request Time-Out */
/*
 * NOTE(review): BUCR shares offset 0x98 with RTOR - presumably the
 * register's function depends on the channel mode (CCR_REN); confirm
 * against the i.MX reference manual.
 */
#define DMA_BUCR(x) (0x98 + ((x) << 6))		/* Bus Utilization Control */
#define DMA_CCNR(x) (0x9C + ((x) << 6))		/* Channel Counter */

/* DCR bits */
#define DCR_DRST (1<<1)			/* DMA module soft reset */
#define DCR_DEN (1<<0)			/* DMA module enable */
/* DBTOCR bits */
#define DBTOCR_EN (1<<15)		/* burst time-out enable */
#define DBTOCR_CNT(x) ((x) & 0x7fff)	/* burst time-out count */
#define CNTR_CNT(x) ((x) & 0xffffff)	/* transfer count mask (24 bit) */
/* CCR bits */
#define CCR_ACRPT (1<<14)		/* auto clear RPT */
#define CCR_DMOD_LINEAR (0x0 << 12)	/* destination mode: linear */
#define CCR_DMOD_2D (0x1 << 12)		/* destination mode: 2D */
#define CCR_DMOD_FIFO (0x2 << 12)	/* destination mode: FIFO */
#define CCR_DMOD_EOBFIFO (0x3 << 12)	/* destination mode: end-of-burst FIFO */
#define CCR_SMOD_LINEAR (0x0 << 10)	/* source mode: linear */
#define CCR_SMOD_2D (0x1 << 10)		/* source mode: 2D */
#define CCR_SMOD_FIFO (0x2 << 10)	/* source mode: FIFO */
#define CCR_SMOD_EOBFIFO (0x3 << 10)	/* source mode: end-of-burst FIFO */
#define CCR_MDIR_DEC (1<<9)		/* memory direction: decrement */
#define CCR_MSEL_B (1<<8)		/* select 2D size register set B */
#define CCR_DSIZ_32 (0x0 << 6)		/* destination port size 32 bit */
#define CCR_DSIZ_8 (0x1 << 6)		/* destination port size 8 bit */
#define CCR_DSIZ_16 (0x2 << 6)		/* destination port size 16 bit */
#define CCR_SSIZ_32 (0x0 << 4)		/* source port size 32 bit */
#define CCR_SSIZ_8 (0x1 << 4)		/* source port size 8 bit */
#define CCR_SSIZ_16 (0x2 << 4)		/* source port size 16 bit */
#define CCR_REN (1<<3)			/* DMA request enable */
#define CCR_RPT (1<<2)			/* repeat (chained transfer) */
#define CCR_FRC (1<<1)			/* force a DMA cycle */
#define CCR_CEN (1<<0)			/* channel enable */
/* RTOR bits */
#define RTOR_EN (1<<15)			/* request time-out enable */
#define RTOR_CLK (1<<14)		/* time-out clock source select */
#define RTOR_PSC (1<<13)		/* time-out prescaler */
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
/*
 * Per-channel bookkeeping for the i.MX DMA controller.  One entry per
 * hardware channel; a channel counts as allocated while ->name is
 * non-NULL.
 */
struct imx_dma_channel {
	const char *name;	/* owner's name; NULL = channel is free */
	void (*irq_handler) (int, void *);	/* transfer-complete callback */
	void (*err_handler) (int, void *, int errcode);	/* error callback (IMX_DMA_ERR_* mask) */
	void (*prog_handler) (int, void *, struct scatterlist *); /* called after each sg chunk is programmed */
	void *data;		/* opaque cookie handed to the callbacks */
	unsigned int dma_mode;	/* DMA_MODE_READ or DMA_MODE_WRITE of current transfer */
	struct scatterlist *sg;	/* current scatterlist entry, NULL when not in sg mode */
	unsigned int resbytes;	/* bytes remaining in the current sg transfer */
	int dma_num;		/* this channel's index in imx_dma_channels[] */

	int in_use;		/* non-zero while a transfer is running */

	u32 ccr_from_device;	/* precomputed CCR value for device->memory */
	u32 ccr_to_device;	/* precomputed CCR value for memory->device */

	struct timer_list watchdog;	/* MX2 hardware-chaining stall watchdog */

	int hw_chaining;	/* caller requested MX27 hardware chaining */
};

/* one descriptor per hardware channel */
static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];

/* "dma" peripheral clock, enabled for the lifetime of the module */
static struct clk *dma_clk;
134
135static int imx_dma_hw_chain(struct imx_dma_channel *imxdma)
136{
137 if (cpu_is_mx27())
138 return imxdma->hw_chaining;
139 else
140 return 0;
141}
142
143
144
145
146
147static inline int imx_dma_sg_next(int channel, struct scatterlist *sg)
148{
149 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
150 unsigned long now;
151
152 if (!imxdma->name) {
153 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
154 __func__, channel);
155 return 0;
156 }
157
158 now = min(imxdma->resbytes, sg->length);
159 imxdma->resbytes -= now;
160
161 if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
162 __raw_writel(sg->dma_address, DMA_BASE + DMA_DAR(channel));
163 else
164 __raw_writel(sg->dma_address, DMA_BASE + DMA_SAR(channel));
165
166 __raw_writel(now, DMA_BASE + DMA_CNTR(channel));
167
168 pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
169 "size 0x%08x\n", channel,
170 __raw_readl(DMA_BASE + DMA_DAR(channel)),
171 __raw_readl(DMA_BASE + DMA_SAR(channel)),
172 __raw_readl(DMA_BASE + DMA_CNTR(channel)));
173
174 return now;
175}
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192int
193imx_dma_setup_single(int channel, dma_addr_t dma_address,
194 unsigned int dma_length, unsigned int dev_addr,
195 unsigned int dmamode)
196{
197 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
198
199 imxdma->sg = NULL;
200 imxdma->dma_mode = dmamode;
201
202 if (!dma_address) {
203 printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
204 channel);
205 return -EINVAL;
206 }
207
208 if (!dma_length) {
209 printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
210 channel);
211 return -EINVAL;
212 }
213
214 if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
215 pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
216 "dev_addr=0x%08x for read\n",
217 channel, __func__, (unsigned int)dma_address,
218 dma_length, dev_addr);
219
220 __raw_writel(dev_addr, DMA_BASE + DMA_SAR(channel));
221 __raw_writel(dma_address, DMA_BASE + DMA_DAR(channel));
222 __raw_writel(imxdma->ccr_from_device,
223 DMA_BASE + DMA_CCR(channel));
224 } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
225 pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
226 "dev_addr=0x%08x for write\n",
227 channel, __func__, (unsigned int)dma_address,
228 dma_length, dev_addr);
229
230 __raw_writel(dma_address, DMA_BASE + DMA_SAR(channel));
231 __raw_writel(dev_addr, DMA_BASE + DMA_DAR(channel));
232 __raw_writel(imxdma->ccr_to_device,
233 DMA_BASE + DMA_CCR(channel));
234 } else {
235 printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
236 channel);
237 return -EINVAL;
238 }
239
240 __raw_writel(dma_length, DMA_BASE + DMA_CNTR(channel));
241
242 return 0;
243}
244EXPORT_SYMBOL(imx_dma_setup_single);
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286int
287imx_dma_setup_sg(int channel,
288 struct scatterlist *sg, unsigned int sgcount,
289 unsigned int dma_length, unsigned int dev_addr,
290 unsigned int dmamode)
291{
292 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
293
294 if (imxdma->in_use)
295 return -EBUSY;
296
297 imxdma->sg = sg;
298 imxdma->dma_mode = dmamode;
299 imxdma->resbytes = dma_length;
300
301 if (!sg || !sgcount) {
302 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg epty sg list\n",
303 channel);
304 return -EINVAL;
305 }
306
307 if (!sg->length) {
308 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
309 channel);
310 return -EINVAL;
311 }
312
313 if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
314 pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
315 "dev_addr=0x%08x for read\n",
316 channel, __func__, sg, sgcount, dma_length, dev_addr);
317
318 __raw_writel(dev_addr, DMA_BASE + DMA_SAR(channel));
319 __raw_writel(imxdma->ccr_from_device,
320 DMA_BASE + DMA_CCR(channel));
321 } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
322 pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
323 "dev_addr=0x%08x for write\n",
324 channel, __func__, sg, sgcount, dma_length, dev_addr);
325
326 __raw_writel(dev_addr, DMA_BASE + DMA_DAR(channel));
327 __raw_writel(imxdma->ccr_to_device,
328 DMA_BASE + DMA_CCR(channel));
329 } else {
330 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
331 channel);
332 return -EINVAL;
333 }
334
335 imx_dma_sg_next(channel, sg);
336
337 return 0;
338}
339EXPORT_SYMBOL(imx_dma_setup_sg);
340
341int
342imx_dma_config_channel(int channel, unsigned int config_port,
343 unsigned int config_mem, unsigned int dmareq, int hw_chaining)
344{
345 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
346 u32 dreq = 0;
347
348 imxdma->hw_chaining = 0;
349
350 if (hw_chaining) {
351 imxdma->hw_chaining = 1;
352 if (!imx_dma_hw_chain(imxdma))
353 return -EINVAL;
354 }
355
356 if (dmareq)
357 dreq = CCR_REN;
358
359 imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq;
360 imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq;
361
362 __raw_writel(dmareq, DMA_BASE + DMA_RSSR(channel));
363
364 return 0;
365}
366EXPORT_SYMBOL(imx_dma_config_channel);
367
368void imx_dma_config_burstlen(int channel, unsigned int burstlen)
369{
370 __raw_writel(burstlen, DMA_BASE + DMA_BLR(channel));
371}
372EXPORT_SYMBOL(imx_dma_config_burstlen);
373
374
375
376
377
378
379
380
381
382
383
384int
385imx_dma_setup_handlers(int channel,
386 void (*irq_handler) (int, void *),
387 void (*err_handler) (int, void *, int),
388 void *data)
389{
390 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
391 unsigned long flags;
392
393 if (!imxdma->name) {
394 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
395 __func__, channel);
396 return -ENODEV;
397 }
398
399 local_irq_save(flags);
400 __raw_writel(1 << channel, DMA_BASE + DMA_DISR);
401 imxdma->irq_handler = irq_handler;
402 imxdma->err_handler = err_handler;
403 imxdma->data = data;
404 local_irq_restore(flags);
405 return 0;
406}
407EXPORT_SYMBOL(imx_dma_setup_handlers);
408
409
410
411
412
413
414
415int
416imx_dma_setup_progression_handler(int channel,
417 void (*prog_handler) (int, void*, struct scatterlist*))
418{
419 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
420 unsigned long flags;
421
422 if (!imxdma->name) {
423 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
424 __func__, channel);
425 return -ENODEV;
426 }
427
428 local_irq_save(flags);
429 imxdma->prog_handler = prog_handler;
430 local_irq_restore(flags);
431 return 0;
432}
433EXPORT_SYMBOL(imx_dma_setup_progression_handler);
434
435
436
437
438
439
440
441
442
443
444
445
/*
 * imx_dma_enable - start the transfer previously programmed on @channel
 *
 * Acks any stale interrupt, unmasks the channel in DIMR and sets
 * CCR_CEN|CCR_ACRPT to start it.  On MX2 with hardware chaining active,
 * the following scatterlist entry (if any) is pre-loaded and
 * CCR_RPT|CCR_ACRPT armed so the controller repeats into it without
 * software help.  Silently returns if the channel is unallocated or
 * already running.
 */
void imx_dma_enable(int channel)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for not allocated channel %d\n",
		       __func__, channel);
		return;
	}

	if (imxdma->in_use)
		return;

	local_irq_save(flags);

	/* clear pending interrupt, unmask the channel, then enable it */
	__raw_writel(1 << channel, DMA_BASE + DMA_DISR);
	__raw_writel(__raw_readl(DMA_BASE + DMA_DIMR) & ~(1 << channel),
		DMA_BASE + DMA_DIMR);
	__raw_writel(__raw_readl(DMA_BASE + DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT,
		DMA_BASE + DMA_CCR(channel));

#ifdef CONFIG_ARCH_MX2
	if (imxdma->sg && imx_dma_hw_chain(imxdma)) {
		/* pre-load the next sg entry for hardware chaining */
		imxdma->sg = sg_next(imxdma->sg);
		if (imxdma->sg) {
			u32 tmp;
			imx_dma_sg_next(channel, imxdma->sg);
			tmp = __raw_readl(DMA_BASE + DMA_CCR(channel));
			__raw_writel(tmp | CCR_RPT | CCR_ACRPT,
				DMA_BASE + DMA_CCR(channel));
		}
	}
#endif
	imxdma->in_use = 1;

	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_enable);
488
489
490
491
492
/*
 * imx_dma_disable - stop all activity on @channel
 *
 * Cancels the MX2 chaining watchdog if armed, masks the channel's
 * interrupt, clears CCR_CEN, acks any pending interrupt and marks the
 * channel idle.  Safe to call whether or not a transfer is running.
 */
void imx_dma_disable(int channel)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imx_dma_hw_chain(imxdma))
		del_timer(&imxdma->watchdog);

	local_irq_save(flags);
	/* mask first so clearing CEN cannot race a late interrupt */
	__raw_writel(__raw_readl(DMA_BASE + DMA_DIMR) | (1 << channel),
		DMA_BASE + DMA_DIMR);
	__raw_writel(__raw_readl(DMA_BASE + DMA_CCR(channel)) & ~CCR_CEN,
		DMA_BASE + DMA_CCR(channel));
	__raw_writel(1 << channel, DMA_BASE + DMA_DISR);
	imxdma->in_use = 0;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_disable);
513
#ifdef CONFIG_ARCH_MX2
/*
 * Watchdog for MX2 hardware chaining: if the chain stalls, force the
 * channel off, drop the in-flight scatterlist and report a timeout to
 * the owner.
 */
static void imx_dma_watchdog(unsigned long chno)
{
	struct imx_dma_channel *ch = &imx_dma_channels[chno];

	__raw_writel(0, DMA_BASE + DMA_CCR(chno));
	ch->sg = NULL;
	ch->in_use = 0;

	if (ch->err_handler)
		ch->err_handler(chno, ch->data, IMX_DMA_ERR_TIMEOUT);
}
#endif
527
/*
 * dma_err_handler - DMA error interrupt (MX1) / error poll (MX2)
 *
 * Gathers the four error status registers (burst time-out, request
 * time-out, transfer error, buffer overflow), acks the latched bits
 * (these registers are write-one-to-clear) and forwards an
 * IMX_DMA_ERR_* bitmask to each affected channel's err_handler.
 * Channels without a handler get their sg pointer dropped and a
 * warning logged instead.
 */
static irqreturn_t dma_err_handler(int irq, void *dev_id)
{
	int i, disr;
	struct imx_dma_channel *imxdma;
	unsigned int err_mask;
	int errcode;

	disr = __raw_readl(DMA_BASE + DMA_DISR);

	/* union of all channels that latched any kind of error */
	err_mask = __raw_readl(DMA_BASE + DMA_DBTOSR) |
		   __raw_readl(DMA_BASE + DMA_DRTOSR) |
		   __raw_readl(DMA_BASE + DMA_DSESR)  |
		   __raw_readl(DMA_BASE + DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	/* ack the completion interrupt only for erroring channels */
	__raw_writel(disr & err_mask, DMA_BASE + DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		imxdma = &imx_dma_channels[i];
		errcode = 0;

		/* re-read each status register and write-1-to-clear per channel */
		if (__raw_readl(DMA_BASE + DMA_DBTOSR) & (1 << i)) {
			__raw_writel(1 << i, DMA_BASE + DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (__raw_readl(DMA_BASE + DMA_DRTOSR) & (1 << i)) {
			__raw_writel(1 << i, DMA_BASE + DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (__raw_readl(DMA_BASE + DMA_DSESR) & (1 << i)) {
			__raw_writel(1 << i, DMA_BASE + DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (__raw_readl(DMA_BASE + DMA_DBOSR) & (1 << i)) {
			__raw_writel(1 << i, DMA_BASE + DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		if (imxdma->name && imxdma->err_handler) {
			imxdma->err_handler(i, imxdma->data, errcode);
			continue;
		}

		imx_dma_channels[i].sg = NULL;

		/* NOTE(review): says "timeout" for every error type */
		printk(KERN_WARNING
		       "DMA timeout on channel %d (%s) -%s%s%s%s\n",
		       i, imxdma->name,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
586
/*
 * dma_irq_handle_channel - service a completion interrupt for one channel
 *
 * In scatter-gather mode this advances to the next sg entry, programs
 * it and restarts the channel (or re-arms the hardware chain and its
 * stall watchdog on MX2).  When the list is exhausted - or for single
 * transfers - the channel is stopped and the owner's irq_handler runs.
 */
static void dma_irq_handle_channel(int chno)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[chno];

	if (!imxdma->name) {
		/*
		 * The interrupt is masked while a channel is free, so
		 * this should never trigger; nothing to do but warn.
		 */
		printk(KERN_WARNING
		       "spurious IRQ for DMA channel %d\n", chno);
		return;
	}

	if (imxdma->sg) {
		u32 tmp;
		struct scatterlist *current_sg = imxdma->sg;
		imxdma->sg = sg_next(imxdma->sg);

		if (imxdma->sg) {
			/* program the next chunk before restarting */
			imx_dma_sg_next(chno, imxdma->sg);

			tmp = __raw_readl(DMA_BASE + DMA_CCR(chno));

			if (imx_dma_hw_chain(imxdma)) {
				/*
				 * re-arm the stall watchdog; the 500ms
				 * timeout should probably be configurable
				 */
				mod_timer(&imxdma->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				__raw_writel(tmp, DMA_BASE +
						DMA_CCR(chno));
			} else {
				/* toggle CEN off here, back on below */
				__raw_writel(tmp & ~CCR_CEN, DMA_BASE +
						DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			/*
			 * NOTE(review): in the hw-chain branch this repeats
			 * the identical write just issued above - presumably
			 * harmless; confirm against the controller docs.
			 */
			__raw_writel(tmp, DMA_BASE + DMA_CCR(chno));

			if (imxdma->prog_handler)
				imxdma->prog_handler(chno, imxdma->data,
						current_sg);

			return;
		}

		/* sg list exhausted under hw chaining: just stop the watchdog */
		if (imx_dma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

	/* transfer complete: stop the channel and notify the owner */
	__raw_writel(0, DMA_BASE + DMA_CCR(chno));
	imxdma->in_use = 0;
	if (imxdma->irq_handler)
		imxdma->irq_handler(chno, imxdma->data);
}
647
648static irqreturn_t dma_irq_handler(int irq, void *dev_id)
649{
650 int i, disr;
651
652#ifdef CONFIG_ARCH_MX2
653 dma_err_handler(irq, dev_id);
654#endif
655
656 disr = __raw_readl(DMA_BASE + DMA_DISR);
657
658 pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
659 disr);
660
661 __raw_writel(disr, DMA_BASE + DMA_DISR);
662 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
663 if (disr & (1 << i))
664 dma_irq_handle_channel(i);
665 }
666
667 return IRQ_HANDLED;
668}
669
670
671
672
673
674
675int imx_dma_request(int channel, const char *name)
676{
677 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
678 unsigned long flags;
679 int ret = 0;
680
681
682 if (!name)
683 return -EINVAL;
684
685 if (channel >= IMX_DMA_CHANNELS) {
686 printk(KERN_CRIT "%s: called for non-existed channel %d\n",
687 __func__, channel);
688 return -EINVAL;
689 }
690
691 local_irq_save(flags);
692 if (imxdma->name) {
693 local_irq_restore(flags);
694 return -EBUSY;
695 }
696 memset(imxdma, 0, sizeof(imxdma));
697 imxdma->name = name;
698 local_irq_restore(flags);
699
700#ifdef CONFIG_ARCH_MX2
701 ret = request_irq(MXC_INT_DMACH0 + channel, dma_irq_handler, 0, "DMA",
702 NULL);
703 if (ret) {
704 imxdma->name = NULL;
705 printk(KERN_CRIT "Can't register IRQ %d for DMA channel %d\n",
706 MXC_INT_DMACH0 + channel, channel);
707 return ret;
708 }
709 init_timer(&imxdma->watchdog);
710 imxdma->watchdog.function = &imx_dma_watchdog;
711 imxdma->watchdog.data = channel;
712#endif
713
714 return ret;
715}
716EXPORT_SYMBOL(imx_dma_request);
717
718
719
720
721
/*
 * imx_dma_free - release @channel previously claimed by imx_dma_request()
 *
 * Disables the channel and clears the owner name; on MX2 the
 * per-channel interrupt is released as well.  Warns if the channel was
 * not allocated.
 *
 * NOTE(review): free_irq() is called here with interrupts disabled via
 * local_irq_save(); free_irq() may sleep, so this looks unsafe on MX2 -
 * confirm.
 */
void imx_dma_free(int channel)
{
	unsigned long flags;
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	if (!imxdma->name) {
		printk(KERN_CRIT
		       "%s: trying to free free channel %d\n",
		       __func__, channel);
		return;
	}

	local_irq_save(flags);

	imx_dma_disable(channel);
	imxdma->name = NULL;	/* marks the channel free again */

#ifdef CONFIG_ARCH_MX2
	free_irq(MXC_INT_DMACH0 + channel, NULL);
#endif

	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_free);
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio)
762{
763 int i;
764 int best;
765
766 switch (prio) {
767 case (DMA_PRIO_HIGH):
768 best = 8;
769 break;
770 case (DMA_PRIO_MEDIUM):
771 best = 4;
772 break;
773 case (DMA_PRIO_LOW):
774 default:
775 best = 0;
776 break;
777 }
778
779 for (i = best; i < IMX_DMA_CHANNELS; i++)
780 if (!imx_dma_request(i, name))
781 return i;
782
783 for (i = best - 1; i >= 0; i--)
784 if (!imx_dma_request(i, name))
785 return i;
786
787 printk(KERN_ERR "%s: no free DMA channel found\n", __func__);
788
789 return -ENODEV;
790}
791EXPORT_SYMBOL(imx_dma_request_by_prio);
792
793static int __init imx_dma_init(void)
794{
795 int ret = 0;
796 int i;
797
798 dma_clk = clk_get(NULL, "dma");
799 clk_enable(dma_clk);
800
801
802 __raw_writel(DCR_DRST, DMA_BASE + DMA_DCR);
803
804#ifdef CONFIG_ARCH_MX1
805 ret = request_irq(DMA_INT, dma_irq_handler, 0, "DMA", NULL);
806 if (ret) {
807 printk(KERN_CRIT "Wow! Can't register IRQ for DMA\n");
808 return ret;
809 }
810
811 ret = request_irq(DMA_ERR, dma_err_handler, 0, "DMA", NULL);
812 if (ret) {
813 printk(KERN_CRIT "Wow! Can't register ERRIRQ for DMA\n");
814 free_irq(DMA_INT, NULL);
815 return ret;
816 }
817#endif
818
819 __raw_writel(DCR_DEN, DMA_BASE + DMA_DCR);
820
821
822 __raw_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_BASE + DMA_DISR);
823
824
825 __raw_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_BASE + DMA_DIMR);
826
827 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
828 imx_dma_channels[i].sg = NULL;
829 imx_dma_channels[i].dma_num = i;
830 }
831
832 return ret;
833}
834
835arch_initcall(imx_dma_init);
836