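/*
 * i.MX DMA v1 registration and IRQ dispatching
 *
 * Legacy DMA channel API for i.MX1/i.MX21/i.MX27; see mach/dma-v1.h for
 * the client-visible interface.
 */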

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/io.h>

#include <asm/system.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/dma-v1.h>

/* i.MX DMA (v1) controller register offsets */
#define DMA_DCR		0x00	/* Control Register */
#define DMA_DISR	0x04	/* Interrupt Status Register */
#define DMA_DIMR	0x08	/* Interrupt Mask Register */
#define DMA_DBTOSR	0x0c	/* Burst Time-Out Status Register */
#define DMA_DRTOSR	0x10	/* Request Time-Out Status Register */
#define DMA_DSESR	0x14	/* Transfer Error Status Register */
#define DMA_DBOSR	0x18	/* Buffer Overflow Status Register */
#define DMA_DBTOCR	0x1c	/* Burst Time-Out Control Register */
#define DMA_WSRA	0x40	/* W-Size Register A */
#define DMA_XSRA	0x44	/* X-Size Register A */
#define DMA_YSRA	0x48	/* Y-Size Register A */
#define DMA_WSRB	0x4c	/* W-Size Register B */
#define DMA_XSRB	0x50	/* X-Size Register B */
#define DMA_YSRB	0x54	/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Register */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Register */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Register */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Channel Control Register */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request Source Select Register */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst Length Register */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request Time-Out Register */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Register (shares the RTOR offset) */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel Counter Register */

#define DCR_DRST	(1 << 1)
#define DCR_DEN		(1 << 0)
#define DBTOCR_EN	(1 << 15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1 << 14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC	(1 << 9)
#define CCR_MSEL_B	(1 << 8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1 << 3)
#define CCR_RPT		(1 << 2)
#define CCR_FRC		(1 << 1)
#define CCR_CEN		(1 << 0)
#define RTOR_EN		(1 << 15)
#define RTOR_CLK	(1 << 14)
#define RTOR_PSC	(1 << 13)
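
/*
 * struct imx_dma_channel - i.MX specific DMA channel state
 * @name: name specified by the DMA client; non-NULL while allocated
 * @irq_handler: client callback for end of transfer
 * @err_handler: client callback for error conditions
 * @prog_handler: client callback invoked after each scatter-gather chunk
 *	has been programmed into the channel registers
 * @data: client context passed to the callbacks
 * @dma_mode: transfer direction, %DMA_MODE_READ or %DMA_MODE_WRITE
 * @sg: current chunk of the scatter-gather emulation
 * @resbytes: total residual number of bytes left to transfer
 * @dma_num: channel number
 * @in_use: nonzero while the channel runs a transfer
 * @ccr_from_device: CCR value used for device-to-memory transfers
 * @ccr_to_device: CCR value used for memory-to-device transfers
 * @watchdog: timer guarding hardware-chained transfers against stalls
 * @hw_chaining: nonzero if hardware chaining may be used (i.MX27 only)
 */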
struct imx_dma_channel {
	const char *name;
	void (*irq_handler) (int, void *);
	void (*err_handler) (int, void *, int errcode);
	void (*prog_handler) (int, void *, struct scatterlist *);
	void *data;
	unsigned int dma_mode;
	struct scatterlist *sg;
	unsigned int resbytes;
	int dma_num;

	int in_use;

	u32 ccr_from_device;
	u32 ccr_to_device;

	struct timer_list watchdog;

	int hw_chaining;
};

static void __iomem *imx_dmav1_baseaddr;

static void imx_dmav1_writel(unsigned val, unsigned offset)
{
	__raw_writel(val, imx_dmav1_baseaddr + offset);
}

static unsigned imx_dmav1_readl(unsigned offset)
{
	return __raw_readl(imx_dmav1_baseaddr + offset);
}

static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];

static struct clk *dma_clk;

static int imx_dma_hw_chain(struct imx_dma_channel *imxdma)
{
	if (cpu_is_mx27())
		return imxdma->hw_chaining;
	else
		return 0;
}
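
/*
 * imx_dma_sg_next - program the next scatter-gather chunk
 *
 * Writes the address and byte count of @sg into the channel registers,
 * taking the residual byte count into account. Returns the number of
 * bytes queued for this chunk.
 */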
static inline int imx_dma_sg_next(int channel, struct scatterlist *sg)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long now;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for unallocated channel %d\n",
		       __func__, channel);
		return 0;
	}

	now = min(imxdma->resbytes, sg->length);
	if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
		imxdma->resbytes -= now;

	if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
		imx_dmav1_writel(sg->dma_address, DMA_DAR(channel));
	else
		imx_dmav1_writel(sg->dma_address, DMA_SAR(channel));

	imx_dmav1_writel(now, DMA_CNTR(channel));

	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
		 "size 0x%08x\n", channel,
		 imx_dmav1_readl(DMA_DAR(channel)),
		 imx_dmav1_readl(DMA_SAR(channel)),
		 imx_dmav1_readl(DMA_CNTR(channel)));

	return now;
}
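
/**
 * imx_dma_setup_single - setup i.MX DMA channel for a linear memory to/from
 * device transfer
 * @channel: i.MX DMA channel number
 * @dma_address: DMA/physical address of the linear data block to transfer
 * @dma_length: length of the data block in bytes
 * @dev_addr: physical device port address
 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to memory or
 *	%DMA_MODE_WRITE from memory to the device
 *
 * Return value: -%EINVAL if incorrect parameters are provided, zero on
 * success.
 */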
int
imx_dma_setup_single(int channel, dma_addr_t dma_address,
		     unsigned int dma_length, unsigned int dev_addr,
		     unsigned int dmamode)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	imxdma->sg = NULL;
	imxdma->dma_mode = dmamode;

	if (!dma_address) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
		       channel);
		return -EINVAL;
	}

	if (!dma_length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
			 "dev_addr=0x%08x for read\n",
			 channel, __func__, (unsigned int)dma_address,
			 dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(dma_address, DMA_DAR(channel));
		imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
			 "dev_addr=0x%08x for write\n",
			 channel, __func__, (unsigned int)dma_address,
			 dma_length, dev_addr);

		imx_dmav1_writel(dma_address, DMA_SAR(channel));
		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imx_dmav1_writel(dma_length, DMA_CNTR(channel));

	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_single);
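
/**
 * imx_dma_setup_sg - setup i.MX DMA channel for a scatter-gather transfer
 * @channel: i.MX DMA channel number
 * @sg: pointer to the scatter-gather list
 * @sgcount: number of chunks in the list
 * @dma_length: total length of the transfer request in bytes
 * @dev_addr: physical device port address
 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to memory or
 *	%DMA_MODE_WRITE from memory to the device
 *
 * Sets up the channel state so that the scatter-gather emulation walks @sg
 * chunk by chunk; only the first chunk is programmed here, the rest is
 * programmed from the per-chunk interrupt (or by hardware chaining on
 * i.MX27).
 *
 * A minimal client sequence might look like the sketch below; the handler
 * names, mapped sg list and device FIFO address are made-up placeholders:
 *
 *	channel = imx_dma_request_by_prio("example", DMA_PRIO_HIGH);
 *	imx_dma_setup_handlers(channel, example_done, example_error, ctx);
 *	imx_dma_setup_sg(channel, sgl, sgcount, total_len,
 *			 fifo_phys_addr, DMA_MODE_READ);
 *	imx_dma_enable(channel);
 *
 * Return value: -%EINVAL for bad parameters, -%EBUSY if the channel is
 * already running, zero on success.
 */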
int
imx_dma_setup_sg(int channel,
		 struct scatterlist *sg, unsigned int sgcount,
		 unsigned int dma_length, unsigned int dev_addr,
		 unsigned int dmamode)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	if (imxdma->in_use)
		return -EBUSY;

	imxdma->sg = sg;
	imxdma->dma_mode = dmamode;
	imxdma->resbytes = dma_length;

	if (!sg || !sgcount) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
		       channel);
		return -EINVAL;
	}

	if (!sg->length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
		       channel);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			 "dev_addr=0x%08x for read\n",
			 channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_SAR(channel));
		imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
			 "dev_addr=0x%08x for write\n",
			 channel, __func__, sg, sgcount, dma_length, dev_addr);

		imx_dmav1_writel(dev_addr, DMA_DAR(channel));
		imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel));
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
		       channel);
		return -EINVAL;
	}

	imx_dma_sg_next(channel, sg);

	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_sg);

/*
 * imx_dma_config_channel - build the CCR values for both transfer
 * directions and route the hardware DMA request line, if any.
 */
int
imx_dma_config_channel(int channel, unsigned int config_port,
	unsigned int config_mem, unsigned int dmareq, int hw_chaining)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	u32 dreq = 0;

	imxdma->hw_chaining = 0;

	if (hw_chaining) {
		imxdma->hw_chaining = 1;
		if (!imx_dma_hw_chain(imxdma))
			return -EINVAL;
	}

	if (dmareq)
		dreq = CCR_REN;

	imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq;
	imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq;

	imx_dmav1_writel(dmareq, DMA_RSSR(channel));

	return 0;
}
EXPORT_SYMBOL(imx_dma_config_channel);

/* imx_dma_config_burstlen - set the burst length for a channel, in bytes */
void imx_dma_config_burstlen(int channel, unsigned int burstlen)
{
	imx_dmav1_writel(burstlen, DMA_BLR(channel));
}
EXPORT_SYMBOL(imx_dma_config_burstlen);
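
/**
 * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification
 * handlers
 * @channel: i.MX DMA channel number
 * @irq_handler: function called when the transfer ends successfully
 * @err_handler: function called when the transfer ends prematurely due to
 *	an error
 * @data: user specified value passed to the handlers
 */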
int
imx_dma_setup_handlers(int channel,
		       void (*irq_handler) (int, void *),
		       void (*err_handler) (int, void *, int),
		       void *data)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for unallocated channel %d\n",
		       __func__, channel);
		return -ENODEV;
	}

	local_irq_save(flags);
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdma->irq_handler = irq_handler;
	imxdma->err_handler = err_handler;
	imxdma->data = data;
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_handlers);
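
/**
 * imx_dma_setup_progression_handler - setup a per-chunk progression callback
 * @channel: i.MX DMA channel number
 * @prog_handler: function called after each scatter-gather chunk has been
 *	programmed
 */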
int
imx_dma_setup_progression_handler(int channel,
		void (*prog_handler) (int, void *, struct scatterlist *))
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for unallocated channel %d\n",
		       __func__, channel);
		return -ENODEV;
	}

	local_irq_save(flags);
	imxdma->prog_handler = prog_handler;
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(imx_dma_setup_progression_handler);
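
/**
 * imx_dma_enable - start an i.MX DMA channel
 * @channel: i.MX DMA channel number
 *
 * The channel has to be allocated via imx_dma_request() or
 * imx_dma_request_by_prio() and set up via imx_dma_setup_single() or
 * imx_dma_setup_sg() before it can be enabled.
 */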
void imx_dma_enable(int channel)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", channel);

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for unallocated channel %d\n",
		       __func__, channel);
		return;
	}

	if (imxdma->in_use)
		return;

	local_irq_save(flags);

	imx_dmav1_writel(1 << channel, DMA_DISR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
		CCR_ACRPT, DMA_CCR(channel));

	/* with hardware chaining, pre-program the next chunk and enable repeat */
	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			imxdma->sg && imx_dma_hw_chain(imxdma)) {
		imxdma->sg = sg_next(imxdma->sg);
		if (imxdma->sg) {
			u32 tmp;
			imx_dma_sg_next(channel, imxdma->sg);
			tmp = imx_dmav1_readl(DMA_CCR(channel));
			imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
				DMA_CCR(channel));
		}
	}
	imxdma->in_use = 1;

	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_enable);
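
/**
 * imx_dma_disable - stop an i.MX DMA channel
 * @channel: i.MX DMA channel number
 */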
void imx_dma_disable(int channel)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", channel);

	if (imx_dma_hw_chain(imxdma))
		del_timer(&imxdma->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
		DMA_CCR(channel));
	imx_dmav1_writel(1 << channel, DMA_DISR);
	imxdma->in_use = 0;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_disable);

static void imx_dma_watchdog(unsigned long chno)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[chno];

	imx_dmav1_writel(0, DMA_CCR(chno));
	imxdma->in_use = 0;
	imxdma->sg = NULL;

	if (imxdma->err_handler)
		imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT);
}

static irqreturn_t dma_err_handler(int irq, void *dev_id)
{
	int i, disr;
	struct imx_dma_channel *imxdma;
	unsigned int err_mask;
	int errcode;

	disr = imx_dmav1_readl(DMA_DISR);

	err_mask = imx_dmav1_readl(DMA_DBTOSR) |
		   imx_dmav1_readl(DMA_DRTOSR) |
		   imx_dmav1_readl(DMA_DSESR) |
		   imx_dmav1_readl(DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		imxdma = &imx_dma_channels[i];
		errcode = 0;

		if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		if (imxdma->name && imxdma->err_handler) {
			imxdma->err_handler(i, imxdma->data, errcode);
			continue;
		}

		imx_dma_channels[i].sg = NULL;

		printk(KERN_WARNING
		       "DMA error on channel %d (%s) -%s%s%s%s\n",
		       i, imxdma->name,
		       errcode & IMX_DMA_ERR_BURST ? " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
	}
	return IRQ_HANDLED;
}

static void dma_irq_handle_channel(int chno)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[chno];

	if (!imxdma->name) {
		/*
		 * IRQ for an unregistered DMA channel; the interrupt
		 * has already been acknowledged by the caller.
		 */
		printk(KERN_WARNING
		       "spurious IRQ for DMA channel %d\n", chno);
		return;
	}

	if (imxdma->sg) {
		u32 tmp;
		struct scatterlist *current_sg = imxdma->sg;
		imxdma->sg = sg_next(imxdma->sg);

		if (imxdma->sg) {
			imx_dma_sg_next(chno, imxdma->sg);

			tmp = imx_dmav1_readl(DMA_CCR(chno));

			if (imx_dma_hw_chain(imxdma)) {
				/* rearm the stall watchdog for the next chunk */
				mod_timer(&imxdma->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
			} else {
				/* restart the channel: clear CEN, set it again below */
				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(tmp, DMA_CCR(chno));

			if (imxdma->prog_handler)
				imxdma->prog_handler(chno, imxdma->data,
						current_sg);

			return;
		}

		if (imx_dma_hw_chain(imxdma)) {
			del_timer(&imxdma->watchdog);
			return;
		}
	}

	imx_dmav1_writel(0, DMA_CCR(chno));
	imxdma->in_use = 0;
	if (imxdma->irq_handler)
		imxdma->irq_handler(chno, imxdma->data);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		dma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(DMA_DISR);

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n", disr);

	imx_dmav1_writel(disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(i);
	}

	return IRQ_HANDLED;
}
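
/**
 * imx_dma_request - request/allocate the specified channel number
 * @channel: i.MX DMA channel number
 * @name: the caller's own non-%NULL identification
 */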
int imx_dma_request(int channel, const char *name)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
	unsigned long flags;
	int ret = 0;

	/* basic sanity checks */
	if (!name)
		return -EINVAL;

	if (channel >= IMX_DMA_CHANNELS) {
		printk(KERN_CRIT "%s: called for nonexistent channel %d\n",
		       __func__, channel);
		return -EINVAL;
	}

	local_irq_save(flags);
	if (imxdma->name) {
		local_irq_restore(flags);
		return -EBUSY;
	}
	memset(imxdma, 0, sizeof(*imxdma));
	imxdma->name = name;
	local_irq_restore(flags);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		ret = request_irq(MX2x_INT_DMACH0 + channel,
				dma_irq_handler, 0, "DMA", NULL);
		if (ret) {
			imxdma->name = NULL;
			pr_crit("Can't register IRQ %d for DMA channel %d\n",
					MX2x_INT_DMACH0 + channel, channel);
			return ret;
		}
		init_timer(&imxdma->watchdog);
		imxdma->watchdog.function = &imx_dma_watchdog;
		imxdma->watchdog.data = channel;
	}

	return ret;
}
EXPORT_SYMBOL(imx_dma_request);
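
/**
 * imx_dma_free - release a previously acquired channel
 * @channel: i.MX DMA channel number
 */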
void imx_dma_free(int channel)
{
	unsigned long flags;
	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];

	if (!imxdma->name) {
		printk(KERN_CRIT
		       "%s: trying to free unallocated channel %d\n",
		       __func__, channel);
		return;
	}

	local_irq_save(flags);

	imx_dma_disable(channel);
	imxdma->name = NULL;

	if (cpu_is_mx21() || cpu_is_mx27())
		free_irq(MX2x_INT_DMACH0 + channel, NULL);

	local_irq_restore(flags);
}
EXPORT_SYMBOL(imx_dma_free);
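
/**
 * imx_dma_request_by_prio - allocate a free channel best suiting the
 * requested priority
 * @name: the caller's own non-%NULL identification
 * @prio: one of the hardware priority levels: %DMA_PRIO_HIGH,
 *	%DMA_PRIO_MEDIUM or %DMA_PRIO_LOW
 *
 * Starts searching at channel 8 for high, 4 for medium and 0 for low
 * priority, scanning upwards first and then falling back to the
 * lower-numbered channels below the starting point.
 *
 * Return value: the allocated channel number, or -%ENODEV if no free
 * channel was found.
 */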
int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio)
{
	int i;
	int best;

	switch (prio) {
	case (DMA_PRIO_HIGH):
		best = 8;
		break;
	case (DMA_PRIO_MEDIUM):
		best = 4;
		break;
	case (DMA_PRIO_LOW):
	default:
		best = 0;
		break;
	}

	for (i = best; i < IMX_DMA_CHANNELS; i++)
		if (!imx_dma_request(i, name))
			return i;

	for (i = best - 1; i >= 0; i--)
		if (!imx_dma_request(i, name))
			return i;

	printk(KERN_ERR "%s: no free DMA channel found\n", __func__);

	return -ENODEV;
}
EXPORT_SYMBOL(imx_dma_request_by_prio);

static int __init imx_dma_init(void)
{
	int ret = 0;
	int i;

	if (cpu_is_mx1())
		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	else if (cpu_is_mx21())
		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	else if (cpu_is_mx27())
		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	else
		return 0;

	dma_clk = clk_get(NULL, "dma");
	if (IS_ERR(dma_clk))
		return PTR_ERR(dma_clk);
	clk_enable(dma_clk);

	/* reset DMA module */
	imx_dmav1_writel(DCR_DRST, DMA_DCR);

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL);
		if (ret) {
			pr_crit("Can't register IRQ for DMA\n");
			return ret;
		}

		ret = request_irq(MX1_DMA_ERR, dma_err_handler, 0, "DMA", NULL);
		if (ret) {
			pr_crit("Can't register error IRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			return ret;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		imx_dma_channels[i].sg = NULL;
		imx_dma_channels[i].dma_num = i;
	}

	return ret;
}

arch_initcall(imx_dma_init);