/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. Use of the source code contained herein is subject to the
 * terms and conditions of the GNU General Public License, Version 2 or
 * later.
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1
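
/*
 * A transfer length of all ones (IMX_DMA_LENGTH_LOOP) marks an endless,
 * cyclic transfer: imxdma_sg_next() never decrements the remaining byte
 * count for such a descriptor.
 */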
#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)

#define DMA_DCR		0x00	/* Control Register */
#define DMA_DISR	0x04	/* Interrupt status Register */
#define DMA_DIMR	0x08	/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c	/* Burst timeout status Register */
#define DMA_DRTOSR	0x10	/* Request timeout Register */
#define DMA_DSESR	0x14	/* Transfer Error Status Register */
#define DMA_DBOSR	0x18	/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c	/* Burst timeout control Register */
#define DMA_WSRA	0x40	/* W-Size Register A */
#define DMA_XSRA	0x44	/* X-Size Register A */
#define DMA_YSRA	0x48	/* Y-Size Register A */
#define DMA_WSRB	0x4c	/* W-Size Register B */
#define DMA_XSRB	0x50	/* X-Size Register B */
#define DMA_YSRB	0x54	/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR		(0x0 << 12)
#define CCR_DMOD_2D		(0x1 << 12)
#define CCR_DMOD_FIFO		(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR		(0x0 << 10)
#define CCR_SMOD_2D		(0x1 << 10)
#define CCR_SMOD_FIFO		(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16	xsr;
	u16	ysr;
	u16	wsr;
	int	count;
};
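
/*
 * The controller provides only two global 2D geometry register sets (A and
 * B), shared by all channels. slots_2d[] refcounts them via ->count so that
 * concurrent interleaved transfers with the same x/y/w geometry can share a
 * slot, while a transfer needing a different geometry must wait (-EBUSY).
 */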

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
	unsigned int			irq;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};

struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			request;
};

static const struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma",
		.data = &imx_dma_devtype[IMX1_DMA],
	}, {
		.compatible = "fsl,imx21-dma",
		.data = &imx_dma_devtype[IMX21_DMA],
	}, {
		.compatible = "fsl,imx27-dma",
		.data = &imx_dma_devtype[IMX27_DMA],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
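
/*
 * Descriptor lifecycle (see the list heads in struct imxdma_channel):
 * descriptors are preallocated on ld_free, moved to ld_queue by tx_submit(),
 * promoted to ld_active by issue_pending() or by the completion tasklet, and
 * recycled back to ld_free once they complete or the channel is terminated.
 */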

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}
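
/*
 * Hardware sg chaining (the CCR_RPT/CCR_ACRPT auto-repeat bits) is only
 * usable on i.MX27-class controllers; everywhere else the driver falls back
 * to reprogramming the channel from the interrupt handler for every sg entry.
 */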

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}

static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
	    d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}
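
/*
 * The per-channel watchdog only runs while hardware chaining is active: it
 * is re-armed for 500 ms each time the next sg entry is chained, and on
 * expiry it clears the channel's CCR and schedules the completion tasklet.
 */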

static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR) |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}

		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
	}
	return IRQ_HANDLED;
}

static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}
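
/*
 * Interrupt topology differs per SoC: i.MX1 exposes one shared DMA interrupt
 * plus one error interrupt, handled above for all channels at once, while
 * i.MX21/27 provide a dedicated interrupt line per channel (requested one by
 * one in imxdma_probe()).
 */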

static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			    ((imxdma->slots_2d[i].xsr != d->x) ||
			     (imxdma->slots_2d[i].ysr != d->y) ||
			     (imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;

	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and dont mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		/* Use a separate pointer for the follow-up transfer so the
		 * callback below still runs on the descriptor that actually
		 * completed, not on the one just started.
		 */
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}

static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}

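/*
 * Note on the CCR encoding used below: the source-side fields (SSIZ at bit 4,
 * SMOD at bit 10) turn into the destination-side fields (DSIZ at bit 6, DMOD
 * at bit 12) when shifted left by two, which is why the "other side" of each
 * transfer is composed as (bits << 2).
 */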
static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
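
/*
 * Minimal usage sketch for the cyclic path above (client side; names such as
 * "fifo_phys" and "buf_phys" are placeholders, not part of this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.dst_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, 0);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */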

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

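/*
 * For interleaved transfers the 2D geometry maps as: x is the row length in
 * bytes, y the number of rows (numf), and w the line stride (icg + x). Only
 * single-frame (frame_size == 1) mem-to-mem templates are supported.
 */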
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long)xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

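/*
 * Device tree clients reference a channel with a single cell holding the DMA
 * request number (the controller node is expected to carry
 * "#dma-cells = <1>"); the xlate callback below turns that cell into a
 * channel request via the filter function above.
 */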
static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret, i;
	int irq, irq_err;

	of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					       dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
						 imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}

static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}

static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.id_table	= imx_dma_devtype,
	.remove		= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");