// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-dma.c
//
// This file contains a driver for the Freescale i.MX DMA engine
// found on i.MX1/21/27
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS		16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
/*
 * Note: RTOR and BUCR deliberately share offset 0x98; the same register
 * acts as the request timeout register when request mode (CCR_REN) is
 * enabled and as the bus utilization register otherwise.
 */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

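/*
 * The hardware provides two shared 2D configuration slots (A and B):
 * the W/X/Y size registers are global rather than per channel, so at
 * most two distinct 2D geometries can be in flight at once. A channel
 * takes a reference on a slot in imxdma_xfer_desc() and drops it when
 * the descriptor completes.
 */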
struct imx_dma_2d_config {
	u16	xsr;
	u16	ysr;
	u16	wsr;
	int	count;
};

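/*
 * Software descriptor: each channel keeps a small pool of these, moving
 * them from ld_free to ld_queue at submit time and on to ld_active when
 * the transfer is started on the hardware.
 */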
struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
	unsigned int			irq;
	struct dma_slave_config		config;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};

struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			 request;
};

static const struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma",
		.data = &imx_dma_devtype[IMX1_DMA],
	}, {
		.compatible = "fsl,imx21-dma",
		.data = &imx_dma_devtype[IMX21_DMA],
	}, {
		.compatible = "fsl,imx27-dma",
		.data = &imx_dma_devtype[IMX27_DMA],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

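/*
 * Register accessors: the i.MX1/21/27 DMA block is a plain MMIO region,
 * so __raw_writel()/__raw_readl() on base + offset is all that is needed.
 */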
static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

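/*
 * imxdma_sg_next - prepare the next chunk for scatter-gather DMA emulation:
 * program the address and count of the current scatterlist entry into the
 * channel's SAR/DAR and CNTR registers. For cyclic transfers
 * (len == IMX_DMA_LENGTH_LOOP) the remaining length is never decremented.
 */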
static inline void imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, size 0x%08x\n",
		__func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
}

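/*
 * Enable the channel: ack and unmask its interrupt, then set CCR_CEN.
 * When hardware chaining is available, also pre-program the following
 * scatterlist entry and set CCR_RPT/CCR_ACRPT so the controller moves
 * on without software intervention.
 */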
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
	    d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

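/*
 * Watchdog for hardware-chained transfers: if a chained repeat does not
 * complete in time, stop the channel and defer cleanup to the tasklet.
 */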
static void imxdma_watchdog(struct timer_list *t)
{
	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}

		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst"    : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request"  : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer"   : "");
	}
	return IRQ_HANDLED;
}

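/*
 * Per-channel completion interrupt: advance to the next scatterlist
 * entry if one remains (restarting or re-arming the channel depending
 * on whether hardware chaining is in use), otherwise stop the channel
 * and let the tasklet complete the descriptor.
 */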
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

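/*
 * Program the hardware from a descriptor and start the transfer. Called
 * with imxdma->lock held. Interleaved transfers must claim a free 2D
 * slot first and then fall through to the MEMCPY setup.
 */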
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			    ((imxdma->slots_2d[i].xsr != d->x) ||
			     (imxdma->slots_2d[i].ysr != d->y) ||
			     (imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
		fallthrough;
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;

	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

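/*
 * Completion tasklet: marks the head descriptor complete (cyclic
 * descriptors stay on ld_active), releases any 2D slot, starts the next
 * queued descriptor, and finally invokes the client callback outside
 * the lock.
 */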
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and dont mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}

static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}

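/*
 * Translate a dma_slave_config into the hardware CCR values: the
 * peripheral side uses the configured bus width in FIFO mode, the
 * memory side is always 32-bit linear, and DMA_BLR gets the burst
 * length in bytes (maxburst words * word size).
 */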
static int imxdma_config_write(struct dma_chan *chan,
			       struct dma_slave_config *dmaengine_cfg,
			       enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}

static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

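/*
 * Pre-allocate a fixed pool of descriptors per channel so the prep
 * functions never need to allocate memory; they simply take the first
 * entry off ld_free.
 */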
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	/* The buffer must be aligned to the configured bus width */
	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

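/*
 * Cyclic transfers are emulated with a scatterlist that is chained back
 * onto itself and a descriptor length of IMX_DMA_LENGTH_LOOP, so the
 * interrupt handler keeps cycling through the periods until the channel
 * is terminated.
 */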
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
				   sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	imxdma_config_write(chan, &imxdmac->config, direction);

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

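/*
 * Interleaved transfers map onto the 2D engine: only single-frame
 * (frame_size == 1) memory-to-memory templates are supported, with
 * x = chunk size, y = number of frames and w = chunk size plus the
 * inter-chunk gap (icg).
 */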
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long)xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

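/*
 * Only one descriptor runs per channel at a time: issue_pending starts
 * the head of ld_queue only when ld_active is empty; subsequent
 * descriptors are started from the completion tasklet.
 */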
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}

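/*
 * Probe is __init (the driver is registered with platform_driver_probe),
 * so it is never re-run after boot. i.MX1 has two shared IRQ lines
 * (normal and error); i.MX21/27 expose one IRQ per channel, and the
 * error handler is called from the shared handler instead.
 */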
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret, i;
	int irq, irq_err;

	of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					       dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
						 imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}

static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}

static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.id_table	= imx_dma_devtype,
	.remove		= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");