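/*
 * i.MX DMA registration and IRQ dispatching: channel allocation,
 * single-buffer and scatter-gather transfer setup, and interrupt and
 * error handling for the i.MX on-chip DMA controller.
 */
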
#undef DEBUG

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

#include <asm/system.h>
#include <asm/irq.h>
#include <asm/hardware.h>
#include <asm/dma.h>
#include <asm/arch/imx-dma.h>

struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];

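
/*
 * imx_dma_sg_next - advance scatter-gather emulation to the next chunk
 * @dma_ch:	i.MX DMA channel number
 * @lastcount:	number of bytes transferred by the chunk that just completed
 *
 * Accounts the completed bytes, then programs the DMA address and count
 * registers for the next chunk of the current scatterlist entry (or the
 * next entry).  Returns the size of the chunk set up, or 0 when the whole
 * scatterlist has been consumed.
 */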
static inline int imx_dma_sg_next(imx_dmach_t dma_ch, unsigned int lastcount)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
	unsigned int nextcount;
	unsigned int nextaddr;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for not allocated channel %d\n",
		       __FUNCTION__, dma_ch);
		return 0;
	}

	imxdma->resbytes -= lastcount;

	if (!imxdma->sg) {
		pr_debug("imxdma%d: no sg data\n", dma_ch);
		return 0;
	}

	imxdma->sgbc += lastcount;
	if ((imxdma->sgbc >= imxdma->sg->length) || !imxdma->resbytes) {
		if ((imxdma->sgcount <= 1) || !imxdma->resbytes) {
			pr_debug("imxdma%d: sg transfer limit reached\n",
				 dma_ch);
			imxdma->sgcount = 0;
			imxdma->sg = NULL;
			return 0;
		} else {
			imxdma->sgcount--;
			imxdma->sg++;
			imxdma->sgbc = 0;
		}
	}
	nextcount = imxdma->sg->length - imxdma->sgbc;
	nextaddr = imxdma->sg->dma_address + imxdma->sgbc;

	if (imxdma->resbytes < nextcount)
		nextcount = imxdma->resbytes;

	if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
		DAR(dma_ch) = nextaddr;
	else
		SAR(dma_ch) = nextaddr;

	CNTR(dma_ch) = nextcount;
	pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, size 0x%08x\n",
		 dma_ch, DAR(dma_ch), SAR(dma_ch), CNTR(dma_ch));

	return nextcount;
}
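
/*
 * imx_dma_setup_sg_base - begin a scatter-gather transfer
 * @dma_ch:	i.MX DMA channel number
 * @sg:		scatterlist to walk
 * @sgcount:	number of entries in @sg
 *
 * Initializes the channel's scatter-gather state and programs the first
 * chunk.  Returns the size of that chunk, or 0 if nothing could be set up.
 */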
static int
imx_dma_setup_sg_base(imx_dmach_t dma_ch,
		      struct scatterlist *sg, unsigned int sgcount)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];

	imxdma->sg = sg;
	imxdma->sgcount = sgcount;
	imxdma->sgbc = 0;
	return imx_dma_sg_next(dma_ch, 0);
}
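
/*
 * imx_dma_setup_single - set up a single contiguous DMA transfer
 * @dma_ch:	i.MX DMA channel number
 * @dma_address:	bus address of the memory buffer
 * @dma_length:	transfer length in bytes
 * @dev_addr:	device (FIFO) address
 * @dmamode:	DMA_MODE_READ (device to memory) or DMA_MODE_WRITE
 *
 * Programs the source, destination and count registers for one transfer
 * with no scatter-gather involvement.  Returns 0 on success or a negative
 * errno on invalid parameters.
 */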
int
imx_dma_setup_single(imx_dmach_t dma_ch, dma_addr_t dma_address,
		     unsigned int dma_length, unsigned int dev_addr,
		     dmamode_t dmamode)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];

	imxdma->sg = NULL;
	imxdma->sgcount = 0;
	imxdma->dma_mode = dmamode;
	imxdma->resbytes = dma_length;

	if (!dma_address) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
		       dma_ch);
		return -EINVAL;
	}

	if (!dma_length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
		       dma_ch);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: imx_dma_setup_single dma_address=0x%08x dma_length=%d dev_addr=0x%08x for read\n",
			 dma_ch, (unsigned int)dma_address, dma_length,
			 dev_addr);
		SAR(dma_ch) = dev_addr;
		DAR(dma_ch) = (unsigned int)dma_address;
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: imx_dma_setup_single dma_address=0x%08x dma_length=%d dev_addr=0x%08x for write\n",
			 dma_ch, (unsigned int)dma_address, dma_length,
			 dev_addr);
		SAR(dma_ch) = (unsigned int)dma_address;
		DAR(dma_ch) = dev_addr;
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
		       dma_ch);
		return -EINVAL;
	}

	CNTR(dma_ch) = dma_length;

	return 0;
}
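
/*
 * imx_dma_setup_sg - set up a device transfer driven by a scatterlist
 * @dma_ch:	i.MX DMA channel number
 * @sg:		scatterlist describing the memory side of the transfer
 * @sgcount:	number of entries in @sg
 * @dma_length:	total number of bytes to transfer
 * @dev_addr:	device (FIFO) address
 * @dmamode:	DMA_MODE_READ or DMA_MODE_WRITE
 *
 * Programs the device-side address register and the first scatterlist
 * chunk; the remaining chunks are chained from the channel's interrupt
 * handler.  Returns 0 on success or a negative errno.
 */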
int
imx_dma_setup_sg(imx_dmach_t dma_ch,
		 struct scatterlist *sg, unsigned int sgcount, unsigned int dma_length,
		 unsigned int dev_addr, dmamode_t dmamode)
{
	int res;
	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];

	imxdma->sg = NULL;
	imxdma->sgcount = 0;
	imxdma->dma_mode = dmamode;
	imxdma->resbytes = dma_length;

	if (!sg || !sgcount) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
		       dma_ch);
		return -EINVAL;
	}

	if (!sg->length) {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
		       dma_ch);
		return -EINVAL;
	}

	if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
		pr_debug("imxdma%d: imx_dma_setup_sg sg=%p sgcount=%d total length=%d dev_addr=0x%08x for read\n",
			 dma_ch, sg, sgcount, dma_length, dev_addr);
		SAR(dma_ch) = dev_addr;
	} else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
		pr_debug("imxdma%d: imx_dma_setup_sg sg=%p sgcount=%d total length=%d dev_addr=0x%08x for write\n",
			 dma_ch, sg, sgcount, dma_length, dev_addr);
		DAR(dma_ch) = dev_addr;
	} else {
		printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
		       dma_ch);
		return -EINVAL;
	}

	res = imx_dma_setup_sg_base(dma_ch, sg, sgcount);
	if (res <= 0) {
		printk(KERN_ERR "imxdma%d: no sg chunk ready\n", dma_ch);
		return -EINVAL;
	}

	return 0;
}
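
/*
 * imx_dma_setup_handlers - install completion and error callbacks
 * @dma_ch:	i.MX DMA channel number
 * @irq_handler:	called when the transfer completes
 * @err_handler:	called on a DMA error, with an IMX_DMA_ERR_* code
 * @data:	opaque pointer passed back to both callbacks
 *
 * The channel must have been allocated with imx_dma_request() first.
 */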
int
imx_dma_setup_handlers(imx_dmach_t dma_ch,
		       void (*irq_handler) (int, void *),
		       void (*err_handler) (int, void *, int),
		       void *data)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
	unsigned long flags;

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for not allocated channel %d\n",
		       __FUNCTION__, dma_ch);
		return -ENODEV;
	}

	local_irq_save(flags);
	DISR = (1 << dma_ch);
	imxdma->irq_handler = irq_handler;
	imxdma->err_handler = err_handler;
	imxdma->data = data;
	local_irq_restore(flags);
	return 0;
}
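
/*
 * imx_dma_enable - start the programmed transfer on a channel
 * @dma_ch:	i.MX DMA channel number
 *
 * Clears any pending interrupt, unmasks the channel's interrupt and sets
 * the channel-enable bit in its control register.
 */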
void imx_dma_enable(imx_dmach_t dma_ch)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_enable\n", dma_ch);

	if (!imxdma->name) {
		printk(KERN_CRIT "%s: called for not allocated channel %d\n",
		       __FUNCTION__, dma_ch);
		return;
	}

	local_irq_save(flags);
	DISR = (1 << dma_ch);
	DIMR &= ~(1 << dma_ch);
	CCR(dma_ch) |= CCR_CEN;
	local_irq_restore(flags);
}
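
/*
 * imx_dma_disable - stop a channel and mask its interrupt
 * @dma_ch:	i.MX DMA channel number
 */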
void imx_dma_disable(imx_dmach_t dma_ch)
{
	unsigned long flags;

	pr_debug("imxdma%d: imx_dma_disable\n", dma_ch);

	local_irq_save(flags);
	DIMR |= (1 << dma_ch);
	CCR(dma_ch) &= ~CCR_CEN;
	DISR = (1 << dma_ch);
	local_irq_restore(flags);
}
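
/*
 * imx_dma_request - claim a specific DMA channel
 * @dma_ch:	i.MX DMA channel number
 * @name:	owner name stored for diagnostics; must not be NULL
 *
 * Returns 0 on success, -EINVAL for an invalid channel or name, or
 * -ENODEV if the channel is already in use.
 */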
int imx_dma_request(imx_dmach_t dma_ch, const char *name)
{
	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];
	unsigned long flags;

	/* basic sanity checks */
	if (!name)
		return -EINVAL;

	if (dma_ch >= IMX_DMA_CHANNELS) {
		printk(KERN_CRIT "%s: called for non-existent channel %d\n",
		       __FUNCTION__, dma_ch);
		return -EINVAL;
	}

	local_irq_save(flags);
	if (imxdma->name) {
		local_irq_restore(flags);
		return -ENODEV;
	}

	imxdma->name = name;
	imxdma->irq_handler = NULL;
	imxdma->err_handler = NULL;
	imxdma->data = NULL;
	imxdma->sg = NULL;
	local_irq_restore(flags);
	return 0;
}
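
/*
 * imx_dma_free - release a channel obtained with imx_dma_request()
 * @dma_ch:	i.MX DMA channel number
 *
 * Masks the channel's interrupt, disables the channel and marks it free.
 */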
void imx_dma_free(imx_dmach_t dma_ch)
{
	unsigned long flags;
	struct imx_dma_channel *imxdma = &imx_dma_channels[dma_ch];

	if (!imxdma->name) {
		printk(KERN_CRIT
		       "%s: trying to free channel %d which is already freed\n",
		       __FUNCTION__, dma_ch);
		return;
	}

	local_irq_save(flags);

	DIMR |= (1 << dma_ch);
	CCR(dma_ch) &= ~CCR_CEN;
	imxdma->name = NULL;
	local_irq_restore(flags);
}
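
/*
 * imx_dma_request_by_prio - claim any free channel suitable for a priority
 * @pdma_ch:	output; set to the allocated channel number on success
 * @name:	owner name stored for diagnostics
 * @prio:	DMA_PRIO_HIGH, DMA_PRIO_MEDIUM or DMA_PRIO_LOW
 *
 * Starts the search at channel 8 for high, 4 for medium and 0 for low
 * priority, scanning upwards first and then falling back to lower-numbered
 * channels.  Returns 0 on success or -ENODEV if no channel is free.
 */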
int
imx_dma_request_by_prio(imx_dmach_t *pdma_ch, const char *name,
			imx_dma_prio prio)
{
	int i;
	int best;

	switch (prio) {
	case (DMA_PRIO_HIGH):
		best = 8;
		break;
	case (DMA_PRIO_MEDIUM):
		best = 4;
		break;
	case (DMA_PRIO_LOW):
	default:
		best = 0;
		break;
	}

	for (i = best; i < IMX_DMA_CHANNELS; i++) {
		if (!imx_dma_request(i, name)) {
			*pdma_ch = i;
			return 0;
		}
	}

	for (i = best - 1; i >= 0; i--) {
		if (!imx_dma_request(i, name)) {
			*pdma_ch = i;
			return 0;
		}
	}

	printk(KERN_ERR "%s: no free DMA channel found\n", __FUNCTION__);

	return -ENODEV;
}

static irqreturn_t dma_err_handler(int irq, void *dev_id)
{
	int i, disr = DISR;
	struct imx_dma_channel *channel;
	unsigned int err_mask = DBTOSR | DRTOSR | DSESR | DBOSR;
	int errcode;

	DISR = disr & err_mask;
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		channel = &imx_dma_channels[i];
		errcode = 0;

		if (DBTOSR & (1 << i)) {
			DBTOSR = (1 << i);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (DRTOSR & (1 << i)) {
			DRTOSR = (1 << i);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (DSESR & (1 << i)) {
			DSESR = (1 << i);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (DBOSR & (1 << i)) {
			DBOSR = (1 << i);
			errcode |= IMX_DMA_ERR_BUFFER;
		}

		if (channel->name && channel->err_handler) {
			channel->err_handler(i, channel->data, errcode);
			continue;
		}

		imx_dma_channels[i].sg = NULL;

		printk(KERN_WARNING
		       "DMA timeout on channel %d (%s) -%s%s%s%s\n",
		       i, channel->name,
		       errcode & IMX_DMA_ERR_BURST ? " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
	}
	return IRQ_HANDLED;
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	int i, disr = DISR;

	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
		 disr);

	DISR = disr;
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i)) {
			struct imx_dma_channel *channel = &imx_dma_channels[i];
			if (channel->name) {
				if (imx_dma_sg_next(i, CNTR(i))) {
					CCR(i) &= ~CCR_CEN;
					mb();
					CCR(i) |= CCR_CEN;
				} else {
					if (channel->irq_handler)
						channel->irq_handler(i,
								channel->data);
				}
			} else {
				printk(KERN_WARNING
				       "spurious IRQ for DMA channel %d\n", i);
			}
		}
	}
	return IRQ_HANDLED;
}

static int __init imx_dma_init(void)
{
	int ret;
	int i;

	/* reset the DMA module */
	DCR = DCR_DRST;

	ret = request_irq(DMA_INT, dma_irq_handler, 0, "DMA", NULL);
	if (ret) {
		printk(KERN_CRIT "Can't register IRQ for DMA\n");
		return ret;
	}

	ret = request_irq(DMA_ERR, dma_err_handler, 0, "DMA", NULL);
	if (ret) {
		printk(KERN_CRIT "Can't register error IRQ for DMA\n");
		free_irq(DMA_INT, NULL);
	}

	/* enable the DMA module */
	DCR = DCR_DEN;

	/* clear all pending interrupts */
	DISR = (1 << IMX_DMA_CHANNELS) - 1;

	/* mask interrupts for all channels */
	DIMR = (1 << IMX_DMA_CHANNELS) - 1;

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		imx_dma_channels[i].sg = NULL;
		imx_dma_channels[i].dma_num = i;
	}

	return ret;
}

arch_initcall(imx_dma_init);

EXPORT_SYMBOL(imx_dma_setup_single);
EXPORT_SYMBOL(imx_dma_setup_sg);
EXPORT_SYMBOL(imx_dma_setup_handlers);
EXPORT_SYMBOL(imx_dma_enable);
EXPORT_SYMBOL(imx_dma_disable);
EXPORT_SYMBOL(imx_dma_request);
EXPORT_SYMBOL(imx_dma_free);
EXPORT_SYMBOL(imx_dma_request_by_prio);
EXPORT_SYMBOL(imx_dma_channels);