1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36#include <linux/module.h>
37#include <linux/dmaengine.h>
38#include <linux/dma-mapping.h>
39#include <linux/interrupt.h>
40#include <linux/io.h>
41#include <linux/slab.h>
42#include <linux/of_device.h>
43#include <linux/of_platform.h>
44
45#include <linux/random.h>
46
47
48#define MPC_DMA_DESCRIPTORS 64
49
50
51#define MPC_DMA_CHANNELS 64
52#define MPC_DMA_TCD_OFFSET 0x1000
53
54
55#define MPC_DMA_DMACR_EDCG (1 << 31)
56#define MPC_DMA_DMACR_ERGA (1 << 3)
57#define MPC_DMA_DMACR_ERCA (1 << 2)
58
59
60#define MPC_DMA_DMAES_VLD (1 << 31)
61#define MPC_DMA_DMAES_GPE (1 << 15)
62#define MPC_DMA_DMAES_CPE (1 << 14)
63#define MPC_DMA_DMAES_ERRCHN(err) \
64 (((err) >> 8) & 0x3f)
65#define MPC_DMA_DMAES_SAE (1 << 7)
66#define MPC_DMA_DMAES_SOE (1 << 6)
67#define MPC_DMA_DMAES_DAE (1 << 5)
68#define MPC_DMA_DMAES_DOE (1 << 4)
69#define MPC_DMA_DMAES_NCE (1 << 3)
70#define MPC_DMA_DMAES_SGE (1 << 2)
71#define MPC_DMA_DMAES_SBE (1 << 1)
72#define MPC_DMA_DMAES_DBE (1 << 0)
73
74#define MPC_DMA_DMAGPOR_SNOOP_ENABLE (1 << 6)
75
76#define MPC_DMA_TSIZE_1 0x00
77#define MPC_DMA_TSIZE_2 0x01
78#define MPC_DMA_TSIZE_4 0x02
79#define MPC_DMA_TSIZE_16 0x04
80#define MPC_DMA_TSIZE_32 0x05
81
82
/*
 * Memory-mapped DMA engine register file (big-endian, accessed with
 * in_be32()/out_8() below).  Packed so the struct layout matches the
 * hardware offsets exactly.
 */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high (channels 63..32) */
	u32 dmaerql;		/* DMA enable request low (channels 31..0) */
	u32 dmaeeih;		/* DMA enable error interrupt high (ch 63..32) */
	u32 dmaeeil;		/* DMA enable error interrupt low (ch 31..0) */
	/* 0x18 — byte-wide "set/clear by channel number" registers */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high (ch 63..32) */
	u32 dmaintl;		/* DMA interrupt request low (ch 31..0) */
	u32 dmaerrh;		/* DMA error high (ch 63..32) */
	u32 dmaerrl;		/* DMA error low (ch 31..0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high (ch 63..32) */
	u32 dmahrsl;		/* DMA hw request status low (ch 31..0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select (ch 63..32) */
		u32 dmagpor;	/* General purpose register (MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select (ch 31..0) */
	/* 0x38 */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
	/* DMA channel (0..63) priority */
};
121
/*
 * Transfer Control Descriptor (TCD).  One per in-flight transfer; copied
 * with memcpy_toio() into the controller's TCD area at MPC_DMA_TCD_OFFSET.
 * Bitfield order matches the hardware word layout — do not reorder.
 */
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* [31:0] source address */
	u32 saddr;

	/* [31:27] source address modulo, [26:24] source transfer size */
	u32 smod:5;
	u32 ssize:3;
	/* [23:19] destination address modulo, [18:16] dest transfer size */
	u32 dmod:5;
	u32 dsize:3;
	/* [15:0] signed source offset added after each source read */
	u32 soff:16;

	/* byte count per minor loop */
	u32 nbytes;
	/* last source address adjustment (applied at major-loop end) */
	u32 slast;
	/* [31:0] destination address */
	u32 daddr;

	/* [31] channel-to-channel linking on minor-loop complete */
	u32 citer_elink:1;
	/* [30:25] link channel for minor-loop complete */
	u32 citer_linkch:6;
	/* [24:16] current major iteration count */
	u32 citer:9;
	/* [15:0] signed destination offset added after each write */
	u32 doff:16;

	/* last destination adjustment, or scatter/gather address of the
	 * next TCD when e_sg is set */
	u32 dlast_sga;

	/* [31] channel-to-channel linking on minor-loop complete */
	u32 biter_elink:1;
	/* [30:25] link channel for minor-loop complete */
	u32 biter_linkch:6;
	/* [24:16] beginning ("reload") major iteration count */
	u32 biter:9;
	/* [15:14] bandwidth control */
	u32 bwc:2;
	/* [13:8] link channel number on major-loop complete */
	u32 major_linkch:6;
	/* [7] channel done flag */
	u32 done:1;
	/* [6] channel active flag */
	u32 active:1;
	/* [5] channel-to-channel linking on major-loop complete */
	u32 major_elink:1;

	/* [4] enable scatter/gather processing */
	u32 e_sg:1;
	/* [3] disable request (clear ERQ on major-loop complete) */
	u32 d_req:1;
	/* [2] interrupt at half of the major loop */
	u32 int_half:1;
	/* [1] interrupt on major-loop complete */
	u32 int_maj:1;
	/* [0] explicit channel start */
	u32 start:1;
};
173
/*
 * Software descriptor: wraps a dmaengine descriptor around one TCD
 * allocated from the channel's coherent TCD pool.
 */
struct mpc_dma_desc {
	struct dma_async_tx_descriptor desc;	/* generic dmaengine part */
	struct mpc_dma_tcd *tcd;		/* CPU pointer into TCD pool */
	dma_addr_t tcd_paddr;			/* bus address of the TCD */
	int error;				/* set to -EIO on HW error */
	struct list_head node;			/* link on a channel list */
};
181
/*
 * Per-channel state.  Descriptors move through the lists in this order:
 * free -> prepared (prep_memcpy) -> queued (tx_submit) -> active
 * (mpc_dma_execute) -> completed (irq) -> free (tasklet).
 */
struct mpc_dma_chan {
	struct dma_chan chan;
	struct list_head free;
	struct list_head prepared;
	struct list_head queued;
	struct list_head active;
	struct list_head completed;
	struct mpc_dma_tcd *tcd;	/* coherent TCD pool for this channel */
	dma_addr_t tcd_paddr;		/* bus address of the TCD pool */
	dma_cookie_t completed_cookie;	/* last retired cookie */

	/* Protects the descriptor lists and cookies above */
	spinlock_t lock;
};
196
/* Controller-wide state: one instance per probed DMA engine. */
struct mpc_dma {
	struct dma_device dma;			/* dmaengine registration */
	struct tasklet_struct tasklet;		/* error report + completion */
	struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem *regs;	/* control registers */
	struct mpc_dma_tcd __iomem *tcd;	/* hardware TCD area */
	int irq;
	int irq2;				/* MPC8308 second IRQ only */
	uint error_status;			/* DMAES latched in hard irq */
	int is_mpc8308;				/* MPC8308 quirks enabled */

	/* Serializes error_status between irq handler and tasklet */
	spinlock_t error_status_lock;
};
211
212#define DRV_NAME "mpc512x_dma"
213
214
215static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
216{
217 return container_of(c, struct mpc_dma_chan, chan);
218}
219
220
221static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
222{
223 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
224 return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
225}
226
227
228
229
230
231
232
233
234
/*
 * Execute queued descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	/* Move all queued descriptors to the active list */
	list_splice_tail_init(&mchan->queued, &mchan->active);

	/* Chain descriptors into one transfer using scatter/gather links */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		/* Link the previous TCD to this one and auto-start it */
		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	/* Interrupt only when the whole chain has completed */
	prev->tcd->int_maj = 1;

	/* Send first descriptor's TCD into the hardware TCD slot */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;
	/* Kick the channel by setting its start bit */
	out_8(&mdma->regs->dmassrt, cid);
}
272
273
/*
 * Handle interrupt and error flags for one 32-channel bank.
 * @is:  interrupt status bits, @es: error status bits (bit i = channel
 * off+i), @off: channel number base (0 or 32).  Runs in hard-irq context,
 * hence the plain spin_lock() on the channel lock.
 */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	/* Visit every channel with either flag set, highest bit first */
	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		/* Acknowledge interrupt and error flags in hardware */
		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status: mark all active descriptors failed */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Retire finished descriptors; restart if more are queued */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}
303
304
/* Interrupt handler shared by both IRQs (MPC8308 has a second one). */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Latch the error status register for the tasklet; keep only the
	 * first valid snapshot until the tasklet consumes it */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel; the high bank exists only when
	 * more than 32 channels are configured */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Error reporting and descriptor callbacks run in the tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}
330
331
/*
 * Process completed descriptors: invoke client callbacks, run dependency
 * chains and return descriptors to the free list.  Callbacks are called
 * with the channel lock dropped, as required by the dmaengine API.
 */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Grab this channel's completed descriptors under the lock */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Run callbacks and dependencies outside the lock */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Recycle descriptors and publish the retired cookie
		 * (also re-empties 'list' for the next channel) */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}
372
373
/*
 * Tasklet: decode and report any error status latched by the irq handler,
 * then process completed descriptors.
 */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	/* Consume the latched error status (read-and-clear) */
	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report, one line per set DMAES flag */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
						      MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address"
								" Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citter"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather"
						" Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}
420
421
/*
 * Submit descriptor to hardware.  Assigns the cookie and, if the channel
 * is idle, starts the transfer immediately (this driver does not defer
 * work to issue_pending).  Returns the assigned cookie.
 */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor from the prepared list to the queued list */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors now */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie, wrapping around to 1 (0 and negatives are
	 * reserved by the dmaengine API) */
	cookie = mchan->chan.cookie + 1;
	if (cookie <= 0)
		cookie = 1;

	mchan->chan.cookie = cookie;
	mdesc->desc.cookie = cookie;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}
452
453
/*
 * Allocate channel resources: a coherent pool of TCDs plus the software
 * descriptors that wrap them, then enable the channel's error interrupt.
 * A partial descriptor allocation is tolerated; failure is returned only
 * when not a single descriptor could be allocated.
 */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc software descriptors, each pointing at one TCD slot */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev, "Memory allocation error. "
					"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt for this channel */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}
510
511
/*
 * Free channel resources.  The channel must be idle — any descriptor
 * still in flight (prepared/queued/active/completed) is a driver bug,
 * hence the BUG_ON checks.  Memory is released outside the lock.
 */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data to a temporary list so it can be freed unlocked */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);

	/* Free software descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt for this channel */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}
549
550
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as they are
	 * submitted (see mpc_dma_tx_submit()), so this function does
	 * nothing.
	 */
}
558
559
560static enum dma_status
561mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
562 struct dma_tx_state *txstate)
563{
564 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
565 unsigned long flags;
566 dma_cookie_t last_used;
567 dma_cookie_t last_complete;
568
569 spin_lock_irqsave(&mchan->lock, flags);
570 last_used = mchan->chan.cookie;
571 last_complete = mchan->completed_cookie;
572 spin_unlock_irqrestore(&mchan->lock, flags);
573
574 dma_set_tx_state(txstate, last_complete, last_used, 0);
575 return dma_async_is_complete(cookie, last_complete, last_used);
576}
577
578
/*
 * Prepare a memory-to-memory descriptor.  Picks the largest transfer
 * size that src, dst and len are all aligned to.  Returns NULL when no
 * free descriptor is available (after trying to reclaim completed ones,
 * so the caller may retry).
 */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* Try to free completed descriptors so a retry can succeed */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 cannot do the 16-byte transfer size */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	/* Single major iteration transferring the whole length */
	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
651
652static int __devinit mpc_dma_probe(struct platform_device *op,
653 const struct of_device_id *match)
654{
655 struct device_node *dn = op->dev.of_node;
656 struct device *dev = &op->dev;
657 struct dma_device *dma;
658 struct mpc_dma *mdma;
659 struct mpc_dma_chan *mchan;
660 struct resource res;
661 ulong regs_start, regs_size;
662 int retval, i;
663
664 mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
665 if (!mdma) {
666 dev_err(dev, "Memory exhausted!\n");
667 return -ENOMEM;
668 }
669
670 mdma->irq = irq_of_parse_and_map(dn, 0);
671 if (mdma->irq == NO_IRQ) {
672 dev_err(dev, "Error mapping IRQ!\n");
673 return -EINVAL;
674 }
675
676 if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
677 mdma->is_mpc8308 = 1;
678 mdma->irq2 = irq_of_parse_and_map(dn, 1);
679 if (mdma->irq2 == NO_IRQ) {
680 dev_err(dev, "Error mapping IRQ!\n");
681 return -EINVAL;
682 }
683 }
684
685 retval = of_address_to_resource(dn, 0, &res);
686 if (retval) {
687 dev_err(dev, "Error parsing memory region!\n");
688 return retval;
689 }
690
691 regs_start = res.start;
692 regs_size = resource_size(&res);
693
694 if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
695 dev_err(dev, "Error requesting memory region!\n");
696 return -EBUSY;
697 }
698
699 mdma->regs = devm_ioremap(dev, regs_start, regs_size);
700 if (!mdma->regs) {
701 dev_err(dev, "Error mapping memory region!\n");
702 return -ENOMEM;
703 }
704
705 mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
706 + MPC_DMA_TCD_OFFSET);
707
708 retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
709 mdma);
710 if (retval) {
711 dev_err(dev, "Error requesting IRQ!\n");
712 return -EINVAL;
713 }
714
715 if (mdma->is_mpc8308) {
716 retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
717 DRV_NAME, mdma);
718 if (retval) {
719 dev_err(dev, "Error requesting IRQ2!\n");
720 return -EINVAL;
721 }
722 }
723
724 spin_lock_init(&mdma->error_status_lock);
725
726 dma = &mdma->dma;
727 dma->dev = dev;
728 if (!mdma->is_mpc8308)
729 dma->chancnt = MPC_DMA_CHANNELS;
730 else
731 dma->chancnt = 16;
732 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
733 dma->device_free_chan_resources = mpc_dma_free_chan_resources;
734 dma->device_issue_pending = mpc_dma_issue_pending;
735 dma->device_tx_status = mpc_dma_tx_status;
736 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
737
738 INIT_LIST_HEAD(&dma->channels);
739 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
740
741 for (i = 0; i < dma->chancnt; i++) {
742 mchan = &mdma->channels[i];
743
744 mchan->chan.device = dma;
745 mchan->chan.chan_id = i;
746 mchan->chan.cookie = 1;
747 mchan->completed_cookie = mchan->chan.cookie;
748
749 INIT_LIST_HEAD(&mchan->free);
750 INIT_LIST_HEAD(&mchan->prepared);
751 INIT_LIST_HEAD(&mchan->queued);
752 INIT_LIST_HEAD(&mchan->active);
753 INIT_LIST_HEAD(&mchan->completed);
754
755 spin_lock_init(&mchan->lock);
756 list_add_tail(&mchan->chan.device_node, &dma->channels);
757 }
758
759 tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
760
761
762
763
764
765
766
767 if (!mdma->is_mpc8308) {
768 out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
769 MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
770
771
772 out_be32(&mdma->regs->dmaerqh, 0);
773 out_be32(&mdma->regs->dmaerql, 0);
774
775
776 out_be32(&mdma->regs->dmaeeih, 0);
777 out_be32(&mdma->regs->dmaeeil, 0);
778
779
780 out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
781 out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
782 out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
783 out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
784
785
786 out_be32(&mdma->regs->dmaihsa, 0);
787 out_be32(&mdma->regs->dmailsa, 0);
788 } else {
789
790 out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
791
792
793 out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
794
795 out_be32(&mdma->regs->dmaeeil, 0);
796
797
798 out_be32(&mdma->regs->dmaintl, 0xFFFF);
799 out_be32(&mdma->regs->dmaerrl, 0xFFFF);
800 }
801
802
803 dev_set_drvdata(dev, mdma);
804 retval = dma_async_device_register(dma);
805 if (retval) {
806 devm_free_irq(dev, mdma->irq, mdma);
807 irq_dispose_mapping(mdma->irq);
808 }
809
810 return retval;
811}
812
813static int __devexit mpc_dma_remove(struct platform_device *op)
814{
815 struct device *dev = &op->dev;
816 struct mpc_dma *mdma = dev_get_drvdata(dev);
817
818 dma_async_device_unregister(&mdma->dma);
819 devm_free_irq(dev, mdma->irq, mdma);
820 irq_dispose_mapping(mdma->irq);
821
822 return 0;
823}
824
825static struct of_device_id mpc_dma_match[] = {
826 { .compatible = "fsl,mpc5121-dma", },
827 {},
828};
829
/* OF platform driver glue: binds probe/remove to the match table. */
static struct of_platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= __devexit_p(mpc_dma_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= mpc_dma_match,
	},
};
839
/* Module init: register the OF platform driver. */
static int __init mpc_dma_init(void)
{
	return of_register_platform_driver(&mpc_dma_driver);
}
module_init(mpc_dma_init);
845
/* Module exit: unregister the OF platform driver. */
static void __exit mpc_dma_exit(void)
{
	of_unregister_platform_driver(&mpc_dma_driver);
}
module_exit(mpc_dma_exit);
851
852MODULE_LICENSE("GPL");
853MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
854