// SPDX-License-Identifier: GPL-2.0
/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define MSGDMA_MAX_TRANS_LEN		U32_MAX
#define MSGDMA_DESC_NUM			1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *		   bit 23:16 read burst
 *		   bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *	    bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
	u32 read_addr_lo;
	u32 write_addr_lo;
	u32 len;
	u32 burst_seq_num;
	u32 stride;
	u32 read_addr_hi;
	u32 write_addr_hi;
	u32 control;
};

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x)	((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP		BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP		BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS	BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES	BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP	BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN	BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ	BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ	BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ	GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE	BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s).
 */
#define MSGDMA_DESC_CTL_GO		BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST		(MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE	(MSGDMA_DESC_CTL_END_ON_EOP |	\
					 MSGDMA_DESC_CTL_END_ON_LEN |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_EARLY_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD		0x00000001
#define MSGDMA_DESC_STRIDE_WR		0x00010000
#define MSGDMA_DESC_STRIDE_RW		0x00010001

/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS		0x00	/* Read / Clear */
#define MSGDMA_CSR_CONTROL		0x04	/* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL	0x08	/* 31:16 - write fill level */
						/* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL	0x0c	/* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM		0x10	/* 31:16 - write seq number */
						/* 15:00 - read seq number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY			BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY		BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL		BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY		BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL		BIT(4)
#define MSGDMA_CSR_STAT_STOPPED			BIT(5)
#define MSGDMA_CSR_STAT_RESETTING		BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR		BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY	BIT(8)
#define MSGDMA_CSR_STAT_IRQ			BIT(9)
#define MSGDMA_CSR_STAT_MASK			GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ	GENMASK(8, 0)

#define DESC_EMPTY	(MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
			 MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP		BIT(0)
#define MSGDMA_CSR_CTL_RESET		BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR	BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY	BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR	BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS	BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)		(((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)		((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)	((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v)		(((v) & 0xffff0000) >> 16)

/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED	0x00
#define MSGDMA_RESP_STATUS		0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM	BIT(8)
#define MSGDMA_RESP_ERR_MASK	0xff

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: node to move from the free list to the tx list
 * @tx_list: transmit list node
 */
struct msgdma_sw_desc {
	struct dma_async_tx_descriptor async_tx;
	struct msgdma_extended_desc hw_desc;
	struct list_head node;
	struct list_head tx_list;
};

/*
 * struct msgdma_device - DMA device structure
 */
struct msgdma_device {
	spinlock_t lock;
	struct device *dev;
	struct tasklet_struct irq_tasklet;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct list_head done_list;
	u32 desc_free_cnt;
	bool idle;

	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct msgdma_sw_desc *sw_desq;
	unsigned int npendings;

	struct dma_slave_config slave_cfg;

	int irq;

	/* mSGDMA controller */
	void __iomem *csr;

	/* mSGDMA descriptors */
	void __iomem *desc;

	/* mSGDMA response */
	void __iomem *resp;
};

#define to_mdev(chan)	container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx)	container_of(tx, struct msgdma_sw_desc, async_tx)

/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
	list_del(&desc->node);
	spin_unlock_irqrestore(&mdev->lock, flags);

	INIT_LIST_HEAD(&desc->tx_list);

	return desc;
}

/**
 * msgdma_free_descriptor - Free descriptor
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
				   struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *child, *next;

	mdev->desc_free_cnt++;
	list_add_tail(&desc->node, &mdev->free_list);
	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
		mdev->desc_free_cnt++;
		list_move_tail(&child->node, &mdev->free_list);
	}
}

/**
 * msgdma_free_desc_list - Free descriptors list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List to parse and delete the descriptor
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
				  struct list_head *list)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		msgdma_free_descriptor(mdev, desc);
}

/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
			       dma_addr_t dst, dma_addr_t src, size_t len,
			       u32 stride)
{
	/* Set lower 32bits of src & dest addresses in the descriptor */
	desc->read_addr_lo = lower_32_bits(src);
	desc->write_addr_lo = lower_32_bits(dst);

	/* Set upper 32bits of src & dest addresses in the descriptor */
	desc->read_addr_hi = upper_32_bits(src);
	desc->write_addr_hi = upper_32_bits(dst);

	desc->len = len;
	desc->stride = stride;
	desc->burst_seq_num = 0;	/* 0 will result in max burst length */

	/*
	 * Don't set interrupt on xfer end yet, this will be done later
	 * for the "last" descriptor.
	 */
	desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
		MSGDMA_DESC_CTL_END_ON_LEN;
}

/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
	desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/**
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct msgdma_device *mdev = to_mdev(tx->chan);
	struct msgdma_sw_desc *new;
	dma_cookie_t cookie;
	unsigned long flags;

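	/* Assign a cookie and queue the descriptor on the pending list */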
	new = tx_to_desc(tx);
	spin_lock_irqsave(&mdev->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&new->node, &mdev->pending_list);
	spin_unlock_irqrestore(&mdev->lock, flags);

	return cookie;
}

/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: DMA destination address
 * @dma_src: DMA source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
		   dma_addr_t dma_src, size_t len, ulong flags)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *new, *first = NULL;
	struct msgdma_extended_desc *desc;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

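	/* Reserve enough free descriptors for this transfer up front */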
	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
		desc = &new->hw_desc;
		msgdma_desc_config(desc, dma_dst, dma_src, copy,
				   MSGDMA_DESC_STRIDE_RW);
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	msgdma_desc_config_eod(desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = flags;

	return &first->async_tx;
}

/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 * @dchan: DMA channel
 * @sgl: Destination scatter list
 * @sg_len: Number of entries in descriptor
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		     unsigned int sg_len, enum dma_transfer_direction dir,
		     unsigned long flags, void *context)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct dma_slave_config *cfg = &mdev->slave_cfg;
	struct msgdma_sw_desc *new, *first = NULL;
	void *desc = NULL;
	size_t len, avail;
	dma_addr_t dma_dst, dma_src;
	u32 desc_cnt = 0, i;
	struct scatterlist *sg;
	u32 stride;
	unsigned long irqflags;

	for_each_sg(sgl, sg, sg_len, i)
		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

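	/* Reserve enough free descriptors to cover every scatterlist chunk */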
	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	avail = sg_dma_len(sgl);

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		desc = &new->hw_desc;
		len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

		if (dir == DMA_MEM_TO_DEV) {
			dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			dma_dst = cfg->dst_addr;
			stride = MSGDMA_DESC_STRIDE_RD;
		} else {
			dma_src = cfg->src_addr;
			dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			stride = MSGDMA_DESC_STRIDE_WR;
		}
		msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
		avail -= len;

		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);

		/* Fetch the next scatterlist entry */
		if (avail == 0) {
			if (sg_len == 0)
				break;
			sgl = sg_next(sgl);
			if (sgl == NULL)
				break;
			sg_len--;
			avail = sg_dma_len(sgl);
		}
	}

	msgdma_desc_config_eod(desc);
	first->async_tx.flags = flags;

	return &first->async_tx;
}

static int msgdma_dma_config(struct dma_chan *dchan,
			     struct dma_slave_config *config)
{
	struct msgdma_device *mdev = to_mdev(dchan);

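	/* Cache the slave configuration for later slave_sg preparation */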
	memcpy(&mdev->slave_cfg, config, sizeof(*config));

	return 0;
}

static void msgdma_reset(struct msgdma_device *mdev)
{
	u32 val;
	int ret;

	/* Reset mSGDMA */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
	iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

	ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
				 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
				 1, 10000);
	if (ret)
		dev_err(mdev->dev, "DMA channel did not reset\n");

	/* Clear all status bits */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

	/* Enable the DMA controller including interrupts */
	iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
		  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

	mdev->idle = true;
}

static void msgdma_copy_one(struct msgdma_device *mdev,
			    struct msgdma_sw_desc *desc)
{
	void __iomem *hw_desc = mdev->desc;

	/*
	 * Check if the DESC FIFO is not full. If it is full, we need to
	 * wait for at least one entry to become free again.
	 */
	while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
	       MSGDMA_CSR_STAT_DESC_BUF_FULL)
		mdelay(1);

	/*
	 * The descriptor needs to get copied into the descriptor FIFO
	 * of the DMA controller. The descriptor will get flushed to the
	 * FIFO once the last word (the control word) is written. Since
	 * the descriptor is write-protected after that write, write the
	 * control word separately, after all other descriptor words have
	 * been written. This is why the memcpy below excludes the control
	 * word (the last u32 of the descriptor).
	 */
	memcpy((void __force *)hw_desc, &desc->hw_desc,
	       sizeof(desc->hw_desc) - sizeof(u32));

	/* Write control word last to flush this descriptor into the FIFO */
	mdev->idle = false;
	wmb();
	iowrite32(desc->hw_desc.control, hw_desc +
		  offsetof(struct msgdma_extended_desc, control));
	wmb();
}

/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into the controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
				     struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *sdesc, *next;

	msgdma_copy_one(mdev, desc);

	list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
		msgdma_copy_one(mdev, sdesc);
}

/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	if (!mdev->idle)
		return;

	desc = list_first_entry_or_null(&mdev->pending_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;

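	/* Move all pending descriptors to the active list before starting */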
	list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
	msgdma_copy_desc_to_fifo(mdev, desc);
}

/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
	struct msgdma_device *mdev = to_mdev(chan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_start_transfer(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		list_del(&desc->node);

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock(&mdev->lock);
			callback(callback_param);
			spin_lock(&mdev->lock);
		}

		/* Run any dependencies, then free the descriptor */
		msgdma_free_descriptor(mdev, desc);
	}
}

/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

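	/* Take the oldest active descriptor and move it to the done list */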
	desc = list_first_entry_or_null(&mdev->active_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &mdev->done_list);
}

/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
	msgdma_free_desc_list(mdev, &mdev->active_list);
	msgdma_free_desc_list(mdev, &mdev->pending_list);
	msgdma_free_desc_list(mdev, &mdev->done_list);
}

/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_free_descriptors(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
	kfree(mdev->sw_desq);
}

/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *desc;
	int i;

	mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
	if (!mdev->sw_desq)
		return -ENOMEM;

	mdev->idle = true;
	mdev->desc_free_cnt = MSGDMA_DESC_NUM;

	INIT_LIST_HEAD(&mdev->free_list);

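	/* Initialize each software descriptor and place it on the free list */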
	for (i = 0; i < MSGDMA_DESC_NUM; i++) {
		desc = mdev->sw_desq + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
		desc->async_tx.tx_submit = msgdma_tx_submit;
		list_add_tail(&desc->node, &mdev->free_list);
	}

	return MSGDMA_DESC_NUM;
}

/**
 * msgdma_tasklet - Schedule completion tasklet
 * @data: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_tasklet(unsigned long data)
{
	struct msgdma_device *mdev = (struct msgdma_device *)data;
	u32 count;
	u32 __maybe_unused size;
	u32 __maybe_unused status;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);

	/* Read number of responses that are available */
	count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
	dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
		__func__, __LINE__, count);

	while (count--) {
		/*
		 * Read both longwords to purge this response from the
		 * response FIFO. On Avalon-MM implementations, size and
		 * status do not contain real values, like transferred
		 * bytes or error bits, so the values are simply dropped.
		 */
		size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
		status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

		msgdma_complete_descriptor(mdev);
		msgdma_chan_desc_cleanup(mdev);
	}

	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_irq_handler - mSGDMA interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED on success and IRQ_NONE on failure
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
	struct msgdma_device *mdev = data;
	u32 status;

	status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
	if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
		/* Start next transfer if the DMA controller is idle */
		spin_lock(&mdev->lock);
		mdev->idle = true;
		msgdma_start_transfer(mdev);
		spin_unlock(&mdev->lock);
	}

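	/* Defer descriptor completion and callbacks to the tasklet */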
	tasklet_schedule(&mdev->irq_tasklet);

	/* Clear interrupt in mSGDMA controller */
	iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

	return IRQ_HANDLED;
}

/**
 * msgdma_dev_remove - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
	if (!mdev)
		return;

	devm_free_irq(mdev->dev, mdev->irq, mdev);
	tasklet_kill(&mdev->irq_tasklet);
	list_del(&mdev->dmachan.device_node);
}

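/* Look up a named MMIO resource on the platform device, reserve and map it */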
static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		dev_err(device, "resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap_nocache(device, region->start,
				    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap_nocache of %s failed!\n", name);
		return -ENOMEM;
	}

	return 0;
}

/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
	struct msgdma_device *mdev;
	struct dma_device *dma_dev;
	struct resource *dma_res;
	int ret;

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
	if (!mdev)
		return -ENOMEM;

	mdev->dev = &pdev->dev;

	/* Map CSR space */
	ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
	if (ret)
		return ret;

	/* Map (extended) descriptor space */
	ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
	if (ret)
		return ret;

	/* Map response space */
	ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);

	/* Get interrupt nr from platform data */
	mdev->irq = platform_get_irq(pdev, 0);
	if (mdev->irq < 0)
		return -ENXIO;

	ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
			       0, dev_name(&pdev->dev), mdev);
	if (ret)
		return ret;

	tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);

	dma_cookie_init(&mdev->dmachan);

	spin_lock_init(&mdev->lock);

	INIT_LIST_HEAD(&mdev->active_list);
	INIT_LIST_HEAD(&mdev->pending_list);
	INIT_LIST_HEAD(&mdev->done_list);
	INIT_LIST_HEAD(&mdev->free_list);

	dma_dev = &mdev->dmadev;

	/* Set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
		BIT(DMA_MEM_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Init DMA link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* Set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = msgdma_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
	dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
	dma_dev->device_config = msgdma_dma_config;

	dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

	mdev->dmachan.device = dma_dev;
	list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

	/* Try a 64-bit DMA mask first, fall back to 32 bits */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	}

	msgdma_reset(mdev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto fail;

	dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

	return 0;

fail:
	msgdma_dev_remove(mdev);

	return ret;
}

/**
 * msgdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int msgdma_remove(struct platform_device *pdev)
{
	struct msgdma_device *mdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&mdev->dmadev);
	msgdma_dev_remove(mdev);

	dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");

	return 0;
}

static struct platform_driver msgdma_driver = {
	.driver = {
		.name = "altera-msgdma",
	},
	.probe = msgdma_probe,
	.remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);

MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_LICENSE("GPL");