// SPDX-License-Identifier: GPL-2.0
/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 *
 * Based on drivers from Altera FPGA and Xilinx.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of_dma.h>

#include "dmaengine.h"

#define MSGDMA_MAX_TRANS_LEN		U32_MAX
#define MSGDMA_DESC_NUM			1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst, bit 23:16 read burst,
 *		   bit 15:00 sequence number
 * @stride: bit 31:16 write stride, bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
	u32 read_addr_lo;
	u32 write_addr_lo;
	u32 len;
	u32 burst_seq_num;
	u32 stride;
	u32 read_addr_hi;
	u32 write_addr_hi;
	u32 control;
};

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x)	((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP		BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP		BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS	BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES	BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP	BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN	BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ	BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ	BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ	GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE	BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s).
 */
#define MSGDMA_DESC_CTL_GO		BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST		(MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE	(MSGDMA_DESC_CTL_END_ON_EOP |	\
					 MSGDMA_DESC_CTL_END_ON_LEN |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_EARLY_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD		0x00000001
#define MSGDMA_DESC_STRIDE_WR		0x00010000
#define MSGDMA_DESC_STRIDE_RW		0x00010001

/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS		0x00	/* Read / Clear */
#define MSGDMA_CSR_CONTROL		0x04	/* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL	0x08	/* 31:16 - write fill level */
						/* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL	0x0c	/* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM		0x10	/* 31:16 - write seq number */
						/* 15:00 - read seq number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY			BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY		BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL		BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY		BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL		BIT(4)
#define MSGDMA_CSR_STAT_STOPPED			BIT(5)
#define MSGDMA_CSR_STAT_RESETTING		BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR		BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY	BIT(8)
#define MSGDMA_CSR_STAT_IRQ			BIT(9)
#define MSGDMA_CSR_STAT_MASK			GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ	GENMASK(8, 0)

#define DESC_EMPTY	(MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
			 MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP		BIT(0)
#define MSGDMA_CSR_CTL_RESET		BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR	BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY	BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR	BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS	BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)		(((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)		((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)	((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v)	(((v) & 0xffff0000) >> 16)

/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED	0x00
#define MSGDMA_RESP_STATUS		0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM	BIT(8)
#define MSGDMA_RESP_ERR_MASK	0xff

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: node to move from the free list to the tx list
 * @tx_list: transmit list node
 */
struct msgdma_sw_desc {
	struct dma_async_tx_descriptor async_tx;
	struct msgdma_extended_desc hw_desc;
	struct list_head node;
	struct list_head tx_list;
};

/*
 * struct msgdma_device - DMA device structure
 */
struct msgdma_device {
	spinlock_t lock;
	struct device *dev;
	struct tasklet_struct irq_tasklet;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct list_head done_list;
	u32 desc_free_cnt;
	bool idle;

	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct msgdma_sw_desc *sw_desq;
	unsigned int npendings;

	struct dma_slave_config slave_cfg;

	int irq;

	/* mSGDMA controller CSR */
	void __iomem *csr;

	/* mSGDMA descriptor FIFO */
	void __iomem *desc;

	/* mSGDMA response FIFO (optional) */
	void __iomem *resp;
};

#define to_mdev(chan)	container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx)	container_of(tx, struct msgdma_sw_desc, async_tx)

/*
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
	list_del(&desc->node);
	spin_unlock_irqrestore(&mdev->lock, flags);

	INIT_LIST_HEAD(&desc->tx_list);

	return desc;
}

/*
 * msgdma_free_descriptor - Free descriptor
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
				   struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *child, *next;

	mdev->desc_free_cnt++;
	list_add_tail(&desc->node, &mdev->free_list);
	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
		mdev->desc_free_cnt++;
		list_move_tail(&child->node, &mdev->free_list);
	}
}

/*
 * msgdma_free_desc_list - Free descriptors list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List to parse and delete the descriptor
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
				  struct list_head *list)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		msgdma_free_descriptor(mdev, desc);
}

/*
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
			       dma_addr_t dst, dma_addr_t src, size_t len,
			       u32 stride)
{
	/* Set lower 32bits of src & dest addresses in the descriptor */
	desc->read_addr_lo = lower_32_bits(src);
	desc->write_addr_lo = lower_32_bits(dst);

	/* Set upper 32bits of src & dest addresses in the descriptor */
	desc->read_addr_hi = upper_32_bits(src);
	desc->write_addr_hi = upper_32_bits(dst);

	desc->len = len;
	desc->stride = stride;
	desc->burst_seq_num = 0;

	/*
	 * Don't set interrupt on xfer end yet, this will be done later
	 * for the "last" descriptor.
	 */
	desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
		MSGDMA_DESC_CTL_END_ON_LEN;
}

/*
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
	desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/*
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct msgdma_device *mdev = to_mdev(tx->chan);
	struct msgdma_sw_desc *new;
	dma_cookie_t cookie;
	unsigned long flags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&mdev->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&new->node, &mdev->pending_list);
	spin_unlock_irqrestore(&mdev->lock, flags);

	return cookie;
}

/*
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
		   dma_addr_t dma_src, size_t len, ulong flags)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *new, *first = NULL;
	struct msgdma_extended_desc *desc;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
		desc = &new->hw_desc;
		msgdma_desc_config(desc, dma_dst, dma_src, copy,
				   MSGDMA_DESC_STRIDE_RW);
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	msgdma_desc_config_eod(desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = flags;

	return &first->async_tx;
}
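
/*
 * Illustrative only, not part of the original driver: a minimal sketch of how
 * a dmaengine client could exercise the memcpy path above. The channel
 * request, variable names and trimmed error handling are assumptions made for
 * the example; only the dmaengine core calls are real API.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "memcpy");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);		// lands in msgdma_tx_submit()
 *		dma_async_issue_pending(chan);	// lands in msgdma_issue_pending()
 *	}
 */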

/*
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 * @dchan: DMA channel
 * @sgl: scatter-gather list (memory side of the transfer)
 * @sg_len: Number of entries in the scatter-gather list
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		     unsigned int sg_len, enum dma_transfer_direction dir,
		     unsigned long flags, void *context)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct dma_slave_config *cfg = &mdev->slave_cfg;
	struct msgdma_sw_desc *new, *first = NULL;
	void *desc = NULL;
	size_t len, avail;
	dma_addr_t dma_dst, dma_src;
	u32 desc_cnt = 0, i;
	struct scatterlist *sg;
	u32 stride;
	unsigned long irqflags;

	for_each_sg(sgl, sg, sg_len, i)
		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	avail = sg_dma_len(sgl);

	/* Run until we are out of scatter-gather entries */
	while (true) {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		desc = &new->hw_desc;
		len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

		if (dir == DMA_MEM_TO_DEV) {
			dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			dma_dst = cfg->dst_addr;
			stride = MSGDMA_DESC_STRIDE_RD;
		} else {
			dma_src = cfg->src_addr;
			dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			stride = MSGDMA_DESC_STRIDE_WR;
		}
		msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
		avail -= len;

		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);

		/* Fetch the next scatter-gather entry */
		if (avail == 0) {
			if (sg_len == 0)
				break;
			sgl = sg_next(sgl);
			if (sgl == NULL)
				break;
			sg_len--;
			avail = sg_dma_len(sgl);
		}
	}

	msgdma_desc_config_eod(desc);
	first->async_tx.flags = flags;

	return &first->async_tx;
}
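
/*
 * Illustrative only, not part of the original driver: a minimal sketch of a
 * MEM_TO_DEV slave transfer against the prep_slave_sg path above. The device
 * FIFO address and the already-mapped scatterlist are assumptions; the
 * dst_addr below ends up in mdev->slave_cfg via msgdma_dma_config().
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = dev_fifo_phys,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */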

static int msgdma_dma_config(struct dma_chan *dchan,
			     struct dma_slave_config *config)
{
	struct msgdma_device *mdev = to_mdev(dchan);

	memcpy(&mdev->slave_cfg, config, sizeof(*config));

	return 0;
}

static void msgdma_reset(struct msgdma_device *mdev)
{
	u32 val;
	int ret;

	/* Reset mSGDMA */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
	iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

	ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
				 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
				 1, 10000);
	if (ret)
		dev_err(mdev->dev, "DMA channel did not reset\n");

	/* Clear all status bits */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

	/* Enable the DMA controller including interrupts */
	iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
		  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

	mdev->idle = true;
}

static void msgdma_copy_one(struct msgdma_device *mdev,
			    struct msgdma_sw_desc *desc)
{
	void __iomem *hw_desc = mdev->desc;

	/*
	 * Check if the DESC FIFO is full. If it is, wait for at least one
	 * entry to become free again.
	 */
	while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
	       MSGDMA_CSR_STAT_DESC_BUF_FULL)
		mdelay(1);

	/*
	 * The descriptor needs to get copied into the descriptor FIFO of the
	 * DMA controller. The descriptor is flushed to the FIFO once the last
	 * word (the control word) is written. Since we cannot rely on
	 * memcpy() writing all words in ascending address order on every
	 * architecture, the control word is written separately below,
	 * framed by write barriers.
	 */
	memcpy((void __force *)hw_desc, &desc->hw_desc,
	       sizeof(desc->hw_desc) - sizeof(u32));

	/* Write control word last to flush this descriptor into the FIFO */
	mdev->idle = false;
	wmb();
	iowrite32(desc->hw_desc.control, hw_desc +
		  offsetof(struct msgdma_extended_desc, control));
	wmb();
}

/*
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into the controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
				     struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *sdesc, *next;

	msgdma_copy_one(mdev, desc);

	list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
		msgdma_copy_one(mdev, sdesc);
}

/*
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	if (!mdev->idle)
		return;

	desc = list_first_entry_or_null(&mdev->pending_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;

	list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
	msgdma_copy_desc_to_fifo(mdev, desc);
}

/*
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
	struct msgdma_device *mdev = to_mdev(chan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_start_transfer(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
}

/*
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
		struct dmaengine_desc_callback cb;

		list_del(&desc->node);

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock(&mdev->lock);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock(&mdev->lock);
		}

		/* Put the descriptor back onto the free list */
		msgdma_free_descriptor(mdev, desc);
	}
}

/*
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	desc = list_first_entry_or_null(&mdev->active_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &mdev->done_list);
}

/*
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
	msgdma_free_desc_list(mdev, &mdev->active_list);
	msgdma_free_desc_list(mdev, &mdev->pending_list);
	msgdma_free_desc_list(mdev, &mdev->done_list);
}

/*
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_free_descriptors(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
	kfree(mdev->sw_desq);
}

/*
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *desc;
	int i;

	mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
	if (!mdev->sw_desq)
		return -ENOMEM;

	mdev->idle = true;
	mdev->desc_free_cnt = MSGDMA_DESC_NUM;

	INIT_LIST_HEAD(&mdev->free_list);

	for (i = 0; i < MSGDMA_DESC_NUM; i++) {
		desc = mdev->sw_desq + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
		desc->async_tx.tx_submit = msgdma_tx_submit;
		list_add_tail(&desc->node, &mdev->free_list);
	}

	return MSGDMA_DESC_NUM;
}

/*
 * msgdma_tasklet - Schedule completion tasklet
 * @t: Pointer to the tasklet embedded in the Altera mSGDMA device structure
 */
static void msgdma_tasklet(struct tasklet_struct *t)
{
	struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet);
	u32 count;
	u32 __maybe_unused size;
	u32 __maybe_unused status;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);

	if (mdev->resp) {
		/* Read number of responses that are available */
		count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
		dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
			__func__, __LINE__, count);
	} else {
		count = 1;
	}

	while (count--) {
		/*
		 * Read both longwords to purge this response from the FIFO.
		 * On Avalon-MM implementations, size and status do not carry
		 * useful values (like transferred bytes or error bits), so
		 * just drop them here.
		 */
		if (mdev->resp) {
			size = ioread32(mdev->resp +
					MSGDMA_RESP_BYTES_TRANSFERRED);
			status = ioread32(mdev->resp +
					  MSGDMA_RESP_STATUS);
		}

		msgdma_complete_descriptor(mdev);
		msgdma_chan_desc_cleanup(mdev);
	}

	spin_unlock_irqrestore(&mdev->lock, flags);
}

/*
 * msgdma_irq_handler - mSGDMA interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
	struct msgdma_device *mdev = data;
	u32 status;

	status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
	if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
		/* Start next transfer if the DMA controller is idle */
		spin_lock(&mdev->lock);
		mdev->idle = true;
		msgdma_start_transfer(mdev);
		spin_unlock(&mdev->lock);
	}

	tasklet_schedule(&mdev->irq_tasklet);

	/* Clear interrupt in mSGDMA controller */
	iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

	return IRQ_HANDLED;
}

/*
 * msgdma_dev_remove() - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
	if (!mdev)
		return;

	devm_free_irq(mdev->dev, mdev->irq, mdev);
	tasklet_kill(&mdev->irq_tasklet);
	list_del(&mdev->dmachan.device_node);
}

static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr,
			   bool optional)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		if (optional) {
			*ptr = NULL;
			dev_info(device, "optional resource %s not defined\n",
				 name);
			return 0;
		}
		dev_err(device, "mandatory resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap(device, region->start,
			    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap of %s failed!", name);
		return -ENOMEM;
	}

	return 0;
}

/*
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
	struct msgdma_device *mdev;
	struct dma_device *dma_dev;
	struct resource *dma_res;
	int ret;

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
	if (!mdev)
		return -ENOMEM;

	mdev->dev = &pdev->dev;

	/* Map CSR space */
	ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr, false);
	if (ret)
		return ret;

	/* Map (extended) descriptor space */
	ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc, false);
	if (ret)
		return ret;

	/* Map response space (optional) */
	ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp, true);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);

	/* Get interrupt nr from platform data */
	mdev->irq = platform_get_irq(pdev, 0);
	if (mdev->irq < 0)
		return -ENXIO;

	ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
			       0, dev_name(&pdev->dev), mdev);
	if (ret)
		return ret;

	tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);

	dma_cookie_init(&mdev->dmachan);

	spin_lock_init(&mdev->lock);

	INIT_LIST_HEAD(&mdev->active_list);
	INIT_LIST_HEAD(&mdev->pending_list);
	INIT_LIST_HEAD(&mdev->done_list);
	INIT_LIST_HEAD(&mdev->free_list);

	dma_dev = &mdev->dmadev;

	/* Set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
		BIT(DMA_MEM_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Init DMA link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* Set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = msgdma_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
	dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
	dma_dev->device_config = msgdma_dma_config;

	dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

	mdev->dmachan.device = dma_dev;
	list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

	/* Set DMA mask to 64 bits, fall back to 32 bits if that fails */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	}

	msgdma_reset(mdev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto fail;

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret == -EINVAL)
		dev_warn(&pdev->dev, "device was not probed from DT");
	else if (ret && ret != -ENODEV)
		goto fail_unregister;

	dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

	return 0;

fail_unregister:
	dma_async_device_unregister(dma_dev);
fail:
	msgdma_dev_remove(mdev);

	return ret;
}

/*
 * msgdma_remove() - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int msgdma_remove(struct platform_device *pdev)
{
	struct msgdma_device *mdev = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->dmadev);
	msgdma_dev_remove(mdev);

	dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id msgdma_match[] = {
	{ .compatible = "altr,socfpga-msgdma", },
	{ }
};

MODULE_DEVICE_TABLE(of, msgdma_match);
#endif
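
/*
 * Illustrative only, not part of the original driver: a device-tree node this
 * driver could bind against, derived from the compatible string above and the
 * "csr"/"desc"/"resp" reg-names requested in msgdma_probe(). Addresses, sizes
 * and the interrupt specifier are placeholders.
 *
 *	msgdma0: dma-controller@ff200000 {
 *		compatible = "altr,socfpga-msgdma";
 *		reg = <0xff200000 0x20>,
 *		      <0xff200020 0x20>,
 *		      <0xff200040 0x8>;
 *		reg-names = "csr", "desc", "resp";
 *		interrupts = <0 104 4>;
 *		#dma-cells = <1>;
 *	};
 */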

static struct platform_driver msgdma_driver = {
	.driver = {
		.name = "altera-msgdma",
		.of_match_table = of_match_ptr(msgdma_match),
	},
	.probe = msgdma_probe,
	.remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);

MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_LICENSE("GPL");