/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 *  The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 *  core that provides high-bandwidth direct memory access between memory
 *  and AXI4-Stream type video target peripherals. The core provides efficient
 *  two dimensional DMA operations with independent asynchronous read (S2MM)
 *  and write (MM2S) channel operation. It can be configured to have either
 *  one channel or two channels. If configured as two channels, one is to
 *  transmit to the video device (MM2S) and another is to receive from the
 *  video device (S2MM). Initialization, status, interrupt and management
 *  registers are accessed through an AXI4-Lite slave interface.
 *
 *  The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 *  provides high-bandwidth one dimensional direct memory access between
 *  memory and AXI4-Stream target peripherals. It supports one receive and
 *  one transmit channel, both of them optional at synthesis time.
 *
 *  The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory
 *  Access (DMA) between a memory-mapped source address and a memory-mapped
 *  destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
34#include <linux/bitops.h>
35#include <linux/dmapool.h>
36#include <linux/dma/xilinx_dma.h>
37#include <linux/init.h>
38#include <linux/interrupt.h>
39#include <linux/io.h>
40#include <linux/iopoll.h>
41#include <linux/module.h>
42#include <linux/of_address.h>
43#include <linux/of_dma.h>
44#include <linux/of_platform.h>
45#include <linux/of_irq.h>
46#include <linux/slab.h>
47#include <linux/clk.h>
48#include <linux/io-64-nonatomic-lo-hi.h>
49
50#include "../dmaengine.h"
51
/* Register/Descriptor Offsets */
53#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
54#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
55#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
56#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0

/* Control Registers */
59#define XILINX_DMA_REG_DMACR 0x0000
60#define XILINX_DMA_DMACR_DELAY_MAX 0xff
61#define XILINX_DMA_DMACR_DELAY_SHIFT 24
62#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
63#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
64#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
65#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
66#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
67#define XILINX_DMA_DMACR_MASTER_SHIFT 8
68#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
69#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
70#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
71#define XILINX_DMA_DMACR_RESET BIT(2)
72#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
73#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
74#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
75#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
76#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
77#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
78
79#define XILINX_DMA_REG_DMASR 0x0004
80#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
81#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
82#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
83#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
84#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
85#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
86#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
87#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
88#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
89#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
90#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
91#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
92#define XILINX_DMA_DMASR_IDLE BIT(1)
93#define XILINX_DMA_DMASR_HALTED BIT(0)
94#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
95#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
96
97#define XILINX_DMA_REG_CURDESC 0x0008
98#define XILINX_DMA_REG_TAILDESC 0x0010
99#define XILINX_DMA_REG_REG_INDEX 0x0014
100#define XILINX_DMA_REG_FRMSTORE 0x0018
101#define XILINX_DMA_REG_THRESHOLD 0x001c
102#define XILINX_DMA_REG_FRMPTR_STS 0x0024
103#define XILINX_DMA_REG_PARK_PTR 0x0028
104#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
105#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
106#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
107#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
108#define XILINX_DMA_REG_VDMA_VERSION 0x002c

/* Register Direct Mode Registers */
111#define XILINX_DMA_REG_VSIZE 0x0000
112#define XILINX_DMA_REG_HSIZE 0x0004
113
114#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
115#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
116#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
117
118#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
119#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
120
121#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
122#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)

/* HW specific definitions */
125#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
126
127#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
128 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
129 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
130 XILINX_DMA_DMASR_ERR_IRQ)
131
132#define XILINX_DMA_DMASR_ALL_ERR_MASK \
133 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
134 XILINX_DMA_DMASR_SOF_LATE_ERR | \
135 XILINX_DMA_DMASR_SG_DEC_ERR | \
136 XILINX_DMA_DMASR_SG_SLV_ERR | \
137 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
138 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
139 XILINX_DMA_DMASR_DMA_DEC_ERR | \
140 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
141 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only reported when the channel has
 * Flush on FSync enabled.
 */
148#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
149 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
150 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
151 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
152 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
155#define XILINX_DMA_FLUSH_S2MM 3
156#define XILINX_DMA_FLUSH_MM2S 2
157#define XILINX_DMA_FLUSH_BOTH 1

/* Delay loop counter to prevent hardware failure */
160#define XILINX_DMA_LOOP_COUNT 1000000

/* AXI DMA Specific Registers/Offsets */
163#define XILINX_DMA_REG_SRCDSTADDR 0x18
164#define XILINX_DMA_REG_BTT 0x28

/* AXI DMA Specific Masks/Bit fields */
167#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
168#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
169#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
170#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
171#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
172#define XILINX_DMA_CR_COALESCE_SHIFT 16
173#define XILINX_DMA_BD_SOP BIT(27)
174#define XILINX_DMA_BD_EOP BIT(26)
175#define XILINX_DMA_COALESCE_MAX 255
176#define XILINX_DMA_NUM_DESCS 255
177#define XILINX_DMA_NUM_APP_WORDS 5

/* AXI CDMA Specific Registers/Offsets */
180#define XILINX_CDMA_REG_SRCADDR 0x18
181#define XILINX_CDMA_REG_DSTADDR 0x20

/* AXI CDMA Specific Masks */
184#define XILINX_CDMA_CR_SGMODE BIT(3)
185
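/*
 * xilinx_prep_dma_addr_t - build a dma_addr_t from an LSB/MSB register pair.
 * Token pasting appends "_msb" to the argument, so (illustrative expansion
 * only) xilinx_prep_dma_addr_t(hw->src_addr) combines hw->src_addr_msb and
 * hw->src_addr into one 64-bit address, as used in the CDMA/AXI DMA
 * simple-mode transfer setup below.
 */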
186#define xilinx_prep_dma_addr_t(addr) \
187 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
188
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
199struct xilinx_vdma_desc_hw {
200 u32 next_desc;
201 u32 pad1;
202 u32 buf_addr;
203 u32 buf_addr_msb;
204 u32 vsize;
205 u32 hsize;
206 u32 stride;
207} __aligned(64);
208
/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
221struct xilinx_axidma_desc_hw {
222 u32 next_desc;
223 u32 next_desc_msb;
224 u32 buf_addr;
225 u32 buf_addr_msb;
226 u32 reserved1;
227 u32 reserved2;
228 u32 control;
229 u32 status;
230 u32 app[XILINX_DMA_NUM_APP_WORDS];
231} __aligned(64);
232
233
/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
244struct xilinx_cdma_desc_hw {
245 u32 next_desc;
246 u32 next_desc_msb;
247 u32 src_addr;
248 u32 src_addr_msb;
249 u32 dest_addr;
250 u32 dest_addr_msb;
251 u32 control;
252 u32 status;
253} __aligned(64);
254
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
261struct xilinx_vdma_tx_segment {
262 struct xilinx_vdma_desc_hw hw;
263 struct list_head node;
264 dma_addr_t phys;
265} __aligned(64);
266
/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
273struct xilinx_axidma_tx_segment {
274 struct xilinx_axidma_desc_hw hw;
275 struct list_head node;
276 dma_addr_t phys;
277} __aligned(64);
278
/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
285struct xilinx_cdma_tx_segment {
286 struct xilinx_cdma_desc_hw hw;
287 struct list_head node;
288 dma_addr_t phys;
289} __aligned(64);
290
/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
298struct xilinx_dma_tx_descriptor {
299 struct dma_async_tx_descriptor async_tx;
300 struct list_head segments;
301 struct list_head node;
302 bool cyclic;
303};
304
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @has_vflip: S2MM vertical flip support
 */
342struct xilinx_dma_chan {
343 struct xilinx_dma_device *xdev;
344 u32 ctrl_offset;
345 u32 desc_offset;
346 spinlock_t lock;
347 struct list_head pending_list;
348 struct list_head active_list;
349 struct list_head done_list;
350 struct list_head free_seg_list;
351 struct dma_chan common;
352 struct dma_pool *desc_pool;
353 struct device *dev;
354 int irq;
355 int id;
356 enum dma_transfer_direction direction;
357 int num_frms;
358 bool has_sg;
359 bool cyclic;
360 bool genlock;
361 bool err;
362 bool idle;
363 struct tasklet_struct tasklet;
364 struct xilinx_vdma_config config;
365 bool flush_on_fsync;
366 u32 desc_pendingcount;
367 bool ext_addr;
368 u32 desc_submitcount;
369 u32 residue;
370 struct xilinx_axidma_tx_segment *seg_v;
371 dma_addr_t seg_p;
372 struct xilinx_axidma_tx_segment *cyclic_seg_v;
373 dma_addr_t cyclic_seg_p;
374 void (*start_transfer)(struct xilinx_dma_chan *chan);
375 int (*stop_transfer)(struct xilinx_dma_chan *chan);
376 bool has_vflip;
377};
378
/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 *
 */
387enum xdma_ip_type {
388 XDMA_TYPE_AXIDMA = 0,
389 XDMA_TYPE_CDMA,
390 XDMA_TYPE_VDMA,
391};
392
393struct xilinx_dma_config {
394 enum xdma_ip_type dmatype;
395 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
396 struct clk **tx_clk, struct clk **txs_clk,
397 struct clk **rx_clk, struct clk **rxs_clk);
398};
399
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 */
420struct xilinx_dma_device {
421 void __iomem *regs;
422 struct device *dev;
423 struct dma_device common;
424 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
425 bool has_sg;
426 u32 flush_on_fsync;
427 bool ext_addr;
428 struct platform_device *pdev;
429 const struct xilinx_dma_config *dma_config;
430 struct clk *axi_clk;
431 struct clk *tx_clk;
432 struct clk *txs_clk;
433 struct clk *rx_clk;
434 struct clk *rxs_clk;
435 u32 nr_channels;
436 u32 chan_id;
437 u32 max_buffer_len;
438};
439
440
441#define to_xilinx_chan(chan) \
442 container_of(chan, struct xilinx_dma_chan, common)
443#define to_dma_tx_descriptor(tx) \
444 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
445#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
446 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
447 cond, delay_us, timeout_us)
448
449
450static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
451{
452 return ioread32(chan->xdev->regs + reg);
453}
454
455static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
456{
457 iowrite32(value, chan->xdev->regs + reg);
458}
459
460static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
461 u32 value)
462{
463 dma_write(chan, chan->desc_offset + reg, value);
464}
465
466static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
467{
468 return dma_read(chan, chan->ctrl_offset + reg);
469}
470
471static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
472 u32 value)
473{
474 dma_write(chan, chan->ctrl_offset + reg, value);
475}
476
477static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
478 u32 clr)
479{
480 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
481}
482
483static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
484 u32 set)
485{
486 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
487}
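
/*
 * Illustrative usage of the helpers above (sketch): they perform simple
 * read-modify-write accesses within a channel's control register space,
 * e.g. dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP)
 * reads DMACR, ORs in the run/stop bit and writes it back through
 * iowrite32() at regs + ctrl_offset + reg.
 */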
488
489
490
491
492
493
494
495
496
497
498
499
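/*
 * vdma_desc_write_64 - write a 64-bit value to a VDMA descriptor register
 * pair as two separate 32-bit accesses: the LSB word first, then the MSB
 * word at the next register offset.
 */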
500static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
501 u32 value_lsb, u32 value_msb)
502{
503
504 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
505
506
507 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
508}
509
510static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
511{
512 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
513}
514
515static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
516 dma_addr_t addr)
517{
518 if (chan->ext_addr)
519 dma_writeq(chan, reg, addr);
520 else
521 dma_ctrl_write(chan, reg, addr);
522}
523
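/*
 * xilinx_axidma_buf - program the buffer address of an AXI DMA hardware
 * descriptor, splitting it into LSB/MSB words when the channel supports
 * 64-bit addressing (chan->ext_addr).
 */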
524static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
525 struct xilinx_axidma_desc_hw *hw,
526 dma_addr_t buf_addr, size_t sg_used,
527 size_t period_len)
528{
529 if (chan->ext_addr) {
530 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
531 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
532 period_len);
533 } else {
534 hw->buf_addr = buf_addr + sg_used + period_len;
535 }
536}
537
/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
548static struct xilinx_vdma_tx_segment *
549xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
550{
551 struct xilinx_vdma_tx_segment *segment;
552 dma_addr_t phys;
553
554 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
555 if (!segment)
556 return NULL;
557
558 segment->phys = phys;
559
560 return segment;
561}
562
563
564
565
566
567
568
569static struct xilinx_cdma_tx_segment *
570xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
571{
572 struct xilinx_cdma_tx_segment *segment;
573 dma_addr_t phys;
574
575 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
576 if (!segment)
577 return NULL;
578
579 segment->phys = phys;
580
581 return segment;
582}
583
584
585
586
587
588
589
590static struct xilinx_axidma_tx_segment *
591xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
592{
593 struct xilinx_axidma_tx_segment *segment = NULL;
594 unsigned long flags;
595
596 spin_lock_irqsave(&chan->lock, flags);
597 if (!list_empty(&chan->free_seg_list)) {
598 segment = list_first_entry(&chan->free_seg_list,
599 struct xilinx_axidma_tx_segment,
600 node);
601 list_del(&segment->node);
602 }
603 spin_unlock_irqrestore(&chan->lock, flags);
604
605 return segment;
606}
607
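/*
 * xilinx_dma_clean_hw_desc - clear a hardware descriptor for reuse while
 * preserving its next_desc/next_desc_msb links; the pre-allocated segments
 * form a fixed ring (see xilinx_dma_alloc_chan_resources()).
 */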
608static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
609{
610 u32 next_desc = hw->next_desc;
611 u32 next_desc_msb = hw->next_desc_msb;
612
613 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
614
615 hw->next_desc = next_desc;
616 hw->next_desc_msb = next_desc_msb;
617}
618
619
620
621
622
623
624static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
625 struct xilinx_axidma_tx_segment *segment)
626{
627 xilinx_dma_clean_hw_desc(&segment->hw);
628
629 list_add_tail(&segment->node, &chan->free_seg_list);
630}
631
632
633
634
635
636
637static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
638 struct xilinx_cdma_tx_segment *segment)
639{
640 dma_pool_free(chan->desc_pool, segment, segment->phys);
641}
642
643
644
645
646
647
648static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
649 struct xilinx_vdma_tx_segment *segment)
650{
651 dma_pool_free(chan->desc_pool, segment, segment->phys);
652}
653
654
655
656
657
658
659
660static struct xilinx_dma_tx_descriptor *
661xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
662{
663 struct xilinx_dma_tx_descriptor *desc;
664
665 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
666 if (!desc)
667 return NULL;
668
669 INIT_LIST_HEAD(&desc->segments);
670
671 return desc;
672}
673
674
675
676
677
678
679static void
680xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
681 struct xilinx_dma_tx_descriptor *desc)
682{
683 struct xilinx_vdma_tx_segment *segment, *next;
684 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
685 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
686
687 if (!desc)
688 return;
689
690 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
691 list_for_each_entry_safe(segment, next, &desc->segments, node) {
692 list_del(&segment->node);
693 xilinx_vdma_free_tx_segment(chan, segment);
694 }
695 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
696 list_for_each_entry_safe(cdma_segment, cdma_next,
697 &desc->segments, node) {
698 list_del(&cdma_segment->node);
699 xilinx_cdma_free_tx_segment(chan, cdma_segment);
700 }
701 } else {
702 list_for_each_entry_safe(axidma_segment, axidma_next,
703 &desc->segments, node) {
704 list_del(&axidma_segment->node);
705 xilinx_dma_free_tx_segment(chan, axidma_segment);
706 }
707 }
708
709 kfree(desc);
710}
711
712
713
714
715
716
717
718
719static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
720 struct list_head *list)
721{
722 struct xilinx_dma_tx_descriptor *desc, *next;
723
724 list_for_each_entry_safe(desc, next, list, node) {
725 list_del(&desc->node);
726 xilinx_dma_free_tx_descriptor(chan, desc);
727 }
728}
729
730
731
732
733
734static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
735{
736 unsigned long flags;
737
738 spin_lock_irqsave(&chan->lock, flags);
739
740 xilinx_dma_free_desc_list(chan, &chan->pending_list);
741 xilinx_dma_free_desc_list(chan, &chan->done_list);
742 xilinx_dma_free_desc_list(chan, &chan->active_list);
743
744 spin_unlock_irqrestore(&chan->lock, flags);
745}
746
747
748
749
750
751static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
752{
753 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
754 unsigned long flags;
755
756 dev_dbg(chan->dev, "Free all channel resources.\n");
757
758 xilinx_dma_free_descriptors(chan);
759
760 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
761 spin_lock_irqsave(&chan->lock, flags);
762 INIT_LIST_HEAD(&chan->free_seg_list);
763 spin_unlock_irqrestore(&chan->lock, flags);
764
765
766 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
767 XILINX_DMA_NUM_DESCS, chan->seg_v,
768 chan->seg_p);
769
770
771 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
772 chan->cyclic_seg_v, chan->cyclic_seg_p);
773 }
774
775 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
776 dma_pool_destroy(chan->desc_pool);
777 chan->desc_pool = NULL;
778 }
779}
780
781
782
783
784
785
786
787static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
788 struct xilinx_dma_tx_descriptor *desc,
789 unsigned long *flags)
790{
791 dma_async_tx_callback callback;
792 void *callback_param;
793
794 callback = desc->async_tx.callback;
795 callback_param = desc->async_tx.callback_param;
796 if (callback) {
797 spin_unlock_irqrestore(&chan->lock, *flags);
798 callback(callback_param);
799 spin_lock_irqsave(&chan->lock, *flags);
800 }
801}
802
803
804
805
806
807static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
808{
809 struct xilinx_dma_tx_descriptor *desc, *next;
810 unsigned long flags;
811
812 spin_lock_irqsave(&chan->lock, flags);
813
814 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
815 struct dmaengine_desc_callback cb;
816
817 if (desc->cyclic) {
818 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
819 break;
820 }
821
822
823 list_del(&desc->node);
824
825
826 dmaengine_desc_get_callback(&desc->async_tx, &cb);
827 if (dmaengine_desc_callback_valid(&cb)) {
828 spin_unlock_irqrestore(&chan->lock, flags);
829 dmaengine_desc_callback_invoke(&cb, NULL);
830 spin_lock_irqsave(&chan->lock, flags);
831 }
832
833
834 dma_run_dependencies(&desc->async_tx);
835 xilinx_dma_free_tx_descriptor(chan, desc);
836 }
837
838 spin_unlock_irqrestore(&chan->lock, flags);
839}
840
841
842
843
844
845static void xilinx_dma_do_tasklet(unsigned long data)
846{
847 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
848
849 xilinx_dma_chan_desc_cleanup(chan);
850}
851
/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
858static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
859{
860 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
861 int i;
862
863
864 if (chan->desc_pool)
865 return 0;
866
867
868
869
870
871 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
872
873 chan->seg_v = dma_zalloc_coherent(chan->dev,
874 sizeof(*chan->seg_v) *
875 XILINX_DMA_NUM_DESCS,
876 &chan->seg_p, GFP_KERNEL);
877 if (!chan->seg_v) {
878 dev_err(chan->dev,
879 "unable to allocate channel %d descriptors\n",
880 chan->id);
881 return -ENOMEM;
882 }
883
		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain,
		 * so a separate descriptor segment is allocated here for
		 * programming the tail descriptor.
		 */
889 chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
890 sizeof(*chan->cyclic_seg_v),
891 &chan->cyclic_seg_p, GFP_KERNEL);
892 if (!chan->cyclic_seg_v) {
893 dev_err(chan->dev,
894 "unable to allocate desc segment for cyclic DMA\n");
895 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
896 XILINX_DMA_NUM_DESCS, chan->seg_v,
897 chan->seg_p);
898 return -ENOMEM;
899 }
900 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
901
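		/*
		 * Pre-link every buffer descriptor to the next one so the
		 * statically allocated segments form a ring of
		 * XILINX_DMA_NUM_DESCS entries, then add them to the free
		 * segment list.
		 */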
902 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
903 chan->seg_v[i].hw.next_desc =
904 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
905 ((i + 1) % XILINX_DMA_NUM_DESCS));
906 chan->seg_v[i].hw.next_desc_msb =
907 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
908 ((i + 1) % XILINX_DMA_NUM_DESCS));
909 chan->seg_v[i].phys = chan->seg_p +
910 sizeof(*chan->seg_v) * i;
911 list_add_tail(&chan->seg_v[i].node,
912 &chan->free_seg_list);
913 }
914 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
915 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
916 chan->dev,
917 sizeof(struct xilinx_cdma_tx_segment),
918 __alignof__(struct xilinx_cdma_tx_segment),
919 0);
920 } else {
921 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
922 chan->dev,
923 sizeof(struct xilinx_vdma_tx_segment),
924 __alignof__(struct xilinx_vdma_tx_segment),
925 0);
926 }
927
928 if (!chan->desc_pool &&
929 (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
930 dev_err(chan->dev,
931 "unable to allocate channel %d descriptor pool\n",
932 chan->id);
933 return -ENOMEM;
934 }
935
936 dma_cookie_init(dchan);
937
938 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
939
940
941
942 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
943 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
944 }
945
946 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
947 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
948 XILINX_CDMA_CR_SGMODE);
949
950 return 0;
951}
952
/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
961static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
962 dma_cookie_t cookie,
963 struct dma_tx_state *txstate)
964{
965 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
966 struct xilinx_dma_tx_descriptor *desc;
967 struct xilinx_axidma_tx_segment *segment;
968 struct xilinx_axidma_desc_hw *hw;
969 enum dma_status ret;
970 unsigned long flags;
971 u32 residue = 0;
972
973 ret = dma_cookie_status(dchan, cookie, txstate);
974 if (ret == DMA_COMPLETE || !txstate)
975 return ret;
976
977 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
978 spin_lock_irqsave(&chan->lock, flags);
979
980 desc = list_last_entry(&chan->active_list,
981 struct xilinx_dma_tx_descriptor, node);
982 if (chan->has_sg) {
983 list_for_each_entry(segment, &desc->segments, node) {
984 hw = &segment->hw;
985 residue += (hw->control - hw->status) &
986 chan->xdev->max_buffer_len;
987 }
988 }
989 spin_unlock_irqrestore(&chan->lock, flags);
990
991 chan->residue = residue;
992 dma_set_residue(txstate, chan->residue);
993 }
994
995 return ret;
996}
997
998
999
1000
1001
1002
1003
1004static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1005{
1006 u32 val;
1007
1008 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1009
1010
1011 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1012 val & XILINX_DMA_DMASR_HALTED, 0,
1013 XILINX_DMA_LOOP_COUNT);
1014}
1015
1016
1017
1018
1019
1020
1021
1022static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1023{
1024 u32 val;
1025
1026 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1027 val & XILINX_DMA_DMASR_IDLE, 0,
1028 XILINX_DMA_LOOP_COUNT);
1029}
1030
1031
1032
1033
1034
1035static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1036{
1037 int err;
1038 u32 val;
1039
1040 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1041
1042
1043 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1044 !(val & XILINX_DMA_DMASR_HALTED), 0,
1045 XILINX_DMA_LOOP_COUNT);
1046
1047 if (err) {
1048 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1049 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1050
1051 chan->err = true;
1052 }
1053}
1054
/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
1059static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1060{
1061 struct xilinx_vdma_config *config = &chan->config;
1062 struct xilinx_dma_tx_descriptor *desc, *tail_desc;
1063 u32 reg, j;
1064 struct xilinx_vdma_tx_segment *tail_segment;
1065
1066
1067 if (chan->err)
1068 return;
1069
1070 if (!chan->idle)
1071 return;
1072
1073 if (list_empty(&chan->pending_list))
1074 return;
1075
1076 desc = list_first_entry(&chan->pending_list,
1077 struct xilinx_dma_tx_descriptor, node);
1078 tail_desc = list_last_entry(&chan->pending_list,
1079 struct xilinx_dma_tx_descriptor, node);
1080
1081 tail_segment = list_last_entry(&tail_desc->segments,
1082 struct xilinx_vdma_tx_segment, node);
1083
1084
1085
1086
1087
1088 if (chan->has_sg)
1089 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1090 desc->async_tx.phys);
1091
1092
1093 if (chan->has_vflip) {
1094 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1095 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1096 reg |= config->vflip_en;
1097 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1098 reg);
1099 }
1100
1101 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1102
1103 if (config->frm_cnt_en)
1104 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1105 else
1106 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1107
	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode.
	 */
1112 if (chan->has_sg || !config->park)
1113 reg |= XILINX_DMA_DMACR_CIRC_EN;
1114
1115 if (config->park)
1116 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1117
1118 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1119
1120 j = chan->desc_submitcount;
1121 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1122 if (chan->direction == DMA_MEM_TO_DEV) {
1123 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1124 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1125 } else {
1126 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1127 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1128 }
1129 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1130
1131
1132 xilinx_dma_start(chan);
1133
1134 if (chan->err)
1135 return;
1136
1137
1138 if (chan->has_sg) {
1139 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1140 tail_segment->phys);
1141 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1142 chan->desc_pendingcount = 0;
1143 } else {
1144 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1145 int i = 0;
1146
1147 if (chan->desc_submitcount < chan->num_frms)
1148 i = chan->desc_submitcount;
1149
1150 list_for_each_entry(segment, &desc->segments, node) {
1151 if (chan->ext_addr)
1152 vdma_desc_write_64(chan,
1153 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1154 segment->hw.buf_addr,
1155 segment->hw.buf_addr_msb);
1156 else
1157 vdma_desc_write(chan,
1158 XILINX_VDMA_REG_START_ADDRESS(i++),
1159 segment->hw.buf_addr);
1160
1161 last = segment;
1162 }
1163
1164 if (!last)
1165 return;
1166
1167
1168 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1169 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1170 last->hw.stride);
1171 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1172
1173 chan->desc_submitcount++;
1174 chan->desc_pendingcount--;
1175 list_del(&desc->node);
1176 list_add_tail(&desc->node, &chan->active_list);
1177 if (chan->desc_submitcount == chan->num_frms)
1178 chan->desc_submitcount = 0;
1179 }
1180
1181 chan->idle = false;
1182}
1183
/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
1188static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1189{
1190 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1191 struct xilinx_cdma_tx_segment *tail_segment;
1192 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1193
1194 if (chan->err)
1195 return;
1196
1197 if (!chan->idle)
1198 return;
1199
1200 if (list_empty(&chan->pending_list))
1201 return;
1202
1203 head_desc = list_first_entry(&chan->pending_list,
1204 struct xilinx_dma_tx_descriptor, node);
1205 tail_desc = list_last_entry(&chan->pending_list,
1206 struct xilinx_dma_tx_descriptor, node);
1207 tail_segment = list_last_entry(&tail_desc->segments,
1208 struct xilinx_cdma_tx_segment, node);
1209
1210 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1211 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1212 ctrl_reg |= chan->desc_pendingcount <<
1213 XILINX_DMA_CR_COALESCE_SHIFT;
1214 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1215 }
1216
1217 if (chan->has_sg) {
1218 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1219 XILINX_CDMA_CR_SGMODE);
1220
1221 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1222 XILINX_CDMA_CR_SGMODE);
1223
1224 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1225 head_desc->async_tx.phys);
1226
1227
1228 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1229 tail_segment->phys);
1230 } else {
1231
1232 struct xilinx_cdma_tx_segment *segment;
1233 struct xilinx_cdma_desc_hw *hw;
1234
1235 segment = list_first_entry(&head_desc->segments,
1236 struct xilinx_cdma_tx_segment,
1237 node);
1238
1239 hw = &segment->hw;
1240
1241 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1242 xilinx_prep_dma_addr_t(hw->src_addr));
1243 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1244 xilinx_prep_dma_addr_t(hw->dest_addr));
1245
1246
1247 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1248 hw->control & chan->xdev->max_buffer_len);
1249 }
1250
1251 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1252 chan->desc_pendingcount = 0;
1253 chan->idle = false;
1254}
1255
/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
1260static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1261{
1262 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1263 struct xilinx_axidma_tx_segment *tail_segment;
1264 u32 reg;
1265
1266 if (chan->err)
1267 return;
1268
1269 if (!chan->idle)
1270 return;
1271
1272 if (list_empty(&chan->pending_list))
1273 return;
1274
1275 head_desc = list_first_entry(&chan->pending_list,
1276 struct xilinx_dma_tx_descriptor, node);
1277 tail_desc = list_last_entry(&chan->pending_list,
1278 struct xilinx_dma_tx_descriptor, node);
1279 tail_segment = list_last_entry(&tail_desc->segments,
1280 struct xilinx_axidma_tx_segment, node);
1281
1282 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1283
1284 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1285 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1286 reg |= chan->desc_pendingcount <<
1287 XILINX_DMA_CR_COALESCE_SHIFT;
1288 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1289 }
1290
1291 if (chan->has_sg)
1292 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1293 head_desc->async_tx.phys);
1294
1295 xilinx_dma_start(chan);
1296
1297 if (chan->err)
1298 return;
1299
1300
1301 if (chan->has_sg) {
1302 if (chan->cyclic)
1303 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1304 chan->cyclic_seg_v->phys);
1305 else
1306 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1307 tail_segment->phys);
1308 } else {
1309 struct xilinx_axidma_tx_segment *segment;
1310 struct xilinx_axidma_desc_hw *hw;
1311
1312 segment = list_first_entry(&head_desc->segments,
1313 struct xilinx_axidma_tx_segment,
1314 node);
1315 hw = &segment->hw;
1316
1317 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1318 xilinx_prep_dma_addr_t(hw->buf_addr));
1319
1320
1321 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1322 hw->control & chan->xdev->max_buffer_len);
1323 }
1324
1325 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1326 chan->desc_pendingcount = 0;
1327 chan->idle = false;
1328}
1329
1330
1331
1332
1333
1334static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1335{
1336 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1337 unsigned long flags;
1338
1339 spin_lock_irqsave(&chan->lock, flags);
1340 chan->start_transfer(chan);
1341 spin_unlock_irqrestore(&chan->lock, flags);
1342}
1343
1344
1345
1346
1347
1348
1349
1350static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1351{
1352 struct xilinx_dma_tx_descriptor *desc, *next;
1353
1354
1355 if (list_empty(&chan->active_list))
1356 return;
1357
1358 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1359 list_del(&desc->node);
1360 if (!desc->cyclic)
1361 dma_cookie_complete(&desc->async_tx);
1362 list_add_tail(&desc->node, &chan->done_list);
1363 }
1364}
1365
/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
1372static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1373{
1374 int err;
1375 u32 tmp;
1376
1377 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1378
1379
1380 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1381 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1382 XILINX_DMA_LOOP_COUNT);
1383
1384 if (err) {
1385 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1386 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1387 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1388 return -ETIMEDOUT;
1389 }
1390
1391 chan->err = false;
1392 chan->idle = true;
1393 chan->desc_submitcount = 0;
1394
1395 return err;
1396}
1397
1398
1399
1400
1401
1402
1403
1404static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1405{
1406 int err;
1407
1408
1409 err = xilinx_dma_reset(chan);
1410 if (err)
1411 return err;
1412
1413
1414 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1415 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1416
1417 return 0;
1418}
1419
/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
1427static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1428{
1429 struct xilinx_dma_chan *chan = data;
1430 u32 status;
1431
1432
1433 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1434 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1435 return IRQ_NONE;
1436
1437 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1438 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1439
1440 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * make sure not to write to other error bits to 1.
		 */
1448 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1449
1450 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1451 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1452
1453 if (!chan->flush_on_fsync ||
1454 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1455 dev_err(chan->dev,
1456 "Channel %p has errors %x, cdr %x tdr %x\n",
1457 chan, errors,
1458 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1459 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1460 chan->err = true;
1461 }
1462 }
1463
1464 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1465
1466
1467
1468
1469 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1470 }
1471
1472 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1473 spin_lock(&chan->lock);
1474 xilinx_dma_complete_descriptor(chan);
1475 chan->idle = true;
1476 chan->start_transfer(chan);
1477 spin_unlock(&chan->lock);
1478 }
1479
1480 tasklet_schedule(&chan->tasklet);
1481 return IRQ_HANDLED;
1482}
1483
1484
1485
1486
1487
1488
1489static void append_desc_queue(struct xilinx_dma_chan *chan,
1490 struct xilinx_dma_tx_descriptor *desc)
1491{
1492 struct xilinx_vdma_tx_segment *tail_segment;
1493 struct xilinx_dma_tx_descriptor *tail_desc;
1494 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1495 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1496
1497 if (list_empty(&chan->pending_list))
1498 goto append;
1499
1500
1501
1502
1503
1504 tail_desc = list_last_entry(&chan->pending_list,
1505 struct xilinx_dma_tx_descriptor, node);
1506 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1507 tail_segment = list_last_entry(&tail_desc->segments,
1508 struct xilinx_vdma_tx_segment,
1509 node);
1510 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1511 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1512 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1513 struct xilinx_cdma_tx_segment,
1514 node);
1515 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1516 } else {
1517 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1518 struct xilinx_axidma_tx_segment,
1519 node);
1520 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1521 }
1522
1523
1524
1525
1526
1527append:
1528 list_add_tail(&desc->node, &chan->pending_list);
1529 chan->desc_pendingcount++;
1530
1531 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1532 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1533 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1534 chan->desc_pendingcount = chan->num_frms;
1535 }
1536}
1537
/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
1544static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1545{
1546 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1547 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1548 dma_cookie_t cookie;
1549 unsigned long flags;
1550 int err;
1551
1552 if (chan->cyclic) {
1553 xilinx_dma_free_tx_descriptor(chan, desc);
1554 return -EBUSY;
1555 }
1556
1557 if (chan->err) {
1558
1559
1560
1561
1562 err = xilinx_dma_chan_reset(chan);
1563 if (err < 0)
1564 return err;
1565 }
1566
1567 spin_lock_irqsave(&chan->lock, flags);
1568
1569 cookie = dma_cookie_assign(tx);
1570
1571
1572 append_desc_queue(chan, desc);
1573
1574 if (desc->cyclic)
1575 chan->cyclic = true;
1576
1577 spin_unlock_irqrestore(&chan->lock, flags);
1578
1579 return cookie;
1580}
1581
/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
1591static struct dma_async_tx_descriptor *
1592xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1593 struct dma_interleaved_template *xt,
1594 unsigned long flags)
1595{
1596 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1597 struct xilinx_dma_tx_descriptor *desc;
1598 struct xilinx_vdma_tx_segment *segment;
1599 struct xilinx_vdma_desc_hw *hw;
1600
1601 if (!is_slave_direction(xt->dir))
1602 return NULL;
1603
1604 if (!xt->numf || !xt->sgl[0].size)
1605 return NULL;
1606
1607 if (xt->frame_size != 1)
1608 return NULL;
1609
1610
1611 desc = xilinx_dma_alloc_tx_descriptor(chan);
1612 if (!desc)
1613 return NULL;
1614
1615 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1616 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1617 async_tx_ack(&desc->async_tx);
1618
1619
1620 segment = xilinx_vdma_alloc_tx_segment(chan);
1621 if (!segment)
1622 goto error;
1623
1624
1625 hw = &segment->hw;
1626 hw->vsize = xt->numf;
1627 hw->hsize = xt->sgl[0].size;
1628 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1629 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1630 hw->stride |= chan->config.frm_dly <<
1631 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1632
1633 if (xt->dir != DMA_MEM_TO_DEV) {
1634 if (chan->ext_addr) {
1635 hw->buf_addr = lower_32_bits(xt->dst_start);
1636 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1637 } else {
1638 hw->buf_addr = xt->dst_start;
1639 }
1640 } else {
1641 if (chan->ext_addr) {
1642 hw->buf_addr = lower_32_bits(xt->src_start);
1643 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1644 } else {
1645 hw->buf_addr = xt->src_start;
1646 }
1647 }
1648
1649
1650 list_add_tail(&segment->node, &desc->segments);
1651
1652
1653 segment = list_first_entry(&desc->segments,
1654 struct xilinx_vdma_tx_segment, node);
1655 desc->async_tx.phys = segment->phys;
1656
1657 return &desc->async_tx;
1658
1659error:
1660 xilinx_dma_free_tx_descriptor(chan, desc);
1661 return NULL;
1662}
1663
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
1674static struct dma_async_tx_descriptor *
1675xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1676 dma_addr_t dma_src, size_t len, unsigned long flags)
1677{
1678 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1679 struct xilinx_dma_tx_descriptor *desc;
1680 struct xilinx_cdma_tx_segment *segment;
1681 struct xilinx_cdma_desc_hw *hw;
1682
1683 if (!len || len > chan->xdev->max_buffer_len)
1684 return NULL;
1685
1686 desc = xilinx_dma_alloc_tx_descriptor(chan);
1687 if (!desc)
1688 return NULL;
1689
1690 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1691 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1692
1693
1694 segment = xilinx_cdma_alloc_tx_segment(chan);
1695 if (!segment)
1696 goto error;
1697
1698 hw = &segment->hw;
1699 hw->control = len;
1700 hw->src_addr = dma_src;
1701 hw->dest_addr = dma_dst;
1702 if (chan->ext_addr) {
1703 hw->src_addr_msb = upper_32_bits(dma_src);
1704 hw->dest_addr_msb = upper_32_bits(dma_dst);
1705 }
1706
1707
1708 list_add_tail(&segment->node, &desc->segments);
1709
1710 desc->async_tx.phys = segment->phys;
1711 hw->next_desc = segment->phys;
1712
1713 return &desc->async_tx;
1714
1715error:
1716 xilinx_dma_free_tx_descriptor(chan, desc);
1717 return NULL;
1718}
1719
/**
 * xilinx_cdma_prep_sg - prepare descriptors for a memory sg transaction
 * @dchan: DMA channel
 * @dst_sg: Destination scatter list
 * @dst_sg_len: Number of entries in destination scatter list
 * @src_sg: Source scatter list
 * @src_sg_len: Number of entries in source scatter list
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
1731static struct dma_async_tx_descriptor *xilinx_cdma_prep_sg(
1732 struct dma_chan *dchan, struct scatterlist *dst_sg,
1733 unsigned int dst_sg_len, struct scatterlist *src_sg,
1734 unsigned int src_sg_len, unsigned long flags)
1735{
1736 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1737 struct xilinx_dma_tx_descriptor *desc;
1738 struct xilinx_cdma_tx_segment *segment, *prev = NULL;
1739 struct xilinx_cdma_desc_hw *hw;
1740 size_t len, dst_avail, src_avail;
1741 dma_addr_t dma_dst, dma_src;
1742
1743 if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
1744 return NULL;
1745
1746 if (unlikely(dst_sg == NULL || src_sg == NULL))
1747 return NULL;
1748
1749 desc = xilinx_dma_alloc_tx_descriptor(chan);
1750 if (!desc)
1751 return NULL;
1752
1753 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1754 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1755
1756 dst_avail = sg_dma_len(dst_sg);
1757 src_avail = sg_dma_len(src_sg);
1758
1759
1760
1761
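	/*
	 * Loop until we run out of either source or destination scatterlist
	 * entries, building one hardware segment per min-sized chunk.
	 */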
1762 while (true) {
1763 len = min_t(size_t, src_avail, dst_avail);
1764 len = min_t(size_t, len, chan->xdev->max_buffer_len);
1765 if (len == 0)
1766 goto fetch;
1767
1768
1769 segment = xilinx_cdma_alloc_tx_segment(chan);
1770 if (!segment)
1771 goto error;
1772
1773 dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
1774 dst_avail;
1775 dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
1776 src_avail;
1777 hw = &segment->hw;
1778 hw->control = len;
1779 hw->src_addr = dma_src;
1780 hw->dest_addr = dma_dst;
1781 if (chan->ext_addr) {
1782 hw->src_addr_msb = upper_32_bits(dma_src);
1783 hw->dest_addr_msb = upper_32_bits(dma_dst);
1784 }
1785
1786 if (prev)
1787 prev->hw.next_desc = segment->phys;
1788
1789 prev = segment;
1790 dst_avail -= len;
1791 src_avail -= len;
1792 list_add_tail(&segment->node, &desc->segments);
1793
1794fetch:
1795
1796 if (dst_avail == 0) {
1797 if (dst_sg_len == 0)
1798 break;
1799 dst_sg = sg_next(dst_sg);
1800 if (dst_sg == NULL)
1801 break;
1802 dst_sg_len--;
1803 dst_avail = sg_dma_len(dst_sg);
1804 }
1805
1806 if (src_avail == 0) {
1807 if (src_sg_len == 0)
1808 break;
1809 src_sg = sg_next(src_sg);
1810 if (src_sg == NULL)
1811 break;
1812 src_sg_len--;
1813 src_avail = sg_dma_len(src_sg);
1814 }
1815 }
1816
1817
1818 segment = list_first_entry(&desc->segments,
1819 struct xilinx_cdma_tx_segment, node);
1820 desc->async_tx.phys = segment->phys;
1821 prev->hw.next_desc = segment->phys;
1822
1823 return &desc->async_tx;
1824
1825error:
1826 xilinx_dma_free_tx_descriptor(chan, desc);
1827 return NULL;
1828}
1829
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
1841static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1842 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1843 enum dma_transfer_direction direction, unsigned long flags,
1844 void *context)
1845{
1846 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1847 struct xilinx_dma_tx_descriptor *desc;
1848 struct xilinx_axidma_tx_segment *segment = NULL;
1849 u32 *app_w = (u32 *)context;
1850 struct scatterlist *sg;
1851 size_t copy;
1852 size_t sg_used;
1853 unsigned int i;
1854
1855 if (!is_slave_direction(direction))
1856 return NULL;
1857
1858
1859 desc = xilinx_dma_alloc_tx_descriptor(chan);
1860 if (!desc)
1861 return NULL;
1862
1863 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1864 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1865
1866
1867 for_each_sg(sgl, sg, sg_len, i) {
1868 sg_used = 0;
1869
1870
1871 while (sg_used < sg_dma_len(sg)) {
1872 struct xilinx_axidma_desc_hw *hw;
1873
1874
1875 segment = xilinx_axidma_alloc_tx_segment(chan);
1876 if (!segment)
1877 goto error;
1878
1879
1880
1881
1882
1883 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
1884 chan->xdev->max_buffer_len);
1885 hw = &segment->hw;
1886
1887
1888 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1889 sg_used, 0);
1890
1891 hw->control = copy;
1892
1893 if (chan->direction == DMA_MEM_TO_DEV) {
1894 if (app_w)
1895 memcpy(hw->app, app_w, sizeof(u32) *
1896 XILINX_DMA_NUM_APP_WORDS);
1897 }
1898
1899 sg_used += copy;
1900
1901
1902
1903
1904
1905 list_add_tail(&segment->node, &desc->segments);
1906 }
1907 }
1908
1909 segment = list_first_entry(&desc->segments,
1910 struct xilinx_axidma_tx_segment, node);
1911 desc->async_tx.phys = segment->phys;
1912
1913
1914 if (chan->direction == DMA_MEM_TO_DEV) {
1915 segment->hw.control |= XILINX_DMA_BD_SOP;
1916 segment = list_last_entry(&desc->segments,
1917 struct xilinx_axidma_tx_segment,
1918 node);
1919 segment->hw.control |= XILINX_DMA_BD_EOP;
1920 }
1921
1922 return &desc->async_tx;
1923
1924error:
1925 xilinx_dma_free_tx_descriptor(chan, desc);
1926 return NULL;
1927}
1928
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA_SLAVE
 *	transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
1940static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1941 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1942 size_t period_len, enum dma_transfer_direction direction,
1943 unsigned long flags)
1944{
1945 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1946 struct xilinx_dma_tx_descriptor *desc;
1947 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1948 size_t copy, sg_used;
1949 unsigned int num_periods;
1950 int i;
1951 u32 reg;
1952
1953 if (!period_len)
1954 return NULL;
1955
1956 num_periods = buf_len / period_len;
1957
1958 if (!num_periods)
1959 return NULL;
1960
1961 if (!is_slave_direction(direction))
1962 return NULL;
1963
1964
1965 desc = xilinx_dma_alloc_tx_descriptor(chan);
1966 if (!desc)
1967 return NULL;
1968
1969 chan->direction = direction;
1970 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1971 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1972
1973 for (i = 0; i < num_periods; ++i) {
1974 sg_used = 0;
1975
1976 while (sg_used < period_len) {
1977 struct xilinx_axidma_desc_hw *hw;
1978
1979
1980 segment = xilinx_axidma_alloc_tx_segment(chan);
1981 if (!segment)
1982 goto error;
1983
1984
1985
1986
1987
1988 copy = min_t(size_t, period_len - sg_used,
1989 chan->xdev->max_buffer_len);
1990 hw = &segment->hw;
1991 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1992 period_len * i);
1993 hw->control = copy;
1994
1995 if (prev)
1996 prev->hw.next_desc = segment->phys;
1997
1998 prev = segment;
1999 sg_used += copy;
2000
2001
2002
2003
2004
2005 list_add_tail(&segment->node, &desc->segments);
2006 }
2007 }
2008
2009 head_segment = list_first_entry(&desc->segments,
2010 struct xilinx_axidma_tx_segment, node);
2011 desc->async_tx.phys = head_segment->phys;
2012
2013 desc->cyclic = true;
2014 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2015 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2016 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2017
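	/* Link the last BD back to the head segment to close the cyclic ring */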
2018 segment = list_last_entry(&desc->segments,
2019 struct xilinx_axidma_tx_segment,
2020 node);
2021 segment->hw.next_desc = (u32) head_segment->phys;
2022
2023
2024 if (direction == DMA_MEM_TO_DEV) {
2025 head_segment->hw.control |= XILINX_DMA_BD_SOP;
2026 segment->hw.control |= XILINX_DMA_BD_EOP;
2027 }
2028
2029 return &desc->async_tx;
2030
2031error:
2032 xilinx_dma_free_tx_descriptor(chan, desc);
2033 return NULL;
2034}
2035
/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
2042static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2043{
2044 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2045 u32 reg;
2046 int err;
2047
2048 if (!chan->cyclic) {
2049 err = chan->stop_transfer(chan);
2050 if (err) {
2051 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2052 chan, dma_ctrl_read(chan,
2053 XILINX_DMA_REG_DMASR));
2054 chan->err = true;
2055 }
2056 }
2057
2058 xilinx_dma_chan_reset(chan);
2059
2060 xilinx_dma_free_descriptors(chan);
2061 chan->idle = true;
2062
2063 if (chan->cyclic) {
2064 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2065 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2066 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2067 chan->cyclic = false;
2068 }
2069
2070 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2071 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2072 XILINX_CDMA_CR_SGMODE);
2073
2074 return 0;
2075}
2076
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
2090int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2091 struct xilinx_vdma_config *cfg)
2092{
2093 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2094 u32 dmacr;
2095
2096 if (cfg->reset)
2097 return xilinx_dma_chan_reset(chan);
2098
2099 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2100
2101 chan->config.frm_dly = cfg->frm_dly;
2102 chan->config.park = cfg->park;
2103
2104
2105 chan->config.gen_lock = cfg->gen_lock;
2106 chan->config.master = cfg->master;
2107
2108 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2109 if (cfg->gen_lock && chan->genlock) {
2110 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2111 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2112 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2113 }
2114
2115 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2116 chan->config.vflip_en = cfg->vflip_en;
2117
2118 if (cfg->park)
2119 chan->config.park_frm = cfg->park_frm;
2120 else
2121 chan->config.park_frm = -1;
2122
2123 chan->config.coalesc = cfg->coalesc;
2124 chan->config.delay = cfg->delay;
2125
2126 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2127 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2128 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2129 chan->config.coalesc = cfg->coalesc;
2130 }
2131
2132 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2133 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2134 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2135 chan->config.delay = cfg->delay;
2136 }
2137
2138
2139 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2140 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2141
2142 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2143
2144 return 0;
2145}
2146EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
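
/*
 * Illustrative client usage (sketch only, not part of this driver): a DMA
 * client would typically fill a struct xilinx_vdma_config and apply it to a
 * requested channel before preparing transfers, e.g.
 *
 *	struct xilinx_vdma_config cfg = { 0 };
 *
 *	cfg.frm_cnt_en = 1;
 *	cfg.coalesc = 1;
 *	xilinx_vdma_channel_set_config(dchan, &cfg);
 *
 * where dchan is a channel obtained through the dmaengine request API.
 */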
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2157{
2158
2159 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2160 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2161
2162 if (chan->irq > 0)
2163 free_irq(chan->irq, chan);
2164
2165 tasklet_kill(&chan->tasklet);
2166
2167 list_del(&chan->common.device_node);
2168}
2169
2170static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2171 struct clk **tx_clk, struct clk **rx_clk,
2172 struct clk **sg_clk, struct clk **tmp_clk)
2173{
2174 int err;
2175
2176 *tmp_clk = NULL;
2177
2178 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2179 if (IS_ERR(*axi_clk)) {
2180 err = PTR_ERR(*axi_clk);
2181 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2182 return err;
2183 }
2184
2185 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2186 if (IS_ERR(*tx_clk))
2187 *tx_clk = NULL;
2188
2189 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2190 if (IS_ERR(*rx_clk))
2191 *rx_clk = NULL;
2192
2193 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2194 if (IS_ERR(*sg_clk))
2195 *sg_clk = NULL;
2196
2197 err = clk_prepare_enable(*axi_clk);
2198 if (err) {
2199 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2200 return err;
2201 }
2202
2203 err = clk_prepare_enable(*tx_clk);
2204 if (err) {
2205 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2206 goto err_disable_axiclk;
2207 }
2208
2209 err = clk_prepare_enable(*rx_clk);
2210 if (err) {
2211 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2212 goto err_disable_txclk;
2213 }
2214
2215 err = clk_prepare_enable(*sg_clk);
2216 if (err) {
2217 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2218 goto err_disable_rxclk;
2219 }
2220
2221 return 0;
2222
2223err_disable_rxclk:
2224 clk_disable_unprepare(*rx_clk);
2225err_disable_txclk:
2226 clk_disable_unprepare(*tx_clk);
2227err_disable_axiclk:
2228 clk_disable_unprepare(*axi_clk);
2229
2230 return err;
2231}
2232
2233static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2234 struct clk **dev_clk, struct clk **tmp_clk,
2235 struct clk **tmp1_clk, struct clk **tmp2_clk)
2236{
2237 int err;
2238
2239 *tmp_clk = NULL;
2240 *tmp1_clk = NULL;
2241 *tmp2_clk = NULL;
2242
2243 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2244 if (IS_ERR(*axi_clk)) {
2245 err = PTR_ERR(*axi_clk);
2246 dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
2247 return err;
2248 }
2249
2250 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2251 if (IS_ERR(*dev_clk)) {
2252 err = PTR_ERR(*dev_clk);
2253 dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
2254 return err;
2255 }
2256
2257 err = clk_prepare_enable(*axi_clk);
2258 if (err) {
2259 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2260 return err;
2261 }
2262
2263 err = clk_prepare_enable(*dev_clk);
2264 if (err) {
2265 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2266 goto err_disable_axiclk;
2267 }
2268
2269 return 0;
2270
2271err_disable_axiclk:
2272 clk_disable_unprepare(*axi_clk);
2273
2274 return err;
2275}
2276
2277static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2278 struct clk **tx_clk, struct clk **txs_clk,
2279 struct clk **rx_clk, struct clk **rxs_clk)
2280{
2281 int err;
2282
2283 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2284 if (IS_ERR(*axi_clk)) {
2285 err = PTR_ERR(*axi_clk);
2286 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2287 return err;
2288 }
2289
2290 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2291 if (IS_ERR(*tx_clk))
2292 *tx_clk = NULL;
2293
2294 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2295 if (IS_ERR(*txs_clk))
2296 *txs_clk = NULL;
2297
2298 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2299 if (IS_ERR(*rx_clk))
2300 *rx_clk = NULL;
2301
2302 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2303 if (IS_ERR(*rxs_clk))
2304 *rxs_clk = NULL;
2305
2306 err = clk_prepare_enable(*axi_clk);
2307 if (err) {
2308 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2309 return err;
2310 }
2311
2312 err = clk_prepare_enable(*tx_clk);
2313 if (err) {
2314 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2315 goto err_disable_axiclk;
2316 }
2317
2318 err = clk_prepare_enable(*txs_clk);
2319 if (err) {
2320 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2321 goto err_disable_txclk;
2322 }
2323
2324 err = clk_prepare_enable(*rx_clk);
2325 if (err) {
2326 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2327 goto err_disable_txsclk;
2328 }
2329
2330 err = clk_prepare_enable(*rxs_clk);
2331 if (err) {
2332 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2333 goto err_disable_rxclk;
2334 }
2335
2336 return 0;
2337
2338err_disable_rxclk:
2339 clk_disable_unprepare(*rx_clk);
2340err_disable_txsclk:
2341 clk_disable_unprepare(*txs_clk);
2342err_disable_txclk:
2343 clk_disable_unprepare(*tx_clk);
2344err_disable_axiclk:
2345 clk_disable_unprepare(*axi_clk);
2346
2347 return err;
2348}
2349
2350static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2351{
2352 clk_disable_unprepare(xdev->rxs_clk);
2353 clk_disable_unprepare(xdev->rx_clk);
2354 clk_disable_unprepare(xdev->txs_clk);
2355 clk_disable_unprepare(xdev->tx_clk);
2356 clk_disable_unprepare(xdev->axi_clk);
2357}
2358
/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
2370static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2371 struct device_node *node, int chan_id)
2372{
2373 struct xilinx_dma_chan *chan;
2374 bool has_dre = false;
2375 u32 value, width;
2376 int err;
2377
2378
2379 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2380 if (!chan)
2381 return -ENOMEM;
2382
2383 chan->dev = xdev->dev;
2384 chan->xdev = xdev;
2385 chan->has_sg = xdev->has_sg;
2386 chan->desc_pendingcount = 0x0;
2387 chan->ext_addr = xdev->ext_addr;
2388
2389
2390
2391
2392
2393 chan->idle = true;
2394
2395 spin_lock_init(&chan->lock);
2396 INIT_LIST_HEAD(&chan->pending_list);
2397 INIT_LIST_HEAD(&chan->done_list);
2398 INIT_LIST_HEAD(&chan->active_list);
2399 INIT_LIST_HEAD(&chan->free_seg_list);
2400
2401
2402 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2403
2404 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2405
2406 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2407 if (err) {
2408 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2409 return err;
2410 }
2411 width = value >> 3;
2412
2413
2414 if (width > 8)
2415 has_dre = false;
2416
2417 if (!has_dre)
2418 xdev->common.copy_align = fls(width - 1);
2419
	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		xdev->common.directions = BIT(DMA_MEM_TO_DEV);

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		xdev->common.directions |= BIT(DMA_DEV_TO_MEM);
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
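/**
 * xilinx_dma_child_probe - Per child node probe
 * @xdev: Driver specific device structure
 * @node: Device node of the channel
 *
 * Probes the channel(s) described by a child node and updates the device
 * channel count.
 *
 * Return: '0' on success and failure value on error
 */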
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	for (i = 0; i < nr_channels; i++) {
		ret = xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
		if (ret)
			return ret;
	}

	xdev->nr_channels += nr_channels;

	return 0;
}
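/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */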
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

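/* Match table for of_platform binding */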
static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
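/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */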
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs)) {
		err = PTR_ERR(xdev->regs);
		goto disable_clks;
	}

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");

				xdev->max_buffer_len = GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			goto disable_clks;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.dst_addr_widths = BIT(addr_width / 8);
	xdev->common.src_addr_widths = BIT(addr_width / 8);
	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		dma_cap_set(DMA_SG, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
		xdev->common.device_prep_dma_sg = xilinx_cdma_prep_sg;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	err = dma_async_device_register(&xdev->common);
	if (err) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto disable_clks;
	}

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
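/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */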
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");