/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Supports the Xilinx AXI VDMA, AXI DMA and AXI CDMA soft IP cores, which
 * provide high-bandwidth direct memory access between system memory and
 * AXI4-Stream or memory-mapped peripherals.
 */
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR 0x0000
#define XILINX_DMA_DMACR_DELAY_MAX 0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT 24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT 8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
#define XILINX_DMA_DMACR_RESET BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)

/* Status Registers */
#define XILINX_DMA_REG_DMASR 0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
#define XILINX_DMA_DMASR_SG_MASK BIT(3)
#define XILINX_DMA_DMASR_IDLE BIT(1)
#define XILINX_DMA_DMASR_HALTED BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)

/* Scatter-gather and VDMA control registers */
#define XILINX_DMA_REG_CURDESC 0x0008
#define XILINX_DMA_REG_TAILDESC 0x0010
#define XILINX_DMA_REG_REG_INDEX 0x0014
#define XILINX_DMA_REG_FRMSTORE 0x0018
#define XILINX_DMA_REG_THRESHOLD 0x001c
#define XILINX_DMA_REG_FRMPTR_STS 0x0024
#define XILINX_DMA_REG_PARK_PTR 0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION 0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE 0x0000
#define XILINX_DMA_REG_HSIZE 0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0

#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK \
 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
 XILINX_DMA_DMASR_SOF_LATE_ERR | \
 XILINX_DMA_DMASR_SG_DEC_ERR | \
 XILINX_DMA_DMASR_SG_SLV_ERR | \
 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
 XILINX_DMA_DMASR_DMA_DEC_ERR | \
 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the hardware system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM 3
#define XILINX_DMA_FLUSH_MM2S 2
#define XILINX_DMA_FLUSH_BOTH 1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT 1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR 0x18
#define XILINX_DMA_REG_BTT 0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
#define XILINX_DMA_COALESCE_MAX 255
#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT 0
#define XILINX_DMA_BD_VSIZE_SHIFT 19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR 0x18
#define XILINX_CDMA_REG_DSTADDR 0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE BIT(3)

#define xilinx_prep_dma_addr_t(addr) \
 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *          pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
 u32 next_desc;
 u32 pad1;
 u32 buf_addr;
 u32 buf_addr_msb;
 u32 vsize;
 u32 hsize;
 u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
 u32 next_desc;
 u32 next_desc_msb;
 u32 buf_addr;
 u32 buf_addr_msb;
 u32 mcdma_control;
 u32 vsize_stride;
 u32 control;
 u32 status;
 u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor for AXI CDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
 u32 next_desc;
 u32 next_desc_msb;
 u32 src_addr;
 u32 src_addr_msb;
 u32 dest_addr;
 u32 dest_addr_msb;
 u32 control;
 u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
 struct xilinx_vdma_desc_hw hw;
 struct list_head node;
 dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
 struct xilinx_axidma_desc_hw hw;
 struct list_head node;
 dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
 struct xilinx_cdma_desc_hw hw;
 struct list_head node;
 dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers
 */
struct xilinx_dma_tx_descriptor {
 struct dma_async_tx_descriptor async_tx;
 struct list_head segments;
 struct list_head node;
 bool cyclic;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: IP-specific transfer start routine
 * @stop_transfer: IP-specific transfer stop routine
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip is enabled
 */
struct xilinx_dma_chan {
 struct xilinx_dma_device *xdev;
 u32 ctrl_offset;
 u32 desc_offset;
 spinlock_t lock;
 struct list_head pending_list;
 struct list_head active_list;
 struct list_head done_list;
 struct list_head free_seg_list;
 struct dma_chan common;
 struct dma_pool *desc_pool;
 struct device *dev;
 int irq;
 int id;
 enum dma_transfer_direction direction;
 int num_frms;
 bool has_sg;
 bool cyclic;
 bool genlock;
 bool err;
 bool idle;
 struct tasklet_struct tasklet;
 struct xilinx_vdma_config config;
 bool flush_on_fsync;
 u32 desc_pendingcount;
 bool ext_addr;
 u32 desc_submitcount;
 u32 residue;
 struct xilinx_axidma_tx_segment *seg_v;
 dma_addr_t seg_p;
 struct xilinx_axidma_tx_segment *cyclic_seg_v;
 dma_addr_t cyclic_seg_p;
 void (*start_transfer)(struct xilinx_dma_chan *chan);
 int (*stop_transfer)(struct xilinx_dma_chan *chan);
 u16 tdest;
 bool has_vflip;
};

/**
 * enum xdma_ip_type - DMA IP type
 * @XDMA_TYPE_AXIDMA: Axi dma ip
 * @XDMA_TYPE_CDMA: Axi cdma ip
 * @XDMA_TYPE_VDMA: Axi vdma ip
 */
enum xdma_ip_type {
 XDMA_TYPE_AXIDMA = 0,
 XDMA_TYPE_CDMA,
 XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
 enum xdma_ip_type dmatype;
 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
 struct clk **tx_clk, struct clk **txs_clk,
 struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
 void __iomem *regs;
 struct device *dev;
 struct dma_device common;
 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
 bool mcdma;
 u32 flush_on_fsync;
 bool ext_addr;
 struct platform_device *pdev;
 const struct xilinx_dma_config *dma_config;
 struct clk *axi_clk;
 struct clk *tx_clk;
 struct clk *txs_clk;
 struct clk *rx_clk;
 struct clk *rxs_clk;
 u32 nr_channels;
 u32 chan_id;
 u32 max_buffer_len;
};

/* Macros */
#define to_xilinx_chan(chan) \
 container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
 cond, delay_us, timeout_us)

/* IO accessors */

static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
 return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
 iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
 u32 value)
{
 dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
 return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
 u32 value)
{
 dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
 u32 clr)
{
 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
 u32 set)
{
 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor
 * @value_msb: upper address of the descriptor
 *
 * Since the VDMA descriptor registers are not 64-bit aligned, the value is
 * written as two separate 32-bit writes instead of a single 64-bit access.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
 u32 value_lsb, u32 value_msb)
{
 /* Write the lsb 32 bits */
 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

 /* Write the msb 32 bits */
 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
 dma_addr_t addr)
{
 if (chan->ext_addr)
 dma_writeq(chan, reg, addr);
 else
 dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
 struct xilinx_axidma_desc_hw *hw,
 dma_addr_t buf_addr, size_t sg_used,
 size_t period_len)
{
 if (chan->ext_addr) {
 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
 period_len);
 } else {
 hw->buf_addr = buf_addr + sg_used + period_len;
 }
}
548
549
550
551
552
553
554
555
556
557
558
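/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Allocates a VDMA segment from the channel's dma_pool; the CDMA variant
 * below is identical apart from the segment type.
 *
 * Return: The allocated segment on success and NULL on failure.
 */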
559static struct xilinx_vdma_tx_segment *
560xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
561{
562 struct xilinx_vdma_tx_segment *segment;
563 dma_addr_t phys;
564
565 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
566 if (!segment)
567 return NULL;
568
569 segment->phys = phys;
570
571 return segment;
572}
573
574
575
576
577
578
579
580static struct xilinx_cdma_tx_segment *
581xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
582{
583 struct xilinx_cdma_tx_segment *segment;
584 dma_addr_t phys;
585
586 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
587 if (!segment)
588 return NULL;
589
590 segment->phys = phys;
591
592 return segment;
593}
594
595
596
597
598
599
600
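/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment for AXI DMA
 * @chan: Driver specific DMA channel
 *
 * Unlike the VDMA/CDMA variants, segments come from the preallocated,
 * prelinked free_seg_list and are taken under the channel lock.
 *
 * Return: The allocated segment, or NULL if the free list is empty.
 */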
601static struct xilinx_axidma_tx_segment *
602xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
603{
604 struct xilinx_axidma_tx_segment *segment = NULL;
605 unsigned long flags;
606
607 spin_lock_irqsave(&chan->lock, flags);
608 if (!list_empty(&chan->free_seg_list)) {
609 segment = list_first_entry(&chan->free_seg_list,
610 struct xilinx_axidma_tx_segment,
611 node);
612 list_del(&segment->node);
613 }
614 spin_unlock_irqrestore(&chan->lock, flags);
615
616 return segment;
617}
618
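/*
 * xilinx_dma_clean_hw_desc - Zero a hardware descriptor while preserving its
 * next_desc/next_desc_msb pointers, so the prelinked BD ring stays intact
 * when xilinx_dma_free_tx_segment() returns the segment to free_seg_list.
 */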
619static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
620{
621 u32 next_desc = hw->next_desc;
622 u32 next_desc_msb = hw->next_desc_msb;
623
624 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
625
626 hw->next_desc = next_desc;
627 hw->next_desc_msb = next_desc_msb;
628}
629
630
631
632
633
634
635static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
636 struct xilinx_axidma_tx_segment *segment)
637{
638 xilinx_dma_clean_hw_desc(&segment->hw);
639
640 list_add_tail(&segment->node, &chan->free_seg_list);
641}
642
643
644
645
646
647
648static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
649 struct xilinx_cdma_tx_segment *segment)
650{
651 dma_pool_free(chan->desc_pool, segment, segment->phys);
652}
653
654
655
656
657
658
659static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
660 struct xilinx_vdma_tx_segment *segment)
661{
662 dma_pool_free(chan->desc_pool, segment, segment->phys);
663}
664
665
666
667
668
669
670
671static struct xilinx_dma_tx_descriptor *
672xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
673{
674 struct xilinx_dma_tx_descriptor *desc;
675
676 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
677 if (!desc)
678 return NULL;
679
680 INIT_LIST_HEAD(&desc->segments);
681
682 return desc;
683}
684
685
686
687
688
689
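/**
 * xilinx_dma_free_tx_descriptor - Free a software descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 *
 * Releases all segments of the descriptor using the segment free routine
 * that matches the DMA IP type, then frees the descriptor itself.
 */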
690static void
691xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
692 struct xilinx_dma_tx_descriptor *desc)
693{
694 struct xilinx_vdma_tx_segment *segment, *next;
695 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
696 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
697
698 if (!desc)
699 return;
700
701 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
702 list_for_each_entry_safe(segment, next, &desc->segments, node) {
703 list_del(&segment->node);
704 xilinx_vdma_free_tx_segment(chan, segment);
705 }
706 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
707 list_for_each_entry_safe(cdma_segment, cdma_next,
708 &desc->segments, node) {
709 list_del(&cdma_segment->node);
710 xilinx_cdma_free_tx_segment(chan, cdma_segment);
711 }
712 } else {
713 list_for_each_entry_safe(axidma_segment, axidma_next,
714 &desc->segments, node) {
715 list_del(&axidma_segment->node);
716 xilinx_dma_free_tx_segment(chan, axidma_segment);
717 }
718 }
719
720 kfree(desc);
721}
722
723
724
725
726
727
728
729
730static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
731 struct list_head *list)
732{
733 struct xilinx_dma_tx_descriptor *desc, *next;
734
735 list_for_each_entry_safe(desc, next, list, node) {
736 list_del(&desc->node);
737 xilinx_dma_free_tx_descriptor(chan, desc);
738 }
739}
740
741
742
743
744
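/**
 * xilinx_dma_free_descriptors - Free the pending, done and active descriptor
 * lists of a channel under the channel lock.
 * @chan: Driver specific DMA channel
 */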
745static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
746{
747 unsigned long flags;
748
749 spin_lock_irqsave(&chan->lock, flags);
750
751 xilinx_dma_free_desc_list(chan, &chan->pending_list);
752 xilinx_dma_free_desc_list(chan, &chan->done_list);
753 xilinx_dma_free_desc_list(chan, &chan->active_list);
754
755 spin_unlock_irqrestore(&chan->lock, flags);
756}
757
758
759
760
761
762static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
763{
764 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
765 unsigned long flags;
766
767 dev_dbg(chan->dev, "Free all channel resources.\n");
768
769 xilinx_dma_free_descriptors(chan);
770
771 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
772 spin_lock_irqsave(&chan->lock, flags);
773 INIT_LIST_HEAD(&chan->free_seg_list);
774 spin_unlock_irqrestore(&chan->lock, flags);
775
776
777 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
778 XILINX_DMA_NUM_DESCS, chan->seg_v,
779 chan->seg_p);
780
781
782 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
783 chan->cyclic_seg_v, chan->cyclic_seg_p);
784 }
785
786 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
787 dma_pool_destroy(chan->desc_pool);
788 chan->desc_pool = NULL;
789 }
790}
791
792
793
794
795
796
797
798static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
799 struct xilinx_dma_tx_descriptor *desc,
800 unsigned long *flags)
801{
802 dma_async_tx_callback callback;
803 void *callback_param;
804
805 callback = desc->async_tx.callback;
806 callback_param = desc->async_tx.callback_param;
807 if (callback) {
808 spin_unlock_irqrestore(&chan->lock, *flags);
809 callback(callback_param);
810 spin_lock_irqsave(&chan->lock, *flags);
811 }
812}
813
814
815
816
817
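/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 *
 * Runs the completion callbacks for descriptors on the done list. Cyclic
 * descriptors only invoke their callback and stay queued; all others are
 * removed and freed. Called from tasklet context.
 */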
818static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
819{
820 struct xilinx_dma_tx_descriptor *desc, *next;
821 unsigned long flags;
822
823 spin_lock_irqsave(&chan->lock, flags);
824
825 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
826 struct dmaengine_desc_callback cb;
827
828 if (desc->cyclic) {
829 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
830 break;
831 }
832
833
834 list_del(&desc->node);
835
836
837 dmaengine_desc_get_callback(&desc->async_tx, &cb);
838 if (dmaengine_desc_callback_valid(&cb)) {
839 spin_unlock_irqrestore(&chan->lock, flags);
840 dmaengine_desc_callback_invoke(&cb, NULL);
841 spin_lock_irqsave(&chan->lock, flags);
842 }
843
844
845 dma_run_dependencies(&desc->async_tx);
846 xilinx_dma_free_tx_descriptor(chan, desc);
847 }
848
849 spin_unlock_irqrestore(&chan->lock, flags);
850}
851
852
853
854
855
856static void xilinx_dma_do_tasklet(unsigned long data)
857{
858 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
859
860 xilinx_dma_chan_desc_cleanup(chan);
861}
862
863
864
865
866
867
868
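/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * For AXI DMA a coherent block of XILINX_DMA_NUM_DESCS prelinked segments
 * (plus one extra segment reserved for cyclic transfers) is allocated;
 * CDMA and VDMA use a dma_pool instead.
 *
 * Return: '0' on success and failure value on error.
 */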
869static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
870{
871 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
872 int i;
873
874
875 if (chan->desc_pool)
876 return 0;
877
878
879
880
881
882 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
883
884 chan->seg_v = dma_alloc_coherent(chan->dev,
885 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
886 &chan->seg_p, GFP_KERNEL);
887 if (!chan->seg_v) {
888 dev_err(chan->dev,
889 "unable to allocate channel %d descriptors\n",
890 chan->id);
891 return -ENOMEM;
892 }
893
894
895
896
897
898
899 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
900 sizeof(*chan->cyclic_seg_v),
901 &chan->cyclic_seg_p,
902 GFP_KERNEL);
903 if (!chan->cyclic_seg_v) {
904 dev_err(chan->dev,
905 "unable to allocate desc segment for cyclic DMA\n");
906 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
907 XILINX_DMA_NUM_DESCS, chan->seg_v,
908 chan->seg_p);
909 return -ENOMEM;
910 }
911 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
912
913 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
914 chan->seg_v[i].hw.next_desc =
915 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
916 ((i + 1) % XILINX_DMA_NUM_DESCS));
917 chan->seg_v[i].hw.next_desc_msb =
918 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
919 ((i + 1) % XILINX_DMA_NUM_DESCS));
920 chan->seg_v[i].phys = chan->seg_p +
921 sizeof(*chan->seg_v) * i;
922 list_add_tail(&chan->seg_v[i].node,
923 &chan->free_seg_list);
924 }
925 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
926 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
927 chan->dev,
928 sizeof(struct xilinx_cdma_tx_segment),
929 __alignof__(struct xilinx_cdma_tx_segment),
930 0);
931 } else {
932 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
933 chan->dev,
934 sizeof(struct xilinx_vdma_tx_segment),
935 __alignof__(struct xilinx_vdma_tx_segment),
936 0);
937 }
938
939 if (!chan->desc_pool &&
940 (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
941 dev_err(chan->dev,
942 "unable to allocate channel %d descriptor pool\n",
943 chan->id);
944 return -ENOMEM;
945 }
946
947 dma_cookie_init(dchan);
948
949 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
950
951
952
953 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
954 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
955 }
956
957 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
958 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
959 XILINX_CDMA_CR_SGMODE);
960
961 return 0;
962}
963
964
965
966
967
968
969
970
971
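/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: DMA channel
 * @size: Total size of the transfer
 * @done: Amount already transferred
 *
 * The next chunk is the remaining bytes capped at max_buffer_len, rounded
 * down to the copy_align boundary unless this chunk completes the transfer.
 *
 * Return: Amount of data to copy in the next segment.
 */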
972static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
973 int size, int done)
974{
975 size_t copy;
976
977 copy = min_t(size_t, size - done,
978 chan->xdev->max_buffer_len);
979
980 if ((copy + done < size) &&
981 chan->xdev->common.copy_align) {
982
983
984
985
986 copy = rounddown(copy,
987 (1 << chan->xdev->common.copy_align));
988 }
989 return copy;
990}
991
992
993
994
995
996
997
998
999
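/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * For AXI DMA in SG mode the residue is accumulated over the active
 * descriptor's segments as (control - status) masked by max_buffer_len.
 *
 * Return: DMA transaction status
 */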
1000static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1001 dma_cookie_t cookie,
1002 struct dma_tx_state *txstate)
1003{
1004 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1005 struct xilinx_dma_tx_descriptor *desc;
1006 struct xilinx_axidma_tx_segment *segment;
1007 struct xilinx_axidma_desc_hw *hw;
1008 enum dma_status ret;
1009 unsigned long flags;
1010 u32 residue = 0;
1011
1012 ret = dma_cookie_status(dchan, cookie, txstate);
1013 if (ret == DMA_COMPLETE || !txstate)
1014 return ret;
1015
1016 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1017 spin_lock_irqsave(&chan->lock, flags);
1018
1019 desc = list_last_entry(&chan->active_list,
1020 struct xilinx_dma_tx_descriptor, node);
1021 if (chan->has_sg) {
1022 list_for_each_entry(segment, &desc->segments, node) {
1023 hw = &segment->hw;
1024 residue += (hw->control - hw->status) &
1025 chan->xdev->max_buffer_len;
1026 }
1027 }
1028 spin_unlock_irqrestore(&chan->lock, flags);
1029
1030 chan->residue = residue;
1031 dma_set_residue(txstate, chan->residue);
1032 }
1033
1034 return ret;
1035}
1036
1037
1038
1039
1040
1041
1042
1043static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1044{
1045 u32 val;
1046
1047 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1048
1049
1050 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1051 val & XILINX_DMA_DMASR_HALTED, 0,
1052 XILINX_DMA_LOOP_COUNT);
1053}
1054
1055
1056
1057
1058
1059
1060
1061static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1062{
1063 u32 val;
1064
1065 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1066 val & XILINX_DMA_DMASR_IDLE, 0,
1067 XILINX_DMA_LOOP_COUNT);
1068}
1069
1070
1071
1072
1073
1074static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1075{
1076 int err;
1077 u32 val;
1078
1079 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1080
1081
1082 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1083 !(val & XILINX_DMA_DMASR_HALTED), 0,
1084 XILINX_DMA_LOOP_COUNT);
1085
1086 if (err) {
1087 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1088 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1089
1090 chan->err = true;
1091 }
1092}
1093
1094
1095
1096
1097
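/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 *
 * Register-direct mode: program vertical flip, DMACR (frame count,
 * circular/park), the park pointer, the per-frame start addresses, and
 * finally HSIZE/STRIDE/VSIZE; the VSIZE write kicks the hardware.
 */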
1098static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1099{
1100 struct xilinx_vdma_config *config = &chan->config;
1101 struct xilinx_dma_tx_descriptor *desc;
1102 u32 reg, j;
1103 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1104 int i = 0;
1105
1106
1107 if (chan->err)
1108 return;
1109
1110 if (!chan->idle)
1111 return;
1112
1113 if (list_empty(&chan->pending_list))
1114 return;
1115
1116 desc = list_first_entry(&chan->pending_list,
1117 struct xilinx_dma_tx_descriptor, node);
1118
1119
1120 if (chan->has_vflip) {
1121 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1122 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1123 reg |= config->vflip_en;
1124 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1125 reg);
1126 }
1127
1128 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1129
1130 if (config->frm_cnt_en)
1131 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1132 else
1133 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1134
1135
1136 if (config->park)
1137 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1138 else
1139 reg |= XILINX_DMA_DMACR_CIRC_EN;
1140
1141 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1142
1143 j = chan->desc_submitcount;
1144 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1145 if (chan->direction == DMA_MEM_TO_DEV) {
1146 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1147 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1148 } else {
1149 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1150 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1151 }
1152 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1153
1154
1155 xilinx_dma_start(chan);
1156
1157 if (chan->err)
1158 return;
1159
1160
1161 if (chan->desc_submitcount < chan->num_frms)
1162 i = chan->desc_submitcount;
1163
1164 list_for_each_entry(segment, &desc->segments, node) {
1165 if (chan->ext_addr)
1166 vdma_desc_write_64(chan,
1167 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1168 segment->hw.buf_addr,
1169 segment->hw.buf_addr_msb);
1170 else
1171 vdma_desc_write(chan,
1172 XILINX_VDMA_REG_START_ADDRESS(i++),
1173 segment->hw.buf_addr);
1174
1175 last = segment;
1176 }
1177
1178 if (!last)
1179 return;
1180
1181
1182 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1183 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1184 last->hw.stride);
1185 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1186
1187 chan->desc_submitcount++;
1188 chan->desc_pendingcount--;
1189 list_del(&desc->node);
1190 list_add_tail(&desc->node, &chan->active_list);
1191 if (chan->desc_submitcount == chan->num_frms)
1192 chan->desc_submitcount = 0;
1193
1194 chan->idle = false;
1195}
1196
1197
1198
1199
1200
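/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 *
 * In SG mode the current and tail descriptor registers are programmed;
 * otherwise source, destination and BTT are written directly, and the BTT
 * write starts the transfer.
 */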
1201static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1202{
1203 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1204 struct xilinx_cdma_tx_segment *tail_segment;
1205 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1206
1207 if (chan->err)
1208 return;
1209
1210 if (!chan->idle)
1211 return;
1212
1213 if (list_empty(&chan->pending_list))
1214 return;
1215
1216 head_desc = list_first_entry(&chan->pending_list,
1217 struct xilinx_dma_tx_descriptor, node);
1218 tail_desc = list_last_entry(&chan->pending_list,
1219 struct xilinx_dma_tx_descriptor, node);
1220 tail_segment = list_last_entry(&tail_desc->segments,
1221 struct xilinx_cdma_tx_segment, node);
1222
1223 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1224 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1225 ctrl_reg |= chan->desc_pendingcount <<
1226 XILINX_DMA_CR_COALESCE_SHIFT;
1227 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1228 }
1229
1230 if (chan->has_sg) {
1231 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1232 XILINX_CDMA_CR_SGMODE);
1233
1234 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1235 XILINX_CDMA_CR_SGMODE);
1236
1237 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1238 head_desc->async_tx.phys);
1239
1240
1241 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1242 tail_segment->phys);
1243 } else {
1244
1245 struct xilinx_cdma_tx_segment *segment;
1246 struct xilinx_cdma_desc_hw *hw;
1247
1248 segment = list_first_entry(&head_desc->segments,
1249 struct xilinx_cdma_tx_segment,
1250 node);
1251
1252 hw = &segment->hw;
1253
1254 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1255 xilinx_prep_dma_addr_t(hw->src_addr));
1256 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1257 xilinx_prep_dma_addr_t(hw->dest_addr));
1258
1259
1260 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1261 hw->control & chan->xdev->max_buffer_len);
1262 }
1263
1264 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1265 chan->desc_pendingcount = 0;
1266 chan->idle = false;
1267}
1268
1269
1270
1271
1272
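/**
 * xilinx_dma_start_transfer - Starts AXI DMA transfer
 * @chan: Driver specific channel struct pointer
 *
 * Programs the interrupt coalescing count and the current descriptor
 * (SG mode), starts the channel, then writes the tail descriptor or, in
 * simple mode, the buffer address and BTT to kick the transfer.
 */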
1273static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1274{
1275 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1276 struct xilinx_axidma_tx_segment *tail_segment;
1277 u32 reg;
1278
1279 if (chan->err)
1280 return;
1281
1282 if (list_empty(&chan->pending_list))
1283 return;
1284
1285 if (!chan->idle)
1286 return;
1287
1288 head_desc = list_first_entry(&chan->pending_list,
1289 struct xilinx_dma_tx_descriptor, node);
1290 tail_desc = list_last_entry(&chan->pending_list,
1291 struct xilinx_dma_tx_descriptor, node);
1292 tail_segment = list_last_entry(&tail_desc->segments,
1293 struct xilinx_axidma_tx_segment, node);
1294
1295 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1296
1297 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1298 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1299 reg |= chan->desc_pendingcount <<
1300 XILINX_DMA_CR_COALESCE_SHIFT;
1301 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1302 }
1303
1304 if (chan->has_sg && !chan->xdev->mcdma)
1305 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1306 head_desc->async_tx.phys);
1307
1308 if (chan->has_sg && chan->xdev->mcdma) {
1309 if (chan->direction == DMA_MEM_TO_DEV) {
1310 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1311 head_desc->async_tx.phys);
1312 } else {
1313 if (!chan->tdest) {
1314 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1315 head_desc->async_tx.phys);
1316 } else {
1317 dma_ctrl_write(chan,
1318 XILINX_DMA_MCRX_CDESC(chan->tdest),
1319 head_desc->async_tx.phys);
1320 }
1321 }
1322 }
1323
1324 xilinx_dma_start(chan);
1325
1326 if (chan->err)
1327 return;
1328
1329
1330 if (chan->has_sg && !chan->xdev->mcdma) {
1331 if (chan->cyclic)
1332 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1333 chan->cyclic_seg_v->phys);
1334 else
1335 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1336 tail_segment->phys);
1337 } else if (chan->has_sg && chan->xdev->mcdma) {
1338 if (chan->direction == DMA_MEM_TO_DEV) {
1339 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1340 tail_segment->phys);
1341 } else {
1342 if (!chan->tdest) {
1343 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1344 tail_segment->phys);
1345 } else {
1346 dma_ctrl_write(chan,
1347 XILINX_DMA_MCRX_TDESC(chan->tdest),
1348 tail_segment->phys);
1349 }
1350 }
1351 } else {
1352 struct xilinx_axidma_tx_segment *segment;
1353 struct xilinx_axidma_desc_hw *hw;
1354
1355 segment = list_first_entry(&head_desc->segments,
1356 struct xilinx_axidma_tx_segment,
1357 node);
1358 hw = &segment->hw;
1359
1360 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1361 xilinx_prep_dma_addr_t(hw->buf_addr));
1362
1363
1364 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1365 hw->control & chan->xdev->max_buffer_len);
1366 }
1367
1368 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1369 chan->desc_pendingcount = 0;
1370 chan->idle = false;
1371}
1372
1373
1374
1375
1376
1377static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1378{
1379 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1380 unsigned long flags;
1381
1382 spin_lock_irqsave(&chan->lock, flags);
1383 chan->start_transfer(chan);
1384 spin_unlock_irqrestore(&chan->lock, flags);
1385}
1386
1387
1388
1389
1390
1391
1392
1393static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1394{
1395 struct xilinx_dma_tx_descriptor *desc, *next;
1396
1397
1398 if (list_empty(&chan->active_list))
1399 return;
1400
1401 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1402 list_del(&desc->node);
1403 if (!desc->cyclic)
1404 dma_cookie_complete(&desc->async_tx);
1405 list_add_tail(&desc->node, &chan->done_list);
1406 }
1407}
1408
1409
1410
1411
1412
1413
1414
1415static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1416{
1417 int err;
1418 u32 tmp;
1419
1420 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1421
1422
1423 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1424 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1425 XILINX_DMA_LOOP_COUNT);
1426
1427 if (err) {
1428 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1429 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1430 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1431 return -ETIMEDOUT;
1432 }
1433
1434 chan->err = false;
1435 chan->idle = true;
1436 chan->desc_submitcount = 0;
1437
1438 return err;
1439}
1440
1441
1442
1443
1444
1445
1446
1447static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1448{
1449 int err;
1450
1451
1452 err = xilinx_dma_reset(chan);
1453 if (err)
1454 return err;
1455
1456
1457 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1458 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1459
1460 return 0;
1461}
1462
1463
1464
1465
1466
1467
1468
1469
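/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Acknowledges the asserted IRQs, latches unrecoverable errors, completes
 * active descriptors on frame-count completion and schedules the cleanup
 * tasklet.
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */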
1470static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1471{
1472 struct xilinx_dma_chan *chan = data;
1473 u32 status;
1474
1475
1476 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1477 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1478 return IRQ_NONE;
1479
1480 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1481 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1482
1483 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1484
1485
1486
1487
1488
1489
1490
1491 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1492
1493 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1494 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1495
1496 if (!chan->flush_on_fsync ||
1497 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1498 dev_err(chan->dev,
1499 "Channel %p has errors %x, cdr %x tdr %x\n",
1500 chan, errors,
1501 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1502 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1503 chan->err = true;
1504 }
1505 }
1506
1507 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1508
1509
1510
1511
1512 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1513 }
1514
1515 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1516 spin_lock(&chan->lock);
1517 xilinx_dma_complete_descriptor(chan);
1518 chan->idle = true;
1519 chan->start_transfer(chan);
1520 spin_unlock(&chan->lock);
1521 }
1522
1523 tasklet_schedule(&chan->tasklet);
1524 return IRQ_HANDLED;
1525}
1526
1527
1528
1529
1530
1531
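/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 *
 * Links the new descriptor after the tail segment of the current pending
 * tail descriptor and adds it to the pending list. Must be called with the
 * channel lock held.
 */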
1532static void append_desc_queue(struct xilinx_dma_chan *chan,
1533 struct xilinx_dma_tx_descriptor *desc)
1534{
1535 struct xilinx_vdma_tx_segment *tail_segment;
1536 struct xilinx_dma_tx_descriptor *tail_desc;
1537 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1538 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1539
1540 if (list_empty(&chan->pending_list))
1541 goto append;
1542
1543
1544
1545
1546
1547 tail_desc = list_last_entry(&chan->pending_list,
1548 struct xilinx_dma_tx_descriptor, node);
1549 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1550 tail_segment = list_last_entry(&tail_desc->segments,
1551 struct xilinx_vdma_tx_segment,
1552 node);
1553 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1554 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1555 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1556 struct xilinx_cdma_tx_segment,
1557 node);
1558 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1559 } else {
1560 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1561 struct xilinx_axidma_tx_segment,
1562 node);
1563 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1564 }
1565
1566
1567
1568
1569
1570append:
1571 list_add_tail(&desc->node, &chan->pending_list);
1572 chan->desc_pendingcount++;
1573
1574 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1575 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1576 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1577 chan->desc_pendingcount = chan->num_frms;
1578 }
1579}
1580
1581
1582
1583
1584
1585
1586
1587static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1588{
1589 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1590 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1591 dma_cookie_t cookie;
1592 unsigned long flags;
1593 int err;
1594
1595 if (chan->cyclic) {
1596 xilinx_dma_free_tx_descriptor(chan, desc);
1597 return -EBUSY;
1598 }
1599
1600 if (chan->err) {
1601
1602
1603
1604
1605 err = xilinx_dma_chan_reset(chan);
1606 if (err < 0)
1607 return err;
1608 }
1609
1610 spin_lock_irqsave(&chan->lock, flags);
1611
1612 cookie = dma_cookie_assign(tx);
1613
1614
1615 append_desc_queue(chan, desc);
1616
1617 if (desc->cyclic)
1618 chan->cyclic = true;
1619
1620 spin_unlock_irqrestore(&chan->lock, flags);
1621
1622 return cookie;
1623}
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
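/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction on a VDMA channel
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Only single-frame (frame_size == 1) templates are accepted; vsize, hsize,
 * stride and frame delay are packed into one hardware descriptor.
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */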
1634static struct dma_async_tx_descriptor *
1635xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1636 struct dma_interleaved_template *xt,
1637 unsigned long flags)
1638{
1639 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1640 struct xilinx_dma_tx_descriptor *desc;
1641 struct xilinx_vdma_tx_segment *segment;
1642 struct xilinx_vdma_desc_hw *hw;
1643
1644 if (!is_slave_direction(xt->dir))
1645 return NULL;
1646
1647 if (!xt->numf || !xt->sgl[0].size)
1648 return NULL;
1649
1650 if (xt->frame_size != 1)
1651 return NULL;
1652
1653
1654 desc = xilinx_dma_alloc_tx_descriptor(chan);
1655 if (!desc)
1656 return NULL;
1657
1658 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1659 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1660 async_tx_ack(&desc->async_tx);
1661
1662
1663 segment = xilinx_vdma_alloc_tx_segment(chan);
1664 if (!segment)
1665 goto error;
1666
1667
1668 hw = &segment->hw;
1669 hw->vsize = xt->numf;
1670 hw->hsize = xt->sgl[0].size;
1671 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1672 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1673 hw->stride |= chan->config.frm_dly <<
1674 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1675
1676 if (xt->dir != DMA_MEM_TO_DEV) {
1677 if (chan->ext_addr) {
1678 hw->buf_addr = lower_32_bits(xt->dst_start);
1679 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1680 } else {
1681 hw->buf_addr = xt->dst_start;
1682 }
1683 } else {
1684 if (chan->ext_addr) {
1685 hw->buf_addr = lower_32_bits(xt->src_start);
1686 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1687 } else {
1688 hw->buf_addr = xt->src_start;
1689 }
1690 }
1691
1692
1693 list_add_tail(&segment->node, &desc->segments);
1694
1695
1696 segment = list_first_entry(&desc->segments,
1697 struct xilinx_vdma_tx_segment, node);
1698 desc->async_tx.phys = segment->phys;
1699
1700 return &desc->async_tx;
1701
1702error:
1703 xilinx_dma_free_tx_descriptor(chan, desc);
1704 return NULL;
1705}
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717static struct dma_async_tx_descriptor *
1718xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1719 dma_addr_t dma_src, size_t len, unsigned long flags)
1720{
1721 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1722 struct xilinx_dma_tx_descriptor *desc;
1723 struct xilinx_cdma_tx_segment *segment;
1724 struct xilinx_cdma_desc_hw *hw;
1725
1726 if (!len || len > chan->xdev->max_buffer_len)
1727 return NULL;
1728
1729 desc = xilinx_dma_alloc_tx_descriptor(chan);
1730 if (!desc)
1731 return NULL;
1732
1733 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1734 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1735
1736
1737 segment = xilinx_cdma_alloc_tx_segment(chan);
1738 if (!segment)
1739 goto error;
1740
1741 hw = &segment->hw;
1742 hw->control = len;
1743 hw->src_addr = dma_src;
1744 hw->dest_addr = dma_dst;
1745 if (chan->ext_addr) {
1746 hw->src_addr_msb = upper_32_bits(dma_src);
1747 hw->dest_addr_msb = upper_32_bits(dma_dst);
1748 }
1749
1750
1751 list_add_tail(&segment->node, &desc->segments);
1752
1753 desc->async_tx.phys = segment->phys;
1754 hw->next_desc = segment->phys;
1755
1756 return &desc->async_tx;
1757
1758error:
1759 xilinx_dma_free_tx_descriptor(chan, desc);
1760 return NULL;
1761}
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
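/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Each scatterlist entry is split into segments no larger than
 * max_buffer_len; SOP/EOP are set on the first/last segment for MEM_TO_DEV.
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */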
1774static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1775 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1776 enum dma_transfer_direction direction, unsigned long flags,
1777 void *context)
1778{
1779 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1780 struct xilinx_dma_tx_descriptor *desc;
1781 struct xilinx_axidma_tx_segment *segment = NULL;
1782 u32 *app_w = (u32 *)context;
1783 struct scatterlist *sg;
1784 size_t copy;
1785 size_t sg_used;
1786 unsigned int i;
1787
1788 if (!is_slave_direction(direction))
1789 return NULL;
1790
1791
1792 desc = xilinx_dma_alloc_tx_descriptor(chan);
1793 if (!desc)
1794 return NULL;
1795
1796 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1797 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1798
1799
1800 for_each_sg(sgl, sg, sg_len, i) {
1801 sg_used = 0;
1802
1803
1804 while (sg_used < sg_dma_len(sg)) {
1805 struct xilinx_axidma_desc_hw *hw;
1806
1807
1808 segment = xilinx_axidma_alloc_tx_segment(chan);
1809 if (!segment)
1810 goto error;
1811
1812
1813
1814
1815
1816 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
1817 sg_used);
1818 hw = &segment->hw;
1819
1820
1821 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1822 sg_used, 0);
1823
1824 hw->control = copy;
1825
1826 if (chan->direction == DMA_MEM_TO_DEV) {
1827 if (app_w)
1828 memcpy(hw->app, app_w, sizeof(u32) *
1829 XILINX_DMA_NUM_APP_WORDS);
1830 }
1831
1832 sg_used += copy;
1833
1834
1835
1836
1837
1838 list_add_tail(&segment->node, &desc->segments);
1839 }
1840 }
1841
1842 segment = list_first_entry(&desc->segments,
1843 struct xilinx_axidma_tx_segment, node);
1844 desc->async_tx.phys = segment->phys;
1845
1846
1847 if (chan->direction == DMA_MEM_TO_DEV) {
1848 segment->hw.control |= XILINX_DMA_BD_SOP;
1849 segment = list_last_entry(&desc->segments,
1850 struct xilinx_axidma_tx_segment,
1851 node);
1852 segment->hw.control |= XILINX_DMA_BD_EOP;
1853 }
1854
1855 return &desc->async_tx;
1856
1857error:
1858 xilinx_dma_free_tx_descriptor(chan, desc);
1859 return NULL;
1860}
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
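/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * The buffer is split into buf_len / period_len periods, the segment chain
 * is closed back onto the head, and cyclic BD mode is enabled in DMACR.
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */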
1873static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1874 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1875 size_t period_len, enum dma_transfer_direction direction,
1876 unsigned long flags)
1877{
1878 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1879 struct xilinx_dma_tx_descriptor *desc;
1880 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1881 size_t copy, sg_used;
1882 unsigned int num_periods;
1883 int i;
1884 u32 reg;
1885
1886 if (!period_len)
1887 return NULL;
1888
1889 num_periods = buf_len / period_len;
1890
1891 if (!num_periods)
1892 return NULL;
1893
1894 if (!is_slave_direction(direction))
1895 return NULL;
1896
1897
1898 desc = xilinx_dma_alloc_tx_descriptor(chan);
1899 if (!desc)
1900 return NULL;
1901
1902 chan->direction = direction;
1903 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1904 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1905
1906 for (i = 0; i < num_periods; ++i) {
1907 sg_used = 0;
1908
1909 while (sg_used < period_len) {
1910 struct xilinx_axidma_desc_hw *hw;
1911
1912
1913 segment = xilinx_axidma_alloc_tx_segment(chan);
1914 if (!segment)
1915 goto error;
1916
1917
1918
1919
1920
1921 copy = xilinx_dma_calc_copysize(chan, period_len,
1922 sg_used);
1923 hw = &segment->hw;
1924 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1925 period_len * i);
1926 hw->control = copy;
1927
1928 if (prev)
1929 prev->hw.next_desc = segment->phys;
1930
1931 prev = segment;
1932 sg_used += copy;
1933
1934
1935
1936
1937
1938 list_add_tail(&segment->node, &desc->segments);
1939 }
1940 }
1941
1942 head_segment = list_first_entry(&desc->segments,
1943 struct xilinx_axidma_tx_segment, node);
1944 desc->async_tx.phys = head_segment->phys;
1945
1946 desc->cyclic = true;
1947 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1948 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1949 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1950
1951 segment = list_last_entry(&desc->segments,
1952 struct xilinx_axidma_tx_segment,
1953 node);
1954 segment->hw.next_desc = (u32) head_segment->phys;
1955
1956
1957 if (direction == DMA_MEM_TO_DEV) {
1958 head_segment->hw.control |= XILINX_DMA_BD_SOP;
1959 segment->hw.control |= XILINX_DMA_BD_EOP;
1960 }
1961
1962 return &desc->async_tx;
1963
1964error:
1965 xilinx_dma_free_tx_descriptor(chan, desc);
1966 return NULL;
1967}
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978static struct dma_async_tx_descriptor *
1979xilinx_dma_prep_interleaved(struct dma_chan *dchan,
1980 struct dma_interleaved_template *xt,
1981 unsigned long flags)
1982{
1983 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1984 struct xilinx_dma_tx_descriptor *desc;
1985 struct xilinx_axidma_tx_segment *segment;
1986 struct xilinx_axidma_desc_hw *hw;
1987
1988 if (!is_slave_direction(xt->dir))
1989 return NULL;
1990
1991 if (!xt->numf || !xt->sgl[0].size)
1992 return NULL;
1993
1994 if (xt->frame_size != 1)
1995 return NULL;
1996
1997
1998 desc = xilinx_dma_alloc_tx_descriptor(chan);
1999 if (!desc)
2000 return NULL;
2001
2002 chan->direction = xt->dir;
2003 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2004 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2005
2006
2007 segment = xilinx_axidma_alloc_tx_segment(chan);
2008 if (!segment)
2009 goto error;
2010
2011 hw = &segment->hw;
2012
2013
2014 if (xt->dir != DMA_MEM_TO_DEV)
2015 hw->buf_addr = xt->dst_start;
2016 else
2017 hw->buf_addr = xt->src_start;
2018
2019 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
2020 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
2021 XILINX_DMA_BD_VSIZE_MASK;
2022 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
2023 XILINX_DMA_BD_STRIDE_MASK;
2024 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
2025
2026
2027
2028
2029
2030 list_add_tail(&segment->node, &desc->segments);
2031
2032
2033 segment = list_first_entry(&desc->segments,
2034 struct xilinx_axidma_tx_segment, node);
2035 desc->async_tx.phys = segment->phys;
2036
2037
2038 if (xt->dir == DMA_MEM_TO_DEV) {
2039 segment->hw.control |= XILINX_DMA_BD_SOP;
2040 segment = list_last_entry(&desc->segments,
2041 struct xilinx_axidma_tx_segment,
2042 node);
2043 segment->hw.control |= XILINX_DMA_BD_EOP;
2044 }
2045
2046 return &desc->async_tx;
2047
2048error:
2049 xilinx_dma_free_tx_descriptor(chan, desc);
2050 return NULL;
2051}
2052
2053
2054
2055
2056
2057
2058
2059static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2060{
2061 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2062 u32 reg;
2063 int err;
2064
2065 if (chan->cyclic)
2066 xilinx_dma_chan_reset(chan);
2067
2068 err = chan->stop_transfer(chan);
2069 if (err) {
2070 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2071 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
2072 chan->err = true;
2073 }
2074
2075
2076 xilinx_dma_free_descriptors(chan);
2077 chan->idle = true;
2078
2079 if (chan->cyclic) {
2080 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2081 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2082 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2083 chan->cyclic = false;
2084 }
2085
2086 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2087 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2088 XILINX_CDMA_CR_SGMODE);
2089
2090 return 0;
2091}
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
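/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Run-time configuration of gen-lock, park mode, frame-count interrupt
 * coalescing, delay and external frame sync; cfg->reset performs a channel
 * reset instead.
 *
 * Return: '0' on success and failure value on error
 */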
2106int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2107 struct xilinx_vdma_config *cfg)
2108{
2109 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2110 u32 dmacr;
2111
2112 if (cfg->reset)
2113 return xilinx_dma_chan_reset(chan);
2114
2115 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2116
2117 chan->config.frm_dly = cfg->frm_dly;
2118 chan->config.park = cfg->park;
2119
2120
2121 chan->config.gen_lock = cfg->gen_lock;
2122 chan->config.master = cfg->master;
2123
2124 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2125 if (cfg->gen_lock && chan->genlock) {
2126 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2127 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2128 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2129 }
2130
2131 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2132 chan->config.vflip_en = cfg->vflip_en;
2133
2134 if (cfg->park)
2135 chan->config.park_frm = cfg->park_frm;
2136 else
2137 chan->config.park_frm = -1;
2138
2139 chan->config.coalesc = cfg->coalesc;
2140 chan->config.delay = cfg->delay;
2141
2142 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2143 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2144 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2145 chan->config.coalesc = cfg->coalesc;
2146 }
2147
2148 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2149 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2150 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2151 chan->config.delay = cfg->delay;
2152 }
2153
2154
2155 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2156 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2157
2158 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2159
2160 return 0;
2161}
2162EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
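
/*
 * A minimal usage sketch (not part of this driver) of how a client driver
 * might configure a parked VDMA write channel before issuing transfers; the
 * variable names and requested settings are purely illustrative:
 *
 *	struct xilinx_vdma_config cfg = {
 *		.park = 1,
 *		.park_frm = 0,
 *		.coalesc = 1,
 *		.vflip_en = 0,
 *	};
 *
 *	err = xilinx_vdma_channel_set_config(chan, &cfg);
 *	if (err)
 *		dev_err(dev, "VDMA config failed: %d\n", err);
 */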
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2173{
2174
2175 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2176 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2177
2178 if (chan->irq > 0)
2179 free_irq(chan->irq, chan);
2180
2181 tasklet_kill(&chan->tasklet);
2182
2183 list_del(&chan->common.device_node);
2184}
2185
2186static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2187 struct clk **tx_clk, struct clk **rx_clk,
2188 struct clk **sg_clk, struct clk **tmp_clk)
2189{
2190 int err;
2191
2192 *tmp_clk = NULL;
2193
2194 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2195 if (IS_ERR(*axi_clk)) {
2196 err = PTR_ERR(*axi_clk);
2197 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2198 return err;
2199 }
2200
2201 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2202 if (IS_ERR(*tx_clk))
2203 *tx_clk = NULL;
2204
2205 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2206 if (IS_ERR(*rx_clk))
2207 *rx_clk = NULL;
2208
2209 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2210 if (IS_ERR(*sg_clk))
2211 *sg_clk = NULL;
2212
2213 err = clk_prepare_enable(*axi_clk);
2214 if (err) {
2215 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2216 return err;
2217 }
2218
2219 err = clk_prepare_enable(*tx_clk);
2220 if (err) {
2221 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2222 goto err_disable_axiclk;
2223 }
2224
2225 err = clk_prepare_enable(*rx_clk);
2226 if (err) {
2227 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2228 goto err_disable_txclk;
2229 }
2230
2231 err = clk_prepare_enable(*sg_clk);
2232 if (err) {
2233 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2234 goto err_disable_rxclk;
2235 }
2236
2237 return 0;
2238
2239err_disable_rxclk:
2240 clk_disable_unprepare(*rx_clk);
2241err_disable_txclk:
2242 clk_disable_unprepare(*tx_clk);
2243err_disable_axiclk:
2244 clk_disable_unprepare(*axi_clk);
2245
2246 return err;
2247}
2248
2249static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2250 struct clk **dev_clk, struct clk **tmp_clk,
2251 struct clk **tmp1_clk, struct clk **tmp2_clk)
2252{
2253 int err;
2254
2255 *tmp_clk = NULL;
2256 *tmp1_clk = NULL;
2257 *tmp2_clk = NULL;
2258
2259 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2260 if (IS_ERR(*axi_clk)) {
2261 err = PTR_ERR(*axi_clk);
2262 dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
2263 return err;
2264 }
2265
2266 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2267 if (IS_ERR(*dev_clk)) {
2268 err = PTR_ERR(*dev_clk);
2269 dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
2270 return err;
2271 }
2272
2273 err = clk_prepare_enable(*axi_clk);
2274 if (err) {
2275 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2276 return err;
2277 }
2278
2279 err = clk_prepare_enable(*dev_clk);
2280 if (err) {
2281 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2282 goto err_disable_axiclk;
2283 }
2284
2285 return 0;
2286
2287err_disable_axiclk:
2288 clk_disable_unprepare(*axi_clk);
2289
2290 return err;
2291}
2292
2293static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2294 struct clk **tx_clk, struct clk **txs_clk,
2295 struct clk **rx_clk, struct clk **rxs_clk)
2296{
2297 int err;
2298
2299 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2300 if (IS_ERR(*axi_clk)) {
2301 err = PTR_ERR(*axi_clk);
2302 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
2303 return err;
2304 }
2305
2306 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2307 if (IS_ERR(*tx_clk))
2308 *tx_clk = NULL;
2309
2310 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2311 if (IS_ERR(*txs_clk))
2312 *txs_clk = NULL;
2313
2314 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2315 if (IS_ERR(*rx_clk))
2316 *rx_clk = NULL;
2317
2318 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2319 if (IS_ERR(*rxs_clk))
2320 *rxs_clk = NULL;
2321
2322 err = clk_prepare_enable(*axi_clk);
2323 if (err) {
2324 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2325 return err;
2326 }
2327
2328 err = clk_prepare_enable(*tx_clk);
2329 if (err) {
2330 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2331 goto err_disable_axiclk;
2332 }
2333
2334 err = clk_prepare_enable(*txs_clk);
2335 if (err) {
2336 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2337 goto err_disable_txclk;
2338 }
2339
2340 err = clk_prepare_enable(*rx_clk);
2341 if (err) {
2342 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2343 goto err_disable_txsclk;
2344 }
2345
2346 err = clk_prepare_enable(*rxs_clk);
2347 if (err) {
2348 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2349 goto err_disable_rxclk;
2350 }
2351
2352 return 0;
2353
2354err_disable_rxclk:
2355 clk_disable_unprepare(*rx_clk);
2356err_disable_txsclk:
2357 clk_disable_unprepare(*txs_clk);
2358err_disable_txclk:
2359 clk_disable_unprepare(*tx_clk);
2360err_disable_axiclk:
2361 clk_disable_unprepare(*axi_clk);
2362
2363 return err;
2364}
2365
2366static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2367{
2368 clk_disable_unprepare(xdev->rxs_clk);
2369 clk_disable_unprepare(xdev->rx_clk);
2370 clk_disable_unprepare(xdev->txs_clk);
2371 clk_disable_unprepare(xdev->tx_clk);
2372 clk_disable_unprepare(xdev->axi_clk);
2373}
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
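/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Parses the channel DT node (data width, DRE, genlock, direction-specific
 * compatible), requests the IRQ, selects the start/stop handlers for the IP
 * type, detects SG capability and resets the channel.
 *
 * Return: '0' on success and failure value on error
 */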
2386static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2387 struct device_node *node, int chan_id)
2388{
2389 struct xilinx_dma_chan *chan;
2390 bool has_dre = false;
2391 u32 value, width;
2392 int err;
2393
2394
2395 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2396 if (!chan)
2397 return -ENOMEM;
2398
2399 chan->dev = xdev->dev;
2400 chan->xdev = xdev;
2401 chan->desc_pendingcount = 0x0;
2402 chan->ext_addr = xdev->ext_addr;
2403
2404
2405
2406
2407
2408 chan->idle = true;
2409
2410 spin_lock_init(&chan->lock);
2411 INIT_LIST_HEAD(&chan->pending_list);
2412 INIT_LIST_HEAD(&chan->done_list);
2413 INIT_LIST_HEAD(&chan->active_list);
2414 INIT_LIST_HEAD(&chan->free_seg_list);
2415
2416
2417 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2418
2419 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2420
2421 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2422 if (err) {
2423 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2424 return err;
2425 }
2426 width = value >> 3;
2427
2428
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}
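	/* Request the interrupt */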
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}
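	/* Check if SG is enabled (only for AXI DMA and CDMA) */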
	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		    XILINX_DMA_DMASR_SG_MASK)
			chan->has_sg = true;
		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
			chan->has_sg ? "enabled" : "disabled");
	}
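	/* Initialize the tasklet */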
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);
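	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */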
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;
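	/* Reset the channel */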
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
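/**
 * xilinx_dma_child_probe - Per child node probe
 * Reads the number of DMA channels from the child node and probes
 * each of them.
 *
 * @xdev: Driver specific device structure
 * @node: Child device node
 *
 * Return: 0 always.
 */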
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}
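/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success or NULL on error
 */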
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
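/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */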
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width, len_width;
	int i, err;
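	/* Allocate and initialize the DMA engine structure */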
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;
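	/* Request and map I/O memory */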
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);
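	/* Retrieve the DMA engine properties from the device tree */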
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
				xdev->max_buffer_len =
					GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
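	/* Initialize the DMA engine */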
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					xilinx_dma_prep_interleaved;

		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);
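	/* Initialize the channels */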
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}
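	/* Register the DMA engine with the core */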
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
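/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */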
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");