// SPDX-License-Identifier: GPL-2.0
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
 * Xilinx IP that provides high-bandwidth direct memory access between
 * memory and AXI4-Stream target peripherals. It provides scatter gather
 * interface with multiple channels independent configuration support.
 */
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR 0x0000
#define XILINX_DMA_DMACR_DELAY_MAX 0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT 24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT 8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
#define XILINX_DMA_DMACR_RESET BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)

#define XILINX_DMA_REG_DMASR 0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
#define XILINX_DMA_DMASR_SG_MASK BIT(3)
#define XILINX_DMA_DMASR_IDLE BIT(1)
#define XILINX_DMA_DMASR_HALTED BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC 0x0008
#define XILINX_DMA_REG_TAILDESC 0x0010
#define XILINX_DMA_REG_REG_INDEX 0x0014
#define XILINX_DMA_REG_FRMSTORE 0x0018
#define XILINX_DMA_REG_THRESHOLD 0x001c
#define XILINX_DMA_REG_FRMPTR_STS 0x0024
#define XILINX_DMA_REG_PARK_PTR 0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION 0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE 0x0000
#define XILINX_DMA_REG_HSIZE 0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0

#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)

/* HW specific definitions */
#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK \
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM 3
#define XILINX_DMA_FLUSH_MM2S 2
#define XILINX_DMA_FLUSH_BOTH 1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT 1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR 0x18
#define XILINX_DMA_REG_BTT 0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
#define XILINX_DMA_COALESCE_MAX 255
#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR 0x18
#define XILINX_CDMA_REG_DSTADDR 0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE BIT(3)

/* Combine a 32-bit LSB/MSB register pair (addr, addr_msb) into a dma_addr_t */
#define xilinx_prep_dma_addr_t(addr) \
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))

/* AXI MCDMA Specific Registers/Offsets */
#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
#define XILINX_MCDMA_CHEN_OFFSET 0x0008
#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)

/* AXI MCDMA Specific Masks/Bit fields */
#define XILINX_MCDMA_COALESCE_SHIFT 16
#define XILINX_MCDMA_COALESCE_MAX 24
#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
#define XILINX_MCDMA_BD_EOP BIT(30)
#define XILINX_MCDMA_BD_SOP BIT(31)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
229struct xilinx_vdma_desc_hw {
230 u32 next_desc;
231 u32 pad1;
232 u32 buf_addr;
233 u32 buf_addr_msb;
234 u32 vsize;
235 u32 hsize;
236 u32 stride;
237} __aligned(64);
238
/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
251struct xilinx_axidma_desc_hw {
252 u32 next_desc;
253 u32 next_desc_msb;
254 u32 buf_addr;
255 u32 buf_addr_msb;
256 u32 reserved1;
257 u32 reserved2;
258 u32 control;
259 u32 status;
260 u32 app[XILINX_DMA_NUM_APP_WORDS];
261} __aligned(64);
262
/**
 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @rsvd: Reserved @0x10
 * @control: Control field @0x14
 * @status: Status field @0x18
 * @sideband_status: Status of sideband signals @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
275struct xilinx_aximcdma_desc_hw {
276 u32 next_desc;
277 u32 next_desc_msb;
278 u32 buf_addr;
279 u32 buf_addr_msb;
280 u32 rsvd;
281 u32 control;
282 u32 status;
283 u32 sideband_status;
284 u32 app[XILINX_DMA_NUM_APP_WORDS];
285} __aligned(64);
286
/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor for AXI CDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
298struct xilinx_cdma_desc_hw {
299 u32 next_desc;
300 u32 next_desc_msb;
301 u32 src_addr;
302 u32 src_addr_msb;
303 u32 dest_addr;
304 u32 dest_addr_msb;
305 u32 control;
306 u32 status;
307} __aligned(64);
308
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
315struct xilinx_vdma_tx_segment {
316 struct xilinx_vdma_desc_hw hw;
317 struct list_head node;
318 dma_addr_t phys;
319} __aligned(64);
320
/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
327struct xilinx_axidma_tx_segment {
328 struct xilinx_axidma_desc_hw hw;
329 struct list_head node;
330 dma_addr_t phys;
331} __aligned(64);
332
/**
 * struct xilinx_aximcdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
339struct xilinx_aximcdma_tx_segment {
340 struct xilinx_aximcdma_desc_hw hw;
341 struct list_head node;
342 dma_addr_t phys;
343} __aligned(64);
344
/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
351struct xilinx_cdma_tx_segment {
352 struct xilinx_cdma_desc_hw hw;
353 struct list_head node;
354 dma_addr_t phys;
355} __aligned(64);
356
/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 * @err: Whether the descriptor has an error.
 * @residue: Residue of the completed descriptor
 */
366struct xilinx_dma_tx_descriptor {
367 struct dma_async_tx_descriptor async_tx;
368 struct list_head segments;
369 struct list_head node;
370 bool cyclic;
371 bool err;
372 u32 residue;
373};
374
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_mv: Statically allocated segments base for MCDMA
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
413struct xilinx_dma_chan {
414 struct xilinx_dma_device *xdev;
415 u32 ctrl_offset;
416 u32 desc_offset;
417 spinlock_t lock;
418 struct list_head pending_list;
419 struct list_head active_list;
420 struct list_head done_list;
421 struct list_head free_seg_list;
422 struct dma_chan common;
423 struct dma_pool *desc_pool;
424 struct device *dev;
425 int irq;
426 int id;
427 enum dma_transfer_direction direction;
428 int num_frms;
429 bool has_sg;
430 bool cyclic;
431 bool genlock;
432 bool err;
433 bool idle;
434 struct tasklet_struct tasklet;
435 struct xilinx_vdma_config config;
436 bool flush_on_fsync;
437 u32 desc_pendingcount;
438 bool ext_addr;
439 u32 desc_submitcount;
440 struct xilinx_axidma_tx_segment *seg_v;
441 struct xilinx_aximcdma_tx_segment *seg_mv;
442 dma_addr_t seg_p;
443 struct xilinx_axidma_tx_segment *cyclic_seg_v;
444 dma_addr_t cyclic_seg_p;
445 void (*start_transfer)(struct xilinx_dma_chan *chan);
446 int (*stop_transfer)(struct xilinx_dma_chan *chan);
447 u16 tdest;
448 bool has_vflip;
449};
450
/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
 *
 */
460enum xdma_ip_type {
461 XDMA_TYPE_AXIDMA = 0,
462 XDMA_TYPE_CDMA,
463 XDMA_TYPE_VDMA,
464 XDMA_TYPE_AXIMCDMA
465};
466
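/**
 * struct xilinx_dma_config - DMA IP configuration (per compatible string)
 * @dmatype: DMA IP type
 * @clk_init: DMA clock initialization routine
 * @irq_handler: DMA transfer completion interrupt handler
 * @max_channels: Maximum number of channels supported by the IP
 */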
467struct xilinx_dma_config {
468 enum xdma_ip_type dmatype;
469 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
470 struct clk **tx_clk, struct clk **txs_clk,
471 struct clk **rx_clk, struct clk **rxs_clk);
472 irqreturn_t (*irq_handler)(int irq, void *data);
473 const int max_channels;
474};
475
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @s2mm_chan_id: DMA s2mm channel identifier
 * @mm2s_chan_id: DMA mm2s channel identifier
 * @max_buffer_len: Max buffer length
 */
495struct xilinx_dma_device {
496 void __iomem *regs;
497 struct device *dev;
498 struct dma_device common;
499 struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
500 u32 flush_on_fsync;
501 bool ext_addr;
502 struct platform_device *pdev;
503 const struct xilinx_dma_config *dma_config;
504 struct clk *axi_clk;
505 struct clk *tx_clk;
506 struct clk *txs_clk;
507 struct clk *rx_clk;
508 struct clk *rxs_clk;
509 u32 s2mm_chan_id;
510 u32 mm2s_chan_id;
511 u32 max_buffer_len;
512};
513
514
515#define to_xilinx_chan(chan) \
516 container_of(chan, struct xilinx_dma_chan, common)
517#define to_dma_tx_descriptor(tx) \
518 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
519#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
520 readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
521 val, cond, delay_us, timeout_us)
522
523
524static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
525{
526 return ioread32(chan->xdev->regs + reg);
527}
528
529static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
530{
531 iowrite32(value, chan->xdev->regs + reg);
532}
533
534static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
535 u32 value)
536{
537 dma_write(chan, chan->desc_offset + reg, value);
538}
539
540static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
541{
542 return dma_read(chan, chan->ctrl_offset + reg);
543}
544
545static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
546 u32 value)
547{
548 dma_write(chan, chan->ctrl_offset + reg, value);
549}
550
551static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
552 u32 clr)
553{
554 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
555}
556
557static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
558 u32 set)
559{
560 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
561}
562
/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA start-address registers are not 64-bit aligned, the value
 * is written as two separate 32-bit quantities: first the lower 32 bits of
 * the address, then the upper 32 bits in the next register location.
 */
574static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
575 u32 value_lsb, u32 value_msb)
576{
577
578 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
579
580
581 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
582}
583
584static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
585{
586 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
587}
588
589static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
590 dma_addr_t addr)
591{
592 if (chan->ext_addr)
593 dma_writeq(chan, reg, addr);
594 else
595 dma_ctrl_write(chan, reg, addr);
596}
597
598static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
599 struct xilinx_axidma_desc_hw *hw,
600 dma_addr_t buf_addr, size_t sg_used,
601 size_t period_len)
602{
603 if (chan->ext_addr) {
604 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
605 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
606 period_len);
607 } else {
608 hw->buf_addr = buf_addr + sg_used + period_len;
609 }
610}
611
612static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
613 struct xilinx_aximcdma_desc_hw *hw,
614 dma_addr_t buf_addr, size_t sg_used)
615{
616 if (chan->ext_addr) {
617 hw->buf_addr = lower_32_bits(buf_addr + sg_used);
618 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
619 } else {
620 hw->buf_addr = buf_addr + sg_used;
621 }
622}
623
/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
634static struct xilinx_vdma_tx_segment *
635xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
636{
637 struct xilinx_vdma_tx_segment *segment;
638 dma_addr_t phys;
639
640 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
641 if (!segment)
642 return NULL;
643
644 segment->phys = phys;
645
646 return segment;
647}
648
/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
655static struct xilinx_cdma_tx_segment *
656xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
657{
658 struct xilinx_cdma_tx_segment *segment;
659 dma_addr_t phys;
660
661 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
662 if (!segment)
663 return NULL;
664
665 segment->phys = phys;
666
667 return segment;
668}
669
/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
676static struct xilinx_axidma_tx_segment *
677xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
678{
679 struct xilinx_axidma_tx_segment *segment = NULL;
680 unsigned long flags;
681
682 spin_lock_irqsave(&chan->lock, flags);
683 if (!list_empty(&chan->free_seg_list)) {
684 segment = list_first_entry(&chan->free_seg_list,
685 struct xilinx_axidma_tx_segment,
686 node);
687 list_del(&segment->node);
688 }
689 spin_unlock_irqrestore(&chan->lock, flags);
690
691 if (!segment)
692 dev_dbg(chan->dev, "Could not find free tx segment\n");
693
694 return segment;
695}
696
/**
 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
703static struct xilinx_aximcdma_tx_segment *
704xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
705{
706 struct xilinx_aximcdma_tx_segment *segment = NULL;
707 unsigned long flags;
708
709 spin_lock_irqsave(&chan->lock, flags);
710 if (!list_empty(&chan->free_seg_list)) {
711 segment = list_first_entry(&chan->free_seg_list,
712 struct xilinx_aximcdma_tx_segment,
713 node);
714 list_del(&segment->node);
715 }
716 spin_unlock_irqrestore(&chan->lock, flags);
717
718 return segment;
719}
720
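/*
 * The clean_hw_desc() helpers below zero a hardware descriptor while
 * preserving its next_desc/next_desc_msb words: the AXI DMA and MCDMA
 * buffer descriptors are pre-linked into a ring when the channel is set
 * up (see xilinx_dma_alloc_chan_resources()) and are recycled through
 * free_seg_list, so the chain pointers must survive the memset.
 */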
721static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
722{
723 u32 next_desc = hw->next_desc;
724 u32 next_desc_msb = hw->next_desc_msb;
725
726 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
727
728 hw->next_desc = next_desc;
729 hw->next_desc_msb = next_desc_msb;
730}
731
732static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
733{
734 u32 next_desc = hw->next_desc;
735 u32 next_desc_msb = hw->next_desc_msb;
736
737 memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
738
739 hw->next_desc = next_desc;
740 hw->next_desc_msb = next_desc_msb;
741}
742
/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
748static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
749 struct xilinx_axidma_tx_segment *segment)
750{
751 xilinx_dma_clean_hw_desc(&segment->hw);
752
753 list_add_tail(&segment->node, &chan->free_seg_list);
754}
755
/**
 * xilinx_mcdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
761static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
762 struct xilinx_aximcdma_tx_segment *
763 segment)
764{
765 xilinx_mcdma_clean_hw_desc(&segment->hw);
766
767 list_add_tail(&segment->node, &chan->free_seg_list);
768}
769
/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
775static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
776 struct xilinx_cdma_tx_segment *segment)
777{
778 dma_pool_free(chan->desc_pool, segment, segment->phys);
779}
780
/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
786static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
787 struct xilinx_vdma_tx_segment *segment)
788{
789 dma_pool_free(chan->desc_pool, segment, segment->phys);
790}
791
/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
798static struct xilinx_dma_tx_descriptor *
799xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
800{
801 struct xilinx_dma_tx_descriptor *desc;
802
803 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
804 if (!desc)
805 return NULL;
806
807 INIT_LIST_HEAD(&desc->segments);
808
809 return desc;
810}
811
/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
817static void
818xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
819 struct xilinx_dma_tx_descriptor *desc)
820{
821 struct xilinx_vdma_tx_segment *segment, *next;
822 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
823 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
824 struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
825
826 if (!desc)
827 return;
828
829 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
830 list_for_each_entry_safe(segment, next, &desc->segments, node) {
831 list_del(&segment->node);
832 xilinx_vdma_free_tx_segment(chan, segment);
833 }
834 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
835 list_for_each_entry_safe(cdma_segment, cdma_next,
836 &desc->segments, node) {
837 list_del(&cdma_segment->node);
838 xilinx_cdma_free_tx_segment(chan, cdma_segment);
839 }
840 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
841 list_for_each_entry_safe(axidma_segment, axidma_next,
842 &desc->segments, node) {
843 list_del(&axidma_segment->node);
844 xilinx_dma_free_tx_segment(chan, axidma_segment);
845 }
846 } else {
847 list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
848 &desc->segments, node) {
849 list_del(&aximcdma_segment->node);
850 xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
851 }
852 }
853
854 kfree(desc);
855}
856
/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
864static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
865 struct list_head *list)
866{
867 struct xilinx_dma_tx_descriptor *desc, *next;
868
869 list_for_each_entry_safe(desc, next, list, node) {
870 list_del(&desc->node);
871 xilinx_dma_free_tx_descriptor(chan, desc);
872 }
873}
874
/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
879static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
880{
881 unsigned long flags;
882
883 spin_lock_irqsave(&chan->lock, flags);
884
885 xilinx_dma_free_desc_list(chan, &chan->pending_list);
886 xilinx_dma_free_desc_list(chan, &chan->done_list);
887 xilinx_dma_free_desc_list(chan, &chan->active_list);
888
889 spin_unlock_irqrestore(&chan->lock, flags);
890}
891
/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
896static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
897{
898 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
899 unsigned long flags;
900
901 dev_dbg(chan->dev, "Free all channel resources.\n");
902
903 xilinx_dma_free_descriptors(chan);
904
905 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
906 spin_lock_irqsave(&chan->lock, flags);
907 INIT_LIST_HEAD(&chan->free_seg_list);
908 spin_unlock_irqrestore(&chan->lock, flags);
909
910
911 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
912 XILINX_DMA_NUM_DESCS, chan->seg_v,
913 chan->seg_p);
914
915
916 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
917 chan->cyclic_seg_v, chan->cyclic_seg_p);
918 }
919
920 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
921 spin_lock_irqsave(&chan->lock, flags);
922 INIT_LIST_HEAD(&chan->free_seg_list);
923 spin_unlock_irqrestore(&chan->lock, flags);
924
925
926 dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
927 XILINX_DMA_NUM_DESCS, chan->seg_mv,
928 chan->seg_p);
929 }
930
931 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
932 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
933 dma_pool_destroy(chan->desc_pool);
934 chan->desc_pool = NULL;
935 }
936
937}
938
/**
 * xilinx_dma_get_residue - Compute residue for a given descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 *
 * Return: The number of residue bytes for the descriptor.
 */
946static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
947 struct xilinx_dma_tx_descriptor *desc)
948{
949 struct xilinx_cdma_tx_segment *cdma_seg;
950 struct xilinx_axidma_tx_segment *axidma_seg;
951 struct xilinx_aximcdma_tx_segment *aximcdma_seg;
952 struct xilinx_cdma_desc_hw *cdma_hw;
953 struct xilinx_axidma_desc_hw *axidma_hw;
954 struct xilinx_aximcdma_desc_hw *aximcdma_hw;
955 struct list_head *entry;
956 u32 residue = 0;
957
958 list_for_each(entry, &desc->segments) {
959 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
960 cdma_seg = list_entry(entry,
961 struct xilinx_cdma_tx_segment,
962 node);
963 cdma_hw = &cdma_seg->hw;
964 residue += (cdma_hw->control - cdma_hw->status) &
965 chan->xdev->max_buffer_len;
966 } else if (chan->xdev->dma_config->dmatype ==
967 XDMA_TYPE_AXIDMA) {
968 axidma_seg = list_entry(entry,
969 struct xilinx_axidma_tx_segment,
970 node);
971 axidma_hw = &axidma_seg->hw;
972 residue += (axidma_hw->control - axidma_hw->status) &
973 chan->xdev->max_buffer_len;
974 } else {
975 aximcdma_seg =
976 list_entry(entry,
977 struct xilinx_aximcdma_tx_segment,
978 node);
979 aximcdma_hw = &aximcdma_seg->hw;
980 residue +=
981 (aximcdma_hw->control - aximcdma_hw->status) &
982 chan->xdev->max_buffer_len;
983 }
984 }
985
986 return residue;
987}
988
/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
995static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
996 struct xilinx_dma_tx_descriptor *desc,
997 unsigned long *flags)
998{
999 dma_async_tx_callback callback;
1000 void *callback_param;
1001
1002 callback = desc->async_tx.callback;
1003 callback_param = desc->async_tx.callback_param;
1004 if (callback) {
1005 spin_unlock_irqrestore(&chan->lock, *flags);
1006 callback(callback_param);
1007 spin_lock_irqsave(&chan->lock, *flags);
1008 }
1009}
1010
/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific dma channel
 */
1015static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
1016{
1017 struct xilinx_dma_tx_descriptor *desc, *next;
1018 unsigned long flags;
1019
1020 spin_lock_irqsave(&chan->lock, flags);
1021
1022 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
1023 struct dmaengine_result result;
1024
1025 if (desc->cyclic) {
1026 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
1027 break;
1028 }
1029
1030
1031 list_del(&desc->node);
1032
1033 if (unlikely(desc->err)) {
1034 if (chan->direction == DMA_DEV_TO_MEM)
1035 result.result = DMA_TRANS_READ_FAILED;
1036 else
1037 result.result = DMA_TRANS_WRITE_FAILED;
1038 } else {
1039 result.result = DMA_TRANS_NOERROR;
1040 }
1041
1042 result.residue = desc->residue;
1043
1044
1045 spin_unlock_irqrestore(&chan->lock, flags);
1046 dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
1047 spin_lock_irqsave(&chan->lock, flags);
1048
1049
1050 dma_run_dependencies(&desc->async_tx);
1051 xilinx_dma_free_tx_descriptor(chan, desc);
1052 }
1053
1054 spin_unlock_irqrestore(&chan->lock, flags);
1055}
1056
/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @t: Pointer to the tasklet embedded in the Xilinx DMA channel structure
 */
1061static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
1062{
1063 struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
1064
1065 xilinx_dma_chan_desc_cleanup(chan);
1066}
1067
/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
1074static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
1075{
1076 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1077 int i;
1078
1079
1080 if (chan->desc_pool)
1081 return 0;
1082
1083
1084
1085
1086
1087 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
1089 chan->seg_v = dma_alloc_coherent(chan->dev,
1090 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
1091 &chan->seg_p, GFP_KERNEL);
1092 if (!chan->seg_v) {
1093 dev_err(chan->dev,
1094 "unable to allocate channel %d descriptors\n",
1095 chan->id);
1096 return -ENOMEM;
1097 }
		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain
		 * so allocating a desc segment during channel allocation for
		 * programming tail descriptor.
		 */
1104 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
1105 sizeof(*chan->cyclic_seg_v),
1106 &chan->cyclic_seg_p,
1107 GFP_KERNEL);
1108 if (!chan->cyclic_seg_v) {
1109 dev_err(chan->dev,
1110 "unable to allocate desc segment for cyclic DMA\n");
1111 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
1112 XILINX_DMA_NUM_DESCS, chan->seg_v,
1113 chan->seg_p);
1114 return -ENOMEM;
1115 }
1116 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
1117
1118 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1119 chan->seg_v[i].hw.next_desc =
1120 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1121 ((i + 1) % XILINX_DMA_NUM_DESCS));
1122 chan->seg_v[i].hw.next_desc_msb =
1123 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1124 ((i + 1) % XILINX_DMA_NUM_DESCS));
1125 chan->seg_v[i].phys = chan->seg_p +
1126 sizeof(*chan->seg_v) * i;
1127 list_add_tail(&chan->seg_v[i].node,
1128 &chan->free_seg_list);
1129 }
1130 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
1131
1132 chan->seg_mv = dma_alloc_coherent(chan->dev,
1133 sizeof(*chan->seg_mv) *
1134 XILINX_DMA_NUM_DESCS,
1135 &chan->seg_p, GFP_KERNEL);
1136 if (!chan->seg_mv) {
1137 dev_err(chan->dev,
1138 "unable to allocate channel %d descriptors\n",
1139 chan->id);
1140 return -ENOMEM;
1141 }
1142 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1143 chan->seg_mv[i].hw.next_desc =
1144 lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1145 ((i + 1) % XILINX_DMA_NUM_DESCS));
1146 chan->seg_mv[i].hw.next_desc_msb =
1147 upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1148 ((i + 1) % XILINX_DMA_NUM_DESCS));
1149 chan->seg_mv[i].phys = chan->seg_p +
1150 sizeof(*chan->seg_mv) * i;
1151 list_add_tail(&chan->seg_mv[i].node,
1152 &chan->free_seg_list);
1153 }
1154 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1155 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
1156 chan->dev,
1157 sizeof(struct xilinx_cdma_tx_segment),
1158 __alignof__(struct xilinx_cdma_tx_segment),
1159 0);
1160 } else {
1161 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
1162 chan->dev,
1163 sizeof(struct xilinx_vdma_tx_segment),
1164 __alignof__(struct xilinx_vdma_tx_segment),
1165 0);
1166 }
1167
1168 if (!chan->desc_pool &&
1169 ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
1170 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
1171 dev_err(chan->dev,
1172 "unable to allocate channel %d descriptor pool\n",
1173 chan->id);
1174 return -ENOMEM;
1175 }
1176
1177 dma_cookie_init(dchan);
1178
1179 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1180
1181
1182
1183 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1184 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1185 }
1186
1187 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
1188 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1189 XILINX_CDMA_CR_SGMODE);
1190
1191 return 0;
1192}
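/*
 * Layout produced above for the AXI DMA / MCDMA scatter-gather case
 * (illustrative sketch of the code above, not taken from hardware manuals):
 *
 *	seg_v[0] -> seg_v[1] -> ... -> seg_v[XILINX_DMA_NUM_DESCS - 1]
 *	   ^                                            |
 *	   +--------------------------------------------+
 *
 * Each seg_v[i].hw.next_desc{,_msb} holds the physical address of
 * seg_v[(i + 1) % XILINX_DMA_NUM_DESCS], and every segment starts out on
 * free_seg_list. cyclic_seg_v sits outside this ring and is only used as
 * the tail descriptor for cyclic transfers.
 */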
1193
/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: Driver specific DMA channel
 * @size: Total data that needs to be copied
 * @done: Amount of data that has been already copied
 *
 * Return: Amount of data that has to be copied
 */
1202static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
1203 int size, int done)
1204{
1205 size_t copy;
1206
1207 copy = min_t(size_t, size - done,
1208 chan->xdev->max_buffer_len);
1209
1210 if ((copy + done < size) &&
1211 chan->xdev->common.copy_align) {
		/*
		 * If this is not the last descriptor, make sure
		 * the next one will be properly aligned
		 */
1216 copy = rounddown(copy,
1217 (1 << chan->xdev->common.copy_align));
1218 }
1219 return copy;
1220}
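/*
 * Worked example (values are illustrative only): with
 * xdev->max_buffer_len = 16383 and common.copy_align = 3 (8-byte
 * alignment), a request of size = 20000 with done = 0 first yields
 * copy = 16383; since more data remains, it is rounded down to 16376.
 * The next call, with done = 16376, copies the remaining 3624 bytes
 * without rounding because it is the last chunk.
 */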
1221
/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
1230static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1231 dma_cookie_t cookie,
1232 struct dma_tx_state *txstate)
1233{
1234 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1235 struct xilinx_dma_tx_descriptor *desc;
1236 enum dma_status ret;
1237 unsigned long flags;
1238 u32 residue = 0;
1239
1240 ret = dma_cookie_status(dchan, cookie, txstate);
1241 if (ret == DMA_COMPLETE || !txstate)
1242 return ret;
1243
1244 spin_lock_irqsave(&chan->lock, flags);
1245 if (!list_empty(&chan->active_list)) {
1246 desc = list_last_entry(&chan->active_list,
1247 struct xilinx_dma_tx_descriptor, node);
1248
1249
1250
1251
1252 if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
1253 residue = xilinx_dma_get_residue(chan, desc);
1254 }
1255 spin_unlock_irqrestore(&chan->lock, flags);
1256
1257 dma_set_residue(txstate, residue);
1258
1259 return ret;
1260}
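/*
 * Note: residue is only computed for scatter-gather capable non-VDMA
 * channels (see the check above); VDMA and simple-mode transfers always
 * report a residue of zero.
 */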
1261
/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
1268static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1269{
1270 u32 val;
1271
1272 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1273
1274
1275 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1276 val & XILINX_DMA_DMASR_HALTED, 0,
1277 XILINX_DMA_LOOP_COUNT);
1278}
1279
/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
1286static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1287{
1288 u32 val;
1289
1290 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1291 val & XILINX_DMA_DMASR_IDLE, 0,
1292 XILINX_DMA_LOOP_COUNT);
1293}
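/*
 * Unlike the AXI DMA variant above, the CDMA channel has no RUNSTOP halt
 * sequence here; stopping simply polls the IDLE bit, i.e. waits for the
 * transfer already in flight to finish.
 */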
1294
/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
1299static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1300{
1301 int err;
1302 u32 val;
1303
1304 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1305
1306
1307 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1308 !(val & XILINX_DMA_DMASR_HALTED), 0,
1309 XILINX_DMA_LOOP_COUNT);
1310
1311 if (err) {
1312 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1313 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1314
1315 chan->err = true;
1316 }
1317}
1318
/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
1323static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1324{
1325 struct xilinx_vdma_config *config = &chan->config;
1326 struct xilinx_dma_tx_descriptor *desc;
1327 u32 reg, j;
1328 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1329 int i = 0;
1330
1331
1332 if (chan->err)
1333 return;
1334
1335 if (!chan->idle)
1336 return;
1337
1338 if (list_empty(&chan->pending_list))
1339 return;
1340
1341 desc = list_first_entry(&chan->pending_list,
1342 struct xilinx_dma_tx_descriptor, node);
1343
1344
1345 if (chan->has_vflip) {
1346 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1347 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1348 reg |= config->vflip_en;
1349 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1350 reg);
1351 }
1352
1353 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1354
1355 if (config->frm_cnt_en)
1356 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1357 else
1358 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1359
1360
1361 if (config->park)
1362 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1363 else
1364 reg |= XILINX_DMA_DMACR_CIRC_EN;
1365
1366 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1367
1368 j = chan->desc_submitcount;
1369 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1370 if (chan->direction == DMA_MEM_TO_DEV) {
1371 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1372 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1373 } else {
1374 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1375 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1376 }
1377 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1378
1379
1380 xilinx_dma_start(chan);
1381
1382 if (chan->err)
1383 return;
1384
1385
1386 if (chan->desc_submitcount < chan->num_frms)
1387 i = chan->desc_submitcount;
1388
1389 list_for_each_entry(segment, &desc->segments, node) {
1390 if (chan->ext_addr)
1391 vdma_desc_write_64(chan,
1392 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1393 segment->hw.buf_addr,
1394 segment->hw.buf_addr_msb);
1395 else
1396 vdma_desc_write(chan,
1397 XILINX_VDMA_REG_START_ADDRESS(i++),
1398 segment->hw.buf_addr);
1399
1400 last = segment;
1401 }
1402
1403 if (!last)
1404 return;
1405
1406
1407 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1408 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1409 last->hw.stride);
1410 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1411
1412 chan->desc_submitcount++;
1413 chan->desc_pendingcount--;
1414 list_del(&desc->node);
1415 list_add_tail(&desc->node, &chan->active_list);
1416 if (chan->desc_submitcount == chan->num_frms)
1417 chan->desc_submitcount = 0;
1418
1419 chan->idle = false;
1420}
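/*
 * Programming order used above (sketch): vertical flip and the DMACR
 * options first, then the park pointer, then the channel is started and
 * the per-frame start addresses are written. HSIZE and FRMDLY_STRIDE are
 * programmed before VSIZE, which is written last to complete the
 * per-transaction parameters for the hardware.
 */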
1421
/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
1426static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1427{
1428 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1429 struct xilinx_cdma_tx_segment *tail_segment;
1430 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1431
1432 if (chan->err)
1433 return;
1434
1435 if (!chan->idle)
1436 return;
1437
1438 if (list_empty(&chan->pending_list))
1439 return;
1440
1441 head_desc = list_first_entry(&chan->pending_list,
1442 struct xilinx_dma_tx_descriptor, node);
1443 tail_desc = list_last_entry(&chan->pending_list,
1444 struct xilinx_dma_tx_descriptor, node);
1445 tail_segment = list_last_entry(&tail_desc->segments,
1446 struct xilinx_cdma_tx_segment, node);
1447
1448 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1449 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1450 ctrl_reg |= chan->desc_pendingcount <<
1451 XILINX_DMA_CR_COALESCE_SHIFT;
1452 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1453 }
1454
1455 if (chan->has_sg) {
1456 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1457 XILINX_CDMA_CR_SGMODE);
1458
1459 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1460 XILINX_CDMA_CR_SGMODE);
1461
1462 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1463 head_desc->async_tx.phys);
1464
1465
1466 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1467 tail_segment->phys);
1468 } else {
1469
1470 struct xilinx_cdma_tx_segment *segment;
1471 struct xilinx_cdma_desc_hw *hw;
1472
1473 segment = list_first_entry(&head_desc->segments,
1474 struct xilinx_cdma_tx_segment,
1475 node);
1476
1477 hw = &segment->hw;
1478
1479 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1480 xilinx_prep_dma_addr_t(hw->src_addr));
1481 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1482 xilinx_prep_dma_addr_t(hw->dest_addr));
1483
1484
1485 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1486 hw->control & chan->xdev->max_buffer_len);
1487 }
1488
1489 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1490 chan->desc_pendingcount = 0;
1491 chan->idle = false;
1492}
1493
/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
1498static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1499{
1500 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1501 struct xilinx_axidma_tx_segment *tail_segment;
1502 u32 reg;
1503
1504 if (chan->err)
1505 return;
1506
1507 if (list_empty(&chan->pending_list))
1508 return;
1509
1510 if (!chan->idle)
1511 return;
1512
1513 head_desc = list_first_entry(&chan->pending_list,
1514 struct xilinx_dma_tx_descriptor, node);
1515 tail_desc = list_last_entry(&chan->pending_list,
1516 struct xilinx_dma_tx_descriptor, node);
1517 tail_segment = list_last_entry(&tail_desc->segments,
1518 struct xilinx_axidma_tx_segment, node);
1519
1520 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1521
1522 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1523 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1524 reg |= chan->desc_pendingcount <<
1525 XILINX_DMA_CR_COALESCE_SHIFT;
1526 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1527 }
1528
1529 if (chan->has_sg)
1530 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1531 head_desc->async_tx.phys);
1532
1533 xilinx_dma_start(chan);
1534
1535 if (chan->err)
1536 return;
1537
1538
1539 if (chan->has_sg) {
1540 if (chan->cyclic)
1541 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1542 chan->cyclic_seg_v->phys);
1543 else
1544 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1545 tail_segment->phys);
1546 } else {
1547 struct xilinx_axidma_tx_segment *segment;
1548 struct xilinx_axidma_desc_hw *hw;
1549
1550 segment = list_first_entry(&head_desc->segments,
1551 struct xilinx_axidma_tx_segment,
1552 node);
1553 hw = &segment->hw;
1554
1555 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1556 xilinx_prep_dma_addr_t(hw->buf_addr));
1557
1558
1559 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1560 hw->control & chan->xdev->max_buffer_len);
1561 }
1562
1563 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1564 chan->desc_pendingcount = 0;
1565 chan->idle = false;
1566}
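/*
 * Two programming models are handled above: in scatter-gather mode the
 * CURDESC register is loaded before starting the channel and the TAILDESC
 * write is what makes the engine begin fetching descriptors (the cyclic
 * case points TAILDESC at the dedicated cyclic_seg_v so the ring never
 * terminates); in simple mode the buffer address is written to SRCDSTADDR
 * and the transfer is kicked off by writing the byte count to BTT.
 */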
1567
/**
 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
 * @chan: Driver specific channel struct pointer
 */
1572static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
1573{
1574 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1575 struct xilinx_aximcdma_tx_segment *tail_segment;
1576 u32 reg;
1577
1578
1579
1580
1581
1582
1583 if (chan->err)
1584 return;
1585
1586 if (!chan->idle)
1587 return;
1588
1589 if (list_empty(&chan->pending_list))
1590 return;
1591
1592 head_desc = list_first_entry(&chan->pending_list,
1593 struct xilinx_dma_tx_descriptor, node);
1594 tail_desc = list_last_entry(&chan->pending_list,
1595 struct xilinx_dma_tx_descriptor, node);
1596 tail_segment = list_last_entry(&tail_desc->segments,
1597 struct xilinx_aximcdma_tx_segment, node);
1598
1599 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1600
1601 if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
1602 reg &= ~XILINX_MCDMA_COALESCE_MASK;
1603 reg |= chan->desc_pendingcount <<
1604 XILINX_MCDMA_COALESCE_SHIFT;
1605 }
1606
1607 reg |= XILINX_MCDMA_IRQ_ALL_MASK;
1608 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1609
1610
1611 xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
1612 head_desc->async_tx.phys);
1613
1614
1615 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
1616 reg |= BIT(chan->tdest);
1617 dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
1618
1619
1620 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1621 reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
1622 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1623
1624 xilinx_dma_start(chan);
1625
1626 if (chan->err)
1627 return;
1628
1629
1630 xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
1631 tail_segment->phys);
1632
1633 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1634 chan->desc_pendingcount = 0;
1635 chan->idle = false;
1636}
1637
/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
1642static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1643{
1644 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1645 unsigned long flags;
1646
1647 spin_lock_irqsave(&chan->lock, flags);
1648 chan->start_transfer(chan);
1649 spin_unlock_irqrestore(&chan->lock, flags);
1650}
1651
/**
 * xilinx_dma_complete_descriptor - Mark the active descriptors as complete
 * @chan : xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
1658static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1659{
1660 struct xilinx_dma_tx_descriptor *desc, *next;
1661
1662
1663 if (list_empty(&chan->active_list))
1664 return;
1665
1666 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1667 if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1668 XDMA_TYPE_VDMA)
1669 desc->residue = xilinx_dma_get_residue(chan, desc);
1670 else
1671 desc->residue = 0;
1672 desc->err = chan->err;
1673
1674 list_del(&desc->node);
1675 if (!desc->cyclic)
1676 dma_cookie_complete(&desc->async_tx);
1677 list_add_tail(&desc->node, &chan->done_list);
1678 }
1679}
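/*
 * Cyclic descriptors are moved to done_list like any other, but their
 * cookie is never completed here; xilinx_dma_chan_desc_cleanup() keeps
 * invoking their callback on every pass instead of freeing them.
 */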
1680
/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
1687static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1688{
1689 int err;
1690 u32 tmp;
1691
1692 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1693
1694
1695 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1696 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1697 XILINX_DMA_LOOP_COUNT);
1698
1699 if (err) {
1700 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1701 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1702 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1703 return -ETIMEDOUT;
1704 }
1705
1706 chan->err = false;
1707 chan->idle = true;
1708 chan->desc_pendingcount = 0;
1709 chan->desc_submitcount = 0;
1710
1711 return err;
1712}
1713
/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
1720static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1721{
1722 int err;
1723
1724
1725 err = xilinx_dma_reset(chan);
1726 if (err)
1727 return err;
1728
1729
1730 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1731 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1732
1733 return 0;
1734}
1735
/**
 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx MCDMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
1743static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
1744{
1745 struct xilinx_dma_chan *chan = data;
1746 u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
1747
1748 if (chan->direction == DMA_DEV_TO_MEM)
1749 ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
1750 else
1751 ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
1752
1753
1754 chan_sermask = dma_ctrl_read(chan, ser_offset);
1755 chan_id = ffs(chan_sermask);
1756
1757 if (!chan_id)
1758 return IRQ_NONE;
1759
1760 if (chan->direction == DMA_DEV_TO_MEM)
1761 chan_offset = chan->xdev->dma_config->max_channels / 2;
1762
1763 chan_offset = chan_offset + (chan_id - 1);
1764 chan = chan->xdev->chan[chan_offset];
1765
1766 status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
1767 if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
1768 return IRQ_NONE;
1769
1770 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
1771 status & XILINX_MCDMA_IRQ_ALL_MASK);
1772
1773 if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
1774 dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
1775 chan,
1776 dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
1777 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
1778 (chan->tdest)),
1779 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
1780 (chan->tdest)));
1781 chan->err = true;
1782 }
1783
1784 if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
1789 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1790 }
1791
1792 if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
1793 spin_lock(&chan->lock);
1794 xilinx_dma_complete_descriptor(chan);
1795 chan->idle = true;
1796 chan->start_transfer(chan);
1797 spin_unlock(&chan->lock);
1798 }
1799
1800 tasklet_schedule(&chan->tasklet);
1801 return IRQ_HANDLED;
1802}
1803
/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
1811static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1812{
1813 struct xilinx_dma_chan *chan = data;
1814 u32 status;
1815
1816
1817 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1818 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1819 return IRQ_NONE;
1820
1821 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1822 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1823
1824 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * make sure not to write to other error bits to 1.
		 */
1832 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1833
1834 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1835 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1836
1837 if (!chan->flush_on_fsync ||
1838 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1839 dev_err(chan->dev,
1840 "Channel %p has errors %x, cdr %x tdr %x\n",
1841 chan, errors,
1842 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1843 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1844 chan->err = true;
1845 }
1846 }
1847
1848 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
1853 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1854 }
1855
1856 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1857 spin_lock(&chan->lock);
1858 xilinx_dma_complete_descriptor(chan);
1859 chan->idle = true;
1860 chan->start_transfer(chan);
1861 spin_unlock(&chan->lock);
1862 }
1863
1864 tasklet_schedule(&chan->tasklet);
1865 return IRQ_HANDLED;
1866}
1867
/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
1873static void append_desc_queue(struct xilinx_dma_chan *chan,
1874 struct xilinx_dma_tx_descriptor *desc)
1875{
1876 struct xilinx_vdma_tx_segment *tail_segment;
1877 struct xilinx_dma_tx_descriptor *tail_desc;
1878 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1879 struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
1880 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1881
1882 if (list_empty(&chan->pending_list))
1883 goto append;
1884
1885
1886
1887
1888
1889 tail_desc = list_last_entry(&chan->pending_list,
1890 struct xilinx_dma_tx_descriptor, node);
1891 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1892 tail_segment = list_last_entry(&tail_desc->segments,
1893 struct xilinx_vdma_tx_segment,
1894 node);
1895 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1896 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1897 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1898 struct xilinx_cdma_tx_segment,
1899 node);
1900 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1901 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1902 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1903 struct xilinx_axidma_tx_segment,
1904 node);
1905 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1906 } else {
1907 aximcdma_tail_segment =
1908 list_last_entry(&tail_desc->segments,
1909 struct xilinx_aximcdma_tx_segment,
1910 node);
1911 aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1912 }
1913
1914
1915
1916
1917
1918append:
1919 list_add_tail(&desc->node, &chan->pending_list);
1920 chan->desc_pendingcount++;
1921
1922 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1923 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1924 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1925 chan->desc_pendingcount = chan->num_frms;
1926 }
1927}
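/*
 * Besides queueing the software descriptor, append_desc_queue() links the
 * new transaction into the previous tail segment's hw.next_desc so that,
 * in scatter-gather mode, the hardware can walk from one submitted
 * transaction straight into the next without software intervention.
 */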
1928
/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
1935static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1936{
1937 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1938 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1939 dma_cookie_t cookie;
1940 unsigned long flags;
1941 int err;
1942
1943 if (chan->cyclic) {
1944 xilinx_dma_free_tx_descriptor(chan, desc);
1945 return -EBUSY;
1946 }
1947
1948 if (chan->err) {
1949
1950
1951
1952
1953 err = xilinx_dma_chan_reset(chan);
1954 if (err < 0)
1955 return err;
1956 }
1957
1958 spin_lock_irqsave(&chan->lock, flags);
1959
1960 cookie = dma_cookie_assign(tx);
1961
1962
1963 append_desc_queue(chan, desc);
1964
1965 if (desc->cyclic)
1966 chan->cyclic = true;
1967
1968 spin_unlock_irqrestore(&chan->lock, flags);
1969
1970 return cookie;
1971}
1972
/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
1982static struct dma_async_tx_descriptor *
1983xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1984 struct dma_interleaved_template *xt,
1985 unsigned long flags)
1986{
1987 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1988 struct xilinx_dma_tx_descriptor *desc;
1989 struct xilinx_vdma_tx_segment *segment;
1990 struct xilinx_vdma_desc_hw *hw;
1991
1992 if (!is_slave_direction(xt->dir))
1993 return NULL;
1994
1995 if (!xt->numf || !xt->sgl[0].size)
1996 return NULL;
1997
1998 if (xt->frame_size != 1)
1999 return NULL;
2000
2001
2002 desc = xilinx_dma_alloc_tx_descriptor(chan);
2003 if (!desc)
2004 return NULL;
2005
2006 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2007 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2008 async_tx_ack(&desc->async_tx);
2009
2010
2011 segment = xilinx_vdma_alloc_tx_segment(chan);
2012 if (!segment)
2013 goto error;
2014
2015
2016 hw = &segment->hw;
2017 hw->vsize = xt->numf;
2018 hw->hsize = xt->sgl[0].size;
2019 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
2020 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
2021 hw->stride |= chan->config.frm_dly <<
2022 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
2023
2024 if (xt->dir != DMA_MEM_TO_DEV) {
2025 if (chan->ext_addr) {
2026 hw->buf_addr = lower_32_bits(xt->dst_start);
2027 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
2028 } else {
2029 hw->buf_addr = xt->dst_start;
2030 }
2031 } else {
2032 if (chan->ext_addr) {
2033 hw->buf_addr = lower_32_bits(xt->src_start);
2034 hw->buf_addr_msb = upper_32_bits(xt->src_start);
2035 } else {
2036 hw->buf_addr = xt->src_start;
2037 }
2038 }
2039
2040
2041 list_add_tail(&segment->node, &desc->segments);
2042
2043
2044 segment = list_first_entry(&desc->segments,
2045 struct xilinx_vdma_tx_segment, node);
2046 desc->async_tx.phys = segment->phys;
2047
2048 return &desc->async_tx;
2049
2050error:
2051 xilinx_dma_free_tx_descriptor(chan, desc);
2052 return NULL;
2053}
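/*
 * Illustrative client-side sketch (not from this driver; names such as
 * "vdma_chan", "frame_dma_addr", "height", "bytes_per_line" and "stride"
 * are placeholders a client driver would provide). One video frame is
 * described as numf lines (VSIZE) of sgl[0].size bytes (HSIZE), with
 * sgl[0].icg filling the gap up to the line stride:
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->dir = DMA_DEV_TO_MEM;
 *	xt->dst_start = frame_dma_addr;
 *	xt->numf = height;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = bytes_per_line;
 *	xt->sgl[0].icg = stride - bytes_per_line;
 *
 *	tx = dmaengine_prep_interleaved_dma(vdma_chan, xt, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(vdma_chan);
 *	}
 *	kfree(xt);
 */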
2054
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2065static struct dma_async_tx_descriptor *
2066xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
2067 dma_addr_t dma_src, size_t len, unsigned long flags)
2068{
2069 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2070 struct xilinx_dma_tx_descriptor *desc;
2071 struct xilinx_cdma_tx_segment *segment;
2072 struct xilinx_cdma_desc_hw *hw;
2073
2074 if (!len || len > chan->xdev->max_buffer_len)
2075 return NULL;
2076
2077 desc = xilinx_dma_alloc_tx_descriptor(chan);
2078 if (!desc)
2079 return NULL;
2080
2081 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2082 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2083
2084
2085 segment = xilinx_cdma_alloc_tx_segment(chan);
2086 if (!segment)
2087 goto error;
2088
2089 hw = &segment->hw;
2090 hw->control = len;
2091 hw->src_addr = dma_src;
2092 hw->dest_addr = dma_dst;
2093 if (chan->ext_addr) {
2094 hw->src_addr_msb = upper_32_bits(dma_src);
2095 hw->dest_addr_msb = upper_32_bits(dma_dst);
2096 }
2097
2098
2099 list_add_tail(&segment->node, &desc->segments);
2100
2101 desc->async_tx.phys = segment->phys;
2102 hw->next_desc = segment->phys;
2103
2104 return &desc->async_tx;
2105
2106error:
2107 xilinx_dma_free_tx_descriptor(chan, desc);
2108 return NULL;
2109}
2110
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2122static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
2123 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
2124 enum dma_transfer_direction direction, unsigned long flags,
2125 void *context)
2126{
2127 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2128 struct xilinx_dma_tx_descriptor *desc;
2129 struct xilinx_axidma_tx_segment *segment = NULL;
2130 u32 *app_w = (u32 *)context;
2131 struct scatterlist *sg;
2132 size_t copy;
2133 size_t sg_used;
2134 unsigned int i;
2135
2136 if (!is_slave_direction(direction))
2137 return NULL;
2138
2139
2140 desc = xilinx_dma_alloc_tx_descriptor(chan);
2141 if (!desc)
2142 return NULL;
2143
2144 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2145 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2146
2147
2148 for_each_sg(sgl, sg, sg_len, i) {
2149 sg_used = 0;
2150
2151
2152 while (sg_used < sg_dma_len(sg)) {
2153 struct xilinx_axidma_desc_hw *hw;
2154
2155
2156 segment = xilinx_axidma_alloc_tx_segment(chan);
2157 if (!segment)
2158 goto error;
2159
2160
2161
2162
2163
2164 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
2165 sg_used);
2166 hw = &segment->hw;
2167
2168
2169 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
2170 sg_used, 0);
2171
2172 hw->control = copy;
2173
2174 if (chan->direction == DMA_MEM_TO_DEV) {
2175 if (app_w)
2176 memcpy(hw->app, app_w, sizeof(u32) *
2177 XILINX_DMA_NUM_APP_WORDS);
2178 }
2179
2180 sg_used += copy;
2181
2182
2183
2184
2185
2186 list_add_tail(&segment->node, &desc->segments);
2187 }
2188 }
2189
2190 segment = list_first_entry(&desc->segments,
2191 struct xilinx_axidma_tx_segment, node);
2192 desc->async_tx.phys = segment->phys;
2193
2194
2195 if (chan->direction == DMA_MEM_TO_DEV) {
2196 segment->hw.control |= XILINX_DMA_BD_SOP;
2197 segment = list_last_entry(&desc->segments,
2198 struct xilinx_axidma_tx_segment,
2199 node);
2200 segment->hw.control |= XILINX_DMA_BD_EOP;
2201 }
2202
2203 return &desc->async_tx;
2204
2205error:
2206 xilinx_dma_free_tx_descriptor(chan, desc);
2207 return NULL;
2208}
2209
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2221static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
2222 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
2223 size_t period_len, enum dma_transfer_direction direction,
2224 unsigned long flags)
2225{
2226 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2227 struct xilinx_dma_tx_descriptor *desc;
2228 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
2229 size_t copy, sg_used;
2230 unsigned int num_periods;
2231 int i;
2232 u32 reg;
2233
2234 if (!period_len)
2235 return NULL;
2236
2237 num_periods = buf_len / period_len;
2238
2239 if (!num_periods)
2240 return NULL;
2241
2242 if (!is_slave_direction(direction))
2243 return NULL;
2244
2245
2246 desc = xilinx_dma_alloc_tx_descriptor(chan);
2247 if (!desc)
2248 return NULL;
2249
2250 chan->direction = direction;
2251 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2252 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2253
2254 for (i = 0; i < num_periods; ++i) {
2255 sg_used = 0;
2256
2257 while (sg_used < period_len) {
2258 struct xilinx_axidma_desc_hw *hw;
2259
2260
2261 segment = xilinx_axidma_alloc_tx_segment(chan);
2262 if (!segment)
2263 goto error;
2264
2265
2266
2267
2268
2269 copy = xilinx_dma_calc_copysize(chan, period_len,
2270 sg_used);
2271 hw = &segment->hw;
2272 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
2273 period_len * i);
2274 hw->control = copy;
2275
2276 if (prev)
2277 prev->hw.next_desc = segment->phys;
2278
2279 prev = segment;
2280 sg_used += copy;
2281
2282
2283
2284
2285
2286 list_add_tail(&segment->node, &desc->segments);
2287 }
2288 }
2289
2290 head_segment = list_first_entry(&desc->segments,
2291 struct xilinx_axidma_tx_segment, node);
2292 desc->async_tx.phys = head_segment->phys;
2293
2294 desc->cyclic = true;
2295 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2296 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2297 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2298
2299 segment = list_last_entry(&desc->segments,
2300 struct xilinx_axidma_tx_segment,
2301 node);
2302 segment->hw.next_desc = (u32) head_segment->phys;
2303
2304
2305 if (direction == DMA_MEM_TO_DEV) {
2306 head_segment->hw.control |= XILINX_DMA_BD_SOP;
2307 segment->hw.control |= XILINX_DMA_BD_EOP;
2308 }
2309
2310 return &desc->async_tx;
2311
2312error:
2313 xilinx_dma_free_tx_descriptor(chan, desc);
2314 return NULL;
2315}
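/*
 * Illustrative client-side sketch for cyclic (ring buffer) operation, e.g.
 * continuous capture into a buffer split into equal periods. Names such as
 * "chan", "buf_dma", "BUF_LEN", "PERIOD_LEN", "my_period_done" and "my_ctx"
 * are placeholders:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, BUF_LEN, PERIOD_LEN,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -EINVAL;
 *	tx->callback = my_period_done;
 *	tx->callback_param = my_ctx;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * The callback then runs for each completed period until the client calls
 * dmaengine_terminate_sync(chan).
 */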
2316
/**
 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2328static struct dma_async_tx_descriptor *
2329xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
2330 unsigned int sg_len,
2331 enum dma_transfer_direction direction,
2332 unsigned long flags, void *context)
2333{
2334 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2335 struct xilinx_dma_tx_descriptor *desc;
2336 struct xilinx_aximcdma_tx_segment *segment = NULL;
2337 u32 *app_w = (u32 *)context;
2338 struct scatterlist *sg;
2339 size_t copy;
2340 size_t sg_used;
2341 unsigned int i;
2342
2343 if (!is_slave_direction(direction))
2344 return NULL;
2345
2346
2347 desc = xilinx_dma_alloc_tx_descriptor(chan);
2348 if (!desc)
2349 return NULL;
2350
2351 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2352 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2353
2354
2355 for_each_sg(sgl, sg, sg_len, i) {
2356 sg_used = 0;
2357
2358
2359 while (sg_used < sg_dma_len(sg)) {
2360 struct xilinx_aximcdma_desc_hw *hw;
2361
2362
2363 segment = xilinx_aximcdma_alloc_tx_segment(chan);
2364 if (!segment)
2365 goto error;
2366
2367
2368
2369
2370
2371 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
2372 chan->xdev->max_buffer_len);
2373 hw = &segment->hw;
2374
2375
2376 xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
2377 sg_used);
2378 hw->control = copy;
2379
2380 if (chan->direction == DMA_MEM_TO_DEV && app_w) {
2381 memcpy(hw->app, app_w, sizeof(u32) *
2382 XILINX_DMA_NUM_APP_WORDS);
2383 }
2384
2385 sg_used += copy;
2386
2387
2388
2389
2390 list_add_tail(&segment->node, &desc->segments);
2391 }
2392 }
2393
2394 segment = list_first_entry(&desc->segments,
2395 struct xilinx_aximcdma_tx_segment, node);
2396 desc->async_tx.phys = segment->phys;
2397
2398
2399 if (chan->direction == DMA_MEM_TO_DEV) {
2400 segment->hw.control |= XILINX_MCDMA_BD_SOP;
2401 segment = list_last_entry(&desc->segments,
2402 struct xilinx_aximcdma_tx_segment,
2403 node);
2404 segment->hw.control |= XILINX_MCDMA_BD_EOP;
2405 }
2406
2407 return &desc->async_tx;
2408
2409error:
2410 xilinx_dma_free_tx_descriptor(chan, desc);
2411
2412 return NULL;
2413}
2414
/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (!chan->cyclic) {
		err = chan->stop_transfer(chan);
		if (err) {
			dev_err(chan->dev, "Cannot stop channel %p: %x\n",
				chan, dma_ctrl_read(chan,
				XILINX_DMA_REG_DMASR));
			chan->err = true;
		}
	}

	xilinx_dma_chan_reset(chan);

	/* Remove and free all of the descriptors in the lists */
	xilinx_dma_free_descriptors(chan);
	chan->idle = true;

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}
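/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for AXI VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */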
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	chan->config.frm_dly = cfg->frm_dly;
	chan->config.park = cfg->park;

	/* Genlock and master selection */
	chan->config.gen_lock = cfg->gen_lock;
	chan->config.master = cfg->master;

	dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
	if (cfg->gen_lock && chan->genlock) {
		dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
		dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}

	chan->config.frm_cnt_en = cfg->frm_cnt_en;
	chan->config.vflip_en = cfg->vflip_en;

	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;

	chan->config.coalesc = cfg->coalesc;
	chan->config.delay = cfg->delay;

	if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
		dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
		dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
		chan->config.coalesc = cfg->coalesc;
	}

	if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
		dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
		dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
		chan->config.delay = cfg->delay;
	}

	/* FSync Source selection */
	dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
	dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
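
/*
 * Illustrative usage (not part of this driver): a DMA client that has
 * requested a VDMA channel may tune it at run time through the exported
 * helper above. The channel name "vdma0" and the values below are
 * hypothetical examples; only fields handled by
 * xilinx_vdma_channel_set_config() are shown.
 *
 *	struct xilinx_vdma_config cfg = { 0 };
 *	struct dma_chan *chan = dma_request_chan(dev, "vdma0");
 *
 *	if (!IS_ERR(chan)) {
 *		cfg.frm_cnt_en = 1;
 *		cfg.coalesc = 1;
 *		cfg.park = 0;
 *		xilinx_vdma_channel_set_config(chan, &cfg);
 *	}
 */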
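/**
 * xilinx_dma_chan_remove - Per channel remove function
 * @chan: Driver specific DMA channel
 */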
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

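/* Acquire and enable the clocks required by the AXI DMA variant. */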
static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

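/* Acquire and enable the clocks required by the AXI CDMA variant. */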
static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

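/* Acquire and enable the clocks required by the AXI VDMA variant. */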
static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

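/* Disable and unprepare all clocks acquired at probe time. */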
static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
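/**
 * xilinx_dma_chan_probe - Per channel probing
 * Gets the channel features from the device tree entry and initializes
 * the channel-specific handling routines.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' on success and failure value on error
 */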
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/*
	 * The idle flag tracks whether the engine has outstanding work, so
	 * the hot path does not have to poll the status register to find out.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = xdev->mm2s_chan_id++;
		chan->tdest = chan->id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = xdev->s2mm_chan_id++;
		chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
			chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
		else
			chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;

		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, chan->tdest);
	err = request_irq(chan->irq, xdev->dma_config->irq_handler,
			  IRQF_SHARED, "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		chan->start_transfer = xilinx_mcdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
		if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
		    dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		    XILINX_DMA_DMASR_SG_MASK)
			chan->has_sg = true;
		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
			chan->has_sg ? "enabled" : "disabled");
	}

	/* Initialize the tasklet */
	tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
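/**
 * xilinx_dma_child_probe - Per child node probe
 * Reads the number of DMA channels for the child node from the device tree
 * and probes each of them.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' always.
 */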
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i;
	u32 nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node);

	return 0;
}
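/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */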
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
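
/*
 * Device tree consumers reference a channel by its index, which is passed
 * to of_dma_xilinx_xlate() as dma_spec->args[0]. A hypothetical consumer
 * node (node and label names are examples only) might look like:
 *
 *	dma_client: client@0 {
 *		...
 *		dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *		dma-names = "tx", "rx";
 *	};
 */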

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
	.irq_handler = xilinx_dma_irq_handler,
	.max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
};

static const struct xilinx_dma_config aximcdma_config = {
	.dmatype = XDMA_TYPE_AXIMCDMA,
	.clk_init = axidma_clk_init,
	.irq_handler = xilinx_mcdma_irq_handler,
	.max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
	.irq_handler = xilinx_dma_irq_handler,
	.max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
	.irq_handler = xilinx_dma_irq_handler,
	.max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{ .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
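/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */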
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	xdev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
	xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
	    xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
				xdev->max_buffer_len =
					GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		/* Residue calculation is supported by only AXI DMA and CDMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
		/* Residue calculation is supported by only AXI DMA and CDMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->dma_config->max_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	err = dma_async_device_register(&xdev->common);
	if (err) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error;
	}

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
		dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->dma_config->max_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
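/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */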
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->dma_config->max_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");