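/*
 * Xilinx DMA engine driver: handles the AXI DMA, AXI CDMA, AXI VDMA and
 * AXI MCDMA soft IP cores. The DMA and MCDMA engines move data between
 * system memory and AXI4-Stream targets, CDMA copies between two
 * memory-mapped locations, and VDMA moves video frame buffers with
 * frame-store, parking, genlock and vertical-flip support.
 */
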
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
55#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
56#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
57#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
58#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0

/* Control Registers */
61#define XILINX_DMA_REG_DMACR 0x0000
62#define XILINX_DMA_DMACR_DELAY_MAX 0xff
63#define XILINX_DMA_DMACR_DELAY_SHIFT 24
64#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
65#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
66#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
67#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
68#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
69#define XILINX_DMA_DMACR_MASTER_SHIFT 8
70#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
71#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
72#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
73#define XILINX_DMA_DMACR_RESET BIT(2)
74#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
75#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
76#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
77#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
78#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
79#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
80
81#define XILINX_DMA_REG_DMASR 0x0004
82#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
83#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
84#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
85#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
86#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
87#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
88#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
89#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
90#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
91#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
92#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
93#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
94#define XILINX_DMA_DMASR_SG_MASK BIT(3)
95#define XILINX_DMA_DMASR_IDLE BIT(1)
96#define XILINX_DMA_DMASR_HALTED BIT(0)
97#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
98#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
99
100#define XILINX_DMA_REG_CURDESC 0x0008
101#define XILINX_DMA_REG_TAILDESC 0x0010
102#define XILINX_DMA_REG_REG_INDEX 0x0014
103#define XILINX_DMA_REG_FRMSTORE 0x0018
104#define XILINX_DMA_REG_THRESHOLD 0x001c
105#define XILINX_DMA_REG_FRMPTR_STS 0x0024
106#define XILINX_DMA_REG_PARK_PTR 0x0028
107#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
108#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
109#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
110#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
111#define XILINX_DMA_REG_VDMA_VERSION 0x002c

/* Register Direct Mode Registers */
114#define XILINX_DMA_REG_VSIZE 0x0000
115#define XILINX_DMA_REG_HSIZE 0x0004
116
117#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
118#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
119#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
120
121#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
122#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
123
124#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
125#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)

/* HW specific definitions */
128#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
129#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
130#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
131
132#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
133 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
134 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
135 XILINX_DMA_DMASR_ERR_IRQ)
136
137#define XILINX_DMA_DMASR_ALL_ERR_MASK \
138 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
139 XILINX_DMA_DMASR_SOF_LATE_ERR | \
140 XILINX_DMA_DMASR_SG_DEC_ERR | \
141 XILINX_DMA_DMASR_SG_SLV_ERR | \
142 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
143 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
144 XILINX_DMA_DMASR_DMA_DEC_ERR | \
145 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
146 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early and
 * SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC is enabled
 * in the hardware system.
 */
153#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
154 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
155 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
156 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
157 XILINX_DMA_DMASR_DMA_INT_ERR)
158
159
160#define XILINX_DMA_FLUSH_S2MM 3
161#define XILINX_DMA_FLUSH_MM2S 2
162#define XILINX_DMA_FLUSH_BOTH 1
163
164
165#define XILINX_DMA_LOOP_COUNT 1000000
166
167
168#define XILINX_DMA_REG_SRCDSTADDR 0x18
169#define XILINX_DMA_REG_BTT 0x28
170
171
172#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
173#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
174#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
175#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
176#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
177#define XILINX_DMA_CR_COALESCE_SHIFT 16
178#define XILINX_DMA_BD_SOP BIT(27)
179#define XILINX_DMA_BD_EOP BIT(26)
180#define XILINX_DMA_COALESCE_MAX 255
181#define XILINX_DMA_NUM_DESCS 255
182#define XILINX_DMA_NUM_APP_WORDS 5
183
184
185#define XILINX_CDMA_REG_SRCADDR 0x18
186#define XILINX_CDMA_REG_DSTADDR 0x20
187
188
189#define XILINX_CDMA_CR_SGMODE BIT(3)
190
191#define xilinx_prep_dma_addr_t(addr) \
192 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
193
194
195#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
196#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
197#define XILINX_MCDMA_CHEN_OFFSET 0x0008
198#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
199#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
200#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
201#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
202#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
203#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
204#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
205
206
207#define XILINX_MCDMA_COALESCE_SHIFT 16
208#define XILINX_MCDMA_COALESCE_MAX 24
209#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
210#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
211#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
212#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
213#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
214#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
215#define XILINX_MCDMA_BD_EOP BIT(30)
216#define XILINX_MCDMA_BD_SOP BIT(31)
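
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor (AXI VDMA)
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */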
229struct xilinx_vdma_desc_hw {
230 u32 next_desc;
231 u32 pad1;
232 u32 buf_addr;
233 u32 buf_addr_msb;
234 u32 vsize;
235 u32 hsize;
236 u32 stride;
237} __aligned(64);
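
/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */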
251struct xilinx_axidma_desc_hw {
252 u32 next_desc;
253 u32 next_desc_msb;
254 u32 buf_addr;
255 u32 buf_addr_msb;
256 u32 reserved1;
257 u32 reserved2;
258 u32 control;
259 u32 status;
260 u32 app[XILINX_DMA_NUM_APP_WORDS];
261} __aligned(64);
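
/**
 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @rsvd: Reserved @0x10
 * @control: Control field @0x14
 * @status: Status field @0x18
 * @sideband_status: Status of sideband signals @0x1C
 * @app: APP Fields @0x20 - 0x30
 */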
275struct xilinx_aximcdma_desc_hw {
276 u32 next_desc;
277 u32 next_desc_msb;
278 u32 buf_addr;
279 u32 buf_addr_msb;
280 u32 rsvd;
281 u32 control;
282 u32 status;
283 u32 sideband_status;
284 u32 app[XILINX_DMA_NUM_APP_WORDS];
285} __aligned(64);
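
/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor for AXI CDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: MSB of Source address @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: MSB of Destination address @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */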
298struct xilinx_cdma_desc_hw {
299 u32 next_desc;
300 u32 next_desc_msb;
301 u32 src_addr;
302 u32 src_addr_msb;
303 u32 dest_addr;
304 u32 dest_addr_msb;
305 u32 control;
306 u32 status;
307} __aligned(64);
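
/*
 * Software descriptors: each *_tx_segment below wraps one hardware
 * descriptor together with its DMA (bus) address and the list node used
 * to queue it on a channel.
 */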
315struct xilinx_vdma_tx_segment {
316 struct xilinx_vdma_desc_hw hw;
317 struct list_head node;
318 dma_addr_t phys;
319} __aligned(64);
327struct xilinx_axidma_tx_segment {
328 struct xilinx_axidma_desc_hw hw;
329 struct list_head node;
330 dma_addr_t phys;
331} __aligned(64);
339struct xilinx_aximcdma_tx_segment {
340 struct xilinx_aximcdma_desc_hw hw;
341 struct list_head node;
342 dma_addr_t phys;
343} __aligned(64);
351struct xilinx_cdma_tx_segment {
352 struct xilinx_cdma_desc_hw hw;
353 struct list_head node;
354 dma_addr_t phys;
355} __aligned(64);
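
/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 * @err: Whether the descriptor has an error.
 * @residue: Residue of the completed descriptor
 */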
366struct xilinx_dma_tx_descriptor {
367 struct dma_async_tx_descriptor async_tx;
368 struct list_head segments;
369 struct list_head node;
370 bool cyclic;
371 bool err;
372 u32 residue;
373};
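
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_mv: Statically allocated segments base for MCDMA
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */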
413struct xilinx_dma_chan {
414 struct xilinx_dma_device *xdev;
415 u32 ctrl_offset;
416 u32 desc_offset;
417 spinlock_t lock;
418 struct list_head pending_list;
419 struct list_head active_list;
420 struct list_head done_list;
421 struct list_head free_seg_list;
422 struct dma_chan common;
423 struct dma_pool *desc_pool;
424 struct device *dev;
425 int irq;
426 int id;
427 enum dma_transfer_direction direction;
428 int num_frms;
429 bool has_sg;
430 bool cyclic;
431 bool genlock;
432 bool err;
433 bool idle;
434 struct tasklet_struct tasklet;
435 struct xilinx_vdma_config config;
436 bool flush_on_fsync;
437 u32 desc_pendingcount;
438 bool ext_addr;
439 u32 desc_submitcount;
440 struct xilinx_axidma_tx_segment *seg_v;
441 struct xilinx_aximcdma_tx_segment *seg_mv;
442 dma_addr_t seg_p;
443 struct xilinx_axidma_tx_segment *cyclic_seg_v;
444 dma_addr_t cyclic_seg_p;
445 void (*start_transfer)(struct xilinx_dma_chan *chan);
446 int (*stop_transfer)(struct xilinx_dma_chan *chan);
447 u16 tdest;
448 bool has_vflip;
449};
460enum xdma_ip_type {
461 XDMA_TYPE_AXIDMA = 0,
462 XDMA_TYPE_CDMA,
463 XDMA_TYPE_VDMA,
464 XDMA_TYPE_AXIMCDMA
465};
466
467struct xilinx_dma_config {
468 enum xdma_ip_type dmatype;
469 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
470 struct clk **tx_clk, struct clk **txs_clk,
471 struct clk **rx_clk, struct clk **rxs_clk);
472 irqreturn_t (*irq_handler)(int irq, void *data);
473 const int max_channels;
474};
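
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @s2mm_chan_id: DMA s2mm channel identifier
 * @mm2s_chan_id: DMA mm2s channel identifier
 * @max_buffer_len: Max buffer length
 */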
495struct xilinx_dma_device {
496 void __iomem *regs;
497 struct device *dev;
498 struct dma_device common;
499 struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
500 u32 flush_on_fsync;
501 bool ext_addr;
502 struct platform_device *pdev;
503 const struct xilinx_dma_config *dma_config;
504 struct clk *axi_clk;
505 struct clk *tx_clk;
506 struct clk *txs_clk;
507 struct clk *rx_clk;
508 struct clk *rxs_clk;
509 u32 s2mm_chan_id;
510 u32 mm2s_chan_id;
511 u32 max_buffer_len;
512};
513
514
515#define to_xilinx_chan(chan) \
516 container_of(chan, struct xilinx_dma_chan, common)
517#define to_dma_tx_descriptor(tx) \
518 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
519#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
520 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
521 cond, delay_us, timeout_us)
522
523
524static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
525{
526 return ioread32(chan->xdev->regs + reg);
527}
528
529static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
530{
531 iowrite32(value, chan->xdev->regs + reg);
532}
533
534static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
535 u32 value)
536{
537 dma_write(chan, chan->desc_offset + reg, value);
538}
539
540static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
541{
542 return dma_read(chan, chan->ctrl_offset + reg);
543}
544
545static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
546 u32 value)
547{
548 dma_write(chan, chan->ctrl_offset + reg, value);
549}
550
551static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
552 u32 clr)
553{
554 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
555}
556
557static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
558 u32 set)
559{
560 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
561}
562
563
564
565
566
567
568
569
570
571
572
573
574static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
575 u32 value_lsb, u32 value_msb)
576{
577
578 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
579
580
581 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
582}
583
584static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
585{
586 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
587}
588
589static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
590 dma_addr_t addr)
591{
592 if (chan->ext_addr)
593 dma_writeq(chan, reg, addr);
594 else
595 dma_ctrl_write(chan, reg, addr);
596}
597
598static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
599 struct xilinx_axidma_desc_hw *hw,
600 dma_addr_t buf_addr, size_t sg_used,
601 size_t period_len)
602{
603 if (chan->ext_addr) {
604 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
605 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
606 period_len);
607 } else {
608 hw->buf_addr = buf_addr + sg_used + period_len;
609 }
610}
611
612static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
613 struct xilinx_aximcdma_desc_hw *hw,
614 dma_addr_t buf_addr, size_t sg_used)
615{
616 if (chan->ext_addr) {
617 hw->buf_addr = lower_32_bits(buf_addr + sg_used);
618 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
619 } else {
620 hw->buf_addr = buf_addr + sg_used;
621 }
622}
623
624
625
626
627
628
629
630
631
632
633
634static struct xilinx_vdma_tx_segment *
635xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
636{
637 struct xilinx_vdma_tx_segment *segment;
638 dma_addr_t phys;
639
640 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
641 if (!segment)
642 return NULL;
643
644 segment->phys = phys;
645
646 return segment;
647}
648
649
650
651
652
653
654
655static struct xilinx_cdma_tx_segment *
656xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
657{
658 struct xilinx_cdma_tx_segment *segment;
659 dma_addr_t phys;
660
661 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
662 if (!segment)
663 return NULL;
664
665 segment->phys = phys;
666
667 return segment;
668}
669
670
671
672
673
674
675
676static struct xilinx_axidma_tx_segment *
677xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
678{
679 struct xilinx_axidma_tx_segment *segment = NULL;
680 unsigned long flags;
681
682 spin_lock_irqsave(&chan->lock, flags);
683 if (!list_empty(&chan->free_seg_list)) {
684 segment = list_first_entry(&chan->free_seg_list,
685 struct xilinx_axidma_tx_segment,
686 node);
687 list_del(&segment->node);
688 }
689 spin_unlock_irqrestore(&chan->lock, flags);
690
691 if (!segment)
692 dev_dbg(chan->dev, "Could not find free tx segment\n");
693
694 return segment;
695}
696
697
698
699
700
701
702
703static struct xilinx_aximcdma_tx_segment *
704xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
705{
706 struct xilinx_aximcdma_tx_segment *segment = NULL;
707 unsigned long flags;
708
709 spin_lock_irqsave(&chan->lock, flags);
710 if (!list_empty(&chan->free_seg_list)) {
711 segment = list_first_entry(&chan->free_seg_list,
712 struct xilinx_aximcdma_tx_segment,
713 node);
714 list_del(&segment->node);
715 }
716 spin_unlock_irqrestore(&chan->lock, flags);
717
718 return segment;
719}
720
721static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
722{
723 u32 next_desc = hw->next_desc;
724 u32 next_desc_msb = hw->next_desc_msb;
725
726 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
727
728 hw->next_desc = next_desc;
729 hw->next_desc_msb = next_desc_msb;
730}
731
732static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
733{
734 u32 next_desc = hw->next_desc;
735 u32 next_desc_msb = hw->next_desc_msb;
736
737 memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
738
739 hw->next_desc = next_desc;
740 hw->next_desc_msb = next_desc_msb;
741}
742
743
744
745
746
747
748static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
749 struct xilinx_axidma_tx_segment *segment)
750{
751 xilinx_dma_clean_hw_desc(&segment->hw);
752
753 list_add_tail(&segment->node, &chan->free_seg_list);
754}
755
756
757
758
759
760
761static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
762 struct xilinx_aximcdma_tx_segment *
763 segment)
764{
765 xilinx_mcdma_clean_hw_desc(&segment->hw);
766
767 list_add_tail(&segment->node, &chan->free_seg_list);
768}
769
770
771
772
773
774
775static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
776 struct xilinx_cdma_tx_segment *segment)
777{
778 dma_pool_free(chan->desc_pool, segment, segment->phys);
779}
780
781
782
783
784
785
786static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
787 struct xilinx_vdma_tx_segment *segment)
788{
789 dma_pool_free(chan->desc_pool, segment, segment->phys);
790}
791
792
793
794
795
796
797
798static struct xilinx_dma_tx_descriptor *
799xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
800{
801 struct xilinx_dma_tx_descriptor *desc;
802
803 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
804 if (!desc)
805 return NULL;
806
807 INIT_LIST_HEAD(&desc->segments);
808
809 return desc;
810}
811
812
813
814
815
816
817static void
818xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
819 struct xilinx_dma_tx_descriptor *desc)
820{
821 struct xilinx_vdma_tx_segment *segment, *next;
822 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
823 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
824 struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
825
826 if (!desc)
827 return;
828
829 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
830 list_for_each_entry_safe(segment, next, &desc->segments, node) {
831 list_del(&segment->node);
832 xilinx_vdma_free_tx_segment(chan, segment);
833 }
834 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
835 list_for_each_entry_safe(cdma_segment, cdma_next,
836 &desc->segments, node) {
837 list_del(&cdma_segment->node);
838 xilinx_cdma_free_tx_segment(chan, cdma_segment);
839 }
840 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
841 list_for_each_entry_safe(axidma_segment, axidma_next,
842 &desc->segments, node) {
843 list_del(&axidma_segment->node);
844 xilinx_dma_free_tx_segment(chan, axidma_segment);
845 }
846 } else {
847 list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
848 &desc->segments, node) {
849 list_del(&aximcdma_segment->node);
850 xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
851 }
852 }
853
854 kfree(desc);
855}
856
857
858
859
860
861
862
863
864static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
865 struct list_head *list)
866{
867 struct xilinx_dma_tx_descriptor *desc, *next;
868
869 list_for_each_entry_safe(desc, next, list, node) {
870 list_del(&desc->node);
871 xilinx_dma_free_tx_descriptor(chan, desc);
872 }
873}
874
875
876
877
878
879static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
880{
881 unsigned long flags;
882
883 spin_lock_irqsave(&chan->lock, flags);
884
885 xilinx_dma_free_desc_list(chan, &chan->pending_list);
886 xilinx_dma_free_desc_list(chan, &chan->done_list);
887 xilinx_dma_free_desc_list(chan, &chan->active_list);
888
889 spin_unlock_irqrestore(&chan->lock, flags);
890}
891
892
893
894
895
896static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
897{
898 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
899 unsigned long flags;
900
901 dev_dbg(chan->dev, "Free all channel resources.\n");
902
903 xilinx_dma_free_descriptors(chan);
904
905 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
906 spin_lock_irqsave(&chan->lock, flags);
907 INIT_LIST_HEAD(&chan->free_seg_list);
908 spin_unlock_irqrestore(&chan->lock, flags);
909
910
911 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
912 XILINX_DMA_NUM_DESCS, chan->seg_v,
913 chan->seg_p);
914
915
916 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
917 chan->cyclic_seg_v, chan->cyclic_seg_p);
918 }
919
920 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
921 spin_lock_irqsave(&chan->lock, flags);
922 INIT_LIST_HEAD(&chan->free_seg_list);
923 spin_unlock_irqrestore(&chan->lock, flags);
924
925
926 dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
927 XILINX_DMA_NUM_DESCS, chan->seg_mv,
928 chan->seg_p);
929 }
930
931 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
932 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
933 dma_pool_destroy(chan->desc_pool);
934 chan->desc_pool = NULL;
935 }
936
937}
938
939
940
941
942
943
944
945
946static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
947 struct xilinx_dma_tx_descriptor *desc)
948{
949 struct xilinx_cdma_tx_segment *cdma_seg;
950 struct xilinx_axidma_tx_segment *axidma_seg;
951 struct xilinx_cdma_desc_hw *cdma_hw;
952 struct xilinx_axidma_desc_hw *axidma_hw;
953 struct list_head *entry;
954 u32 residue = 0;
955
956 list_for_each(entry, &desc->segments) {
957 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
958 cdma_seg = list_entry(entry,
959 struct xilinx_cdma_tx_segment,
960 node);
961 cdma_hw = &cdma_seg->hw;
962 residue += (cdma_hw->control - cdma_hw->status) &
963 chan->xdev->max_buffer_len;
964 } else {
965 axidma_seg = list_entry(entry,
966 struct xilinx_axidma_tx_segment,
967 node);
968 axidma_hw = &axidma_seg->hw;
969 residue += (axidma_hw->control - axidma_hw->status) &
970 chan->xdev->max_buffer_len;
971 }
972 }
973
974 return residue;
975}
976
977
978
979
980
981
982
983static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
984 struct xilinx_dma_tx_descriptor *desc,
985 unsigned long *flags)
986{
987 dma_async_tx_callback callback;
988 void *callback_param;
989
990 callback = desc->async_tx.callback;
991 callback_param = desc->async_tx.callback_param;
992 if (callback) {
993 spin_unlock_irqrestore(&chan->lock, *flags);
994 callback(callback_param);
995 spin_lock_irqsave(&chan->lock, *flags);
996 }
997}
998
999
1000
1001
1002
1003static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
1004{
1005 struct xilinx_dma_tx_descriptor *desc, *next;
1006 unsigned long flags;
1007
1008 spin_lock_irqsave(&chan->lock, flags);
1009
1010 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
1011 struct dmaengine_result result;
1012
1013 if (desc->cyclic) {
1014 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
1015 break;
1016 }
1017
1018
1019 list_del(&desc->node);
1020
1021 if (unlikely(desc->err)) {
1022 if (chan->direction == DMA_DEV_TO_MEM)
1023 result.result = DMA_TRANS_READ_FAILED;
1024 else
1025 result.result = DMA_TRANS_WRITE_FAILED;
1026 } else {
1027 result.result = DMA_TRANS_NOERROR;
1028 }
1029
1030 result.residue = desc->residue;
1031
1032
1033 spin_unlock_irqrestore(&chan->lock, flags);
1034 dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
1035 spin_lock_irqsave(&chan->lock, flags);
1036
1037
1038 dma_run_dependencies(&desc->async_tx);
1039 xilinx_dma_free_tx_descriptor(chan, desc);
1040 }
1041
1042 spin_unlock_irqrestore(&chan->lock, flags);
1043}
1044
1045
1046
1047
1048
1049static void xilinx_dma_do_tasklet(unsigned long data)
1050{
1051 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
1052
1053 xilinx_dma_chan_desc_cleanup(chan);
1054}
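
/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */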
1062static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
1063{
1064 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1065 int i;
1066
1067
1068 if (chan->desc_pool)
1069 return 0;
1070
1071
1072
1073
1074
1075 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1076
1077 chan->seg_v = dma_alloc_coherent(chan->dev,
1078 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
1079 &chan->seg_p, GFP_KERNEL);
1080 if (!chan->seg_v) {
1081 dev_err(chan->dev,
1082 "unable to allocate channel %d descriptors\n",
1083 chan->id);
1084 return -ENOMEM;
1085 }

		/*
		 * For cyclic DMA mode we need to program the tail descriptor
		 * register with a value that is not part of the BD chain, so
		 * allocate a dedicated segment during channel allocation for
		 * programming the tail descriptor.
		 */
1092 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
1093 sizeof(*chan->cyclic_seg_v),
1094 &chan->cyclic_seg_p,
1095 GFP_KERNEL);
1096 if (!chan->cyclic_seg_v) {
1097 dev_err(chan->dev,
1098 "unable to allocate desc segment for cyclic DMA\n");
1099 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
1100 XILINX_DMA_NUM_DESCS, chan->seg_v,
1101 chan->seg_p);
1102 return -ENOMEM;
1103 }
1104 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
1105
1106 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1107 chan->seg_v[i].hw.next_desc =
1108 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1109 ((i + 1) % XILINX_DMA_NUM_DESCS));
1110 chan->seg_v[i].hw.next_desc_msb =
1111 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1112 ((i + 1) % XILINX_DMA_NUM_DESCS));
1113 chan->seg_v[i].phys = chan->seg_p +
1114 sizeof(*chan->seg_v) * i;
1115 list_add_tail(&chan->seg_v[i].node,
1116 &chan->free_seg_list);
1117 }
1118 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
1119
1120 chan->seg_mv = dma_alloc_coherent(chan->dev,
1121 sizeof(*chan->seg_mv) *
1122 XILINX_DMA_NUM_DESCS,
1123 &chan->seg_p, GFP_KERNEL);
1124 if (!chan->seg_mv) {
1125 dev_err(chan->dev,
1126 "unable to allocate channel %d descriptors\n",
1127 chan->id);
1128 return -ENOMEM;
1129 }
1130 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1131 chan->seg_mv[i].hw.next_desc =
1132 lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1133 ((i + 1) % XILINX_DMA_NUM_DESCS));
1134 chan->seg_mv[i].hw.next_desc_msb =
1135 upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1136 ((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_mv[i].phys = chan->seg_p +
					       sizeof(*chan->seg_mv) * i;
1139 list_add_tail(&chan->seg_mv[i].node,
1140 &chan->free_seg_list);
1141 }
1142 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1143 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
1144 chan->dev,
1145 sizeof(struct xilinx_cdma_tx_segment),
1146 __alignof__(struct xilinx_cdma_tx_segment),
1147 0);
1148 } else {
1149 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
1150 chan->dev,
1151 sizeof(struct xilinx_vdma_tx_segment),
1152 __alignof__(struct xilinx_vdma_tx_segment),
1153 0);
1154 }
1155
1156 if (!chan->desc_pool &&
1157 ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
1158 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
1159 dev_err(chan->dev,
1160 "unable to allocate channel %d descriptor pool\n",
1161 chan->id);
1162 return -ENOMEM;
1163 }
1164
1165 dma_cookie_init(dchan);
1166
1167 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1168
1169
1170
1171 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1172 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1173 }
1174
1175 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
1176 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1177 XILINX_CDMA_CR_SGMODE);
1178
1179 return 0;
1180}
1190static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
1191 int size, int done)
1192{
1193 size_t copy;
1194
1195 copy = min_t(size_t, size - done,
1196 chan->xdev->max_buffer_len);
1197
1198 if ((copy + done < size) &&
1199 chan->xdev->common.copy_align) {
1200
1201
1202
1203
1204 copy = rounddown(copy,
1205 (1 << chan->xdev->common.copy_align));
1206 }
1207 return copy;
1208}
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1219 dma_cookie_t cookie,
1220 struct dma_tx_state *txstate)
1221{
1222 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1223 struct xilinx_dma_tx_descriptor *desc;
1224 enum dma_status ret;
1225 unsigned long flags;
1226 u32 residue = 0;
1227
1228 ret = dma_cookie_status(dchan, cookie, txstate);
1229 if (ret == DMA_COMPLETE || !txstate)
1230 return ret;
1231
1232 spin_lock_irqsave(&chan->lock, flags);
1233 if (!list_empty(&chan->active_list)) {
1234 desc = list_last_entry(&chan->active_list,
1235 struct xilinx_dma_tx_descriptor, node);
1236
1237
1238
1239
1240 if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
1241 residue = xilinx_dma_get_residue(chan, desc);
1242 }
1243 spin_unlock_irqrestore(&chan->lock, flags);
1244
1245 dma_set_residue(txstate, residue);
1246
1247 return ret;
1248}
1249
1250
1251
1252
1253
1254
1255
1256static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1257{
1258 u32 val;
1259
1260 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1261
1262
1263 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1264 val & XILINX_DMA_DMASR_HALTED, 0,
1265 XILINX_DMA_LOOP_COUNT);
1266}
1267
1268
1269
1270
1271
1272
1273
1274static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1275{
1276 u32 val;
1277
1278 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1279 val & XILINX_DMA_DMASR_IDLE, 0,
1280 XILINX_DMA_LOOP_COUNT);
1281}
1282
1283
1284
1285
1286
1287static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1288{
1289 int err;
1290 u32 val;
1291
1292 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1293
1294
1295 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1296 !(val & XILINX_DMA_DMASR_HALTED), 0,
1297 XILINX_DMA_LOOP_COUNT);
1298
1299 if (err) {
1300 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1301 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1302
1303 chan->err = true;
1304 }
1305}
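
/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */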
1311static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1312{
1313 struct xilinx_vdma_config *config = &chan->config;
1314 struct xilinx_dma_tx_descriptor *desc;
1315 u32 reg, j;
1316 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1317 int i = 0;
1318
1319
1320 if (chan->err)
1321 return;
1322
1323 if (!chan->idle)
1324 return;
1325
1326 if (list_empty(&chan->pending_list))
1327 return;
1328
1329 desc = list_first_entry(&chan->pending_list,
1330 struct xilinx_dma_tx_descriptor, node);
1331
1332
1333 if (chan->has_vflip) {
1334 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1335 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1336 reg |= config->vflip_en;
1337 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1338 reg);
1339 }
1340
1341 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1342
1343 if (config->frm_cnt_en)
1344 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1345 else
1346 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1347
1348
1349 if (config->park)
1350 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1351 else
1352 reg |= XILINX_DMA_DMACR_CIRC_EN;
1353
1354 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1355
1356 j = chan->desc_submitcount;
1357 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1358 if (chan->direction == DMA_MEM_TO_DEV) {
1359 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1360 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1361 } else {
1362 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1363 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1364 }
1365 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1366
1367
1368 xilinx_dma_start(chan);
1369
1370 if (chan->err)
1371 return;
1372
1373
1374 if (chan->desc_submitcount < chan->num_frms)
1375 i = chan->desc_submitcount;
1376
1377 list_for_each_entry(segment, &desc->segments, node) {
1378 if (chan->ext_addr)
1379 vdma_desc_write_64(chan,
1380 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1381 segment->hw.buf_addr,
1382 segment->hw.buf_addr_msb);
1383 else
1384 vdma_desc_write(chan,
1385 XILINX_VDMA_REG_START_ADDRESS(i++),
1386 segment->hw.buf_addr);
1387
1388 last = segment;
1389 }
1390
1391 if (!last)
1392 return;
1393
1394
1395 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1396 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1397 last->hw.stride);
1398 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1399
1400 chan->desc_submitcount++;
1401 chan->desc_pendingcount--;
1402 list_del(&desc->node);
1403 list_add_tail(&desc->node, &chan->active_list);
1404 if (chan->desc_submitcount == chan->num_frms)
1405 chan->desc_submitcount = 0;
1406
1407 chan->idle = false;
1408}
1409
1410
1411
1412
1413
1414static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1415{
1416 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1417 struct xilinx_cdma_tx_segment *tail_segment;
1418 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1419
1420 if (chan->err)
1421 return;
1422
1423 if (!chan->idle)
1424 return;
1425
1426 if (list_empty(&chan->pending_list))
1427 return;
1428
1429 head_desc = list_first_entry(&chan->pending_list,
1430 struct xilinx_dma_tx_descriptor, node);
1431 tail_desc = list_last_entry(&chan->pending_list,
1432 struct xilinx_dma_tx_descriptor, node);
1433 tail_segment = list_last_entry(&tail_desc->segments,
1434 struct xilinx_cdma_tx_segment, node);
1435
1436 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1437 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1438 ctrl_reg |= chan->desc_pendingcount <<
1439 XILINX_DMA_CR_COALESCE_SHIFT;
1440 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1441 }
1442
1443 if (chan->has_sg) {
1444 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1445 XILINX_CDMA_CR_SGMODE);
1446
1447 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1448 XILINX_CDMA_CR_SGMODE);
1449
1450 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1451 head_desc->async_tx.phys);
1452
1453
1454 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1455 tail_segment->phys);
1456 } else {
1457
1458 struct xilinx_cdma_tx_segment *segment;
1459 struct xilinx_cdma_desc_hw *hw;
1460
1461 segment = list_first_entry(&head_desc->segments,
1462 struct xilinx_cdma_tx_segment,
1463 node);
1464
1465 hw = &segment->hw;
1466
1467 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1468 xilinx_prep_dma_addr_t(hw->src_addr));
1469 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1470 xilinx_prep_dma_addr_t(hw->dest_addr));
1471
1472
1473 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1474 hw->control & chan->xdev->max_buffer_len);
1475 }
1476
1477 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1478 chan->desc_pendingcount = 0;
1479 chan->idle = false;
1480}
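
/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */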
1486static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1487{
1488 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1489 struct xilinx_axidma_tx_segment *tail_segment;
1490 u32 reg;
1491
1492 if (chan->err)
1493 return;
1494
1495 if (list_empty(&chan->pending_list))
1496 return;
1497
1498 if (!chan->idle)
1499 return;
1500
1501 head_desc = list_first_entry(&chan->pending_list,
1502 struct xilinx_dma_tx_descriptor, node);
1503 tail_desc = list_last_entry(&chan->pending_list,
1504 struct xilinx_dma_tx_descriptor, node);
1505 tail_segment = list_last_entry(&tail_desc->segments,
1506 struct xilinx_axidma_tx_segment, node);
1507
1508 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1509
1510 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1511 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1512 reg |= chan->desc_pendingcount <<
1513 XILINX_DMA_CR_COALESCE_SHIFT;
1514 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1515 }
1516
1517 if (chan->has_sg)
1518 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1519 head_desc->async_tx.phys);
1520
1521 xilinx_dma_start(chan);
1522
1523 if (chan->err)
1524 return;
1525
1526
1527 if (chan->has_sg) {
1528 if (chan->cyclic)
1529 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1530 chan->cyclic_seg_v->phys);
1531 else
1532 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1533 tail_segment->phys);
1534 } else {
1535 struct xilinx_axidma_tx_segment *segment;
1536 struct xilinx_axidma_desc_hw *hw;
1537
1538 segment = list_first_entry(&head_desc->segments,
1539 struct xilinx_axidma_tx_segment,
1540 node);
1541 hw = &segment->hw;
1542
1543 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1544 xilinx_prep_dma_addr_t(hw->buf_addr));
1545
1546
1547 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1548 hw->control & chan->xdev->max_buffer_len);
1549 }
1550
1551 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1552 chan->desc_pendingcount = 0;
1553 chan->idle = false;
1554}
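
/**
 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
 * @chan: Driver specific channel struct pointer
 */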
1560static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
1561{
1562 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1563 struct xilinx_axidma_tx_segment *tail_segment;
1564 u32 reg;
1571 if (chan->err)
1572 return;
1573
1574 if (!chan->idle)
1575 return;
1576
1577 if (list_empty(&chan->pending_list))
1578 return;
1579
1580 head_desc = list_first_entry(&chan->pending_list,
1581 struct xilinx_dma_tx_descriptor, node);
1582 tail_desc = list_last_entry(&chan->pending_list,
1583 struct xilinx_dma_tx_descriptor, node);
1584 tail_segment = list_last_entry(&tail_desc->segments,
1585 struct xilinx_axidma_tx_segment, node);
1586
1587 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1588
1589 if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
1590 reg &= ~XILINX_MCDMA_COALESCE_MASK;
1591 reg |= chan->desc_pendingcount <<
1592 XILINX_MCDMA_COALESCE_SHIFT;
1593 }
1594
1595 reg |= XILINX_MCDMA_IRQ_ALL_MASK;
1596 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1597
1598
1599 xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
1600 head_desc->async_tx.phys);
1601
1602
1603 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
1604 reg |= BIT(chan->tdest);
1605 dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
1606
1607
1608 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1609 reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
1610 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1611
1612 xilinx_dma_start(chan);
1613
1614 if (chan->err)
1615 return;
1616
1617
1618 xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
1619 tail_segment->phys);
1620
1621 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1622 chan->desc_pendingcount = 0;
1623 chan->idle = false;
1624}
1625
1626
1627
1628
1629
1630static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1631{
1632 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1633 unsigned long flags;
1634
1635 spin_lock_irqsave(&chan->lock, flags);
1636 chan->start_transfer(chan);
1637 spin_unlock_irqrestore(&chan->lock, flags);
1638}
1639
1640
1641
1642
1643
1644
1645
1646static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1647{
1648 struct xilinx_dma_tx_descriptor *desc, *next;
1649
1650
1651 if (list_empty(&chan->active_list))
1652 return;
1653
1654 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1655 if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1656 XDMA_TYPE_VDMA)
1657 desc->residue = xilinx_dma_get_residue(chan, desc);
1658 else
1659 desc->residue = 0;
1660 desc->err = chan->err;
1661
1662 list_del(&desc->node);
1663 if (!desc->cyclic)
1664 dma_cookie_complete(&desc->async_tx);
1665 list_add_tail(&desc->node, &chan->done_list);
1666 }
1667}
1668
1669
1670
1671
1672
1673
1674
1675static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1676{
1677 int err;
1678 u32 tmp;
1679
1680 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1681
1682
1683 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1684 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1685 XILINX_DMA_LOOP_COUNT);
1686
1687 if (err) {
1688 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1689 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1690 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1691 return -ETIMEDOUT;
1692 }
1693
1694 chan->err = false;
1695 chan->idle = true;
1696 chan->desc_pendingcount = 0;
1697 chan->desc_submitcount = 0;
1698
1699 return err;
1700}
1701
1702
1703
1704
1705
1706
1707
1708static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1709{
1710 int err;
1711
1712
1713 err = xilinx_dma_reset(chan);
1714 if (err)
1715 return err;
1716
1717
1718 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1719 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1720
1721 return 0;
1722}
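
/**
 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx MCDMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */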
1731static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
1732{
1733 struct xilinx_dma_chan *chan = data;
1734 u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
1735
1736 if (chan->direction == DMA_DEV_TO_MEM)
1737 ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
1738 else
1739 ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
1740
1741
1742 chan_sermask = dma_ctrl_read(chan, ser_offset);
1743 chan_id = ffs(chan_sermask);
1744
1745 if (!chan_id)
1746 return IRQ_NONE;
1747
1748 if (chan->direction == DMA_DEV_TO_MEM)
1749 chan_offset = chan->xdev->dma_config->max_channels / 2;
1750
1751 chan_offset = chan_offset + (chan_id - 1);
1752 chan = chan->xdev->chan[chan_offset];
1753
1754 status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
1755 if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
1756 return IRQ_NONE;
1757
1758 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
1759 status & XILINX_MCDMA_IRQ_ALL_MASK);
1760
1761 if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
1762 dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
1763 chan,
1764 dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
1765 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
1766 (chan->tdest)),
1767 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
1768 (chan->tdest)));
1769 chan->err = true;
1770 }
1771
1772 if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
1773
1774
1775
1776
1777 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1778 }
1779
1780 if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
1781 spin_lock(&chan->lock);
1782 xilinx_dma_complete_descriptor(chan);
1783 chan->idle = true;
1784 chan->start_transfer(chan);
1785 spin_unlock(&chan->lock);
1786 }
1787
1788 tasklet_schedule(&chan->tasklet);
1789 return IRQ_HANDLED;
1790}
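
/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */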
1799static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1800{
1801 struct xilinx_dma_chan *chan = data;
1802 u32 status;
1803
1804
1805 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1806 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1807 return IRQ_NONE;
1808
1809 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1810 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1811
1812 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error has occurred. If C_FLUSH_ON_FSYNC is enabled and
		 * the error is recoverable, ignore it; the channel flushes
		 * on the next frame sync. Otherwise flag the error.
		 */
1820 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1821
1822 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1823 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1824
1825 if (!chan->flush_on_fsync ||
1826 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1827 dev_err(chan->dev,
1828 "Channel %p has errors %x, cdr %x tdr %x\n",
1829 chan, errors,
1830 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1831 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1832 chan->err = true;
1833 }
1834 }
1835
1836 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1837
1838
1839
1840
1841 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1842 }
1843
1844 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1845 spin_lock(&chan->lock);
1846 xilinx_dma_complete_descriptor(chan);
1847 chan->idle = true;
1848 chan->start_transfer(chan);
1849 spin_unlock(&chan->lock);
1850 }
1851
1852 tasklet_schedule(&chan->tasklet);
1853 return IRQ_HANDLED;
1854}
1855
1856
1857
1858
1859
1860
1861static void append_desc_queue(struct xilinx_dma_chan *chan,
1862 struct xilinx_dma_tx_descriptor *desc)
1863{
1864 struct xilinx_vdma_tx_segment *tail_segment;
1865 struct xilinx_dma_tx_descriptor *tail_desc;
1866 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1867 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1868
1869 if (list_empty(&chan->pending_list))
1870 goto append;
1871
1872
1873
1874
1875
1876 tail_desc = list_last_entry(&chan->pending_list,
1877 struct xilinx_dma_tx_descriptor, node);
1878 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1879 tail_segment = list_last_entry(&tail_desc->segments,
1880 struct xilinx_vdma_tx_segment,
1881 node);
1882 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1883 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1884 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1885 struct xilinx_cdma_tx_segment,
1886 node);
1887 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1888 } else {
1889 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1890 struct xilinx_axidma_tx_segment,
1891 node);
1892 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1893 }
1894
1895
1896
1897
1898
1899append:
1900 list_add_tail(&desc->node, &chan->pending_list);
1901 chan->desc_pendingcount++;
1902
1903 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1904 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1905 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1906 chan->desc_pendingcount = chan->num_frms;
1907 }
1908}
1916static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1917{
1918 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1919 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1920 dma_cookie_t cookie;
1921 unsigned long flags;
1922 int err;
1923
1924 if (chan->cyclic) {
1925 xilinx_dma_free_tx_descriptor(chan, desc);
1926 return -EBUSY;
1927 }
1928
1929 if (chan->err) {
1930
1931
1932
1933
1934 err = xilinx_dma_chan_reset(chan);
1935 if (err < 0)
1936 return err;
1937 }
1938
1939 spin_lock_irqsave(&chan->lock, flags);
1940
1941 cookie = dma_cookie_assign(tx);
1942
1943
1944 append_desc_queue(chan, desc);
1945
1946 if (desc->cyclic)
1947 chan->cyclic = true;
1948
1949 spin_unlock_irqrestore(&chan->lock, flags);
1950
1951 return cookie;
1952}
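
/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */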
1963static struct dma_async_tx_descriptor *
1964xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1965 struct dma_interleaved_template *xt,
1966 unsigned long flags)
1967{
1968 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1969 struct xilinx_dma_tx_descriptor *desc;
1970 struct xilinx_vdma_tx_segment *segment;
1971 struct xilinx_vdma_desc_hw *hw;
1972
1973 if (!is_slave_direction(xt->dir))
1974 return NULL;
1975
1976 if (!xt->numf || !xt->sgl[0].size)
1977 return NULL;
1978
1979 if (xt->frame_size != 1)
1980 return NULL;
1981
1982
1983 desc = xilinx_dma_alloc_tx_descriptor(chan);
1984 if (!desc)
1985 return NULL;
1986
1987 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1988 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1989 async_tx_ack(&desc->async_tx);
1990
1991
1992 segment = xilinx_vdma_alloc_tx_segment(chan);
1993 if (!segment)
1994 goto error;
1995
1996
1997 hw = &segment->hw;
1998 hw->vsize = xt->numf;
1999 hw->hsize = xt->sgl[0].size;
2000 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
2001 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
2002 hw->stride |= chan->config.frm_dly <<
2003 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
2004
2005 if (xt->dir != DMA_MEM_TO_DEV) {
2006 if (chan->ext_addr) {
2007 hw->buf_addr = lower_32_bits(xt->dst_start);
2008 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
2009 } else {
2010 hw->buf_addr = xt->dst_start;
2011 }
2012 } else {
2013 if (chan->ext_addr) {
2014 hw->buf_addr = lower_32_bits(xt->src_start);
2015 hw->buf_addr_msb = upper_32_bits(xt->src_start);
2016 } else {
2017 hw->buf_addr = xt->src_start;
2018 }
2019 }
2020
2021
2022 list_add_tail(&segment->node, &desc->segments);
2023
2024
2025 segment = list_first_entry(&desc->segments,
2026 struct xilinx_vdma_tx_segment, node);
2027 desc->async_tx.phys = segment->phys;
2028
2029 return &desc->async_tx;
2030
2031error:
2032 xilinx_dma_free_tx_descriptor(chan, desc);
2033 return NULL;
2034}
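
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */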
2046static struct dma_async_tx_descriptor *
2047xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
2048 dma_addr_t dma_src, size_t len, unsigned long flags)
2049{
2050 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2051 struct xilinx_dma_tx_descriptor *desc;
2052 struct xilinx_cdma_tx_segment *segment;
2053 struct xilinx_cdma_desc_hw *hw;
2054
2055 if (!len || len > chan->xdev->max_buffer_len)
2056 return NULL;
2057
2058 desc = xilinx_dma_alloc_tx_descriptor(chan);
2059 if (!desc)
2060 return NULL;
2061
2062 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2063 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2064
2065
2066 segment = xilinx_cdma_alloc_tx_segment(chan);
2067 if (!segment)
2068 goto error;
2069
2070 hw = &segment->hw;
2071 hw->control = len;
2072 hw->src_addr = dma_src;
2073 hw->dest_addr = dma_dst;
2074 if (chan->ext_addr) {
2075 hw->src_addr_msb = upper_32_bits(dma_src);
2076 hw->dest_addr_msb = upper_32_bits(dma_dst);
2077 }
2078
2079
2080 list_add_tail(&segment->node, &desc->segments);
2081
2082 desc->async_tx.phys = segment->phys;
2083 hw->next_desc = segment->phys;
2084
2085 return &desc->async_tx;
2086
2087error:
2088 xilinx_dma_free_tx_descriptor(chan, desc);
2089 return NULL;
2090}
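
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */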
2103static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
2104 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
2105 enum dma_transfer_direction direction, unsigned long flags,
2106 void *context)
2107{
2108 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2109 struct xilinx_dma_tx_descriptor *desc;
2110 struct xilinx_axidma_tx_segment *segment = NULL;
2111 u32 *app_w = (u32 *)context;
2112 struct scatterlist *sg;
2113 size_t copy;
2114 size_t sg_used;
2115 unsigned int i;
2116
2117 if (!is_slave_direction(direction))
2118 return NULL;
2119
2120
2121 desc = xilinx_dma_alloc_tx_descriptor(chan);
2122 if (!desc)
2123 return NULL;
2124
2125 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2126 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2127
2128
2129 for_each_sg(sgl, sg, sg_len, i) {
2130 sg_used = 0;
2131
2132
2133 while (sg_used < sg_dma_len(sg)) {
2134 struct xilinx_axidma_desc_hw *hw;
2135
2136
2137 segment = xilinx_axidma_alloc_tx_segment(chan);
2138 if (!segment)
2139 goto error;
2140
2141
2142
2143
2144
2145 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
2146 sg_used);
2147 hw = &segment->hw;
2148
2149
2150 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
2151 sg_used, 0);
2152
2153 hw->control = copy;
2154
2155 if (chan->direction == DMA_MEM_TO_DEV) {
2156 if (app_w)
2157 memcpy(hw->app, app_w, sizeof(u32) *
2158 XILINX_DMA_NUM_APP_WORDS);
2159 }
2160
2161 sg_used += copy;
2162
2163
2164
2165
2166
2167 list_add_tail(&segment->node, &desc->segments);
2168 }
2169 }
2170
2171 segment = list_first_entry(&desc->segments,
2172 struct xilinx_axidma_tx_segment, node);
2173 desc->async_tx.phys = segment->phys;
2174
2175
2176 if (chan->direction == DMA_MEM_TO_DEV) {
2177 segment->hw.control |= XILINX_DMA_BD_SOP;
2178 segment = list_last_entry(&desc->segments,
2179 struct xilinx_axidma_tx_segment,
2180 node);
2181 segment->hw.control |= XILINX_DMA_BD_EOP;
2182 }
2183
2184 return &desc->async_tx;
2185
2186error:
2187 xilinx_dma_free_tx_descriptor(chan, desc);
2188 return NULL;
2189}
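
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA
 *	transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of an individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */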
2202static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
2203 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
2204 size_t period_len, enum dma_transfer_direction direction,
2205 unsigned long flags)
2206{
2207 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2208 struct xilinx_dma_tx_descriptor *desc;
2209 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
2210 size_t copy, sg_used;
2211 unsigned int num_periods;
2212 int i;
2213 u32 reg;
2214
2215 if (!period_len)
2216 return NULL;
2217
2218 num_periods = buf_len / period_len;
2219
2220 if (!num_periods)
2221 return NULL;
2222
2223 if (!is_slave_direction(direction))
2224 return NULL;
2225
2226
2227 desc = xilinx_dma_alloc_tx_descriptor(chan);
2228 if (!desc)
2229 return NULL;
2230
2231 chan->direction = direction;
2232 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2233 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2234
2235 for (i = 0; i < num_periods; ++i) {
2236 sg_used = 0;
2237
2238 while (sg_used < period_len) {
2239 struct xilinx_axidma_desc_hw *hw;
2240
2241
2242 segment = xilinx_axidma_alloc_tx_segment(chan);
2243 if (!segment)
2244 goto error;
2245
2246
2247
2248
2249
2250 copy = xilinx_dma_calc_copysize(chan, period_len,
2251 sg_used);
2252 hw = &segment->hw;
2253 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
2254 period_len * i);
2255 hw->control = copy;
2256
2257 if (prev)
2258 prev->hw.next_desc = segment->phys;
2259
2260 prev = segment;
2261 sg_used += copy;
2262
2263
2264
2265
2266
2267 list_add_tail(&segment->node, &desc->segments);
2268 }
2269 }
2270
2271 head_segment = list_first_entry(&desc->segments,
2272 struct xilinx_axidma_tx_segment, node);
2273 desc->async_tx.phys = head_segment->phys;
2274
2275 desc->cyclic = true;
2276 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2277 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2278 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2279
2280 segment = list_last_entry(&desc->segments,
2281 struct xilinx_axidma_tx_segment,
2282 node);
2283 segment->hw.next_desc = (u32) head_segment->phys;
2284
2285
2286 if (direction == DMA_MEM_TO_DEV) {
2287 head_segment->hw.control |= XILINX_DMA_BD_SOP;
2288 segment->hw.control |= XILINX_DMA_BD_EOP;
2289 }
2290
2291 return &desc->async_tx;
2292
2293error:
2294 xilinx_dma_free_tx_descriptor(chan, desc);
2295 return NULL;
2296}
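
/**
 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE
 *	transaction on the AXI MCDMA
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */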
2309static struct dma_async_tx_descriptor *
2310xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
2311 unsigned int sg_len,
2312 enum dma_transfer_direction direction,
2313 unsigned long flags, void *context)
2314{
2315 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2316 struct xilinx_dma_tx_descriptor *desc;
2317 struct xilinx_aximcdma_tx_segment *segment = NULL;
2318 u32 *app_w = (u32 *)context;
2319 struct scatterlist *sg;
2320 size_t copy;
2321 size_t sg_used;
2322 unsigned int i;
2323
2324 if (!is_slave_direction(direction))
2325 return NULL;
2326
2327
2328 desc = xilinx_dma_alloc_tx_descriptor(chan);
2329 if (!desc)
2330 return NULL;
2331
2332 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2333 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2334
2335
2336 for_each_sg(sgl, sg, sg_len, i) {
2337 sg_used = 0;
2338
2339
2340 while (sg_used < sg_dma_len(sg)) {
2341 struct xilinx_aximcdma_desc_hw *hw;
2342
2343
2344 segment = xilinx_aximcdma_alloc_tx_segment(chan);
2345 if (!segment)
2346 goto error;
2347
2348
2349
2350
2351
2352 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
2353 chan->xdev->max_buffer_len);
2354 hw = &segment->hw;
2355
2356
2357 xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
2358 sg_used);
2359 hw->control = copy;
2360
2361 if (chan->direction == DMA_MEM_TO_DEV && app_w) {
2362 memcpy(hw->app, app_w, sizeof(u32) *
2363 XILINX_DMA_NUM_APP_WORDS);
2364 }
2365
2366 sg_used += copy;
2367
2368
2369
2370
2371 list_add_tail(&segment->node, &desc->segments);
2372 }
2373 }
2374
2375 segment = list_first_entry(&desc->segments,
2376 struct xilinx_aximcdma_tx_segment, node);
2377 desc->async_tx.phys = segment->phys;
2378
2379
2380 if (chan->direction == DMA_MEM_TO_DEV) {
2381 segment->hw.control |= XILINX_MCDMA_BD_SOP;
2382 segment = list_last_entry(&desc->segments,
2383 struct xilinx_aximcdma_tx_segment,
2384 node);
2385 segment->hw.control |= XILINX_MCDMA_BD_EOP;
2386 }
2387
2388 return &desc->async_tx;
2389
2390error:
2391 xilinx_dma_free_tx_descriptor(chan, desc);
2392
2393 return NULL;
2394}
2402static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2403{
2404 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2405 u32 reg;
2406 int err;
2407
2408 if (!chan->cyclic) {
2409 err = chan->stop_transfer(chan);
2410 if (err) {
2411 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2412 chan, dma_ctrl_read(chan,
2413 XILINX_DMA_REG_DMASR));
2414 chan->err = true;
2415 }
2416 }
2417
2418 xilinx_dma_chan_reset(chan);
2419
2420 xilinx_dma_free_descriptors(chan);
2421 chan->idle = true;
2422
2423 if (chan->cyclic) {
2424 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2425 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2426 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2427 chan->cyclic = false;
2428 }
2429
2430 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2431 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2432 XILINX_CDMA_CR_SGMODE);
2433
2434 return 0;
2435}
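
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */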
2450int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2451 struct xilinx_vdma_config *cfg)
2452{
2453 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2454 u32 dmacr;
2455
2456 if (cfg->reset)
2457 return xilinx_dma_chan_reset(chan);
2458
2459 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2460
2461 chan->config.frm_dly = cfg->frm_dly;
2462 chan->config.park = cfg->park;
2463
 /* Genlock settings */
2465 chan->config.gen_lock = cfg->gen_lock;
2466 chan->config.master = cfg->master;
2467
2468 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2469 if (cfg->gen_lock && chan->genlock) {
2470 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2471 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2472 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2473 }
2474
2475 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2476 chan->config.vflip_en = cfg->vflip_en;
2477
2478 if (cfg->park)
2479 chan->config.park_frm = cfg->park_frm;
2480 else
2481 chan->config.park_frm = -1;
2482
2483 chan->config.coalesc = cfg->coalesc;
2484 chan->config.delay = cfg->delay;
2485
2486 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2487 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2488 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2489 chan->config.coalesc = cfg->coalesc;
2490 }
2491
2492 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2493 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2494 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2495 chan->config.delay = cfg->delay;
2496 }
2497
 /* FSync source selection */
2499 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2500 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2501
2502 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2503
2504 return 0;
2505}
2506EXPORT_SYMBOL(xilinx_vdma_channel_set_config);

/**
 * xilinx_dma_chan_remove - Per channel remove function
 * @chan: Driver specific DMA channel
 */
2516static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2517{
 /* Disable all interrupts */
2519 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2520 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2521
2522 if (chan->irq > 0)
2523 free_irq(chan->irq, chan);
2524
2525 tasklet_kill(&chan->tasklet);
2526
2527 list_del(&chan->common.device_node);
2528}
2529
2530static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2531 struct clk **tx_clk, struct clk **rx_clk,
2532 struct clk **sg_clk, struct clk **tmp_clk)
2533{
2534 int err;
2535
2536 *tmp_clk = NULL;
2537
2538 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2539 if (IS_ERR(*axi_clk)) {
2540 err = PTR_ERR(*axi_clk);
2541 if (err != -EPROBE_DEFER)
2542 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
2543 err);
2544 return err;
2545 }
2546
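 /* The MM2S, S2MM and SG clocks are optional; a missing clock is treated as absent */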
2547 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2548 if (IS_ERR(*tx_clk))
2549 *tx_clk = NULL;
2550
2551 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2552 if (IS_ERR(*rx_clk))
2553 *rx_clk = NULL;
2554
2555 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2556 if (IS_ERR(*sg_clk))
2557 *sg_clk = NULL;
2558
2559 err = clk_prepare_enable(*axi_clk);
2560 if (err) {
2561 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2562 return err;
2563 }
2564
2565 err = clk_prepare_enable(*tx_clk);
2566 if (err) {
2567 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2568 goto err_disable_axiclk;
2569 }
2570
2571 err = clk_prepare_enable(*rx_clk);
2572 if (err) {
2573 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2574 goto err_disable_txclk;
2575 }
2576
2577 err = clk_prepare_enable(*sg_clk);
2578 if (err) {
2579 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2580 goto err_disable_rxclk;
2581 }
2582
2583 return 0;
2584
2585err_disable_rxclk:
2586 clk_disable_unprepare(*rx_clk);
2587err_disable_txclk:
2588 clk_disable_unprepare(*tx_clk);
2589err_disable_axiclk:
2590 clk_disable_unprepare(*axi_clk);
2591
2592 return err;
2593}
2594
2595static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2596 struct clk **dev_clk, struct clk **tmp_clk,
2597 struct clk **tmp1_clk, struct clk **tmp2_clk)
2598{
2599 int err;
2600
2601 *tmp_clk = NULL;
2602 *tmp1_clk = NULL;
2603 *tmp2_clk = NULL;
2604
2605 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2606 if (IS_ERR(*axi_clk)) {
2607 err = PTR_ERR(*axi_clk);
2608 if (err != -EPROBE_DEFER)
2609 dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
2610 err);
2611 return err;
2612 }
2613
2614 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2615 if (IS_ERR(*dev_clk)) {
2616 err = PTR_ERR(*dev_clk);
2617 if (err != -EPROBE_DEFER)
2618 dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
2619 err);
2620 return err;
2621 }
2622
2623 err = clk_prepare_enable(*axi_clk);
2624 if (err) {
2625 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2626 return err;
2627 }
2628
2629 err = clk_prepare_enable(*dev_clk);
2630 if (err) {
2631 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2632 goto err_disable_axiclk;
2633 }
2634
2635 return 0;
2636
2637err_disable_axiclk:
2638 clk_disable_unprepare(*axi_clk);
2639
2640 return err;
2641}
2642
2643static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2644 struct clk **tx_clk, struct clk **txs_clk,
2645 struct clk **rx_clk, struct clk **rxs_clk)
2646{
2647 int err;
2648
2649 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2650 if (IS_ERR(*axi_clk)) {
2651 err = PTR_ERR(*axi_clk);
2652 if (err != -EPROBE_DEFER)
2653 dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
2654 err);
2655 return err;
2656 }
2657
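 /* The MM2S/S2MM AXI and AXI-Stream clocks are optional */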
2658 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2659 if (IS_ERR(*tx_clk))
2660 *tx_clk = NULL;
2661
2662 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2663 if (IS_ERR(*txs_clk))
2664 *txs_clk = NULL;
2665
2666 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2667 if (IS_ERR(*rx_clk))
2668 *rx_clk = NULL;
2669
2670 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2671 if (IS_ERR(*rxs_clk))
2672 *rxs_clk = NULL;
2673
2674 err = clk_prepare_enable(*axi_clk);
2675 if (err) {
2676 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2677 err);
2678 return err;
2679 }
2680
2681 err = clk_prepare_enable(*tx_clk);
2682 if (err) {
2683 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2684 goto err_disable_axiclk;
2685 }
2686
2687 err = clk_prepare_enable(*txs_clk);
2688 if (err) {
2689 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2690 goto err_disable_txclk;
2691 }
2692
2693 err = clk_prepare_enable(*rx_clk);
2694 if (err) {
2695 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2696 goto err_disable_txsclk;
2697 }
2698
2699 err = clk_prepare_enable(*rxs_clk);
2700 if (err) {
2701 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2702 goto err_disable_rxclk;
2703 }
2704
2705 return 0;
2706
2707err_disable_rxclk:
2708 clk_disable_unprepare(*rx_clk);
2709err_disable_txsclk:
2710 clk_disable_unprepare(*txs_clk);
2711err_disable_txclk:
2712 clk_disable_unprepare(*tx_clk);
2713err_disable_axiclk:
2714 clk_disable_unprepare(*axi_clk);
2715
2716 return err;
2717}
2718
2719static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2720{
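 /* Disable and unprepare all clocks acquired at probe time */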
2721 clk_disable_unprepare(xdev->rxs_clk);
2722 clk_disable_unprepare(xdev->rx_clk);
2723 clk_disable_unprepare(xdev->txs_clk);
2724 clk_disable_unprepare(xdev->tx_clk);
2725 clk_disable_unprepare(xdev->axi_clk);
2726}

/**
 * xilinx_dma_chan_probe - Per channel probing
 * It gets the channel features from the device tree entry and
 * initializes the channel operations accordingly.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' on success and failure value on error
 */
2738static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2739 struct device_node *node)
2740{
2741 struct xilinx_dma_chan *chan;
2742 bool has_dre = false;
2743 u32 value, width;
2744 int err;
2745
 /* Allocate and initialize the channel structure */
2747 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2748 if (!chan)
2749 return -ENOMEM;
2750
2751 chan->dev = xdev->dev;
2752 chan->xdev = xdev;
2753 chan->desc_pendingcount = 0x0;
2754 chan->ext_addr = xdev->ext_addr;
2755
 /*
 * Start the channel as idle: descriptors must not be submitted
 * while the DMA engine is busy, and tracking this flag avoids
 * polling a status register bit in the driver hot path.
 */
2760 chan->idle = true;
2761
2762 spin_lock_init(&chan->lock);
2763 INIT_LIST_HEAD(&chan->pending_list);
2764 INIT_LIST_HEAD(&chan->done_list);
2765 INIT_LIST_HEAD(&chan->active_list);
2766 INIT_LIST_HEAD(&chan->free_seg_list);
2767
 /* Retrieve the channel properties from the device tree */
2769 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2770
2771 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2772
2773 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2774 if (err) {
2775 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2776 return err;
2777 }
 width = value >> 3; /* Convert bits to bytes */

 /* DRE is not present in hardware when the data width exceeds 8 bytes */
2781 if (width > 8)
2782 has_dre = false;
2783
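 /* Without DRE, clients must align buffers to the stream data width */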
2784 if (!has_dre)
2785 xdev->common.copy_align = fls(width - 1);
2786
2787 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2788 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2789 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2790 chan->direction = DMA_MEM_TO_DEV;
2791 chan->id = xdev->mm2s_chan_id++;
2792 chan->tdest = chan->id;
2793
2794 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2795 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2796 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2797 chan->config.park = 1;
2798
2799 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2800 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2801 chan->flush_on_fsync = true;
2802 }
2803 } else if (of_device_is_compatible(node,
2804 "xlnx,axi-vdma-s2mm-channel") ||
2805 of_device_is_compatible(node,
2806 "xlnx,axi-dma-s2mm-channel")) {
2807 chan->direction = DMA_DEV_TO_MEM;
2808 chan->id = xdev->s2mm_chan_id++;
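 /* S2MM channel ids start at max_channels / 2; rebase tdest to a zero-based index */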
2809 chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
2810 chan->has_vflip = of_property_read_bool(node,
2811 "xlnx,enable-vert-flip");
2812 if (chan->has_vflip) {
2813 chan->config.vflip_en = dma_read(chan,
2814 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2815 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2816 }
2817
2818 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
2819 chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
2820 else
2821 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2822
2823 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2824 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2825 chan->config.park = 1;
2826
2827 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2828 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2829 chan->flush_on_fsync = true;
2830 }
2831 } else {
2832 dev_err(xdev->dev, "Invalid channel compatible node\n");
2833 return -EINVAL;
2834 }
2835
 /* Request the interrupt */
2837 chan->irq = irq_of_parse_and_map(node, chan->tdest);
2838 err = request_irq(chan->irq, xdev->dma_config->irq_handler,
2839 IRQF_SHARED, "xilinx-dma-controller", chan);
2840 if (err) {
2841 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2842 return err;
2843 }
2844
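 /* Select the per-IP start/stop transfer handlers */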
2845 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2846 chan->start_transfer = xilinx_dma_start_transfer;
2847 chan->stop_transfer = xilinx_dma_stop_transfer;
2848 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
2849 chan->start_transfer = xilinx_mcdma_start_transfer;
2850 chan->stop_transfer = xilinx_dma_stop_transfer;
2851 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2852 chan->start_transfer = xilinx_cdma_start_transfer;
2853 chan->stop_transfer = xilinx_cdma_stop_transfer;
2854 } else {
2855 chan->start_transfer = xilinx_vdma_start_transfer;
2856 chan->stop_transfer = xilinx_dma_stop_transfer;
2857 }
2858
 /* Check if SG is enabled (only for AXIDMA, AXIMCDMA and CDMA) */
2860 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2861 if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2862 XILINX_DMA_DMASR_SG_MASK)
2863 chan->has_sg = true;
2864 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2865 chan->has_sg ? "enabled" : "disabled");
2866 }
2867
 /* Initialize the tasklet */
2869 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
2870 (unsigned long)chan);
2871
 /*
 * Initialize the DMA channel and add it to the DMA engine channels
 * list.
 */
2876 chan->common.device = &xdev->common;
2877
2878 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2879 xdev->chan[chan->id] = chan;
2880
 /* Reset the channel */
2882 err = xilinx_dma_chan_reset(chan);
2883 if (err < 0) {
2884 dev_err(xdev->dev, "Reset channel failed\n");
2885 return err;
2886 }
2887
2888 return 0;
2889}

/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' on success and failure value on error
 */
2901static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2902 struct device_node *node)
2903{
2904 int ret, i, nr_channels = 1;
2905
2906 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2907 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
2908 dev_warn(xdev->dev, "missing dma-channels property\n");
2909
 for (i = 0; i < nr_channels; i++) {
 ret = xilinx_dma_chan_probe(xdev, node);
 if (ret)
 return ret;
 }
2912
2913 return 0;
2914}
2915

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success or NULL on error
 */
2923static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2924 struct of_dma *ofdma)
2925{
2926 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2927 int chan_id = dma_spec->args[0];
2928
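 /* The first DT cell selects the channel index; reject out-of-range or unprobed channels */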
2929 if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
2930 return NULL;
2931
2932 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2933}
2934
2935static const struct xilinx_dma_config axidma_config = {
2936 .dmatype = XDMA_TYPE_AXIDMA,
2937 .clk_init = axidma_clk_init,
2938 .irq_handler = xilinx_dma_irq_handler,
2939 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
2940};
2941
2942static const struct xilinx_dma_config aximcdma_config = {
2943 .dmatype = XDMA_TYPE_AXIMCDMA,
2944 .clk_init = axidma_clk_init,
2945 .irq_handler = xilinx_mcdma_irq_handler,
2946 .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
};

static const struct xilinx_dma_config axicdma_config = {
2949 .dmatype = XDMA_TYPE_CDMA,
2950 .clk_init = axicdma_clk_init,
2951 .irq_handler = xilinx_dma_irq_handler,
2952 .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
2953};
2954
2955static const struct xilinx_dma_config axivdma_config = {
2956 .dmatype = XDMA_TYPE_VDMA,
2957 .clk_init = axivdma_clk_init,
2958 .irq_handler = xilinx_dma_irq_handler,
2959 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
2960};
2961
2962static const struct of_device_id xilinx_dma_of_ids[] = {
2963 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2964 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2965 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2966 { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
2967 {}
2968};
2969MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
2977static int xilinx_dma_probe(struct platform_device *pdev)
2978{
2979 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2980 struct clk **, struct clk **, struct clk **)
2981 = axivdma_clk_init;
2982 struct device_node *node = pdev->dev.of_node;
2983 struct xilinx_dma_device *xdev;
2984 struct device_node *child, *np = pdev->dev.of_node;
2985 u32 num_frames, addr_width, len_width;
2986 int i, err;
2987
 /* Allocate and initialize the DMA engine structure */
2989 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2990 if (!xdev)
2991 return -ENOMEM;
2992
2993 xdev->dev = &pdev->dev;
2994 if (np) {
2995 const struct of_device_id *match;
2996
2997 match = of_match_node(xilinx_dma_of_ids, np);
2998 if (match && match->data) {
2999 xdev->dma_config = match->data;
3000 clk_init = xdev->dma_config->clk_init;
3001 }
3002 }
3003
3004 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
3005 &xdev->rx_clk, &xdev->rxs_clk);
3006 if (err)
3007 return err;
3008
 /* Request and map I/O memory */
3010 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
3011 if (IS_ERR(xdev->regs))
3012 return PTR_ERR(xdev->regs);
3013
 /* Retrieve the DMA engine properties from the device tree */
3015 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
3016 xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
3017
3018 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
3019 xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3020 if (!of_property_read_u32(node, "xlnx,sg-length-width",
3021 &len_width)) {
3022 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
3023 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
3024 dev_warn(xdev->dev,
3025 "invalid xlnx,sg-length-width property value. Using default width\n");
3026 } else {
3027 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
3028 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
3029 xdev->max_buffer_len =
3030 GENMASK(len_width - 1, 0);
3031 }
3032 }
3033 }
3034
3035 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3036 err = of_property_read_u32(node, "xlnx,num-fstores",
3037 &num_frames);
3038 if (err < 0) {
3039 dev_err(xdev->dev,
3040 "missing xlnx,num-fstores property\n");
3041 return err;
3042 }
3043
3044 err = of_property_read_u32(node, "xlnx,flush-fsync",
3045 &xdev->flush_on_fsync);
3046 if (err < 0)
3047 dev_warn(xdev->dev,
3048 "missing xlnx,flush-fsync property\n");
3049 }
3050
3051 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
3052 if (err < 0)
3053 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
3054
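 /* Address widths above 32 bits use extended (64-bit) addressing */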
3055 if (addr_width > 32)
3056 xdev->ext_addr = true;
3057 else
3058 xdev->ext_addr = false;
3059
 /* Set the DMA mask according to the address width */
3061 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
3062
 /* Initialize the DMA engine */
3064 xdev->common.dev = &pdev->dev;
3065
3066 INIT_LIST_HEAD(&xdev->common.channels);
 if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
3068 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
3069 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
3070 }
3071
3072 xdev->common.device_alloc_chan_resources =
3073 xilinx_dma_alloc_chan_resources;
3074 xdev->common.device_free_chan_resources =
3075 xilinx_dma_free_chan_resources;
3076 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
3077 xdev->common.device_tx_status = xilinx_dma_tx_status;
3078 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
3079 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3080 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
3081 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
3082 xdev->common.device_prep_dma_cyclic =
3083 xilinx_dma_prep_dma_cyclic;
 /* Residue calculation is supported only by AXI DMA and CDMA */
3085 xdev->common.residue_granularity =
3086 DMA_RESIDUE_GRANULARITY_SEGMENT;
3087 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
3088 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
3089 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
 /* Residue calculation is supported only by AXI DMA and CDMA */
3091 xdev->common.residue_granularity =
3092 DMA_RESIDUE_GRANULARITY_SEGMENT;
3093 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3094 xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
3095 } else {
3096 xdev->common.device_prep_interleaved_dma =
3097 xilinx_vdma_dma_prep_interleaved;
3098 }
3099
3100 platform_set_drvdata(pdev, xdev);
3101
 /* Initialize the channels */
3103 for_each_child_of_node(node, child) {
3104 err = xilinx_dma_child_probe(xdev, child);
3105 if (err < 0)
3106 goto disable_clks;
3107 }
3108
3109 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3110 for (i = 0; i < xdev->dma_config->max_channels; i++)
3111 if (xdev->chan[i])
3112 xdev->chan[i]->num_frms = num_frames;
3113 }
3114
 /* Register the DMA engine with the core */
 err = dma_async_device_register(&xdev->common);
 if (err) {
 dev_err(xdev->dev, "failed to register the dma device\n");
 goto error;
 }
3117
3118 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
3119 xdev);
3120 if (err < 0) {
3121 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
3122 dma_async_device_unregister(&xdev->common);
3123 goto error;
3124 }
3125
3126 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
3127 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
3128 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
3129 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
3130 else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
3131 dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
3132 else
3133 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
3134
3135 return 0;
3136
3137disable_clks:
3138 xdma_disable_allclks(xdev);
3139error:
3140 for (i = 0; i < xdev->dma_config->max_channels; i++)
3141 if (xdev->chan[i])
3142 xilinx_dma_chan_remove(xdev->chan[i]);
3143
3144 return err;
3145}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
3153static int xilinx_dma_remove(struct platform_device *pdev)
3154{
3155 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
3156 int i;
3157
3158 of_dma_controller_free(pdev->dev.of_node);
3159
3160 dma_async_device_unregister(&xdev->common);
3161
3162 for (i = 0; i < xdev->dma_config->max_channels; i++)
3163 if (xdev->chan[i])
3164 xilinx_dma_chan_remove(xdev->chan[i]);
3165
3166 xdma_disable_allclks(xdev);
3167
3168 return 0;
3169}
3170
3171static struct platform_driver xilinx_vdma_driver = {
3172 .driver = {
3173 .name = "xilinx-vdma",
3174 .of_match_table = xilinx_dma_of_ids,
3175 },
3176 .probe = xilinx_dma_probe,
3177 .remove = xilinx_dma_remove,
3178};
3179
3180module_platform_driver(xilinx_vdma_driver);
3181
3182MODULE_AUTHOR("Xilinx, Inc.");
3183MODULE_DESCRIPTION("Xilinx VDMA driver");
3184MODULE_LICENSE("GPL v2");
3185