// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
 * Xilinx IP that provides high-bandwidth direct memory access between
 * memory and AXI4-Stream target peripherals. It provides the scatter gather
 * interface with multiple channels independent configuration support.
 */
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR 0x0000
#define XILINX_DMA_DMACR_DELAY_MAX 0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT 24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT 8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
#define XILINX_DMA_DMACR_RESET BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)

#define XILINX_DMA_REG_DMASR 0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
#define XILINX_DMA_DMASR_SG_MASK BIT(3)
#define XILINX_DMA_DMASR_IDLE BIT(1)
#define XILINX_DMA_DMASR_HALTED BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC 0x0008
#define XILINX_DMA_REG_TAILDESC 0x0010
#define XILINX_DMA_REG_REG_INDEX 0x0014
#define XILINX_DMA_REG_FRMSTORE 0x0018
#define XILINX_DMA_REG_THRESHOLD 0x001c
#define XILINX_DMA_REG_FRMPTR_STS 0x0024
#define XILINX_DMA_REG_PARK_PTR 0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION 0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE 0x0000
#define XILINX_DMA_REG_HSIZE 0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0

#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)

/* HW specific definitions */
#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
	(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
	 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
	 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK \
	(XILINX_DMA_DMASR_EOL_LATE_ERR | \
	 XILINX_DMA_DMASR_SOF_LATE_ERR | \
	 XILINX_DMA_DMASR_SG_DEC_ERR | \
	 XILINX_DMA_DMASR_SG_SLV_ERR | \
	 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
	 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
	 XILINX_DMA_DMASR_DMA_DEC_ERR | \
	 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
	 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
	(XILINX_DMA_DMASR_SOF_LATE_ERR | \
	 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
	 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
	 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM 3
#define XILINX_DMA_FLUSH_MM2S 2
#define XILINX_DMA_FLUSH_BOTH 1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT 1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR 0x18
#define XILINX_DMA_REG_BTT 0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN 8
#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
#define XILINX_DMA_COALESCE_MAX 255
#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR 0x18
#define XILINX_CDMA_REG_DSTADDR 0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE BIT(3)

/* Build a dma_addr_t from separate LSB and MSB register fields */
#define xilinx_prep_dma_addr_t(addr) \
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))

/* AXI MCDMA Specific Registers/Offsets */
#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
#define XILINX_MCDMA_CHEN_OFFSET 0x0008
#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)

/* AXI MCDMA Specific Masks/Shifts */
#define XILINX_MCDMA_COALESCE_SHIFT 16
#define XILINX_MCDMA_COALESCE_MAX 24
#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
#define XILINX_MCDMA_BD_EOP BIT(30)
#define XILINX_MCDMA_BD_SOP BIT(31)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 reserved1;
	u32 reserved2;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @rsvd: Reserved @0x10
 * @control: Control field @0x14
 * @status: Status field @0x18
 * @sideband_status: Status of sideband signals @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_aximcdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 rsvd;
	u32 control;
	u32 status;
	u32 sideband_status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_aximcdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_aximcdma_tx_segment {
	struct xilinx_aximcdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 * @err: Whether the descriptor has an error.
 * @residue: Residue of the completed descriptor
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
	bool err;
	u32 residue;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @terminating: Check for channel being synchronized by user
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_mv: Statically allocated segments base for MCDMA
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	bool terminating;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_aximcdma_tx_segment *seg_mv;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
	XDMA_TYPE_AXIMCDMA
};

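/*
 * Per-IP configuration data: the IP flavour, its clock setup routine,
 * its interrupt handler and the channel count it supports.
 */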
struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
	irqreturn_t (*irq_handler)(int irq, void *data);
	const int max_channels;
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interace clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @s2mm_chan_id: DMA s2mm channel identifier
 * @mm2s_chan_id: DMA mm2s channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 s2mm_chan_id;
	u32 mm2s_chan_id;
	u32 max_buffer_len;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
				  val, cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since vdma driver is trying to write to a register offset which is not a
 * multiple of 64 bits (ex: 0x5c), we are writing as two separate 32-bit
 * registers.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

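/* 64-bit control register write, issued as two 32-bit writes (lo, then hi) */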
static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

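/*
 * Program the buffer address into an AXI DMA hardware descriptor, splitting
 * it into LSB/MSB halves when the hardware supports extended addressing.
 */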
static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

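/*
 * Program the buffer address into an AXI MCDMA hardware descriptor,
 * splitting it into LSB/MSB halves when extended addressing is enabled.
 */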
static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
				       struct xilinx_aximcdma_desc_hw *hw,
				       dma_addr_t buf_addr, size_t sg_used)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
	} else {
		hw->buf_addr = buf_addr + sg_used;
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!segment)
		dev_dbg(chan->dev, "Could not find free tx segment\n");

	return segment;
}

/**
 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_aximcdma_tx_segment *
xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_aximcdma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_aximcdma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}

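/*
 * Zero an AXI DMA hardware descriptor while preserving the next-descriptor
 * pointers that chain the free segment list together.
 */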
static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

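/*
 * Zero an AXI MCDMA hardware descriptor while preserving the next-descriptor
 * pointers that chain the free segment list together.
 */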
static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_mcdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
					 struct xilinx_aximcdma_tx_segment *
					 segment)
{
	xilinx_mcdma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
	struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	} else {
		list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
					 &desc->segments, node) {
			list_del(&aximcdma_segment->node);
			xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free memory that is allocated for cyclic DMA mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
				  XILINX_DMA_NUM_DESCS, chan->seg_mv,
				  chan->seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
	    chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}

/**
 * xilinx_dma_get_residue - Compute residue for a given descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 *
 * Return: The number of residue bytes for the descriptor.
 */
static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
				  struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_cdma_tx_segment *cdma_seg;
	struct xilinx_axidma_tx_segment *axidma_seg;
	struct xilinx_aximcdma_tx_segment *aximcdma_seg;
	struct xilinx_cdma_desc_hw *cdma_hw;
	struct xilinx_axidma_desc_hw *axidma_hw;
	struct xilinx_aximcdma_desc_hw *aximcdma_hw;
	struct list_head *entry;
	u32 residue = 0;

	list_for_each(entry, &desc->segments) {
		if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
			cdma_seg = list_entry(entry,
					      struct xilinx_cdma_tx_segment,
					      node);
			cdma_hw = &cdma_seg->hw;
			residue += (cdma_hw->control - cdma_hw->status) &
				   chan->xdev->max_buffer_len;
		} else if (chan->xdev->dma_config->dmatype ==
			   XDMA_TYPE_AXIDMA) {
			axidma_seg = list_entry(entry,
						struct xilinx_axidma_tx_segment,
						node);
			axidma_hw = &axidma_seg->hw;
			residue += (axidma_hw->control - axidma_hw->status) &
				   chan->xdev->max_buffer_len;
		} else {
			aximcdma_seg =
				list_entry(entry,
					   struct xilinx_aximcdma_tx_segment,
					   node);
			aximcdma_hw = &aximcdma_seg->hw;
			residue +=
				(aximcdma_hw->control - aximcdma_hw->status) &
				chan->xdev->max_buffer_len;
		}
	}

	return residue;
}

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific dma channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_result result;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		if (unlikely(desc->err)) {
			if (chan->direction == DMA_DEV_TO_MEM)
				result.result = DMA_TRANS_READ_FAILED;
			else
				result.result = DMA_TRANS_WRITE_FAILED;
		} else {
			result.result = DMA_TRANS_NOERROR;
		}

		result.residue = desc->residue;

		/* Run the link descriptor callback function */
		spin_unlock_irqrestore(&chan->lock, flags);
		dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
		spin_lock_irqsave(&chan->lock, flags);

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);

		/*
		 * While we ran a callback the user called a terminate function,
		 * which takes care of cleaning up any remaining descriptors
		 */
		if (chan->terminating)
			break;
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @t: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
{
	struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_alloc_coherent(chan->dev,
						 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
						 &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}

		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain
		 * so allocating a desc segment during channel allocation for
		 * programming tail descriptor.
		 */
		chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
							sizeof(*chan->cyclic_seg_v),
							&chan->cyclic_seg_p,
							GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
					  XILINX_DMA_NUM_DESCS, chan->seg_v,
					  chan->seg_p);
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;

		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_mv = dma_alloc_coherent(chan->dev,
						  sizeof(*chan->seg_mv) *
						  XILINX_DMA_NUM_DESCS,
						  &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_mv) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}
		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_mv[i].hw.next_desc =
			lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_mv[i].hw.next_desc_msb =
			upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_mv[i].phys = chan->seg_p +
				sizeof(*chan->seg_mv) * i;
			list_add_tail(&chan->seg_mv[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_cdma_tx_segment),
				   __alignof__(struct xilinx_cdma_tx_segment),
				   0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				     chan->dev,
				     sizeof(struct xilinx_vdma_tx_segment),
				     __alignof__(struct xilinx_vdma_tx_segment),
				     0);
	}

	if (!chan->desc_pool &&
	    ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
	     chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/*
		 * Enable the frame-count, delay and error interrupts
		 * for the channel.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
 * @chan: Driver specific DMA channel
 * @size: Total data that needs to be copied
 * @done: Amount of data that has been already copied
 *
 * Return: Amount of data that has to be copied
 */
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
				    int size, int done)
{
	size_t copy;

	copy = min_t(size_t, size - done,
		     chan->xdev->max_buffer_len);

	if ((copy + done < size) &&
	    chan->xdev->common.copy_align) {
		/*
		 * If this is not the last descriptor, make sure
		 * the transfer is aligned to the copy_align requirement.
		 */
		copy = rounddown(copy,
				 (1 << chan->xdev->common.copy_align));
	}
	return copy;
}

/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->active_list)) {
		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
		/*
		 * VDMA and simple mode do not support residue reporting, so
		 * the residue field will always be 0.
		 */
		if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
			residue = xilinx_dma_get_residue(chan, desc);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	dma_set_residue(txstate, residue);

	return ret;
}

/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc;
	u32 reg, j;
	struct xilinx_vdma_tx_segment *segment, *last = NULL;
	int i = 0;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);

	/* Configure the hardware using info in the config structure */
	if (chan->has_vflip) {
		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		reg |= config->vflip_en;
		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
			  reg);
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/* If not parking, enable circular mode */
	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
	else
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	j = chan->desc_submitcount;
	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
	if (chan->direction == DMA_MEM_TO_DEV) {
		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
	} else {
		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
	}
	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->desc_submitcount < chan->num_frms)
		i = chan->desc_submitcount;

	list_for_each_entry(segment, &desc->segments, node) {
		if (chan->ext_addr)
			vdma_desc_write_64(chan,
					   XILINX_VDMA_REG_START_ADDRESS_64(i++),
					   segment->hw.buf_addr,
					   segment->hw.buf_addr_msb);
		else
			vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

		last = segment;
	}

	if (!last)
		return;

	/* HW expects these parameters to be same for one transaction */
	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
			last->hw.stride);
	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);

	chan->desc_submitcount++;
	chan->desc_pendingcount--;
	list_move_tail(&desc->node, &chan->active_list);
	if (chan->desc_submitcount == chan->num_frms)
		chan->desc_submitcount = 0;

	chan->idle = false;
}

/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
			    XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

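	/*
	 * In SG mode, toggle SGMODE to clear stale descriptor state before
	 * programming the current/tail descriptor pointers; in simple mode,
	 * program the source, destination and length registers directly.
	 */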
	if (chan->has_sg) {
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
			     xilinx_prep_dma_addr_t(hw->src_addr));
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
			     xilinx_prep_dma_addr_t(hw->dest_addr));

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment;
	u32 reg;

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	if (!chan->idle)
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
		       XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
			     xilinx_prep_dma_addr_t(hw->buf_addr));

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & chan->xdev->max_buffer_len);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_aximcdma_tx_segment *tail_segment;
	u32 reg;

	/*
	 * lock has been held by calling functions, so we don't need it
	 * to take it here again.
	 */
	if (chan->err)
		return;

	if (!chan->idle)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_aximcdma_tx_segment, node);

	reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));

	if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
		reg &= ~XILINX_MCDMA_COALESCE_MASK;
		reg |= chan->desc_pendingcount <<
		       XILINX_MCDMA_COALESCE_SHIFT;
	}

	reg |= XILINX_MCDMA_IRQ_ALL_MASK;
	dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);

	/* Program current descriptor */
	xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
		     head_desc->async_tx.phys);

	/* Program channel enable register */
	reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
	reg |= BIT(chan->tdest);
	dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);

	/* Start the fetch of BDs for the channel */
	reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
	reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
	dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
		     tail_segment->phys);

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
	chan->idle = false;
}

/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_device_config - Configure the DMA channel
 * @dchan: DMA channel
 * @config: channel configuration
 *
 * Return: '0' always.
 */
static int xilinx_dma_device_config(struct dma_chan *dchan,
				    struct dma_slave_config *config)
{
	return 0;
}

/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan : xilinx DMA channel
 *
 * CONTEXT: hardirq
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		if (chan->has_sg && chan->xdev->dma_config->dmatype !=
		    XDMA_TYPE_VDMA)
			desc->residue = xilinx_dma_get_residue(chan, desc);
		else
			desc->residue = 0;
		desc->err = chan->err;

		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}

/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;
	chan->idle = true;
	chan->desc_pendingcount = 0;
	chan->desc_submitcount = 0;

	return err;
}

/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	/* Reset VDMA */
	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return 0;
}

/**
 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx MCDMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;

	if (chan->direction == DMA_DEV_TO_MEM)
		ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
	else
		ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;

	/* Read the channel id raising the interrupt */
	chan_sermask = dma_ctrl_read(chan, ser_offset);
	chan_id = ffs(chan_sermask);

	if (!chan_id)
		return IRQ_NONE;

	if (chan->direction == DMA_DEV_TO_MEM)
		chan_offset = chan->xdev->dma_config->max_channels / 2;

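	/* Map the interrupting channel id onto the driver's channel array */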
	chan_offset = chan_offset + (chan_id - 1);
	chan = chan->xdev->chan[chan_offset];

	status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
	if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
		       status & XILINX_MCDMA_IRQ_ALL_MASK);

	if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
		dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
			chan,
			dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
			dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
				      (chan->tdest)),
			dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
				      (chan->tdest)));
		chan->err = true;
	}

	if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->idle = true;
		chan->start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * make sure not to write to other error bits to 1.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->idle = true;
		chan->start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;
	struct xilinx_axidma_tx_segment *axidma_tail_segment;
	struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
	struct xilinx_cdma_tx_segment *cdma_tail_segment;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_vdma_tx_segment,
					       node);
		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		cdma_tail_segment = list_last_entry(&tail_desc->segments,
						    struct xilinx_cdma_tx_segment,
						    node);
		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		axidma_tail_segment = list_last_entry(&tail_desc->segments,
						      struct xilinx_axidma_tx_segment,
						      node);
		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else {
		aximcdma_tail_segment =
			list_last_entry(&tail_desc->segments,
					struct xilinx_aximcdma_tx_segment,
					node);
		aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	}

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}

/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->cyclic) {
		xilinx_dma_free_tx_descriptor(chan, desc);
		return -EBUSY;
	}

	if (chan->err) {
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	if (desc->cyclic)
		chan->cyclic = true;

	chan->terminating = false;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
		     XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
		      XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

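	/* Program the buffer address according to the transfer direction */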
	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > chan->xdev->max_buffer_len)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	desc->async_tx.phys = segment->phys;
	hw->next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
							sg_used);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, period_len,
							sg_used);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure.
 */
static struct dma_async_tx_descriptor *
xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			   unsigned int sg_len,
			   enum dma_transfer_direction direction,
			   unsigned long flags, void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_aximcdma_tx_segment *segment = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_aximcdma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_aximcdma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     chan->xdev->max_buffer_len);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
					    sg_used);
			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV && app_w) {
				memcpy(hw->app, app_w, sizeof(u32) *
				       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_aximcdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_MCDMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_aximcdma_tx_segment,
					  node);
		segment->hw.control |= XILINX_MCDMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);

	return NULL;
}
2435
2436
2437
2438
2439
2440
2441
2442static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2443{
2444 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2445 u32 reg;
2446 int err;
2447
2448 if (!chan->cyclic) {
2449 err = chan->stop_transfer(chan);
2450 if (err) {
2451 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2452 chan, dma_ctrl_read(chan,
2453 XILINX_DMA_REG_DMASR));
2454 chan->err = true;
2455 }
2456 }
2457
2458 xilinx_dma_chan_reset(chan);
2459
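	/* Remove and free all of the descriptors in the lists */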
2460 chan->terminating = true;
2461 xilinx_dma_free_descriptors(chan);
2462 chan->idle = true;
2463
2464 if (chan->cyclic) {
2465 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2466 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2467 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2468 chan->cyclic = false;
2469 }
2470
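	/* Return the CDMA to simple mode by clearing the SG mode bit */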
2471 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2472 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2473 XILINX_CDMA_CR_SGMODE);
2474
2475 return 0;
2476}
2477
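/**
 * xilinx_dma_synchronize - Synchronize callback
 * @dchan: Driver specific DMA Channel pointer
 */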
2478static void xilinx_dma_synchronize(struct dma_chan *dchan)
2479{
2480 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2481
2482 tasklet_kill(&chan->tasklet);
2483}
2484
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for AXI VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
2498int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2499 struct xilinx_vdma_config *cfg)
2500{
2501 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2502 u32 dmacr;
2503
2504 if (cfg->reset)
2505 return xilinx_dma_chan_reset(chan);
2506
2507 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2508
2509 chan->config.frm_dly = cfg->frm_dly;
2510 chan->config.park = cfg->park;
2511
	/* genlock settings */
2513 chan->config.gen_lock = cfg->gen_lock;
2514 chan->config.master = cfg->master;
2515
2516 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2517 if (cfg->gen_lock && chan->genlock) {
2518 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2519 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2520 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2521 }
2522
2523 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2524 chan->config.vflip_en = cfg->vflip_en;
2525
2526 if (cfg->park)
2527 chan->config.park_frm = cfg->park_frm;
2528 else
2529 chan->config.park_frm = -1;
2530
2531 chan->config.coalesc = cfg->coalesc;
2532 chan->config.delay = cfg->delay;
2533
2534 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2535 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2536 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2537 chan->config.coalesc = cfg->coalesc;
2538 }
2539
2540 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2541 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2542 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2543 chan->config.delay = cfg->delay;
2544 }
2545
	/* FSync Source selection */
2547 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2548 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2549
2550 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2551
2552 return 0;
2553}
2554EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
2555
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
2564static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2565{
	/* Disable all interrupts */
2567 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2568 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2569
2570 if (chan->irq > 0)
2571 free_irq(chan->irq, chan);
2572
2573 tasklet_kill(&chan->tasklet);
2574
2575 list_del(&chan->common.device_node);
2576}
2577
2578static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2579 struct clk **tx_clk, struct clk **rx_clk,
2580 struct clk **sg_clk, struct clk **tmp_clk)
2581{
2582 int err;
2583
2584 *tmp_clk = NULL;
2585
2586 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2587 if (IS_ERR(*axi_clk))
2588 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2589
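	/* The MM2S, S2MM and SG clocks are optional; absent ones stay NULL */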
2590 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2591 if (IS_ERR(*tx_clk))
2592 *tx_clk = NULL;
2593
2594 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2595 if (IS_ERR(*rx_clk))
2596 *rx_clk = NULL;
2597
2598 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2599 if (IS_ERR(*sg_clk))
2600 *sg_clk = NULL;
2601
2602 err = clk_prepare_enable(*axi_clk);
2603 if (err) {
2604 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2605 return err;
2606 }
2607
2608 err = clk_prepare_enable(*tx_clk);
2609 if (err) {
2610 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2611 goto err_disable_axiclk;
2612 }
2613
2614 err = clk_prepare_enable(*rx_clk);
2615 if (err) {
2616 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2617 goto err_disable_txclk;
2618 }
2619
2620 err = clk_prepare_enable(*sg_clk);
2621 if (err) {
2622 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2623 goto err_disable_rxclk;
2624 }
2625
2626 return 0;
2627
2628err_disable_rxclk:
2629 clk_disable_unprepare(*rx_clk);
2630err_disable_txclk:
2631 clk_disable_unprepare(*tx_clk);
2632err_disable_axiclk:
2633 clk_disable_unprepare(*axi_clk);
2634
2635 return err;
2636}
2637
2638static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2639 struct clk **dev_clk, struct clk **tmp_clk,
2640 struct clk **tmp1_clk, struct clk **tmp2_clk)
2641{
2642 int err;
2643
2644 *tmp_clk = NULL;
2645 *tmp1_clk = NULL;
2646 *tmp2_clk = NULL;
2647
2648 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2649 if (IS_ERR(*axi_clk))
2650 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2651
2652 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2653 if (IS_ERR(*dev_clk))
2654 return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
2655
2656 err = clk_prepare_enable(*axi_clk);
2657 if (err) {
2658 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2659 return err;
2660 }
2661
2662 err = clk_prepare_enable(*dev_clk);
2663 if (err) {
2664 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2665 goto err_disable_axiclk;
2666 }
2667
2668 return 0;
2669
2670err_disable_axiclk:
2671 clk_disable_unprepare(*axi_clk);
2672
2673 return err;
2674}
2675
2676static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2677 struct clk **tx_clk, struct clk **txs_clk,
2678 struct clk **rx_clk, struct clk **rxs_clk)
2679{
2680 int err;
2681
2682 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2683 if (IS_ERR(*axi_clk))
2684 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2685
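	/* The MM2S/S2MM AXI and AXI-Stream clocks are optional; absent ones stay NULL */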
2686 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2687 if (IS_ERR(*tx_clk))
2688 *tx_clk = NULL;
2689
2690 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2691 if (IS_ERR(*txs_clk))
2692 *txs_clk = NULL;
2693
2694 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2695 if (IS_ERR(*rx_clk))
2696 *rx_clk = NULL;
2697
2698 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2699 if (IS_ERR(*rxs_clk))
2700 *rxs_clk = NULL;
2701
2702 err = clk_prepare_enable(*axi_clk);
2703 if (err) {
2704 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2705 err);
2706 return err;
2707 }
2708
2709 err = clk_prepare_enable(*tx_clk);
2710 if (err) {
2711 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2712 goto err_disable_axiclk;
2713 }
2714
2715 err = clk_prepare_enable(*txs_clk);
2716 if (err) {
2717 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2718 goto err_disable_txclk;
2719 }
2720
2721 err = clk_prepare_enable(*rx_clk);
2722 if (err) {
2723 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2724 goto err_disable_txsclk;
2725 }
2726
2727 err = clk_prepare_enable(*rxs_clk);
2728 if (err) {
2729 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2730 goto err_disable_rxclk;
2731 }
2732
2733 return 0;
2734
2735err_disable_rxclk:
2736 clk_disable_unprepare(*rx_clk);
2737err_disable_txsclk:
2738 clk_disable_unprepare(*txs_clk);
2739err_disable_txclk:
2740 clk_disable_unprepare(*tx_clk);
2741err_disable_axiclk:
2742 clk_disable_unprepare(*axi_clk);
2743
2744 return err;
2745}
2746
2747static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2748{
2749 clk_disable_unprepare(xdev->rxs_clk);
2750 clk_disable_unprepare(xdev->rx_clk);
2751 clk_disable_unprepare(xdev->txs_clk);
2752 clk_disable_unprepare(xdev->tx_clk);
2753 clk_disable_unprepare(xdev->axi_clk);
2754}
2755
/**
 * xilinx_dma_chan_probe - Per Channel Probing
 *
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' on success and failure value on error
 */
2766static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2767 struct device_node *node)
2768{
2769 struct xilinx_dma_chan *chan;
2770 bool has_dre = false;
2771 u32 value, width;
2772 int err;
2773
	/* Allocate and initialize the channel structure */
2775 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2776 if (!chan)
2777 return -ENOMEM;
2778
2779 chan->dev = xdev->dev;
2780 chan->xdev = xdev;
2781 chan->desc_pendingcount = 0x0;
2782 chan->ext_addr = xdev->ext_addr;
	/*
	 * This variable ensures that descriptors are not submitted while
	 * the DMA engine is in progress. It avoids polling for a bit in
	 * the status register to know the DMA state in the driver hot path.
	 */
2788 chan->idle = true;
2789
2790 spin_lock_init(&chan->lock);
2791 INIT_LIST_HEAD(&chan->pending_list);
2792 INIT_LIST_HEAD(&chan->done_list);
2793 INIT_LIST_HEAD(&chan->active_list);
2794 INIT_LIST_HEAD(&chan->free_seg_list);
2795
	/* Retrieve the channel properties from the device tree */
2797 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2798
2799 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2800
2801 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2802 if (err) {
2803 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2804 return err;
2805 }
	width = value >> 3; /* convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
2809 if (width > 8)
2810 has_dre = false;
2811
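	/* Without DRE, DMA buffers must be aligned to the data width */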
2812 if (!has_dre)
2813 xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
2814
2815 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2816 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2817 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2818 chan->direction = DMA_MEM_TO_DEV;
2819 chan->id = xdev->mm2s_chan_id++;
2820 chan->tdest = chan->id;
2821
2822 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2823 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2824 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2825 chan->config.park = 1;
2826
2827 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2828 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2829 chan->flush_on_fsync = true;
2830 }
2831 } else if (of_device_is_compatible(node,
2832 "xlnx,axi-vdma-s2mm-channel") ||
2833 of_device_is_compatible(node,
2834 "xlnx,axi-dma-s2mm-channel")) {
2835 chan->direction = DMA_DEV_TO_MEM;
2836 chan->id = xdev->s2mm_chan_id++;
2837 chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
2838 chan->has_vflip = of_property_read_bool(node,
2839 "xlnx,enable-vert-flip");
2840 if (chan->has_vflip) {
2841 chan->config.vflip_en = dma_read(chan,
2842 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2843 XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2844 }
2845
2846 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
2847 chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
2848 else
2849 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2850
2851 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2852 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2853 chan->config.park = 1;
2854
2855 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2856 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2857 chan->flush_on_fsync = true;
2858 }
2859 } else {
2860 dev_err(xdev->dev, "Invalid channel compatible node\n");
2861 return -EINVAL;
2862 }
2863
	/* Request the interrupt */
2865 chan->irq = irq_of_parse_and_map(node, chan->tdest);
2866 err = request_irq(chan->irq, xdev->dma_config->irq_handler,
2867 IRQF_SHARED, "xilinx-dma-controller", chan);
2868 if (err) {
2869 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2870 return err;
2871 }
2872
2873 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2874 chan->start_transfer = xilinx_dma_start_transfer;
2875 chan->stop_transfer = xilinx_dma_stop_transfer;
2876 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
2877 chan->start_transfer = xilinx_mcdma_start_transfer;
2878 chan->stop_transfer = xilinx_dma_stop_transfer;
2879 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2880 chan->start_transfer = xilinx_cdma_start_transfer;
2881 chan->stop_transfer = xilinx_cdma_stop_transfer;
2882 } else {
2883 chan->start_transfer = xilinx_vdma_start_transfer;
2884 chan->stop_transfer = xilinx_dma_stop_transfer;
2885 }
2886
	/* Check if SG is enabled (only for AXIDMA, AXIMCDMA and CDMA) */
2888 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2889 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
2890 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2891 XILINX_DMA_DMASR_SG_MASK)
2892 chan->has_sg = true;
2893 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2894 chan->has_sg ? "enabled" : "disabled");
2895 }
2896
	/* Initialize the tasklet */
2898 tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
2899
	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
2904 chan->common.device = &xdev->common;
2905
2906 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2907 xdev->chan[chan->id] = chan;
2908
	/* Reset the channel */
2910 err = xilinx_dma_chan_reset(chan);
2911 if (err < 0) {
2912 dev_err(xdev->dev, "Reset channel failed\n");
2913 return err;
2914 }
2915
2916 return 0;
2917}
2918
/**
 * xilinx_dma_child_probe - Per child node probe
 *
 * It gets the number of DMA channels per child node from the
 * device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' on success and failure value on error
 */
2929static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2930 struct device_node *node)
2931{
2932 int ret, i;
2933 u32 nr_channels = 1;
2934
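	/* Default to a single channel if the dma-channels property is absent */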
2935 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2936 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
2937 dev_warn(xdev->dev, "missing dma-channels property\n");
2938
	for (i = 0; i < nr_channels; i++) {
		ret = xilinx_dma_chan_probe(xdev, node);
		if (ret)
			return ret;
	}
2941
2942 return 0;
2943}
2944
/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
2952static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2953 struct of_dma *ofdma)
2954{
2955 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2956 int chan_id = dma_spec->args[0];
2957
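	/* The first specifier cell selects the channel index */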
2958 if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
2959 return NULL;
2960
2961 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2962}
2963
2964static const struct xilinx_dma_config axidma_config = {
2965 .dmatype = XDMA_TYPE_AXIDMA,
2966 .clk_init = axidma_clk_init,
2967 .irq_handler = xilinx_dma_irq_handler,
2968 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
2969};
2970
2971static const struct xilinx_dma_config aximcdma_config = {
2972 .dmatype = XDMA_TYPE_AXIMCDMA,
2973 .clk_init = axidma_clk_init,
2974 .irq_handler = xilinx_mcdma_irq_handler,
2975 .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
2976};

static const struct xilinx_dma_config axicdma_config = {
2978 .dmatype = XDMA_TYPE_CDMA,
2979 .clk_init = axicdma_clk_init,
2980 .irq_handler = xilinx_dma_irq_handler,
2981 .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
2982};
2983
2984static const struct xilinx_dma_config axivdma_config = {
2985 .dmatype = XDMA_TYPE_VDMA,
2986 .clk_init = axivdma_clk_init,
2987 .irq_handler = xilinx_dma_irq_handler,
2988 .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
2989};
2990
2991static const struct of_device_id xilinx_dma_of_ids[] = {
2992 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2993 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2994 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2995 { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
2996 {}
2997};
2998MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2999
/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
3006static int xilinx_dma_probe(struct platform_device *pdev)
3007{
3008 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
3009 struct clk **, struct clk **, struct clk **)
3010 = axivdma_clk_init;
3011 struct device_node *node = pdev->dev.of_node;
3012 struct xilinx_dma_device *xdev;
3013 struct device_node *child, *np = pdev->dev.of_node;
3014 u32 num_frames, addr_width, len_width;
3015 int i, err;
3016
	/* Allocate and initialize the DMA engine structure */
3018 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
3019 if (!xdev)
3020 return -ENOMEM;
3021
3022 xdev->dev = &pdev->dev;
3023 if (np) {
3024 const struct of_device_id *match;
3025
3026 match = of_match_node(xilinx_dma_of_ids, np);
3027 if (match && match->data) {
3028 xdev->dma_config = match->data;
3029 clk_init = xdev->dma_config->clk_init;
3030 }
3031 }
3032
3033 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
3034 &xdev->rx_clk, &xdev->rxs_clk);
3035 if (err)
3036 return err;
3037
	/* Request and map I/O memory */
3039 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
3040 if (IS_ERR(xdev->regs))
3041 return PTR_ERR(xdev->regs);
3042
	/* Retrieve the DMA engine properties from the device tree */
3044 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
3045 xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
3046
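	/* The optional xlnx,sg-length-width property selects the BD length-field width */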
3047 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
3048 xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3049 if (!of_property_read_u32(node, "xlnx,sg-length-width",
3050 &len_width)) {
3051 if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
3052 len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
3053 dev_warn(xdev->dev,
3054 "invalid xlnx,sg-length-width property value. Using default width\n");
3055 } else {
3056 if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
3057 dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
3058 xdev->max_buffer_len =
3059 GENMASK(len_width - 1, 0);
3060 }
3061 }
3062 }
3063
3064 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3065 err = of_property_read_u32(node, "xlnx,num-fstores",
3066 &num_frames);
3067 if (err < 0) {
3068 dev_err(xdev->dev,
3069 "missing xlnx,num-fstores property\n");
3070 return err;
3071 }
3072
3073 err = of_property_read_u32(node, "xlnx,flush-fsync",
3074 &xdev->flush_on_fsync);
3075 if (err < 0)
3076 dev_warn(xdev->dev,
3077 "missing xlnx,flush-fsync property\n");
3078 }
3079
3080 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
3081 if (err < 0)
3082 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
3083
3084 if (addr_width > 32)
3085 xdev->ext_addr = true;
3086 else
3087 xdev->ext_addr = false;
3088
	/* Set the dma mask bits */
3090 dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
3091
	/* Initialize the DMA engine */
3093 xdev->common.dev = &pdev->dev;
3094
3095 INIT_LIST_HEAD(&xdev->common.channels);
	if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
3097 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
3098 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
3099 }
3100
3101 xdev->common.device_alloc_chan_resources =
3102 xilinx_dma_alloc_chan_resources;
3103 xdev->common.device_free_chan_resources =
3104 xilinx_dma_free_chan_resources;
3105 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
3106 xdev->common.device_synchronize = xilinx_dma_synchronize;
3107 xdev->common.device_tx_status = xilinx_dma_tx_status;
3108 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
3109 xdev->common.device_config = xilinx_dma_device_config;
3110 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3111 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
3112 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
3113 xdev->common.device_prep_dma_cyclic =
3114 xilinx_dma_prep_dma_cyclic;
		/* Residue calculation is supported by only AXI DMA and CDMA */
3116 xdev->common.residue_granularity =
3117 DMA_RESIDUE_GRANULARITY_SEGMENT;
3118 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
3119 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
3120 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
		/* Residue calculation is supported by only AXI DMA and CDMA */
3122 xdev->common.residue_granularity =
3123 DMA_RESIDUE_GRANULARITY_SEGMENT;
3124 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3125 xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
3126 } else {
3127 xdev->common.device_prep_interleaved_dma =
3128 xilinx_vdma_dma_prep_interleaved;
3129 }
3130
3131 platform_set_drvdata(pdev, xdev);
3132
	/* Initialize the channels */
3134 for_each_child_of_node(node, child) {
3135 err = xilinx_dma_child_probe(xdev, child);
		if (err < 0) {
			of_node_put(child);
			goto error;
		}
3138 }
3139
3140 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3141 for (i = 0; i < xdev->dma_config->max_channels; i++)
3142 if (xdev->chan[i])
3143 xdev->chan[i]->num_frms = num_frames;
3144 }
3145
	/* Register the DMA engine with the core */
3147 err = dma_async_device_register(&xdev->common);
3148 if (err) {
3149 dev_err(xdev->dev, "failed to register the dma device\n");
3150 goto error;
3151 }
3152
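	/* Register the DT translation function so clients can request channels */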
3153 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
3154 xdev);
3155 if (err < 0) {
3156 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
3157 dma_async_device_unregister(&xdev->common);
3158 goto error;
3159 }
3160
3161 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
3162 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
3163 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
3164 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
3165 else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
3166 dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
3167 else
3168 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
3169
3170 return 0;
3171
error:
	for (i = 0; i < xdev->dma_config->max_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);
3178
3179 return err;
3180}
3181
/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
3188static int xilinx_dma_remove(struct platform_device *pdev)
3189{
3190 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
3191 int i;
3192
3193 of_dma_controller_free(pdev->dev.of_node);
3194
3195 dma_async_device_unregister(&xdev->common);
3196
3197 for (i = 0; i < xdev->dma_config->max_channels; i++)
3198 if (xdev->chan[i])
3199 xilinx_dma_chan_remove(xdev->chan[i]);
3200
3201 xdma_disable_allclks(xdev);
3202
3203 return 0;
3204}
3205
3206static struct platform_driver xilinx_vdma_driver = {
3207 .driver = {
3208 .name = "xilinx-vdma",
3209 .of_match_table = xilinx_dma_of_ids,
3210 },
3211 .probe = xilinx_dma_probe,
3212 .remove = xilinx_dma_remove,
3213};
3214
3215module_platform_driver(xilinx_vdma_driver);
3216
3217MODULE_AUTHOR("Xilinx, Inc.");
3218MODULE_DESCRIPTION("Xilinx VDMA driver");
3219MODULE_LICENSE("GPL v2");
3220