// SPDX-License-Identifier: GPL-2.0
/*
 * TI K3 NAVSS Unified DMA (UDMA) dmaengine driver
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */
7#include <linux/kernel.h>
8#include <linux/delay.h>
9#include <linux/dmaengine.h>
10#include <linux/dma-mapping.h>
11#include <linux/dmapool.h>
12#include <linux/err.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/list.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/sys_soc.h>
20#include <linux/of.h>
21#include <linux/of_dma.h>
22#include <linux/of_device.h>
23#include <linux/of_irq.h>
24#include <linux/workqueue.h>
25#include <linux/completion.h>
26#include <linux/soc/ti/k3-ringacc.h>
27#include <linux/soc/ti/ti_sci_protocol.h>
28#include <linux/soc/ti/ti_sci_inta_msi.h>
29#include <linux/dma/ti-cppi5.h>
30
31#include "../virt-dma.h"
32#include "k3-udma.h"
33#include "k3-psil-priv.h"
34
35struct udma_static_tr {
36 u8 elsize;
37 u16 elcnt;
38 u16 bstcnt;
39};
40
41#define K3_UDMA_MAX_RFLOWS 1024
42#define K3_UDMA_DEFAULT_RING_SIZE 16
43
/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
45#define UDMA_RFLOW_SRCTAG_NONE 0
46#define UDMA_RFLOW_SRCTAG_CFG_TAG 1
47#define UDMA_RFLOW_SRCTAG_FLOW_ID 2
48#define UDMA_RFLOW_SRCTAG_SRC_TAG 4
49
50#define UDMA_RFLOW_DSTTAG_NONE 0
51#define UDMA_RFLOW_DSTTAG_CFG_TAG 1
52#define UDMA_RFLOW_DSTTAG_FLOW_ID 2
53#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
54#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
55
56struct udma_chan;
57
58enum udma_mmr {
59 MMR_GCFG = 0,
60 MMR_RCHANRT,
61 MMR_TCHANRT,
62 MMR_LAST,
63};
64
65static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
66
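
/*
 * A tchan owns a pair of rings: t_ring is the ring the driver pushes TX
 * descriptors to, tc_ring is the TX completion ring on which the UDMA
 * returns them (and which serves as the ring IRQ source for the channel).
 */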
67struct udma_tchan {
68 void __iomem *reg_rt;
69
70 int id;
71 struct k3_ring *t_ring;
72 struct k3_ring *tc_ring;
73};
74
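
/*
 * An rflow owns the free descriptor ring (fd_ring) the driver feeds RX
 * descriptors into and the receive ring (r_ring) on which completed
 * descriptors are returned.
 */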
75struct udma_rflow {
76 int id;
77 struct k3_ring *fd_ring;
78 struct k3_ring *r_ring;
79};
80
81struct udma_rchan {
82 void __iomem *reg_rt;
83
84 int id;
85};
86
87#define UDMA_FLAG_PDMA_ACC32 BIT(0)
88#define UDMA_FLAG_PDMA_BURST BIT(1)
89
90struct udma_match_data {
91 u32 psil_base;
92 bool enable_memcpy_support;
93 u32 flags;
94 u32 statictr_z_mask;
95};
96
97struct udma_soc_data {
98 u32 rchan_oes_offset;
99};
100
101struct udma_hwdesc {
102 size_t cppi5_desc_size;
103 void *cppi5_desc_vaddr;
104 dma_addr_t cppi5_desc_paddr;
105
	/* TR descriptor internal pointers */
107 void *tr_req_base;
108 struct cppi5_tr_resp_t *tr_resp_base;
109};
110
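
/*
 * Dummy descriptors (one for TR mode, one for packet mode) pointing to a
 * shared flush buffer. One of them is pushed to the fd_ring when an RX
 * channel is stopped without an active descriptor so that the teardown can
 * complete cleanly (see udma_push_to_ring() with idx == -1).
 */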
111struct udma_rx_flush {
112 struct udma_hwdesc hwdescs[2];
113
114 size_t buffer_size;
115 void *buffer_vaddr;
116 dma_addr_t buffer_paddr;
117};
118
119struct udma_dev {
120 struct dma_device ddev;
121 struct device *dev;
122 void __iomem *mmrs[MMR_LAST];
123 const struct udma_match_data *match_data;
124 const struct udma_soc_data *soc_data;
125
126 u8 tpl_levels;
127 u32 tpl_start_idx[3];
128
129 size_t desc_align;
130
131 struct udma_tisci_rm tisci_rm;
132
133 struct k3_ringacc *ringacc;
134
135 struct work_struct purge_work;
136 struct list_head desc_to_purge;
137 spinlock_t lock;
138
139 struct udma_rx_flush rx_flush;
140
141 int tchan_cnt;
142 int echan_cnt;
143 int rchan_cnt;
144 int rflow_cnt;
145 unsigned long *tchan_map;
146 unsigned long *rchan_map;
147 unsigned long *rflow_gp_map;
148 unsigned long *rflow_gp_map_allocated;
149 unsigned long *rflow_in_use;
150
151 struct udma_tchan *tchans;
152 struct udma_rchan *rchans;
153 struct udma_rflow *rflows;
154
155 struct udma_chan *channels;
156 u32 psil_base;
157 u32 atype;
158};
159
160struct udma_desc {
161 struct virt_dma_desc vd;
162
163 bool terminated;
164
165 enum dma_transfer_direction dir;
166
167 struct udma_static_tr static_tr;
168 u32 residue;
169
170 unsigned int sglen;
171 unsigned int desc_idx;
172 unsigned int tr_idx;
173
174 u32 metadata_size;
175 void *metadata;
176
177 unsigned int hwdesc_count;
178 struct udma_hwdesc hwdesc[];
179};
180
181enum udma_chan_state {
182 UDMA_CHAN_IS_IDLE = 0,
183 UDMA_CHAN_IS_ACTIVE,
184 UDMA_CHAN_IS_TERMINATING,
185};
186
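
/*
 * Book-keeping for the delayed work used to poll a MEM_TO_DEV channel until
 * the PDMA peer has drained all data (see udma_check_tx_completion()).
 */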
187struct udma_tx_drain {
188 struct delayed_work work;
189 ktime_t tstamp;
190 u32 residue;
191};
192
193struct udma_chan_config {
194 bool pkt_mode;
195 bool needs_epib;
196 u32 psd_size;
197 u32 metadata_size;
198 u32 hdesc_size;
199 bool notdpkt;
200 int remote_thread_id;
201 u32 atype;
202 u32 src_thread;
203 u32 dst_thread;
204 enum psil_endpoint_type ep_type;
205 bool enable_acc32;
206 bool enable_burst;
207 enum udma_tp_level channel_tpl;
208
209 enum dma_transfer_direction dir;
210};
211
212struct udma_chan {
213 struct virt_dma_chan vc;
214 struct dma_slave_config cfg;
215 struct udma_dev *ud;
216 struct udma_desc *desc;
217 struct udma_desc *terminated_desc;
218 struct udma_static_tr static_tr;
219 char *name;
220
221 struct udma_tchan *tchan;
222 struct udma_rchan *rchan;
223 struct udma_rflow *rflow;
224
225 bool psil_paired;
226
227 int irq_num_ring;
228 int irq_num_udma;
229
230 bool cyclic;
231 bool paused;
232
233 enum udma_chan_state state;
234 struct completion teardown_completed;
235
236 struct udma_tx_drain tx_drain;
237
	u32 bcnt; /* number of bytes completed since the start of the channel */

	/* Channel configuration parameters */
	struct udma_chan_config config;

	/* dmapool for packet mode descriptors */
244 bool use_dma_pool;
245 struct dma_pool *hdesc_pool;
246
247 u32 id;
248};
249
250static inline struct udma_dev *to_udma_dev(struct dma_device *d)
251{
252 return container_of(d, struct udma_dev, ddev);
253}
254
255static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
256{
257 return container_of(c, struct udma_chan, vc.chan);
258}
259
260static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
261{
262 return container_of(t, struct udma_desc, vd.tx);
263}
264
265
266static inline u32 udma_read(void __iomem *base, int reg)
267{
268 return readl(base + reg);
269}
270
271static inline void udma_write(void __iomem *base, int reg, u32 val)
272{
273 writel(val, base + reg);
274}
275
276static inline void udma_update_bits(void __iomem *base, int reg,
277 u32 mask, u32 val)
278{
279 u32 tmp, orig;
280
281 orig = readl(base + reg);
282 tmp = orig & ~mask;
283 tmp |= (val & mask);
284
285 if (tmp != orig)
286 writel(tmp, base + reg);
287}
288
289
290static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
291{
292 if (!uc->tchan)
293 return 0;
294 return udma_read(uc->tchan->reg_rt, reg);
295}
296
297static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
298{
299 if (!uc->tchan)
300 return;
301 udma_write(uc->tchan->reg_rt, reg, val);
302}
303
304static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
305 u32 mask, u32 val)
306{
307 if (!uc->tchan)
308 return;
309 udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
310}
311
312
313static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
314{
315 if (!uc->rchan)
316 return 0;
317 return udma_read(uc->rchan->reg_rt, reg);
318}
319
320static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
321{
322 if (!uc->rchan)
323 return;
324 udma_write(uc->rchan->reg_rt, reg, val);
325}
326
327static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
328 u32 mask, u32 val)
329{
330 if (!uc->rchan)
331 return;
332 udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
333}
334
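
/*
 * PSI-L thread pairing and unpairing is done via TISCI. Destination thread
 * IDs carry the K3_PSIL_DST_THREAD_ID_OFFSET bit to distinguish them from
 * source threads.
 */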
335static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
336{
337 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
338
339 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
340 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
341 tisci_rm->tisci_navss_dev_id,
342 src_thread, dst_thread);
343}
344
345static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
346 u32 dst_thread)
347{
348 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
349
350 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
351 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
352 tisci_rm->tisci_navss_dev_id,
353 src_thread, dst_thread);
354}
355
356static void udma_reset_uchan(struct udma_chan *uc)
357{
358 memset(&uc->config, 0, sizeof(uc->config));
359 uc->config.remote_thread_id = -1;
360 uc->state = UDMA_CHAN_IS_IDLE;
361}
362
363static void udma_dump_chan_stdata(struct udma_chan *uc)
364{
365 struct device *dev = uc->ud->dev;
366 u32 offset;
367 int i;
368
369 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
370 dev_dbg(dev, "TCHAN State data:\n");
371 for (i = 0; i < 32; i++) {
372 offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
373 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
374 udma_tchanrt_read(uc, offset));
375 }
376 }
377
378 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
379 dev_dbg(dev, "RCHAN State data:\n");
380 for (i = 0; i < 32; i++) {
381 offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
382 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
383 udma_rchanrt_read(uc, offset));
384 }
385 }
386}
387
388static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
389 int idx)
390{
391 return d->hwdesc[idx].cppi5_desc_paddr;
392}
393
394static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
395{
396 return d->hwdesc[idx].cppi5_desc_vaddr;
397}
398
399static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
400 dma_addr_t paddr)
401{
402 struct udma_desc *d = uc->terminated_desc;
403
404 if (d) {
405 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
406 d->desc_idx);
407
408 if (desc_paddr != paddr)
409 d = NULL;
410 }
411
412 if (!d) {
413 d = uc->desc;
414 if (d) {
415 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
416 d->desc_idx);
417
418 if (desc_paddr != paddr)
419 d = NULL;
420 }
421 }
422
423 return d;
424}
425
426static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
427{
428 if (uc->use_dma_pool) {
429 int i;
430
431 for (i = 0; i < d->hwdesc_count; i++) {
432 if (!d->hwdesc[i].cppi5_desc_vaddr)
433 continue;
434
435 dma_pool_free(uc->hdesc_pool,
436 d->hwdesc[i].cppi5_desc_vaddr,
437 d->hwdesc[i].cppi5_desc_paddr);
438
439 d->hwdesc[i].cppi5_desc_vaddr = NULL;
440 }
441 } else if (d->hwdesc[0].cppi5_desc_vaddr) {
442 struct udma_dev *ud = uc->ud;
443
444 dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
445 d->hwdesc[0].cppi5_desc_vaddr,
446 d->hwdesc[0].cppi5_desc_paddr);
447
448 d->hwdesc[0].cppi5_desc_vaddr = NULL;
449 }
450}
451
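
/*
 * Descriptors that are not backed by the channel's dma_pool are freed with
 * dma_free_coherent(), which must not be called from the atomic vchan
 * callback context. udma_desc_free() therefore queues them on desc_to_purge
 * and this work function releases them later in process context.
 */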
452static void udma_purge_desc_work(struct work_struct *work)
453{
454 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
455 struct virt_dma_desc *vd, *_vd;
456 unsigned long flags;
457 LIST_HEAD(head);
458
459 spin_lock_irqsave(&ud->lock, flags);
460 list_splice_tail_init(&ud->desc_to_purge, &head);
461 spin_unlock_irqrestore(&ud->lock, flags);
462
463 list_for_each_entry_safe(vd, _vd, &head, node) {
464 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
465 struct udma_desc *d = to_udma_desc(&vd->tx);
466
467 udma_free_hwdesc(uc, d);
468 list_del(&vd->node);
469 kfree(d);
470 }
471
472
473 if (!list_empty(&ud->desc_to_purge))
474 schedule_work(&ud->purge_work);
475}
476
477static void udma_desc_free(struct virt_dma_desc *vd)
478{
479 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
480 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
481 struct udma_desc *d = to_udma_desc(&vd->tx);
482 unsigned long flags;
483
484 if (uc->terminated_desc == d)
485 uc->terminated_desc = NULL;
486
487 if (uc->use_dma_pool) {
488 udma_free_hwdesc(uc, d);
489 kfree(d);
490 return;
491 }
492
493 spin_lock_irqsave(&ud->lock, flags);
494 list_add_tail(&vd->node, &ud->desc_to_purge);
495 spin_unlock_irqrestore(&ud->lock, flags);
496
497 schedule_work(&ud->purge_work);
498}
499
500static bool udma_is_chan_running(struct udma_chan *uc)
501{
502 u32 trt_ctl = 0;
503 u32 rrt_ctl = 0;
504
505 if (uc->tchan)
506 trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
507 if (uc->rchan)
508 rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
509
510 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
511 return true;
512
513 return false;
514}
515
516static bool udma_is_chan_paused(struct udma_chan *uc)
517{
518 u32 val, pause_mask;
519
520 switch (uc->config.dir) {
521 case DMA_DEV_TO_MEM:
522 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
523 pause_mask = UDMA_PEER_RT_EN_PAUSE;
524 break;
525 case DMA_MEM_TO_DEV:
526 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
527 pause_mask = UDMA_PEER_RT_EN_PAUSE;
528 break;
529 case DMA_MEM_TO_MEM:
530 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
531 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
532 break;
533 default:
534 return false;
535 }
536
537 if (val & pause_mask)
538 return true;
539
540 return false;
541}
542
543static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
544{
545 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
546}
547
548static int udma_push_to_ring(struct udma_chan *uc, int idx)
549{
550 struct udma_desc *d = uc->desc;
551 struct k3_ring *ring = NULL;
552 dma_addr_t paddr;
553
554 switch (uc->config.dir) {
555 case DMA_DEV_TO_MEM:
556 ring = uc->rflow->fd_ring;
557 break;
558 case DMA_MEM_TO_DEV:
559 case DMA_MEM_TO_MEM:
560 ring = uc->tchan->t_ring;
561 break;
562 default:
563 return -EINVAL;
564 }
565
	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
	if (idx == -1) {
		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
	} else {
		paddr = udma_curr_cppi5_desc_paddr(d, idx);

		wmb(); /* Ensure that writes are not moved over this point */
	}
574
575 return k3_ringacc_ring_push(ring, &paddr);
576}
577
578static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
579{
580 if (uc->config.dir != DMA_DEV_TO_MEM)
581 return false;
582
583 if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
584 return true;
585
586 return false;
587}
588
589static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
590{
591 struct k3_ring *ring = NULL;
592 int ret;
593
594 switch (uc->config.dir) {
595 case DMA_DEV_TO_MEM:
596 ring = uc->rflow->r_ring;
597 break;
598 case DMA_MEM_TO_DEV:
599 case DMA_MEM_TO_MEM:
600 ring = uc->tchan->tc_ring;
601 break;
602 default:
603 return -ENOENT;
604 }
605
606 ret = k3_ringacc_ring_pop(ring, addr);
607 if (ret)
608 return ret;
609
	rmb(); /* Ensure that reads are not moved before this point */

	/* Teardown completion */
	if (cppi5_desc_is_tdcm(*addr))
		return 0;

	/* Check for flush descriptor */
	if (udma_desc_is_rx_flush(uc, *addr))
		return -ENOENT;
619
620 return 0;
621}
622
623static void udma_reset_rings(struct udma_chan *uc)
624{
625 struct k3_ring *ring1 = NULL;
626 struct k3_ring *ring2 = NULL;
627
628 switch (uc->config.dir) {
629 case DMA_DEV_TO_MEM:
630 if (uc->rchan) {
631 ring1 = uc->rflow->fd_ring;
632 ring2 = uc->rflow->r_ring;
633 }
634 break;
635 case DMA_MEM_TO_DEV:
636 case DMA_MEM_TO_MEM:
637 if (uc->tchan) {
638 ring1 = uc->tchan->t_ring;
639 ring2 = uc->tchan->tc_ring;
640 }
641 break;
642 default:
643 break;
644 }
645
646 if (ring1)
647 k3_ringacc_ring_reset_dma(ring1,
648 k3_ringacc_ring_get_occ(ring1));
649 if (ring2)
650 k3_ringacc_ring_reset(ring2);
651
	/* make sure we are not leaking memory by stalled descriptor */
653 if (uc->terminated_desc) {
654 udma_desc_free(&uc->terminated_desc->vd);
655 uc->terminated_desc = NULL;
656 }
657}
658
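
/*
 * The channel RT byte/packet counters decrement by the value written to
 * them, so reading a counter and writing the same value back clears it.
 */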
659static void udma_reset_counters(struct udma_chan *uc)
660{
661 u32 val;
662
663 if (uc->tchan) {
664 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
665 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
666
667 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
668 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
669
670 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
671 udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
672
673 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
674 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
675 }
676
677 if (uc->rchan) {
678 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
679 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
680
681 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
682 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
683
684 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
685 udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
686
687 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
688 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
689 }
690
691 uc->bcnt = 0;
692}
693
694static int udma_reset_chan(struct udma_chan *uc, bool hard)
695{
696 switch (uc->config.dir) {
697 case DMA_DEV_TO_MEM:
698 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
699 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
700 break;
701 case DMA_MEM_TO_DEV:
702 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
703 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
704 break;
705 case DMA_MEM_TO_MEM:
706 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
707 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
708 break;
709 default:
710 return -EINVAL;
711 }
712
	/* Reset all counters */
714 udma_reset_counters(uc);
715
	/* Hard reset: re-initialize the channel to reset */
717 if (hard) {
718 struct udma_chan_config ucc_backup;
719 int ret;
720
721 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
722 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
723
		/* restore the channel configuration */
725 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
726 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
727 if (ret)
728 return ret;
729
		/*
		 * Setting forced teardown after the forced reset helps
		 * recovering the rchan.
		 */
734 if (uc->config.dir == DMA_DEV_TO_MEM)
735 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
736 UDMA_CHAN_RT_CTL_EN |
737 UDMA_CHAN_RT_CTL_TDOWN |
738 UDMA_CHAN_RT_CTL_FTDOWN);
739 }
740 uc->state = UDMA_CHAN_IS_IDLE;
741
742 return 0;
743}
744
745static void udma_start_desc(struct udma_chan *uc)
746{
747 struct udma_chan_config *ucc = &uc->config;
748
749 if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
750 int i;
751
		/* Push all descriptors to ring for packet mode cyclic or RX */
753 for (i = 0; i < uc->desc->sglen; i++)
754 udma_push_to_ring(uc, i);
755 } else {
756 udma_push_to_ring(uc, 0);
757 }
758}
759
760static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
761{
	/* Only PDMA channels are affected */
763 if (uc->config.ep_type == PSIL_EP_NATIVE)
764 return false;
765
	/* Check if the staticTR configuration has changed for this channel */
767 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
768 return true;
769
770 return false;
771}
772
773static int udma_start(struct udma_chan *uc)
774{
775 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
776
777 if (!vd) {
778 uc->desc = NULL;
779 return -ENOENT;
780 }
781
782 list_del(&vd->node);
783
784 uc->desc = to_udma_desc(&vd->tx);
785
	/* Channel is already running and does not need reconfiguration */
787 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
788 udma_start_desc(uc);
789 goto out;
790 }
791
	/* Make sure that we clear the teardown bit, if it is set */
793 udma_reset_chan(uc, false);
794
	/* Push descriptors before we start the channel */
796 udma_start_desc(uc);
797
798 switch (uc->desc->dir) {
799 case DMA_DEV_TO_MEM:
		/* Config remote TR */
801 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
802 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
803 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
804 const struct udma_match_data *match_data =
805 uc->ud->match_data;
806
807 if (uc->config.enable_acc32)
808 val |= PDMA_STATIC_TR_XY_ACC32;
809 if (uc->config.enable_burst)
810 val |= PDMA_STATIC_TR_XY_BURST;
811
812 udma_rchanrt_write(uc,
813 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
814 val);
815
816 udma_rchanrt_write(uc,
817 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
818 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
819 match_data->statictr_z_mask));
820
			/* save the current staticTR configuration */
822 memcpy(&uc->static_tr, &uc->desc->static_tr,
823 sizeof(uc->static_tr));
824 }
825
826 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
827 UDMA_CHAN_RT_CTL_EN);
828
		/* Enable remote */
830 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
831 UDMA_PEER_RT_EN_ENABLE);
832
833 break;
834 case DMA_MEM_TO_DEV:
		/* Config remote TR */
836 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
837 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
838 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
839
840 if (uc->config.enable_acc32)
841 val |= PDMA_STATIC_TR_XY_ACC32;
842 if (uc->config.enable_burst)
843 val |= PDMA_STATIC_TR_XY_BURST;
844
845 udma_tchanrt_write(uc,
846 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
847 val);
848
			/* save the current staticTR configuration */
850 memcpy(&uc->static_tr, &uc->desc->static_tr,
851 sizeof(uc->static_tr));
852 }
853
		/* Enable remote */
855 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
856 UDMA_PEER_RT_EN_ENABLE);
857
858 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
859 UDMA_CHAN_RT_CTL_EN);
860
861 break;
862 case DMA_MEM_TO_MEM:
863 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
864 UDMA_CHAN_RT_CTL_EN);
865 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
866 UDMA_CHAN_RT_CTL_EN);
867
868 break;
869 default:
870 return -EINVAL;
871 }
872
873 uc->state = UDMA_CHAN_IS_ACTIVE;
874out:
875
876 return 0;
877}
878
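
/*
 * Request a graceful teardown of the channel: via the peer RT enable
 * register for slave directions or via the channel RT control register for
 * MEM_TO_MEM. Completion is signalled by a TDCM descriptor returned on the
 * completion ring and handled in udma_ring_irq_handler().
 */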
879static int udma_stop(struct udma_chan *uc)
880{
881 enum udma_chan_state old_state = uc->state;
882
883 uc->state = UDMA_CHAN_IS_TERMINATING;
884 reinit_completion(&uc->teardown_completed);
885
886 switch (uc->config.dir) {
887 case DMA_DEV_TO_MEM:
888 if (!uc->cyclic && !uc->desc)
889 udma_push_to_ring(uc, -1);
890
891 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
892 UDMA_PEER_RT_EN_ENABLE |
893 UDMA_PEER_RT_EN_TEARDOWN);
894 break;
895 case DMA_MEM_TO_DEV:
896 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
897 UDMA_PEER_RT_EN_ENABLE |
898 UDMA_PEER_RT_EN_FLUSH);
899 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
900 UDMA_CHAN_RT_CTL_EN |
901 UDMA_CHAN_RT_CTL_TDOWN);
902 break;
903 case DMA_MEM_TO_MEM:
904 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
905 UDMA_CHAN_RT_CTL_EN |
906 UDMA_CHAN_RT_CTL_TDOWN);
907 break;
908 default:
909 uc->state = old_state;
910 complete_all(&uc->teardown_completed);
911 return -EINVAL;
912 }
913
914 return 0;
915}
916
917static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
918{
919 struct udma_desc *d = uc->desc;
920 struct cppi5_host_desc_t *h_desc;
921
922 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
923 cppi5_hdesc_reset_to_original(h_desc);
924 udma_push_to_ring(uc, d->desc_idx);
925 d->desc_idx = (d->desc_idx + 1) % d->sglen;
926}
927
928static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
929{
930 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
931
932 memcpy(d->metadata, h_desc->epib, d->metadata_size);
933}
934
935static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
936{
937 u32 peer_bcnt, bcnt;
938
	/* Only TX towards PDMA is affected */
940 if (uc->config.ep_type == PSIL_EP_NATIVE ||
941 uc->config.dir != DMA_MEM_TO_DEV)
942 return true;
943
944 peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
945 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
946
	/* Transfer is incomplete, store current residue and time stamp */
948 if (peer_bcnt < bcnt) {
949 uc->tx_drain.residue = bcnt - peer_bcnt;
950 uc->tx_drain.tstamp = ktime_get();
951 return false;
952 }
953
954 return true;
955}
956
957static void udma_check_tx_completion(struct work_struct *work)
958{
959 struct udma_chan *uc = container_of(work, typeof(*uc),
960 tx_drain.work.work);
961 bool desc_done = true;
962 u32 residue_diff;
963 ktime_t time_diff;
964 unsigned long delay;
965
966 while (1) {
967 if (uc->desc) {
			/* Get previous residue and time stamp */
969 residue_diff = uc->tx_drain.residue;
970 time_diff = uc->tx_drain.tstamp;
			/*
			 * Get current residue and time stamp or see if the
			 * transfer is complete
			 */
975 desc_done = udma_is_desc_really_done(uc, uc->desc);
976 }
977
978 if (!desc_done) {
			/*
			 * Find the time delta and residue delta w.r.t
			 * previous poll
			 */
983 time_diff = ktime_sub(uc->tx_drain.tstamp,
984 time_diff) + 1;
985 residue_diff -= uc->tx_drain.residue;
986 if (residue_diff) {
				/*
				 * Try to guess when we should check
				 * next time by calculating rate at
				 * which we are draining at the peer
				 * side
				 */
993 delay = (time_diff / residue_diff) *
994 uc->tx_drain.residue;
995 } else {
				/* No progress, check again in 1 second */
997 schedule_delayed_work(&uc->tx_drain.work, HZ);
998 break;
999 }
1000
1001 usleep_range(ktime_to_us(delay),
1002 ktime_to_us(delay) + 10);
1003 continue;
1004 }
1005
1006 if (uc->desc) {
1007 struct udma_desc *d = uc->desc;
1008
1009 uc->bcnt += d->residue;
1010 udma_start(uc);
1011 vchan_cookie_complete(&d->vd);
1012 break;
1013 }
1014
1015 break;
1016 }
1017}
1018
1019static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1020{
1021 struct udma_chan *uc = data;
1022 struct udma_desc *d;
1023 unsigned long flags;
1024 dma_addr_t paddr = 0;
1025
1026 if (udma_pop_from_ring(uc, &paddr) || !paddr)
1027 return IRQ_HANDLED;
1028
1029 spin_lock_irqsave(&uc->vc.lock, flags);
1030
	/* Teardown completion message */
1032 if (cppi5_desc_is_tdcm(paddr)) {
1033 complete_all(&uc->teardown_completed);
1034
1035 if (uc->terminated_desc) {
1036 udma_desc_free(&uc->terminated_desc->vd);
1037 uc->terminated_desc = NULL;
1038 }
1039
1040 if (!uc->desc)
1041 udma_start(uc);
1042
1043 goto out;
1044 }
1045
1046 d = udma_udma_desc_from_paddr(uc, paddr);
1047
1048 if (d) {
1049 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1050 d->desc_idx);
1051 if (desc_paddr != paddr) {
1052 dev_err(uc->ud->dev, "not matching descriptors!\n");
1053 goto out;
1054 }
1055
1056 if (d == uc->desc) {
			/* active descriptor */
1058 if (uc->cyclic) {
1059 udma_cyclic_packet_elapsed(uc);
1060 vchan_cyclic_callback(&d->vd);
1061 } else {
1062 if (udma_is_desc_really_done(uc, d)) {
1063 uc->bcnt += d->residue;
1064 udma_start(uc);
1065 vchan_cookie_complete(&d->vd);
1066 } else {
1067 schedule_delayed_work(&uc->tx_drain.work,
1068 0);
1069 }
1070 }
1071 } else {
			/*
			 * terminated descriptor, mark the descriptor as
			 * completed to update the channel's cookie marker
			 */
1076 dma_cookie_complete(&d->vd.tx);
1077 }
1078 }
1079out:
1080 spin_unlock_irqrestore(&uc->vc.lock, flags);
1081
1082 return IRQ_HANDLED;
1083}
1084
1085static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1086{
1087 struct udma_chan *uc = data;
1088 struct udma_desc *d;
1089 unsigned long flags;
1090
1091 spin_lock_irqsave(&uc->vc.lock, flags);
1092 d = uc->desc;
1093 if (d) {
1094 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1095
1096 if (uc->cyclic) {
1097 vchan_cyclic_callback(&d->vd);
1098 } else {
1099
1100 uc->bcnt += d->residue;
1101 udma_start(uc);
1102 vchan_cookie_complete(&d->vd);
1103 }
1104 }
1105
1106 spin_unlock_irqrestore(&uc->vc.lock, flags);
1107
1108 return IRQ_HANDLED;
1109}
1110
/**
 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
 * @ud: UDMA device
 * @from: Start the search from this flow id number
 * @cnt: Number of consecutive flow ids to allocate
 *
 * Allocate a range of RX flow ids for future use. Those flows can be
 * requested only after this range has been allocated.
 *
 * Return: -EINVAL if @from is below the rchan range or the requested range
 *	   does not fit into the available flow ids,
 *	   -ENOMEM if no free range of @cnt flows could be found,
 *	   -EEXIST if a specific @from was requested but is not free,
 *	   or the start index of the allocated range on success.
 */
1127static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1128{
1129 int start, tmp_from;
1130 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1131
1132 tmp_from = from;
1133 if (tmp_from < 0)
1134 tmp_from = ud->rchan_cnt;
1135
1136 if (tmp_from < ud->rchan_cnt)
1137 return -EINVAL;
1138
1139 if (tmp_from + cnt > ud->rflow_cnt)
1140 return -EINVAL;
1141
1142 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1143 ud->rflow_cnt);
1144
1145 start = bitmap_find_next_zero_area(tmp,
1146 ud->rflow_cnt,
1147 tmp_from, cnt, 0);
1148 if (start >= ud->rflow_cnt)
1149 return -ENOMEM;
1150
1151 if (from >= 0 && start != from)
1152 return -EEXIST;
1153
1154 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1155 return start;
1156}
1157
1158static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1159{
1160 if (from < ud->rchan_cnt)
1161 return -EINVAL;
1162 if (from + cnt > ud->rflow_cnt)
1163 return -EINVAL;
1164
1165 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1166 return 0;
1167}
1168
1169static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1170{
	/*
	 * An rflow can be requested by explicit ID for any flow range.
	 * Reject out of range ids, flows that are already in use and flows
	 * which are tracked in neither the GP flow map nor the allocated
	 * GP flow map.
	 */
1178 if (id < 0 || id >= ud->rflow_cnt)
1179 return ERR_PTR(-ENOENT);
1180
1181 if (test_bit(id, ud->rflow_in_use))
1182 return ERR_PTR(-ENOENT);
1183
1184
1185 if (!test_bit(id, ud->rflow_gp_map) &&
1186 !test_bit(id, ud->rflow_gp_map_allocated))
1187 return ERR_PTR(-EINVAL);
1188
1189 dev_dbg(ud->dev, "get rflow%d\n", id);
1190 set_bit(id, ud->rflow_in_use);
1191 return &ud->rflows[id];
1192}
1193
1194static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1195{
1196 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1197 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1198 return;
1199 }
1200
1201 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1202 clear_bit(rflow->id, ud->rflow_in_use);
1203}
1204
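
/*
 * UDMA_RESERVE_RESOURCE() expands to __udma_reserve_tchan() and
 * __udma_reserve_rchan(): either a specific channel id is requested
 * (id >= 0) or the first free channel is picked, starting the search at the
 * requested throughput level (tpl).
 */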
1205#define UDMA_RESERVE_RESOURCE(res) \
1206static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1207 enum udma_tp_level tpl, \
1208 int id) \
1209{ \
1210 if (id >= 0) { \
1211 if (test_bit(id, ud->res##_map)) { \
			dev_err(ud->dev, "%s%d is in use\n", #res, id); \
1213 return ERR_PTR(-ENOENT); \
1214 } \
1215 } else { \
1216 int start; \
1217 \
1218 if (tpl >= ud->tpl_levels) \
1219 tpl = ud->tpl_levels - 1; \
1220 \
1221 start = ud->tpl_start_idx[tpl]; \
1222 \
1223 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1224 start); \
1225 if (id == ud->res##_cnt) { \
1226 return ERR_PTR(-ENOENT); \
1227 } \
1228 } \
1229 \
1230 set_bit(id, ud->res##_map); \
1231 return &ud->res##s[id]; \
1232}
1233
1234UDMA_RESERVE_RESOURCE(tchan);
1235UDMA_RESERVE_RESOURCE(rchan);
1236
1237static int udma_get_tchan(struct udma_chan *uc)
1238{
1239 struct udma_dev *ud = uc->ud;
1240
1241 if (uc->tchan) {
1242 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1243 uc->id, uc->tchan->id);
1244 return 0;
1245 }
1246
1247 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
1248
1249 return PTR_ERR_OR_ZERO(uc->tchan);
1250}
1251
1252static int udma_get_rchan(struct udma_chan *uc)
1253{
1254 struct udma_dev *ud = uc->ud;
1255
1256 if (uc->rchan) {
1257 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1258 uc->id, uc->rchan->id);
1259 return 0;
1260 }
1261
1262 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
1263
1264 return PTR_ERR_OR_ZERO(uc->rchan);
1265}
1266
1267static int udma_get_chan_pair(struct udma_chan *uc)
1268{
1269 struct udma_dev *ud = uc->ud;
1270 int chan_id, end;
1271
1272 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1273 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1274 uc->id, uc->tchan->id);
1275 return 0;
1276 }
1277
1278 if (uc->tchan) {
1279 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1280 uc->id, uc->tchan->id);
1281 return -EBUSY;
1282 } else if (uc->rchan) {
1283 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1284 uc->id, uc->rchan->id);
1285 return -EBUSY;
1286 }
1287
1288
1289 end = min(ud->tchan_cnt, ud->rchan_cnt);
1290
1291 chan_id = ud->tpl_start_idx[ud->tpl_levels - 1];
1292 for (; chan_id < end; chan_id++) {
1293 if (!test_bit(chan_id, ud->tchan_map) &&
1294 !test_bit(chan_id, ud->rchan_map))
1295 break;
1296 }
1297
1298 if (chan_id == end)
1299 return -ENOENT;
1300
1301 set_bit(chan_id, ud->tchan_map);
1302 set_bit(chan_id, ud->rchan_map);
1303 uc->tchan = &ud->tchans[chan_id];
1304 uc->rchan = &ud->rchans[chan_id];
1305
1306 return 0;
1307}
1308
1309static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1310{
1311 struct udma_dev *ud = uc->ud;
1312
1313 if (!uc->rchan) {
1314 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1315 return -EINVAL;
1316 }
1317
1318 if (uc->rflow) {
1319 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1320 uc->id, uc->rflow->id);
1321 return 0;
1322 }
1323
1324 uc->rflow = __udma_get_rflow(ud, flow_id);
1325
1326 return PTR_ERR_OR_ZERO(uc->rflow);
1327}
1328
1329static void udma_put_rchan(struct udma_chan *uc)
1330{
1331 struct udma_dev *ud = uc->ud;
1332
1333 if (uc->rchan) {
1334 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1335 uc->rchan->id);
1336 clear_bit(uc->rchan->id, ud->rchan_map);
1337 uc->rchan = NULL;
1338 }
1339}
1340
1341static void udma_put_tchan(struct udma_chan *uc)
1342{
1343 struct udma_dev *ud = uc->ud;
1344
1345 if (uc->tchan) {
1346 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1347 uc->tchan->id);
1348 clear_bit(uc->tchan->id, ud->tchan_map);
1349 uc->tchan = NULL;
1350 }
1351}
1352
1353static void udma_put_rflow(struct udma_chan *uc)
1354{
1355 struct udma_dev *ud = uc->ud;
1356
1357 if (uc->rflow) {
1358 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1359 uc->rflow->id);
1360 __udma_put_rflow(ud, uc->rflow);
1361 uc->rflow = NULL;
1362 }
1363}
1364
1365static void udma_free_tx_resources(struct udma_chan *uc)
1366{
1367 if (!uc->tchan)
1368 return;
1369
1370 k3_ringacc_ring_free(uc->tchan->t_ring);
1371 k3_ringacc_ring_free(uc->tchan->tc_ring);
1372 uc->tchan->t_ring = NULL;
1373 uc->tchan->tc_ring = NULL;
1374
1375 udma_put_tchan(uc);
1376}
1377
1378static int udma_alloc_tx_resources(struct udma_chan *uc)
1379{
1380 struct k3_ring_cfg ring_cfg;
1381 struct udma_dev *ud = uc->ud;
1382 int ret;
1383
1384 ret = udma_get_tchan(uc);
1385 if (ret)
1386 return ret;
1387
1388 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
1389 &uc->tchan->t_ring,
1390 &uc->tchan->tc_ring);
1391 if (ret) {
1392 ret = -EBUSY;
1393 goto err_ring;
1394 }
1395
1396 memset(&ring_cfg, 0, sizeof(ring_cfg));
1397 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1398 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1399 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1400
1401 ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
1402 ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
1403
1404 if (ret)
1405 goto err_ringcfg;
1406
1407 return 0;
1408
1409err_ringcfg:
1410 k3_ringacc_ring_free(uc->tchan->tc_ring);
1411 uc->tchan->tc_ring = NULL;
1412 k3_ringacc_ring_free(uc->tchan->t_ring);
1413 uc->tchan->t_ring = NULL;
1414err_ring:
1415 udma_put_tchan(uc);
1416
1417 return ret;
1418}
1419
1420static void udma_free_rx_resources(struct udma_chan *uc)
1421{
1422 if (!uc->rchan)
1423 return;
1424
1425 if (uc->rflow) {
1426 struct udma_rflow *rflow = uc->rflow;
1427
1428 k3_ringacc_ring_free(rflow->fd_ring);
1429 k3_ringacc_ring_free(rflow->r_ring);
1430 rflow->fd_ring = NULL;
1431 rflow->r_ring = NULL;
1432
1433 udma_put_rflow(uc);
1434 }
1435
1436 udma_put_rchan(uc);
1437}
1438
1439static int udma_alloc_rx_resources(struct udma_chan *uc)
1440{
1441 struct udma_dev *ud = uc->ud;
1442 struct k3_ring_cfg ring_cfg;
1443 struct udma_rflow *rflow;
1444 int fd_ring_id;
1445 int ret;
1446
1447 ret = udma_get_rchan(uc);
1448 if (ret)
1449 return ret;
1450
	/* For MEM_TO_MEM we don't need rflow or rings */
1452 if (uc->config.dir == DMA_MEM_TO_MEM)
1453 return 0;
1454
1455 ret = udma_get_rflow(uc, uc->rchan->id);
1456 if (ret) {
1457 ret = -EBUSY;
1458 goto err_rflow;
1459 }
1460
1461 rflow = uc->rflow;
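	/*
	 * The free descriptor ring id is chosen to follow the tchan and echan
	 * rings in the ringacc ring numbering, hence the offset below.
	 */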
1462 fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
1463 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1464 &rflow->fd_ring, &rflow->r_ring);
1465 if (ret) {
1466 ret = -EBUSY;
1467 goto err_ring;
1468 }
1469
1470 memset(&ring_cfg, 0, sizeof(ring_cfg));
1471
1472 if (uc->config.pkt_mode)
1473 ring_cfg.size = SG_MAX_SEGMENTS;
1474 else
1475 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1476
1477 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1478 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1479
1480 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1481 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1482 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1483
1484 if (ret)
1485 goto err_ringcfg;
1486
1487 return 0;
1488
1489err_ringcfg:
1490 k3_ringacc_ring_free(rflow->r_ring);
1491 rflow->r_ring = NULL;
1492 k3_ringacc_ring_free(rflow->fd_ring);
1493 rflow->fd_ring = NULL;
1494err_ring:
1495 udma_put_rflow(uc);
1496err_rflow:
1497 udma_put_rchan(uc);
1498
1499 return ret;
1500}
1501
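
/*
 * Masks telling TISCI which fields of the TX/RX channel configuration
 * requests below are valid and should be applied.
 */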
1502#define TISCI_TCHAN_VALID_PARAMS ( \
1503 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1504 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1505 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1506 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1507 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1508 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1509 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1510 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1511
1512#define TISCI_RCHAN_VALID_PARAMS ( \
1513 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1514 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1515 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1516 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1517 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1518 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1519 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1520 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1521 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1522
1523static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1524{
1525 struct udma_dev *ud = uc->ud;
1526 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1527 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1528 struct udma_tchan *tchan = uc->tchan;
1529 struct udma_rchan *rchan = uc->rchan;
1530 int ret = 0;
1531
	/* Non synchronized - mem to mem type of transfer */
1533 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1534 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1535 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1536
1537 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1538 req_tx.nav_id = tisci_rm->tisci_dev_id;
1539 req_tx.index = tchan->id;
1540 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1541 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1542 req_tx.txcq_qnum = tc_ring;
1543 req_tx.tx_atype = ud->atype;
1544
1545 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1546 if (ret) {
1547 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1548 return ret;
1549 }
1550
1551 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1552 req_rx.nav_id = tisci_rm->tisci_dev_id;
1553 req_rx.index = rchan->id;
1554 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1555 req_rx.rxcq_qnum = tc_ring;
1556 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1557 req_rx.rx_atype = ud->atype;
1558
1559 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1560 if (ret)
1561 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1562
1563 return ret;
1564}
1565
1566static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1567{
1568 struct udma_dev *ud = uc->ud;
1569 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1570 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1571 struct udma_tchan *tchan = uc->tchan;
1572 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1573 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1574 u32 mode, fetch_size;
1575 int ret = 0;
1576
1577 if (uc->config.pkt_mode) {
1578 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1579 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1580 uc->config.psd_size, 0);
1581 } else {
1582 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1583 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1584 }
1585
1586 req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
1587 req_tx.nav_id = tisci_rm->tisci_dev_id;
1588 req_tx.index = tchan->id;
1589 req_tx.tx_chan_type = mode;
1590 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1591 req_tx.tx_fetch_size = fetch_size >> 2;
1592 req_tx.txcq_qnum = tc_ring;
1593 req_tx.tx_atype = uc->config.atype;
1594
1595 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1596 if (ret)
1597 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1598
1599 return ret;
1600}
1601
1602static int udma_tisci_rx_channel_config(struct udma_chan *uc)
1603{
1604 struct udma_dev *ud = uc->ud;
1605 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1606 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1607 struct udma_rchan *rchan = uc->rchan;
1608 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
1609 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1610 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1611 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1612 u32 mode, fetch_size;
1613 int ret = 0;
1614
1615 if (uc->config.pkt_mode) {
1616 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1617 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1618 uc->config.psd_size, 0);
1619 } else {
1620 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1621 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1622 }
1623
1624 req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
1625 req_rx.nav_id = tisci_rm->tisci_dev_id;
1626 req_rx.index = rchan->id;
1627 req_rx.rx_fetch_size = fetch_size >> 2;
1628 req_rx.rxcq_qnum = rx_ring;
1629 req_rx.rx_chan_type = mode;
1630 req_rx.rx_atype = uc->config.atype;
1631
1632 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1633 if (ret) {
1634 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1635 return ret;
1636 }
1637
1638 flow_req.valid_params =
1639 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1640 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1641 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1642 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1643 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1644 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1645 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1646 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1647 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1648 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1649 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1650 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1651 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
1652
1653 flow_req.nav_id = tisci_rm->tisci_dev_id;
1654 flow_req.flow_index = rchan->id;
1655
1656 if (uc->config.needs_epib)
1657 flow_req.rx_einfo_present = 1;
1658 else
1659 flow_req.rx_einfo_present = 0;
1660 if (uc->config.psd_size)
1661 flow_req.rx_psinfo_present = 1;
1662 else
1663 flow_req.rx_psinfo_present = 0;
1664 flow_req.rx_error_handling = 1;
1665 flow_req.rx_dest_qnum = rx_ring;
1666 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
1667 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
1668 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
1669 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
1670 flow_req.rx_fdq0_sz0_qnum = fd_ring;
1671 flow_req.rx_fdq1_qnum = fd_ring;
1672 flow_req.rx_fdq2_qnum = fd_ring;
1673 flow_req.rx_fdq3_qnum = fd_ring;
1674
1675 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
1676
1677 if (ret)
1678 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
1679
1680 return 0;
1681}
1682
1683static int udma_alloc_chan_resources(struct dma_chan *chan)
1684{
1685 struct udma_chan *uc = to_udma_chan(chan);
1686 struct udma_dev *ud = to_udma_dev(chan->device);
1687 const struct udma_soc_data *soc_data = ud->soc_data;
1688 struct k3_ring *irq_ring;
1689 u32 irq_udma_idx;
1690 int ret;
1691
1692 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
1693 uc->use_dma_pool = true;
		/* in case of MEM_TO_MEM we have maximum of two TRs */
1695 if (uc->config.dir == DMA_MEM_TO_MEM) {
1696 uc->config.hdesc_size = cppi5_trdesc_calc_size(
1697 sizeof(struct cppi5_tr_type15_t), 2);
1698 uc->config.pkt_mode = false;
1699 }
1700 }
1701
1702 if (uc->use_dma_pool) {
1703 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
1704 uc->config.hdesc_size,
1705 ud->desc_align,
1706 0);
1707 if (!uc->hdesc_pool) {
1708 dev_err(ud->ddev.dev,
1709 "Descriptor pool allocation failed\n");
1710 uc->use_dma_pool = false;
1711 ret = -ENOMEM;
1712 goto err_cleanup;
1713 }
1714 }
1715
	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
1720 reinit_completion(&uc->teardown_completed);
1721 complete_all(&uc->teardown_completed);
1722 uc->state = UDMA_CHAN_IS_IDLE;
1723
1724 switch (uc->config.dir) {
1725 case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
1727 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
1728 uc->id);
1729
1730 ret = udma_get_chan_pair(uc);
1731 if (ret)
1732 goto err_cleanup;
1733
1734 ret = udma_alloc_tx_resources(uc);
1735 if (ret) {
1736 udma_put_rchan(uc);
1737 goto err_cleanup;
1738 }
1739
1740 ret = udma_alloc_rx_resources(uc);
1741 if (ret) {
1742 udma_free_tx_resources(uc);
1743 goto err_cleanup;
1744 }
1745
1746 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1747 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1748 K3_PSIL_DST_THREAD_ID_OFFSET;
1749
1750 irq_ring = uc->tchan->tc_ring;
1751 irq_udma_idx = uc->tchan->id;
1752
1753 ret = udma_tisci_m2m_channel_config(uc);
1754 break;
1755 case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
1757 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
1758 uc->id);
1759
1760 ret = udma_alloc_tx_resources(uc);
1761 if (ret)
1762 goto err_cleanup;
1763
1764 uc->config.src_thread = ud->psil_base + uc->tchan->id;
1765 uc->config.dst_thread = uc->config.remote_thread_id;
1766 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
1767
1768 irq_ring = uc->tchan->tc_ring;
1769 irq_udma_idx = uc->tchan->id;
1770
1771 ret = udma_tisci_tx_channel_config(uc);
1772 break;
1773 case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
1775 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
1776 uc->id);
1777
1778 ret = udma_alloc_rx_resources(uc);
1779 if (ret)
1780 goto err_cleanup;
1781
1782 uc->config.src_thread = uc->config.remote_thread_id;
1783 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
1784 K3_PSIL_DST_THREAD_ID_OFFSET;
1785
1786 irq_ring = uc->rflow->r_ring;
1787 irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id;
1788
1789 ret = udma_tisci_rx_channel_config(uc);
1790 break;
1791 default:
1792
1793 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
1794 __func__, uc->id, uc->config.dir);
1795 ret = -EINVAL;
1796 goto err_cleanup;
1797
1798 }
1799
1800
1801 if (ret)
1802 goto err_res_free;
1803
1804 if (udma_is_chan_running(uc)) {
1805 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1806 udma_reset_chan(uc, false);
1807 if (udma_is_chan_running(uc)) {
1808 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1809 ret = -EBUSY;
1810 goto err_res_free;
1811 }
1812 }
1813
	/* PSI-L pairing */
1815 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1816 if (ret) {
1817 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
1818 uc->config.src_thread, uc->config.dst_thread);
1819 goto err_res_free;
1820 }
1821
1822 uc->psil_paired = true;
1823
1824 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
1825 if (uc->irq_num_ring <= 0) {
1826 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
1827 k3_ringacc_get_ring_id(irq_ring));
1828 ret = -EINVAL;
1829 goto err_psi_free;
1830 }
1831
1832 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
1833 IRQF_TRIGGER_HIGH, uc->name, uc);
1834 if (ret) {
1835 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
1836 goto err_irq_free;
1837 }
1838
	/* Event from UDMA (TR events) only needed for slave TR mode channels */
1840 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
1841 uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
1842 irq_udma_idx);
1843 if (uc->irq_num_udma <= 0) {
1844 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
1845 irq_udma_idx);
1846 free_irq(uc->irq_num_ring, uc);
1847 ret = -EINVAL;
1848 goto err_irq_free;
1849 }
1850
1851 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
1852 uc->name, uc);
1853 if (ret) {
1854 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
1855 uc->id);
1856 free_irq(uc->irq_num_ring, uc);
1857 goto err_irq_free;
1858 }
1859 } else {
1860 uc->irq_num_udma = 0;
1861 }
1862
1863 udma_reset_rings(uc);
1864
1865 return 0;
1866
1867err_irq_free:
1868 uc->irq_num_ring = 0;
1869 uc->irq_num_udma = 0;
1870err_psi_free:
1871 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
1872 uc->psil_paired = false;
1873err_res_free:
1874 udma_free_tx_resources(uc);
1875 udma_free_rx_resources(uc);
1876err_cleanup:
1877 udma_reset_uchan(uc);
1878
1879 if (uc->use_dma_pool) {
1880 dma_pool_destroy(uc->hdesc_pool);
1881 uc->use_dma_pool = false;
1882 }
1883
1884 return ret;
1885}
1886
1887static int udma_slave_config(struct dma_chan *chan,
1888 struct dma_slave_config *cfg)
1889{
1890 struct udma_chan *uc = to_udma_chan(chan);
1891
1892 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
1893
1894 return 0;
1895}
1896
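
/*
 * Allocate a TR mode descriptor: one CPPI5 TR descriptor header followed by
 * tr_count TR records and tr_count TR response records, taken either from
 * the channel's dma_pool or from coherent memory.
 */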
1897static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
1898 size_t tr_size, int tr_count,
1899 enum dma_transfer_direction dir)
1900{
1901 struct udma_hwdesc *hwdesc;
1902 struct cppi5_desc_hdr_t *tr_desc;
1903 struct udma_desc *d;
1904 u32 reload_count = 0;
1905 u32 ring_id;
1906
1907 switch (tr_size) {
1908 case 16:
1909 case 32:
1910 case 64:
1911 case 128:
1912 break;
1913 default:
1914 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
1915 return NULL;
1916 }
1917
	/* We have only one descriptor containing multiple TRs */
1919 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
1920 if (!d)
1921 return NULL;
1922
1923 d->sglen = tr_count;
1924
1925 d->hwdesc_count = 1;
1926 hwdesc = &d->hwdesc[0];
1927
1928
1929 if (uc->use_dma_pool) {
1930 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
1931 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
1932 GFP_NOWAIT,
1933 &hwdesc->cppi5_desc_paddr);
1934 } else {
1935 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
1936 tr_count);
1937 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
1938 uc->ud->desc_align);
1939 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
1940 hwdesc->cppi5_desc_size,
1941 &hwdesc->cppi5_desc_paddr,
1942 GFP_NOWAIT);
1943 }
1944
1945 if (!hwdesc->cppi5_desc_vaddr) {
1946 kfree(d);
1947 return NULL;
1948 }
1949
	/* Start of the TR req records */
1951 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start of the TR response array */
1953 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
1954
1955 tr_desc = hwdesc->cppi5_desc_vaddr;
1956
1957 if (uc->cyclic)
1958 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
1959
1960 if (dir == DMA_DEV_TO_MEM)
1961 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
1962 else
1963 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
1964
1965 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
1966 cppi5_desc_set_pktids(tr_desc, uc->id,
1967 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
1968 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
1969
1970 return d;
1971}
1972
/**
 * udma_get_tr_counters - calculate TR counters for a given length
 * @len: Length of the transfer
 * @align_to: Preferred alignment
 * @tr0_cnt0: First TR icnt0
 * @tr0_cnt1: First TR icnt1
 * @tr1_cnt0: Second (if used) TR icnt0
 *
 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated.
 * For len >= SZ_64K two TRs are used in a simple way:
 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
 * Second TR: the remaining length (tr1_cnt0)
 *
 * Example: len = 200000 and align_to = 2 gives tr0_cnt0 = 65532,
 * tr0_cnt1 = 3 and tr1_cnt0 = 3404 (3 * 65532 + 3404 = 200000).
 *
 * Returns the number of TRs the length needs (1 or 2) or -EINVAL if the
 * length can not be supported.
 */
1989static int udma_get_tr_counters(size_t len, unsigned long align_to,
1990 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
1991{
1992 if (len < SZ_64K) {
1993 *tr0_cnt0 = len;
1994 *tr0_cnt1 = 1;
1995
1996 return 1;
1997 }
1998
1999 if (align_to > 3)
2000 align_to = 3;
2001
2002realign:
2003 *tr0_cnt0 = SZ_64K - BIT(align_to);
2004 if (len / *tr0_cnt0 >= SZ_64K) {
2005 if (align_to) {
2006 align_to--;
2007 goto realign;
2008 }
2009 return -EINVAL;
2010 }
2011
2012 *tr0_cnt1 = len / *tr0_cnt0;
2013 *tr1_cnt0 = len % *tr0_cnt0;
2014
2015 return 2;
2016}
2017
2018static struct udma_desc *
2019udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2020 unsigned int sglen, enum dma_transfer_direction dir,
2021 unsigned long tx_flags, void *context)
2022{
2023 struct scatterlist *sgent;
2024 struct udma_desc *d;
2025 struct cppi5_tr_type1_t *tr_req = NULL;
2026 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2027 unsigned int i;
2028 size_t tr_size;
2029 int num_tr = 0;
2030 int tr_idx = 0;
2031
	/* estimate the number of TRs we will need */
2033 for_each_sg(sgl, sgent, sglen, i) {
2034 if (sg_dma_len(sgent) < SZ_64K)
2035 num_tr++;
2036 else
2037 num_tr += 2;
2038 }
2039
	/* Now allocate and setup the descriptor. */
2041 tr_size = sizeof(struct cppi5_tr_type1_t);
2042 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2043 if (!d)
2044 return NULL;
2045
2046 d->sglen = sglen;
2047
2048 tr_req = d->hwdesc[0].tr_req_base;
2049 for_each_sg(sgl, sgent, sglen, i) {
2050 dma_addr_t sg_addr = sg_dma_address(sgent);
2051
2052 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2053 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2054 if (num_tr < 0) {
2055 dev_err(uc->ud->dev, "size %u is not supported\n",
2056 sg_dma_len(sgent));
2057 udma_free_hwdesc(uc, d);
2058 kfree(d);
2059 return NULL;
2060 }
2061
2062 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2063 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2064 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2065
2066 tr_req[tr_idx].addr = sg_addr;
2067 tr_req[tr_idx].icnt0 = tr0_cnt0;
2068 tr_req[tr_idx].icnt1 = tr0_cnt1;
2069 tr_req[tr_idx].dim1 = tr0_cnt0;
2070 tr_idx++;
2071
2072 if (num_tr == 2) {
2073 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2074 false, false,
2075 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2076 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2077 CPPI5_TR_CSF_SUPR_EVT);
2078
2079 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2080 tr_req[tr_idx].icnt0 = tr1_cnt0;
2081 tr_req[tr_idx].icnt1 = 1;
2082 tr_req[tr_idx].dim1 = tr1_cnt0;
2083 tr_idx++;
2084 }
2085
2086 d->residue += sg_dma_len(sgent);
2087 }
2088
2089 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2090 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2091
2092 return d;
2093}
2094
2095static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
2096 enum dma_slave_buswidth dev_width,
2097 u16 elcnt)
2098{
2099 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
2100 return 0;
2101
	/* Bus width translates to the element size (ES) */
2103 switch (dev_width) {
2104 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2105 d->static_tr.elsize = 0;
2106 break;
2107 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2108 d->static_tr.elsize = 1;
2109 break;
2110 case DMA_SLAVE_BUSWIDTH_3_BYTES:
2111 d->static_tr.elsize = 2;
2112 break;
2113 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2114 d->static_tr.elsize = 3;
2115 break;
2116 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2117 d->static_tr.elsize = 4;
2118 break;
2119 default:
2120 return -EINVAL;
2121 }
2122
2123 d->static_tr.elcnt = elcnt;
2124
	/*
	 * PDMA must close the packet when the channel is in packet mode.
	 * For TR mode when the channel is not cyclic we also need PDMA to
	 * close the packet otherwise the transfer will stall because PDMA
	 * holds on the data it has received from the peripheral.
	 */
2131 if (uc->config.pkt_mode || !uc->cyclic) {
2132 unsigned int div = dev_width * elcnt;
2133
2134 if (uc->cyclic)
2135 d->static_tr.bstcnt = d->residue / d->sglen / div;
2136 else
2137 d->static_tr.bstcnt = d->residue / div;
2138
2139 if (uc->config.dir == DMA_DEV_TO_MEM &&
2140 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
2141 return -EINVAL;
2142 } else {
2143 d->static_tr.bstcnt = 0;
2144 }
2145
2146 return 0;
2147}
2148
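
/*
 * Packet mode slave SG: one CPPI5 host descriptor per SG entry. The first
 * descriptor carries the packet level information (packet length, return
 * ring); for MEM_TO_DEV the following descriptors are linked to it as host
 * buffer descriptors.
 */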
2149static struct udma_desc *
2150udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
2151 unsigned int sglen, enum dma_transfer_direction dir,
2152 unsigned long tx_flags, void *context)
2153{
2154 struct scatterlist *sgent;
2155 struct cppi5_host_desc_t *h_desc = NULL;
2156 struct udma_desc *d;
2157 u32 ring_id;
2158 unsigned int i;
2159
2160 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
2161 if (!d)
2162 return NULL;
2163
2164 d->sglen = sglen;
2165 d->hwdesc_count = sglen;
2166
2167 if (dir == DMA_DEV_TO_MEM)
2168 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2169 else
2170 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2171
2172 for_each_sg(sgl, sgent, sglen, i) {
2173 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2174 dma_addr_t sg_addr = sg_dma_address(sgent);
2175 struct cppi5_host_desc_t *desc;
2176 size_t sg_len = sg_dma_len(sgent);
2177
2178 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2179 GFP_NOWAIT,
2180 &hwdesc->cppi5_desc_paddr);
2181 if (!hwdesc->cppi5_desc_vaddr) {
2182 dev_err(uc->ud->dev,
2183 "descriptor%d allocation failed\n", i);
2184
2185 udma_free_hwdesc(uc, d);
2186 kfree(d);
2187 return NULL;
2188 }
2189
2190 d->residue += sg_len;
2191 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2192 desc = hwdesc->cppi5_desc_vaddr;
2193
2194 if (i == 0) {
2195 cppi5_hdesc_init(desc, 0, 0);
2196
2197 cppi5_desc_set_pktids(&desc->hdr, uc->id,
2198 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2199 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
2200 } else {
2201 cppi5_hdesc_reset_hbdesc(desc);
2202 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
2203 }
2204
		/* attach the sg buffer to the descriptor */
2206 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
2207
		/* Attach link as host buffer descriptor */
2209 if (h_desc)
2210 cppi5_hdesc_link_hbdesc(h_desc,
2211 hwdesc->cppi5_desc_paddr);
2212
2213 if (dir == DMA_MEM_TO_DEV)
2214 h_desc = desc;
2215 }
2216
2217 if (d->residue >= SZ_4M) {
2218 dev_err(uc->ud->dev,
2219 "%s: Transfer size %u is over the supported 4M range\n",
2220 __func__, d->residue);
2221 udma_free_hwdesc(uc, d);
2222 kfree(d);
2223 return NULL;
2224 }
2225
2226 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2227 cppi5_hdesc_set_pktlen(h_desc, d->residue);
2228
2229 return d;
2230}
2231
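
/*
 * Client metadata (EPIB and PS data) handling for packet mode channels,
 * exposed via the dmaengine descriptor metadata_ops interface.
 */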
2232static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
2233 void *data, size_t len)
2234{
2235 struct udma_desc *d = to_udma_desc(desc);
2236 struct udma_chan *uc = to_udma_chan(desc->chan);
2237 struct cppi5_host_desc_t *h_desc;
2238 u32 psd_size = len;
2239 u32 flags = 0;
2240
2241 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2242 return -ENOTSUPP;
2243
2244 if (!data || len > uc->config.metadata_size)
2245 return -EINVAL;
2246
2247 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2248 return -EINVAL;
2249
2250 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2251 if (d->dir == DMA_MEM_TO_DEV)
2252 memcpy(h_desc->epib, data, len);
2253
2254 if (uc->config.needs_epib)
2255 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2256
2257 d->metadata = data;
2258 d->metadata_size = len;
2259 if (uc->config.needs_epib)
2260 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2261
2262 cppi5_hdesc_update_flags(h_desc, flags);
2263 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2264
2265 return 0;
2266}
2267
2268static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
2269 size_t *payload_len, size_t *max_len)
2270{
2271 struct udma_desc *d = to_udma_desc(desc);
2272 struct udma_chan *uc = to_udma_chan(desc->chan);
2273 struct cppi5_host_desc_t *h_desc;
2274
2275 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2276 return ERR_PTR(-ENOTSUPP);
2277
2278 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2279
2280 *max_len = uc->config.metadata_size;
2281
2282 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
2283 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
2284 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
2285
2286 return h_desc->epib;
2287}
2288
2289static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
2290 size_t payload_len)
2291{
2292 struct udma_desc *d = to_udma_desc(desc);
2293 struct udma_chan *uc = to_udma_chan(desc->chan);
2294 struct cppi5_host_desc_t *h_desc;
2295 u32 psd_size = payload_len;
2296 u32 flags = 0;
2297
2298 if (!uc->config.pkt_mode || !uc->config.metadata_size)
2299 return -ENOTSUPP;
2300
2301 if (payload_len > uc->config.metadata_size)
2302 return -EINVAL;
2303
2304 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
2305 return -EINVAL;
2306
2307 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
2308
2309 if (uc->config.needs_epib) {
2310 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
2311 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
2312 }
2313
2314 cppi5_hdesc_update_flags(h_desc, flags);
2315 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
2316
2317 return 0;
2318}
2319
2320static struct dma_descriptor_metadata_ops metadata_ops = {
2321 .attach = udma_attach_metadata,
2322 .get_ptr = udma_get_metadata_ptr,
2323 .set_len = udma_set_metadata_len,
2324};
2325
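/*
 * Slave SG prep entry point: validate the requested direction against the
 * channel configuration, then build either a packet mode (host descriptor
 * chain) or a TR mode descriptor, and program the static TR parameters
 * needed for PDMA peers.
 */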
2326static struct dma_async_tx_descriptor *
2327udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2328 unsigned int sglen, enum dma_transfer_direction dir,
2329 unsigned long tx_flags, void *context)
2330{
2331 struct udma_chan *uc = to_udma_chan(chan);
2332 enum dma_slave_buswidth dev_width;
2333 struct udma_desc *d;
2334 u32 burst;
2335
2336 if (dir != uc->config.dir) {
2337 dev_err(chan->device->dev,
2338 "%s: chan%d is for %s, not supporting %s\n",
2339 __func__, uc->id,
2340 dmaengine_get_direction_text(uc->config.dir),
2341 dmaengine_get_direction_text(dir));
2342 return NULL;
2343 }
2344
2345 if (dir == DMA_DEV_TO_MEM) {
2346 dev_width = uc->cfg.src_addr_width;
2347 burst = uc->cfg.src_maxburst;
2348 } else if (dir == DMA_MEM_TO_DEV) {
2349 dev_width = uc->cfg.dst_addr_width;
2350 burst = uc->cfg.dst_maxburst;
2351 } else {
2352 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
2353 return NULL;
2354 }
2355
2356 if (!burst)
2357 burst = 1;
2358
2359 if (uc->config.pkt_mode)
2360 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
2361 context);
2362 else
2363 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
2364 context);
2365
2366 if (!d)
2367 return NULL;
2368
2369 d->dir = dir;
2370 d->desc_idx = 0;
2371 d->tr_idx = 0;
2372
2373 /* static TR for remote PDMA */
2374 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2375 dev_err(uc->ud->dev,
2376 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2377 __func__, d->static_tr.bstcnt);
2378
2379 udma_free_hwdesc(uc, d);
2380 kfree(d);
2381 return NULL;
2382 }
2383
2384 if (uc->config.metadata_size)
2385 d->vd.tx.metadata_ops = &metadata_ops;
2386
2387 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2388}
2389
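/*
 * Cyclic TR mode: each period is covered by one or two type1 TRs (two when
 * the period length does not fit a single icnt0/icnt1 split).  The event of
 * the first TR of a split period is suppressed, and the per-period event is
 * suppressed entirely when the client did not request DMA_PREP_INTERRUPT.
 */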
2390static struct udma_desc *
2391udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
2392 size_t buf_len, size_t period_len,
2393 enum dma_transfer_direction dir, unsigned long flags)
2394{
2395 struct udma_desc *d;
2396 size_t tr_size, period_addr;
2397 struct cppi5_tr_type1_t *tr_req;
2398 unsigned int periods = buf_len / period_len;
2399 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2400 unsigned int i;
2401 int num_tr;
2402
2403 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
2404 &tr0_cnt1, &tr1_cnt0);
2405 if (num_tr < 0) {
2406 dev_err(uc->ud->dev, "size %zu is not supported\n",
2407 period_len);
2408 return NULL;
2409 }
2410
2411 /* Now allocate and setup the descriptor. */
2412 tr_size = sizeof(struct cppi5_tr_type1_t);
2413 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
2414 if (!d)
2415 return NULL;
2416
2417 tr_req = d->hwdesc[0].tr_req_base;
2418 period_addr = buf_addr;
2419 for (i = 0; i < periods; i++) {
2420 int tr_idx = i * num_tr;
2421
2422 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2423 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2424
2425 tr_req[tr_idx].addr = period_addr;
2426 tr_req[tr_idx].icnt0 = tr0_cnt0;
2427 tr_req[tr_idx].icnt1 = tr0_cnt1;
2428 tr_req[tr_idx].dim1 = tr0_cnt0;
2429
2430 if (num_tr == 2) {
2431 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2432 CPPI5_TR_CSF_SUPR_EVT);
2433 tr_idx++;
2434
2435 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2436 false, false,
2437 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2438
2439 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
2440 tr_req[tr_idx].icnt0 = tr1_cnt0;
2441 tr_req[tr_idx].icnt1 = 1;
2442 tr_req[tr_idx].dim1 = tr1_cnt0;
2443 }
2444
2445 if (!(flags & DMA_PREP_INTERRUPT))
2446 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2447 CPPI5_TR_CSF_SUPR_EVT);
2448
2449 period_addr += period_len;
2450 }
2451
2452 return d;
2453}
2454
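/*
 * Cyclic packet mode: one host descriptor per period, each pointing into the
 * cyclic buffer.  The number of periods is bounded by the ring size and each
 * period must stay below the 4M packet length limit.
 */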
2455static struct udma_desc *
2456udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
2457 size_t buf_len, size_t period_len,
2458 enum dma_transfer_direction dir, unsigned long flags)
2459{
2460 struct udma_desc *d;
2461 u32 ring_id;
2462 int i;
2463 int periods = buf_len / period_len;
2464
2465 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
2466 return NULL;
2467
2468 if (period_len >= SZ_4M)
2469 return NULL;
2470
2471 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
2472 if (!d)
2473 return NULL;
2474
2475 d->hwdesc_count = periods;
2476
2477 /* ring on which the completed descriptors are returned */
2478 if (dir == DMA_DEV_TO_MEM)
2479 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2480 else
2481 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2482
2483 for (i = 0; i < periods; i++) {
2484 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
2485 dma_addr_t period_addr = buf_addr + (period_len * i);
2486 struct cppi5_host_desc_t *h_desc;
2487
2488 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2489 GFP_NOWAIT,
2490 &hwdesc->cppi5_desc_paddr);
2491 if (!hwdesc->cppi5_desc_vaddr) {
2492 dev_err(uc->ud->dev,
2493 "descriptor%d allocation failed\n", i);
2494
2495 udma_free_hwdesc(uc, d);
2496 kfree(d);
2497 return NULL;
2498 }
2499
2500 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2501 h_desc = hwdesc->cppi5_desc_vaddr;
2502
2503 cppi5_hdesc_init(h_desc, 0, 0);
2504 cppi5_hdesc_set_pktlen(h_desc, period_len);
2505
2506 /* Flow and Packed ID */
2507 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
2508 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2509 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
2510
2511 /* attach each period to a new descriptor */
2512 cppi5_hdesc_attach_buf(h_desc,
2513 period_addr, period_len,
2514 period_addr, period_len);
2515 }
2516
2517 return d;
2518}
2519
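/*
 * dmaengine prep_dma_cyclic callback: pick packet or TR mode based on the
 * channel configuration and program the static TR parameters for PDMA peers.
 */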
2520static struct dma_async_tx_descriptor *
2521udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
2522 size_t period_len, enum dma_transfer_direction dir,
2523 unsigned long flags)
2524{
2525 struct udma_chan *uc = to_udma_chan(chan);
2526 enum dma_slave_buswidth dev_width;
2527 struct udma_desc *d;
2528 u32 burst;
2529
2530 if (dir != uc->config.dir) {
2531 dev_err(chan->device->dev,
2532 "%s: chan%d is for %s, not supporting %s\n",
2533 __func__, uc->id,
2534 dmaengine_get_direction_text(uc->config.dir),
2535 dmaengine_get_direction_text(dir));
2536 return NULL;
2537 }
2538
2539 uc->cyclic = true;
2540
2541 if (dir == DMA_DEV_TO_MEM) {
2542 dev_width = uc->cfg.src_addr_width;
2543 burst = uc->cfg.src_maxburst;
2544 } else if (dir == DMA_MEM_TO_DEV) {
2545 dev_width = uc->cfg.dst_addr_width;
2546 burst = uc->cfg.dst_maxburst;
2547 } else {
2548 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2549 return NULL;
2550 }
2551
2552 if (!burst)
2553 burst = 1;
2554
2555 if (uc->config.pkt_mode)
2556 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
2557 dir, flags);
2558 else
2559 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
2560 dir, flags);
2561
2562 if (!d)
2563 return NULL;
2564
2565 d->sglen = buf_len / period_len;
2566
2567 d->dir = dir;
2568 d->residue = buf_len;
2569
2570 /* static TR for remote PDMA */
2571 if (udma_configure_statictr(uc, d, dev_width, burst)) {
2572 dev_err(uc->ud->dev,
2573 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
2574 __func__, d->static_tr.bstcnt);
2575
2576 udma_free_hwdesc(uc, d);
2577 kfree(d);
2578 return NULL;
2579 }
2580
2581 if (uc->config.metadata_size)
2582 d->vd.tx.metadata_ops = &metadata_ops;
2583
2584 return vchan_tx_prep(&uc->vc, &d->vd, flags);
2585}
2586
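/*
 * memcpy is implemented on a MEM_TO_MEM channel with type15 TRs.  The copy
 * length is split by udma_get_tr_counters() into one or two TRs; the second
 * TR, when present, copies the tail that does not fit the icnt0/icnt1 split
 * of the first.  Per-TR events are suppressed and EOP is set on the last TR.
 */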
2587static struct dma_async_tx_descriptor *
2588udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
2589 size_t len, unsigned long tx_flags)
2590{
2591 struct udma_chan *uc = to_udma_chan(chan);
2592 struct udma_desc *d;
2593 struct cppi5_tr_type15_t *tr_req;
2594 int num_tr;
2595 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
2596 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2597
2598 if (uc->config.dir != DMA_MEM_TO_MEM) {
2599 dev_err(chan->device->dev,
2600 "%s: chan%d is for %s, not supporting %s\n",
2601 __func__, uc->id,
2602 dmaengine_get_direction_text(uc->config.dir),
2603 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
2604 return NULL;
2605 }
2606
2607 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
2608 &tr0_cnt1, &tr1_cnt0);
2609 if (num_tr < 0) {
2610 dev_err(uc->ud->dev, "size %zu is not supported\n",
2611 len);
2612 return NULL;
2613 }
2614
2615 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
2616 if (!d)
2617 return NULL;
2618
2619 d->dir = DMA_MEM_TO_MEM;
2620 d->desc_idx = 0;
2621 d->tr_idx = 0;
2622 d->residue = len;
2623
2624 tr_req = d->hwdesc[0].tr_req_base;
2625
2626 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
2627 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2628 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
2629
2630 tr_req[0].addr = src;
2631 tr_req[0].icnt0 = tr0_cnt0;
2632 tr_req[0].icnt1 = tr0_cnt1;
2633 tr_req[0].icnt2 = 1;
2634 tr_req[0].icnt3 = 1;
2635 tr_req[0].dim1 = tr0_cnt0;
2636
2637 tr_req[0].daddr = dest;
2638 tr_req[0].dicnt0 = tr0_cnt0;
2639 tr_req[0].dicnt1 = tr0_cnt1;
2640 tr_req[0].dicnt2 = 1;
2641 tr_req[0].dicnt3 = 1;
2642 tr_req[0].ddim1 = tr0_cnt0;
2643
2644 if (num_tr == 2) {
2645 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
2646 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2647 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
2648
2649 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
2650 tr_req[1].icnt0 = tr1_cnt0;
2651 tr_req[1].icnt1 = 1;
2652 tr_req[1].icnt2 = 1;
2653 tr_req[1].icnt3 = 1;
2654
2655 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
2656 tr_req[1].dicnt0 = tr1_cnt0;
2657 tr_req[1].dicnt1 = 1;
2658 tr_req[1].dicnt2 = 1;
2659 tr_req[1].dicnt3 = 1;
2660 }
2661
2662 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
2663 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2664
2665 if (uc->config.metadata_size)
2666 d->vd.tx.metadata_ops = &metadata_ops;
2667
2668 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
2669}
2670
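/*
 * Kick the channel: move the issued descriptors to the hardware unless a
 * previously terminated transfer is still being torn down, in which case the
 * start is deferred until the teardown finishes.
 */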
2671static void udma_issue_pending(struct dma_chan *chan)
2672{
2673 struct udma_chan *uc = to_udma_chan(chan);
2674 unsigned long flags;
2675
2676 spin_lock_irqsave(&uc->vc.lock, flags);
2677
2678 /* If we have something pending and no active descriptor, then */
2679 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
2680 /*
2681  * start a descriptor if the channel is NOT [marked as
2682  * terminating _and_ it is still running (teardown has not
2683  * completed yet)].
2684  */
2685 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
2686 udma_is_chan_running(uc)))
2687 udma_start(uc);
2688 }
2689
2690 spin_unlock_irqrestore(&uc->vc.lock, flags);
2691}
2692
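/*
 * Residue reporting: progress is derived from the channel's realtime byte
 * count registers (SBCNT/BCNT and, for non-native PSI-L endpoints, the
 * PEER_BCNT register).  The difference between the local and peer counters
 * is reported as in-flight bytes.
 */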
2693static enum dma_status udma_tx_status(struct dma_chan *chan,
2694 dma_cookie_t cookie,
2695 struct dma_tx_state *txstate)
2696{
2697 struct udma_chan *uc = to_udma_chan(chan);
2698 enum dma_status ret;
2699 unsigned long flags;
2700
2701 spin_lock_irqsave(&uc->vc.lock, flags);
2702
2703 ret = dma_cookie_status(chan, cookie, txstate);
2704
2705 if (!udma_is_chan_running(uc))
2706 ret = DMA_COMPLETE;
2707
2708 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
2709 ret = DMA_PAUSED;
2710
2711 if (ret == DMA_COMPLETE || !txstate)
2712 goto out;
2713
2714 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
2715 u32 peer_bcnt = 0;
2716 u32 bcnt = 0;
2717 u32 residue = uc->desc->residue;
2718 u32 delay = 0;
2719
2720 if (uc->desc->dir == DMA_MEM_TO_DEV) {
2721 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
2722
2723 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2724 peer_bcnt = udma_tchanrt_read(uc,
2725 UDMA_CHAN_RT_PEER_BCNT_REG);
2726
2727 if (bcnt > peer_bcnt)
2728 delay = bcnt - peer_bcnt;
2729 }
2730 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
2731 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
2732
2733 if (uc->config.ep_type != PSIL_EP_NATIVE) {
2734 peer_bcnt = udma_rchanrt_read(uc,
2735 UDMA_CHAN_RT_PEER_BCNT_REG);
2736
2737 if (peer_bcnt > bcnt)
2738 delay = peer_bcnt - bcnt;
2739 }
2740 } else {
2741 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
2742 }
2743
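/*
 * The RT counters are cumulative: uc->bcnt reflects bytes already accounted
 * to earlier completed descriptors, so the delta is the progress within the
 * current descriptor.  The modulo keeps the residue within one buffer length
 * for cyclic transfers.
 */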
2744 bcnt -= uc->bcnt;
2745 if (bcnt && !(bcnt % uc->desc->residue))
2746 residue = 0;
2747 else
2748 residue -= bcnt % uc->desc->residue;
2749
2750 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
2751 ret = DMA_COMPLETE;
2752 delay = 0;
2753 }
2754
2755 dma_set_residue(txstate, residue);
2756 dma_set_in_flight_bytes(txstate, delay);
2757
2758 } else {
2759 ret = DMA_COMPLETE;
2760 }
2761
2762out:
2763 spin_unlock_irqrestore(&uc->vc.lock, flags);
2764 return ret;
2765}
2766
2767static int udma_pause(struct dma_chan *chan)
2768{
2769 struct udma_chan *uc = to_udma_chan(chan);
2770
2771 /* pause the channel */
2772 switch (uc->config.dir) {
2773 case DMA_DEV_TO_MEM:
2774 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
2775 UDMA_PEER_RT_EN_PAUSE,
2776 UDMA_PEER_RT_EN_PAUSE);
2777 break;
2778 case DMA_MEM_TO_DEV:
2779 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
2780 UDMA_PEER_RT_EN_PAUSE,
2781 UDMA_PEER_RT_EN_PAUSE);
2782 break;
2783 case DMA_MEM_TO_MEM:
2784 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
2785 UDMA_CHAN_RT_CTL_PAUSE,
2786 UDMA_CHAN_RT_CTL_PAUSE);
2787 break;
2788 default:
2789 return -EINVAL;
2790 }
2791
2792 return 0;
2793}
2794
2795static int udma_resume(struct dma_chan *chan)
2796{
2797 struct udma_chan *uc = to_udma_chan(chan);
2798
2799 /* resume the channel */
2800 switch (uc->config.dir) {
2801 case DMA_DEV_TO_MEM:
2802 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
2803 UDMA_PEER_RT_EN_PAUSE, 0);
2804
2805 break;
2806 case DMA_MEM_TO_DEV:
2807 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
2808 UDMA_PEER_RT_EN_PAUSE, 0);
2809 break;
2810 case DMA_MEM_TO_MEM:
2811 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
2812 UDMA_CHAN_RT_CTL_PAUSE, 0);
2813 break;
2814 default:
2815 return -EINVAL;
2816 }
2817
2818 return 0;
2819}
2820
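/*
 * Stop the channel and drop all queued descriptors.  The currently active
 * descriptor is kept as terminated_desc so its completion can be reconciled
 * once the hardware teardown finishes.
 */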
2821static int udma_terminate_all(struct dma_chan *chan)
2822{
2823 struct udma_chan *uc = to_udma_chan(chan);
2824 unsigned long flags;
2825 LIST_HEAD(head);
2826
2827 spin_lock_irqsave(&uc->vc.lock, flags);
2828
2829 if (udma_is_chan_running(uc))
2830 udma_stop(uc);
2831
2832 if (uc->desc) {
2833 uc->terminated_desc = uc->desc;
2834 uc->desc = NULL;
2835 uc->terminated_desc->terminated = true;
2836 cancel_delayed_work(&uc->tx_drain.work);
2837 }
2838
2839 uc->paused = false;
2840
2841 vchan_get_all_descriptors(&uc->vc, &head);
2842 spin_unlock_irqrestore(&uc->vc.lock, flags);
2843 vchan_dma_desc_free_list(&uc->vc, &head);
2844
2845 return 0;
2846}
2847
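/*
 * Wait for a pending termination to finish: block on teardown_completed with
 * a one second timeout, hard-reset the channel if it refused to stop, then
 * flush the tx-drain work and reset the rings.
 */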
2848static void udma_synchronize(struct dma_chan *chan)
2849{
2850 struct udma_chan *uc = to_udma_chan(chan);
2851 unsigned long timeout = msecs_to_jiffies(1000);
2852
2853 vchan_synchronize(&uc->vc);
2854
2855 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
2856 timeout = wait_for_completion_timeout(&uc->teardown_completed,
2857 timeout);
2858 if (!timeout) {
2859 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
2860 uc->id);
2861 udma_dump_chan_stdata(uc);
2862 udma_reset_chan(uc, true);
2863 }
2864 }
2865
2866 udma_reset_chan(uc, false);
2867 if (udma_is_chan_running(uc))
2868 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
2869
2870 cancel_delayed_work_sync(&uc->tx_drain.work);
2871 udma_reset_rings(uc);
2872}
2873
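/*
 * Runs right before the client callback is invoked: fetch back the EPIB
 * metadata if the client asked for it and fill in the dmaengine result
 * (residue and completion status) from the returned host descriptor.
 */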
2874static void udma_desc_pre_callback(struct virt_dma_chan *vc,
2875 struct virt_dma_desc *vd,
2876 struct dmaengine_result *result)
2877{
2878 struct udma_chan *uc = to_udma_chan(&vc->chan);
2879 struct udma_desc *d;
2880
2881 if (!vd)
2882 return;
2883
2884 d = to_udma_desc(&vd->tx);
2885
2886 if (d->metadata_size)
2887 udma_fetch_epib(uc, d);
2888
2889 /* Provide residue information for the client */
2890 if (result) {
2891 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
2892
2893 if (cppi5_desc_get_type(desc_vaddr) ==
2894 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
2895 result->residue = d->residue -
2896 cppi5_hdesc_get_pktlen(desc_vaddr);
2897 if (result->residue)
2898 result->result = DMA_TRANS_ABORTED;
2899 else
2900 result->result = DMA_TRANS_NOERROR;
2901 } else {
2902 result->residue = 0;
2903 result->result = DMA_TRANS_NOERROR;
2904 }
2905 }
2906}
2907
2908/*
2909 * This tasklet handles the completion of a DMA descriptor by
2910 * calling its callback and freeing it.
2911 */
2912static void udma_vchan_complete(struct tasklet_struct *t)
2913{
2914 struct virt_dma_chan *vc = from_tasklet(vc, t, task);
2915 struct virt_dma_desc *vd, *_vd;
2916 struct dmaengine_desc_callback cb;
2917 LIST_HEAD(head);
2918
2919 spin_lock_irq(&vc->lock);
2920 list_splice_tail_init(&vc->desc_completed, &head);
2921 vd = vc->cyclic;
2922 if (vd) {
2923 vc->cyclic = NULL;
2924 dmaengine_desc_get_callback(&vd->tx, &cb);
2925 } else {
2926 memset(&cb, 0, sizeof(cb));
2927 }
2928 spin_unlock_irq(&vc->lock);
2929
2930 udma_desc_pre_callback(vc, vd, NULL);
2931 dmaengine_desc_callback_invoke(&cb, NULL);
2932
2933 list_for_each_entry_safe(vd, _vd, &head, node) {
2934 struct dmaengine_result result;
2935
2936 dmaengine_desc_get_callback(&vd->tx, &cb);
2937
2938 list_del(&vd->node);
2939
2940 udma_desc_pre_callback(vc, vd, &result);
2941 dmaengine_desc_callback_invoke(&cb, &result);
2942
2943 vchan_vdesc_fini(vd);
2944 }
2945}
2946
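/*
 * Tear everything down in the reverse order of allocation: terminate and
 * reset the channel, release the ring and UDMA interrupts, un-pair the PSI-L
 * threads, then free the virt-dma state, the tx/rx resources and the
 * descriptor pool.
 */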
2947static void udma_free_chan_resources(struct dma_chan *chan)
2948{
2949 struct udma_chan *uc = to_udma_chan(chan);
2950 struct udma_dev *ud = to_udma_dev(chan->device);
2951
2952 udma_terminate_all(chan);
2953 if (uc->terminated_desc) {
2954 udma_reset_chan(uc, false);
2955 udma_reset_rings(uc);
2956 }
2957
2958 cancel_delayed_work_sync(&uc->tx_drain.work);
2959
2960 if (uc->irq_num_ring > 0) {
2961 free_irq(uc->irq_num_ring, uc);
2962
2963 uc->irq_num_ring = 0;
2964 }
2965 if (uc->irq_num_udma > 0) {
2966 free_irq(uc->irq_num_udma, uc);
2967
2968 uc->irq_num_udma = 0;
2969 }
2970
2971 /* Release PSI-L pairing */
2972 if (uc->psil_paired) {
2973 navss_psil_unpair(ud, uc->config.src_thread,
2974 uc->config.dst_thread);
2975 uc->psil_paired = false;
2976 }
2977
2978 vchan_free_chan_resources(&uc->vc);
2979 tasklet_kill(&uc->vc.task);
2980
2981 udma_free_tx_resources(uc);
2982 udma_free_rx_resources(uc);
2983 udma_reset_uchan(uc);
2984
2985 if (uc->use_dma_pool) {
2986 dma_pool_destroy(uc->hdesc_pool);
2987 uc->use_dma_pool = false;
2988 }
2989}
2990
2991static struct platform_driver udma_driver;
2992
2993struct udma_filter_param {
2994 int remote_thread_id;
2995 u32 atype;
2996};
2997
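/*
 * dmaengine filter function used by udma_of_xlate(): decode the remote PSI-L
 * thread id and atype from the dma_spec, derive the transfer direction from
 * the destination-thread bit, and pull the endpoint configuration (packet
 * mode, EPIB/psdata sizes, PDMA features) from the PSI-L endpoint database.
 */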
2998static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
2999{
3000 struct udma_chan_config *ucc;
3001 struct psil_endpoint_config *ep_config;
3002 struct udma_filter_param *filter_param;
3003 struct udma_chan *uc;
3004 struct udma_dev *ud;
3005
3006 if (chan->device->dev->driver != &udma_driver.driver)
3007 return false;
3008
3009 uc = to_udma_chan(chan);
3010 ucc = &uc->config;
3011 ud = uc->ud;
3012 filter_param = param;
3013
3014 if (filter_param->atype > 2) {
3015 dev_err(ud->dev, "Invalid channel atype: %u\n",
3016 filter_param->atype);
3017 return false;
3018 }
3019
3020 ucc->remote_thread_id = filter_param->remote_thread_id;
3021 ucc->atype = filter_param->atype;
3022
3023 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
3024 ucc->dir = DMA_MEM_TO_DEV;
3025 else
3026 ucc->dir = DMA_DEV_TO_MEM;
3027
3028 ep_config = psil_get_ep_config(ucc->remote_thread_id);
3029 if (IS_ERR(ep_config)) {
3030 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
3031 ucc->remote_thread_id);
3032 ucc->dir = DMA_MEM_TO_MEM;
3033 ucc->remote_thread_id = -1;
3034 ucc->atype = 0;
3035 return false;
3036 }
3037
3038 ucc->pkt_mode = ep_config->pkt_mode;
3039 ucc->channel_tpl = ep_config->channel_tpl;
3040 ucc->notdpkt = ep_config->notdpkt;
3041 ucc->ep_type = ep_config->ep_type;
3042
3043 if (ucc->ep_type != PSIL_EP_NATIVE) {
3044 const struct udma_match_data *match_data = ud->match_data;
3045
3046 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
3047 ucc->enable_acc32 = ep_config->pdma_acc32;
3048 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
3049 ucc->enable_burst = ep_config->pdma_burst;
3050 }
3051
3052 ucc->needs_epib = ep_config->needs_epib;
3053 ucc->psd_size = ep_config->psd_size;
3054 ucc->metadata_size =
3055 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
3056 ucc->psd_size;
3057
3058 if (ucc->pkt_mode)
3059 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3060 ucc->metadata_size, ud->desc_align);
3061
3062 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
3063 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
3064
3065 return true;
3066}
3067
3068static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
3069 struct of_dma *ofdma)
3070{
3071 struct udma_dev *ud = ofdma->of_dma_data;
3072 dma_cap_mask_t mask = ud->ddev.cap_mask;
3073 struct udma_filter_param filter_param;
3074 struct dma_chan *chan;
3075
3076 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
3077 return NULL;
3078
3079 filter_param.remote_thread_id = dma_spec->args[0];
3080 if (dma_spec->args_count == 2)
3081 filter_param.atype = dma_spec->args[1];
3082 else
3083 filter_param.atype = 0;
3084
3085 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
3086 ofdma->of_node);
3087 if (!chan) {
3088 dev_err(ud->dev, "failed to get a channel in %s\n", __func__);
3089 return ERR_PTR(-EINVAL);
3090 }
3091
3092 return chan;
3093}
3094
3095static struct udma_match_data am654_main_data = {
3096 .psil_base = 0x1000,
3097 .enable_memcpy_support = true,
3098 .statictr_z_mask = GENMASK(11, 0),
3099};
3100
3101static struct udma_match_data am654_mcu_data = {
3102 .psil_base = 0x6000,
3103 .enable_memcpy_support = false,
3104 .statictr_z_mask = GENMASK(11, 0),
3105};
3106
3107static struct udma_match_data j721e_main_data = {
3108 .psil_base = 0x1000,
3109 .enable_memcpy_support = true,
3110 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3111 .statictr_z_mask = GENMASK(23, 0),
3112};
3113
3114static struct udma_match_data j721e_mcu_data = {
3115 .psil_base = 0x6000,
3116 .enable_memcpy_support = false,
3117 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
3118 .statictr_z_mask = GENMASK(23, 0),
3119};
3120
3121static const struct of_device_id udma_of_match[] = {
3122 {
3123 .compatible = "ti,am654-navss-main-udmap",
3124 .data = &am654_main_data,
3125 },
3126 {
3127 .compatible = "ti,am654-navss-mcu-udmap",
3128 .data = &am654_mcu_data,
3129 }, {
3130 .compatible = "ti,j721e-navss-main-udmap",
3131 .data = &j721e_main_data,
3132 }, {
3133 .compatible = "ti,j721e-navss-mcu-udmap",
3134 .data = &j721e_mcu_data,
3135 },
3136 { },
3137};
3138
3139static struct udma_soc_data am654_soc_data = {
3140 .rchan_oes_offset = 0x200,
3141};
3142
3143static struct udma_soc_data j721e_soc_data = {
3144 .rchan_oes_offset = 0x400,
3145};
3146
3147static struct udma_soc_data j7200_soc_data = {
3148 .rchan_oes_offset = 0x80,
3149};
3150
3151static const struct soc_device_attribute k3_soc_devices[] = {
3152 { .family = "AM65X", .data = &am654_soc_data },
3153 { .family = "J721E", .data = &j721e_soc_data },
3154 { .family = "J7200", .data = &j7200_soc_data },
3155 { }
3156};
3157
3158static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
3159{
3160 int i;
3161
3162 for (i = 0; i < MMR_LAST; i++) {
3163 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
3164 if (IS_ERR(ud->mmrs[i]))
3165 return PTR_ERR(ud->mmrs[i]);
3166 }
3167
3168 return 0;
3169}
3170
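/*
 * Discover the channel and flow counts from the capability registers, build
 * the allocation bitmaps from the TISCI resource ranges assigned to this
 * host, set up the MSI interrupt ranges and allocate the udma_chan array.
 */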
3171static int udma_setup_resources(struct udma_dev *ud)
3172{
3173 struct device *dev = ud->dev;
3174 int ch_count, ret, i, j;
3175 u32 cap2, cap3;
3176 struct ti_sci_resource_desc *rm_desc;
3177 struct ti_sci_resource *rm_res, irq_res;
3178 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
3179 static const char * const range_names[] = { "ti,sci-rm-range-tchan",
3180 "ti,sci-rm-range-rchan",
3181 "ti,sci-rm-range-rflow" };
3182
3183 cap2 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(2));
3184 cap3 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(3));
3185
3186 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
3187 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
3188 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
3189 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
3190 ch_count = ud->tchan_cnt + ud->rchan_cnt;
3191
3192 /* Set up the throughput level start indexes */
3193 if (of_device_is_compatible(dev->of_node,
3194 "ti,am654-navss-main-udmap")) {
3195 ud->tpl_levels = 2;
3196 ud->tpl_start_idx[0] = 8;
3197 } else if (of_device_is_compatible(dev->of_node,
3198 "ti,am654-navss-mcu-udmap")) {
3199 ud->tpl_levels = 2;
3200 ud->tpl_start_idx[0] = 2;
3201 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
3202 ud->tpl_levels = 3;
3203 ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
3204 ud->tpl_start_idx[0] = ud->tpl_start_idx[1] +
3205 UDMA_CAP3_HCHAN_CNT(cap3);
3206 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
3207 ud->tpl_levels = 2;
3208 ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
3209 } else {
3210 ud->tpl_levels = 1;
3211 }
3212
3213 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
3214 sizeof(unsigned long), GFP_KERNEL);
3215 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
3216 GFP_KERNEL);
3217 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
3218 sizeof(unsigned long), GFP_KERNEL);
3219 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
3220 GFP_KERNEL);
3221 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
3222 sizeof(unsigned long),
3223 GFP_KERNEL);
3224 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
3225 BITS_TO_LONGS(ud->rflow_cnt),
3226 sizeof(unsigned long),
3227 GFP_KERNEL);
3228 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
3229 sizeof(unsigned long),
3230 GFP_KERNEL);
3231 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
3232 GFP_KERNEL);
3233
3234 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
3235 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
3236 !ud->rflows || !ud->rflow_in_use)
3237 return -ENOMEM;
3238
3239 /*
3240  * RX flows with the same Ids as RX channels are reserved to be used
3241  * as default flows if remote HW can't generate flow_ids. Those
3242  * RX flows can be requested only explicitly by id.
3243  */
3244 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
3245
3246 /* by default no GP rflows are assigned to Linux */
3247 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
3248
3249 /* Get resource ranges from tisci */
3250 for (i = 0; i < RM_RANGE_LAST; i++)
3251 tisci_rm->rm_ranges[i] =
3252 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
3253 tisci_rm->tisci_dev_id,
3254 (char *)range_names[i]);
3255
3256 /* tchan ranges */
3257 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3258 if (IS_ERR(rm_res)) {
3259 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
3260 } else {
3261 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
3262 for (i = 0; i < rm_res->sets; i++) {
3263 rm_desc = &rm_res->desc[i];
3264 bitmap_clear(ud->tchan_map, rm_desc->start,
3265 rm_desc->num);
3266 dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
3267 rm_desc->start, rm_desc->num);
3268 }
3269 }
3270 irq_res.sets = rm_res->sets;
3271
3272 /* rchan and matching default flow ranges */
3273 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3274 if (IS_ERR(rm_res)) {
3275 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
3276 } else {
3277 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
3278 for (i = 0; i < rm_res->sets; i++) {
3279 rm_desc = &rm_res->desc[i];
3280 bitmap_clear(ud->rchan_map, rm_desc->start,
3281 rm_desc->num);
3282 dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
3283 rm_desc->start, rm_desc->num);
3284 }
3285 }
3286
3287 irq_res.sets += rm_res->sets;
3288 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
3289 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
3290 for (i = 0; i < rm_res->sets; i++) {
3291 irq_res.desc[i].start = rm_res->desc[i].start;
3292 irq_res.desc[i].num = rm_res->desc[i].num;
3293 }
3294 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
3295 for (j = 0; j < rm_res->sets; j++, i++) {
3296 irq_res.desc[i].start = rm_res->desc[j].start +
3297 ud->soc_data->rchan_oes_offset;
3298 irq_res.desc[i].num = rm_res->desc[j].num;
3299 }
3300 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
3301 kfree(irq_res.desc);
3302 if (ret) {
3303 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
3304 return ret;
3305 }
3306
3307 /* GP rflow ranges */
3308 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
3309 if (IS_ERR(rm_res)) {
3310 /* all gp flows are assigned exclusively to Linux */
3311 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
3312 ud->rflow_cnt - ud->rchan_cnt);
3313 } else {
3314 for (i = 0; i < rm_res->sets; i++) {
3315 rm_desc = &rm_res->desc[i];
3316 bitmap_clear(ud->rflow_gp_map, rm_desc->start,
3317 rm_desc->num);
3318 dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
3319 rm_desc->start, rm_desc->num);
3320 }
3321 }
3322
3323 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
3324 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
3325 if (!ch_count)
3326 return -ENODEV;
3327
3328 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
3329 GFP_KERNEL);
3330 if (!ud->channels)
3331 return -ENOMEM;
3332
3333 dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
3334 ch_count,
3335 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
3336 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
3337 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
3338 ud->rflow_cnt));
3339
3340 return ch_count;
3341}
3342
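/*
 * Pre-build the descriptors used to flush stale data out of an RX channel
 * during teardown: one TR mode and one packet mode descriptor, both pointing
 * at a dummy 1K bounce buffer, so either channel type can be flushed.
 */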
3343static int udma_setup_rx_flush(struct udma_dev *ud)
3344{
3345 struct udma_rx_flush *rx_flush = &ud->rx_flush;
3346 struct cppi5_desc_hdr_t *tr_desc;
3347 struct cppi5_tr_type1_t *tr_req;
3348 struct cppi5_host_desc_t *desc;
3349 struct device *dev = ud->dev;
3350 struct udma_hwdesc *hwdesc;
3351 size_t tr_size;
3352
3353 /* Allocate 1K buffer for discarded data on RX channel teardown */
3354 rx_flush->buffer_size = SZ_1K;
3355 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
3356 GFP_KERNEL);
3357 if (!rx_flush->buffer_vaddr)
3358 return -ENOMEM;
3359
3360 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
3361 rx_flush->buffer_size,
3362 DMA_TO_DEVICE);
3363 if (dma_mapping_error(dev, rx_flush->buffer_paddr))
3364 return -ENOMEM;
3365
3366 /* Set up descriptor to be used for TR mode */
3367 hwdesc = &rx_flush->hwdescs[0];
3368 tr_size = sizeof(struct cppi5_tr_type1_t);
3369 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
3370 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
3371 ud->desc_align);
3372
3373 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3374 GFP_KERNEL);
3375 if (!hwdesc->cppi5_desc_vaddr)
3376 return -ENOMEM;
3377
3378 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3379 hwdesc->cppi5_desc_size,
3380 DMA_TO_DEVICE);
3381 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3382 return -ENOMEM;
3383
3384 /* Start of the TR req records */
3385 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
3386 /* Start address of the TR response array */
3387 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
3388
3389 tr_desc = hwdesc->cppi5_desc_vaddr;
3390 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
3391 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3392 cppi5_desc_set_retpolicy(tr_desc, 0, 0);
3393
3394 tr_req = hwdesc->tr_req_base;
3395 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
3396 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3397 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
3398
3399 tr_req->addr = rx_flush->buffer_paddr;
3400 tr_req->icnt0 = rx_flush->buffer_size;
3401 tr_req->icnt1 = 1;
3402
3403 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3404 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3405
3406 /* Set up descriptor to be used for packet mode */
3407 hwdesc = &rx_flush->hwdescs[1];
3408 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
3409 CPPI5_INFO0_HDESC_EPIB_SIZE +
3410 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
3411 ud->desc_align);
3412
3413 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
3414 GFP_KERNEL);
3415 if (!hwdesc->cppi5_desc_vaddr)
3416 return -ENOMEM;
3417
3418 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
3419 hwdesc->cppi5_desc_size,
3420 DMA_TO_DEVICE);
3421 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
3422 return -ENOMEM;
3423
3424 desc = hwdesc->cppi5_desc_vaddr;
3425 cppi5_hdesc_init(desc, 0, 0);
3426 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3427 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
3428
3429 cppi5_hdesc_attach_buf(desc,
3430 rx_flush->buffer_paddr, rx_flush->buffer_size,
3431 rx_flush->buffer_paddr, rx_flush->buffer_size);
3432
3433 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
3434 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
3435 return 0;
3436}
3437
3438#ifdef CONFIG_DEBUG_FS
3439static void udma_dbg_summary_show_chan(struct seq_file *s,
3440 struct dma_chan *chan)
3441{
3442 struct udma_chan *uc = to_udma_chan(chan);
3443 struct udma_chan_config *ucc = &uc->config;
3444
3445 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
3446 chan->dbg_client_name ?: "in-use");
3447 seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
3448
3449 switch (uc->config.dir) {
3450 case DMA_MEM_TO_MEM:
3451 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
3452 ucc->src_thread, ucc->dst_thread);
3453 break;
3454 case DMA_DEV_TO_MEM:
3455 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
3456 ucc->src_thread, ucc->dst_thread);
3457 break;
3458 case DMA_MEM_TO_DEV:
3459 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
3460 ucc->src_thread, ucc->dst_thread);
3461 break;
3462 default:
3463 seq_printf(s, ")\n");
3464 return;
3465 }
3466
3467 if (ucc->ep_type == PSIL_EP_NATIVE) {
3468 seq_printf(s, "PSI-L Native");
3469 if (ucc->metadata_size) {
3470 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
3471 if (ucc->psd_size)
3472 seq_printf(s, " PSDsize:%u", ucc->psd_size);
3473 seq_printf(s, " ]");
3474 }
3475 } else {
3476 seq_printf(s, "PDMA");
3477 if (ucc->enable_acc32 || ucc->enable_burst)
3478 seq_printf(s, "[%s%s ]",
3479 ucc->enable_acc32 ? " ACC32" : "",
3480 ucc->enable_burst ? " BURST" : "");
3481 }
3482
3483 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
3484}
3485
3486static void udma_dbg_summary_show(struct seq_file *s,
3487 struct dma_device *dma_dev)
3488{
3489 struct dma_chan *chan;
3490
3491 list_for_each_entry(chan, &dma_dev->channels, device_node) {
3492 if (chan->client_count)
3493 udma_dbg_summary_show_chan(s, chan);
3494 }
3495}
3496#endif
3497
3498#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
3499 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
3500 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
3501 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
3502 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
3503
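/*
 * Probe: map the MMR regions, look up the TISCI, ringacc and MSI domain
 * handles from the device tree, register the dmaengine device and the
 * of-dma translation hook, and initialize one virt-dma channel per usable
 * tchan/rchan.
 */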
3504static int udma_probe(struct platform_device *pdev)
3505{
3506 struct device_node *navss_node = pdev->dev.parent->of_node;
3507 const struct soc_device_attribute *soc;
3508 struct device *dev = &pdev->dev;
3509 struct udma_dev *ud;
3510 const struct of_device_id *match;
3511 int i, ret;
3512 int ch_count;
3513
3514 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
3515 if (ret)
3516 dev_err(dev, "failed to set DMA mask\n");
3517
3518 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
3519 if (!ud)
3520 return -ENOMEM;
3521
3522 ret = udma_get_mmrs(pdev, ud);
3523 if (ret)
3524 return ret;
3525
3526 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
3527 if (IS_ERR(ud->tisci_rm.tisci))
3528 return PTR_ERR(ud->tisci_rm.tisci);
3529
3530 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
3531 &ud->tisci_rm.tisci_dev_id);
3532 if (ret) {
3533 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
3534 return ret;
3535 }
3536 pdev->id = ud->tisci_rm.tisci_dev_id;
3537
3538 ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
3539 &ud->tisci_rm.tisci_navss_dev_id);
3540 if (ret) {
3541 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
3542 return ret;
3543 }
3544
3545 ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
3546 if (!ret && ud->atype > 2) {
3547 dev_err(dev, "Invalid atype: %u\n", ud->atype);
3548 return -EINVAL;
3549 }
3550
3551 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
3552 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
3553
3554 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
3555 if (IS_ERR(ud->ringacc))
3556 return PTR_ERR(ud->ringacc);
3557
3558 dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
3559 DOMAIN_BUS_TI_SCI_INTA_MSI);
3560 if (!dev->msi_domain) {
3561 dev_err(dev, "Failed to get MSI domain\n");
3562 return -EPROBE_DEFER;
3563 }
3564
3565 match = of_match_node(udma_of_match, dev->of_node);
3566 if (!match) {
3567 dev_err(dev, "No compatible match found\n");
3568 return -ENODEV;
3569 }
3570 ud->match_data = match->data;
3571
3572 soc = soc_device_match(k3_soc_devices);
3573 if (!soc) {
3574 dev_err(dev, "No compatible SoC found\n");
3575 return -ENODEV;
3576 }
3577 ud->soc_data = soc->data;
3578
3579 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
3580 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
3581
3582 ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
3583 ud->ddev.device_config = udma_slave_config;
3584 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
3585 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
3586 ud->ddev.device_issue_pending = udma_issue_pending;
3587 ud->ddev.device_tx_status = udma_tx_status;
3588 ud->ddev.device_pause = udma_pause;
3589 ud->ddev.device_resume = udma_resume;
3590 ud->ddev.device_terminate_all = udma_terminate_all;
3591 ud->ddev.device_synchronize = udma_synchronize;
3592#ifdef CONFIG_DEBUG_FS
3593 ud->ddev.dbg_summary_show = udma_dbg_summary_show;
3594#endif
3595
3596 ud->ddev.device_free_chan_resources = udma_free_chan_resources;
3597 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
3598 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
3599 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3600 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3601 ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
3602 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
3603 DESC_METADATA_ENGINE;
3604 if (ud->match_data->enable_memcpy_support) {
3605 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
3606 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
3607 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
3608 }
3609
3610 ud->ddev.dev = dev;
3611 ud->dev = dev;
3612 ud->psil_base = ud->match_data->psil_base;
3613
3614 INIT_LIST_HEAD(&ud->ddev.channels);
3615 INIT_LIST_HEAD(&ud->desc_to_purge);
3616
3617 ch_count = udma_setup_resources(ud);
3618 if (ch_count <= 0)
3619 return ch_count;
3620
3621 spin_lock_init(&ud->lock);
3622 INIT_WORK(&ud->purge_work, udma_purge_desc_work);
3623
3624 ud->desc_align = 64;
3625 if (ud->desc_align < dma_get_cache_alignment())
3626 ud->desc_align = dma_get_cache_alignment();
3627
3628 ret = udma_setup_rx_flush(ud);
3629 if (ret)
3630 return ret;
3631
3632 for (i = 0; i < ud->tchan_cnt; i++) {
3633 struct udma_tchan *tchan = &ud->tchans[i];
3634
3635 tchan->id = i;
3636 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
3637 }
3638
3639 for (i = 0; i < ud->rchan_cnt; i++) {
3640 struct udma_rchan *rchan = &ud->rchans[i];
3641
3642 rchan->id = i;
3643 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
3644 }
3645
3646 for (i = 0; i < ud->rflow_cnt; i++) {
3647 struct udma_rflow *rflow = &ud->rflows[i];
3648
3649 rflow->id = i;
3650 }
3651
3652 for (i = 0; i < ch_count; i++) {
3653 struct udma_chan *uc = &ud->channels[i];
3654
3655 uc->ud = ud;
3656 uc->vc.desc_free = udma_desc_free;
3657 uc->id = i;
3658 uc->tchan = NULL;
3659 uc->rchan = NULL;
3660 uc->config.remote_thread_id = -1;
3661 uc->config.dir = DMA_MEM_TO_MEM;
3662 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
3663 dev_name(dev), i);
3664
3665 vchan_init(&uc->vc, &ud->ddev);
3666
3667 tasklet_setup(&uc->vc.task, udma_vchan_complete);
3668 init_completion(&uc->teardown_completed);
3669 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
3670 }
3671
3672 ret = dma_async_device_register(&ud->ddev);
3673 if (ret) {
3674 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
3675 return ret;
3676 }
3677
3678 platform_set_drvdata(pdev, ud);
3679
3680 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
3681 if (ret) {
3682 dev_err(dev, "failed to register of_dma controller\n");
3683 dma_async_device_unregister(&ud->ddev);
3684 }
3685
3686 return ret;
3687}
3688
3689static struct platform_driver udma_driver = {
3690 .driver = {
3691 .name = "ti-udma",
3692 .of_match_table = udma_of_match,
3693 .suppress_bind_attrs = true,
3694 },
3695 .probe = udma_probe,
3696};
3697builtin_platform_driver(udma_driver);
3698
3699/* Private interfaces to UDMA */
3700#include "k3-udma-private.c"
3701