uboot/drivers/dma/ti/k3-udma.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 *  Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
   4 *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
   5 */
   6#define pr_fmt(fmt) "udma: " fmt
   7
   8#include <common.h>
   9#include <cpu_func.h>
  10#include <log.h>
  11#include <asm/cache.h>
  12#include <asm/io.h>
  13#include <asm/bitops.h>
  14#include <malloc.h>
  15#include <linux/bitops.h>
  16#include <linux/dma-mapping.h>
  17#include <dm.h>
  18#include <dm/device_compat.h>
  19#include <dm/devres.h>
  20#include <dm/read.h>
  21#include <dm/of_access.h>
  22#include <dma.h>
  23#include <dma-uclass.h>
  24#include <linux/delay.h>
  25#include <linux/bitmap.h>
  26#include <linux/err.h>
  27#include <linux/soc/ti/k3-navss-ringacc.h>
  28#include <linux/soc/ti/cppi5.h>
  29#include <linux/soc/ti/ti-udma.h>
  30#include <linux/soc/ti/ti_sci_protocol.h>
  31#include <linux/soc/ti/cppi5.h>
  32
  33#include "k3-udma-hwdef.h"
  34#include "k3-psil-priv.h"
  35
  36#define K3_UDMA_MAX_RFLOWS 1024
  37
  38struct udma_chan;
  39
  40enum k3_dma_type {
  41        DMA_TYPE_UDMA = 0,
  42        DMA_TYPE_BCDMA,
  43        DMA_TYPE_PKTDMA,
  44};
  45
  46enum udma_mmr {
  47        MMR_GCFG = 0,
  48        MMR_BCHANRT,
  49        MMR_RCHANRT,
  50        MMR_TCHANRT,
  51        MMR_RCHAN,
  52        MMR_TCHAN,
  53        MMR_RFLOW,
  54        MMR_LAST,
  55};
  56
  57static const char * const mmr_names[] = {
  58        [MMR_GCFG] = "gcfg",
  59        [MMR_BCHANRT] = "bchanrt",
  60        [MMR_RCHANRT] = "rchanrt",
  61        [MMR_TCHANRT] = "tchanrt",
  62        [MMR_RCHAN] = "rchan",
  63        [MMR_TCHAN] = "tchan",
  64        [MMR_RFLOW] = "rflow",
  65};
  66
  67struct udma_tchan {
  68        void __iomem *reg_chan;
  69        void __iomem *reg_rt;
  70
  71        int id;
  72        struct k3_nav_ring *t_ring; /* Transmit ring */
  73        struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
  74        int tflow_id; /* applicable only for PKTDMA */
  75
  76};
  77
  78#define udma_bchan udma_tchan
  79
  80struct udma_rflow {
  81        void __iomem *reg_rflow;
  82        int id;
  83        struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
  84        struct k3_nav_ring *r_ring; /* Receive ring */
  85};
  86
  87struct udma_rchan {
  88        void __iomem *reg_chan;
  89        void __iomem *reg_rt;
  90
  91        int id;
  92};
  93
  94struct udma_oes_offsets {
  95        /* K3 UDMA Output Event Offset */
  96        u32 udma_rchan;
  97
  98        /* BCDMA Output Event Offsets */
  99        u32 bcdma_bchan_data;
 100        u32 bcdma_bchan_ring;
 101        u32 bcdma_tchan_data;
 102        u32 bcdma_tchan_ring;
 103        u32 bcdma_rchan_data;
 104        u32 bcdma_rchan_ring;
 105
 106        /* PKTDMA Output Event Offsets */
 107        u32 pktdma_tchan_flow;
 108        u32 pktdma_rchan_flow;
 109};
 110
 111#define UDMA_FLAG_PDMA_ACC32            BIT(0)
 112#define UDMA_FLAG_PDMA_BURST            BIT(1)
 113#define UDMA_FLAG_TDTYPE                BIT(2)
 114
 115struct udma_match_data {
 116        enum k3_dma_type type;
 117        u32 psil_base;
 118        bool enable_memcpy_support;
 119        u32 flags;
 120        u32 statictr_z_mask;
 121        struct udma_oes_offsets oes;
 122
 123        u8 tpl_levels;
 124        u32 level_start_idx[];
 125};
 126
 127enum udma_rm_range {
 128        RM_RANGE_BCHAN = 0,
 129        RM_RANGE_TCHAN,
 130        RM_RANGE_RCHAN,
 131        RM_RANGE_RFLOW,
 132        RM_RANGE_TFLOW,
 133        RM_RANGE_LAST,
 134};
 135
 136struct udma_tisci_rm {
 137        const struct ti_sci_handle *tisci;
 138        const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
 139        u32  tisci_dev_id;
 140
 141        /* tisci information for PSI-L thread pairing/unpairing */
 142        const struct ti_sci_rm_psil_ops *tisci_psil_ops;
 143        u32  tisci_navss_dev_id;
 144
 145        struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
 146};
 147
 148struct udma_dev {
 149        struct udevice *dev;
 150        void __iomem *mmrs[MMR_LAST];
 151
 152        struct udma_tisci_rm tisci_rm;
 153        struct k3_nav_ringacc *ringacc;
 154
 155        u32 features;
 156
 157        int bchan_cnt;
 158        int tchan_cnt;
 159        int echan_cnt;
 160        int rchan_cnt;
 161        int rflow_cnt;
 162        int tflow_cnt;
 163        unsigned long *bchan_map;
 164        unsigned long *tchan_map;
 165        unsigned long *rchan_map;
 166        unsigned long *rflow_map;
 167        unsigned long *rflow_map_reserved;
 168        unsigned long *rflow_in_use;
 169        unsigned long *tflow_map;
 170
 171        struct udma_bchan *bchans;
 172        struct udma_tchan *tchans;
 173        struct udma_rchan *rchans;
 174        struct udma_rflow *rflows;
 175
 176        struct udma_match_data *match_data;
 177
 178        struct udma_chan *channels;
 179        u32 psil_base;
 180
 181        u32 ch_count;
 182};
 183
 184struct udma_chan_config {
 185        u32 psd_size; /* size of Protocol Specific Data */
 186        u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
 187        u32 hdesc_size; /* Size of a packet descriptor in packet mode */
 188        int remote_thread_id;
 189        u32 atype;
 190        u32 src_thread;
 191        u32 dst_thread;
 192        enum psil_endpoint_type ep_type;
 193        enum udma_tp_level channel_tpl; /* Channel Throughput Level */
 194
 195        /* PKTDMA mapped channel */
 196        int mapped_channel_id;
 197        /* PKTDMA default tflow or rflow for mapped channel */
 198        int default_flow_id;
 199
 200        enum dma_direction dir;
 201
 202        unsigned int pkt_mode:1; /* TR or packet */
 203        unsigned int needs_epib:1; /* EPIB is needed for the communication or not */
 204        unsigned int enable_acc32:1;
 205        unsigned int enable_burst:1;
 206        unsigned int notdpkt:1; /* Suppress sending TDC packet */
 207};
 208
 209struct udma_chan {
 210        struct udma_dev *ud;
 211        char name[20];
 212
 213        struct udma_bchan *bchan;
 214        struct udma_tchan *tchan;
 215        struct udma_rchan *rchan;
 216        struct udma_rflow *rflow;
 217
 218        struct ti_udma_drv_chan_cfg_data cfg_data;
 219
 220        u32 bcnt; /* number of bytes completed since the start of the channel */
 221
 222        struct udma_chan_config config;
 223
 224        u32 id;
 225
 226        struct cppi5_host_desc_t *desc_tx;
 227        bool in_use;
 228        void    *desc_rx;
 229        u32     num_rx_bufs;
 230        u32     desc_rx_cur;
 231
 232};
 233
 234#define UDMA_CH_1000(ch)                (ch * 0x1000)
 235#define UDMA_CH_100(ch)                 (ch * 0x100)
 236#define UDMA_CH_40(ch)                  (ch * 0x40)
 237
 238#ifdef PKTBUFSRX
 239#define UDMA_RX_DESC_NUM PKTBUFSRX
 240#else
 241#define UDMA_RX_DESC_NUM 4
 242#endif
 243
 244/* Generic register access functions */
 245static inline u32 udma_read(void __iomem *base, int reg)
 246{
 247        u32 v;
 248
 249        v = __raw_readl(base + reg);
 250        pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
 251        return v;
 252}
 253
 254static inline void udma_write(void __iomem *base, int reg, u32 val)
 255{
 256        pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
 257        __raw_writel(val, base + reg);
 258}
 259
 260static inline void udma_update_bits(void __iomem *base, int reg,
 261                                    u32 mask, u32 val)
 262{
 263        u32 tmp, orig;
 264
 265        orig = udma_read(base, reg);
 266        tmp = orig & ~mask;
 267        tmp |= (val & mask);
 268
 269        if (tmp != orig)
 270                udma_write(base, reg, tmp);
 271}
 272
 273/* TCHANRT */
 274static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
 275{
 276        if (!tchan)
 277                return 0;
 278        return udma_read(tchan->reg_rt, reg);
 279}
 280
 281static inline void udma_tchanrt_write(struct udma_tchan *tchan,
 282                                      int reg, u32 val)
 283{
 284        if (!tchan)
 285                return;
 286        udma_write(tchan->reg_rt, reg, val);
 287}
 288
 289/* RCHANRT */
 290static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
 291{
 292        if (!rchan)
 293                return 0;
 294        return udma_read(rchan->reg_rt, reg);
 295}
 296
 297static inline void udma_rchanrt_write(struct udma_rchan *rchan,
 298                                      int reg, u32 val)
 299{
 300        if (!rchan)
 301                return;
 302        udma_write(rchan->reg_rt, reg, val);
 303}
 304
 305static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
 306                                       u32 dst_thread)
 307{
 308        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
 309
 310        dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
 311
 312        return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
 313                                              tisci_rm->tisci_navss_dev_id,
 314                                              src_thread, dst_thread);
 315}
 316
 317static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
 318                                         u32 dst_thread)
 319{
 320        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
 321
 322        dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;
 323
 324        return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
 325                                                tisci_rm->tisci_navss_dev_id,
 326                                                src_thread, dst_thread);
 327}
 328
 329static inline char *udma_get_dir_text(enum dma_direction dir)
 330{
 331        switch (dir) {
 332        case DMA_DEV_TO_MEM:
 333                return "DEV_TO_MEM";
 334        case DMA_MEM_TO_DEV:
 335                return "MEM_TO_DEV";
 336        case DMA_MEM_TO_MEM:
 337                return "MEM_TO_MEM";
 338        case DMA_DEV_TO_DEV:
 339                return "DEV_TO_DEV";
 340        default:
 341                break;
 342        }
 343
 344        return "invalid";
 345}
 346
 347#include "k3-udma-u-boot.c"
 348
 349static void udma_reset_uchan(struct udma_chan *uc)
 350{
 351        memset(&uc->config, 0, sizeof(uc->config));
 352        uc->config.remote_thread_id = -1;
 353        uc->config.mapped_channel_id = -1;
 354        uc->config.default_flow_id = -1;
 355}
 356
 357static inline bool udma_is_chan_running(struct udma_chan *uc)
 358{
 359        u32 trt_ctl = 0;
 360        u32 rrt_ctl = 0;
 361
 362        switch (uc->config.dir) {
 363        case DMA_DEV_TO_MEM:
 364                rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
 365                pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
 366                         __func__, rrt_ctl,
 367                         udma_rchanrt_read(uc->rchan,
 368                                           UDMA_RCHAN_RT_PEER_RT_EN_REG));
 369                break;
 370        case DMA_MEM_TO_DEV:
 371                trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
 372                pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
 373                         __func__, trt_ctl,
 374                         udma_tchanrt_read(uc->tchan,
 375                                           UDMA_TCHAN_RT_PEER_RT_EN_REG));
 376                break;
 377        case DMA_MEM_TO_MEM:
 378                trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
 379                rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
 380                break;
 381        default:
 382                break;
 383        }
 384
 385        if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
 386                return true;
 387
 388        return false;
 389}
 390
 391static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
 392{
 393        struct k3_nav_ring *ring = NULL;
 394        int ret = -ENOENT;
 395
 396        switch (uc->config.dir) {
 397        case DMA_DEV_TO_MEM:
 398                ring = uc->rflow->r_ring;
 399                break;
 400        case DMA_MEM_TO_DEV:
 401                ring = uc->tchan->tc_ring;
 402                break;
 403        case DMA_MEM_TO_MEM:
 404                ring = uc->tchan->tc_ring;
 405                break;
 406        default:
 407                break;
 408        }
 409
 410        if (ring && k3_nav_ringacc_ring_get_occ(ring))
 411                ret = k3_nav_ringacc_ring_pop(ring, addr);
 412
 413        return ret;
 414}
 415
 416static void udma_reset_rings(struct udma_chan *uc)
 417{
 418        struct k3_nav_ring *ring1 = NULL;
 419        struct k3_nav_ring *ring2 = NULL;
 420
 421        switch (uc->config.dir) {
 422        case DMA_DEV_TO_MEM:
 423                ring1 = uc->rflow->fd_ring;
 424                ring2 = uc->rflow->r_ring;
 425                break;
 426        case DMA_MEM_TO_DEV:
 427                ring1 = uc->tchan->t_ring;
 428                ring2 = uc->tchan->tc_ring;
 429                break;
 430        case DMA_MEM_TO_MEM:
 431                ring1 = uc->tchan->t_ring;
 432                ring2 = uc->tchan->tc_ring;
 433                break;
 434        default:
 435                break;
 436        }
 437
 438        if (ring1)
 439                k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1));
 440        if (ring2)
 441                k3_nav_ringacc_ring_reset(ring2);
 442}
 443
 444static void udma_reset_counters(struct udma_chan *uc)
 445{
 446        u32 val;
 447
 448        if (uc->tchan) {
 449                val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
 450                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);
 451
 452                val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
 453                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);
 454
 455                val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
 456                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);
 457
 458                if (!uc->bchan) {
 459                        val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
 460                        udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
 461                }
 462        }
 463
 464        if (uc->rchan) {
 465                val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
 466                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);
 467
 468                val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
 469                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);
 470
 471                val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
 472                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);
 473
 474                val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
 475                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
 476        }
 477
 478        uc->bcnt = 0;
 479}
 480
 481static inline int udma_stop_hard(struct udma_chan *uc)
 482{
 483        pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);
 484
 485        switch (uc->config.dir) {
 486        case DMA_DEV_TO_MEM:
 487                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
 488                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
 489                break;
 490        case DMA_MEM_TO_DEV:
 491                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
 492                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
 493                break;
 494        case DMA_MEM_TO_MEM:
 495                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
 496                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
 497                break;
 498        default:
 499                return -EINVAL;
 500        }
 501
 502        return 0;
 503}
 504
 505static int udma_start(struct udma_chan *uc)
 506{
 507        /* Channel is already running, no need to proceed further */
 508        if (udma_is_chan_running(uc))
 509                goto out;
 510
 511        pr_debug("%s: chan:%d dir:%s\n",
 512                 __func__, uc->id, udma_get_dir_text(uc->config.dir));
 513
 514        /* Make sure that we clear the teardown bit, if it is set */
 515        udma_stop_hard(uc);
 516
 517        /* Reset all counters */
 518        udma_reset_counters(uc);
 519
 520        switch (uc->config.dir) {
 521        case DMA_DEV_TO_MEM:
 522                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
 523                                   UDMA_CHAN_RT_CTL_EN);
 524
 525                /* Enable remote */
 526                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
 527                                   UDMA_PEER_RT_EN_ENABLE);
 528
 529                pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
 530                         __func__,
 531                         udma_rchanrt_read(uc->rchan,
 532                                           UDMA_RCHAN_RT_CTL_REG),
 533                         udma_rchanrt_read(uc->rchan,
 534                                           UDMA_RCHAN_RT_PEER_RT_EN_REG));
 535                break;
 536        case DMA_MEM_TO_DEV:
 537                /* Enable remote */
 538                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
 539                                   UDMA_PEER_RT_EN_ENABLE);
 540
 541                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
 542                                   UDMA_CHAN_RT_CTL_EN);
 543
 544                pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
 545                         __func__,
 546                         udma_tchanrt_read(uc->tchan,
 547                                           UDMA_TCHAN_RT_CTL_REG),
 548                         udma_tchanrt_read(uc->tchan,
 549                                           UDMA_TCHAN_RT_PEER_RT_EN_REG));
 550                break;
 551        case DMA_MEM_TO_MEM:
 552                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
 553                                   UDMA_CHAN_RT_CTL_EN);
 554                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
 555                                   UDMA_CHAN_RT_CTL_EN);
 556
 557                break;
 558        default:
 559                return -EINVAL;
 560        }
 561
 562        pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
 563out:
 564        return 0;
 565}
 566
 567static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
 568{
 569        int i = 0;
 570        u32 val;
 571
 572        udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
 573                           UDMA_CHAN_RT_CTL_EN |
 574                           UDMA_CHAN_RT_CTL_TDOWN);
 575
 576        val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
 577
 578        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
 579                val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
 580                udelay(1);
 581                if (i > 1000) {
 582                        printf(" %s TIMEOUT !\n", __func__);
 583                        break;
 584                }
 585                i++;
 586        }
 587
 588        val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
 589        if (val & UDMA_PEER_RT_EN_ENABLE)
 590                printf("%s: peer not stopped TIMEOUT !\n", __func__);
 591}
 592
 593static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
 594{
 595        int i = 0;
 596        u32 val;
 597
 598        udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
 599                           UDMA_PEER_RT_EN_ENABLE |
 600                           UDMA_PEER_RT_EN_TEARDOWN);
 601
 602        val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
 603
 604        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
 605                val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
 606                udelay(1);
 607                if (i > 1000) {
 608                        printf("%s TIMEOUT !\n", __func__);
 609                        break;
 610                }
 611                i++;
 612        }
 613
 614        val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
 615        if (val & UDMA_PEER_RT_EN_ENABLE)
 616                printf("%s: peer not stopped TIMEOUT !\n", __func__);
 617}
 618
 619static inline int udma_stop(struct udma_chan *uc)
 620{
 621        pr_debug("%s: chan:%d dir:%s\n",
 622                 __func__, uc->id, udma_get_dir_text(uc->config.dir));
 623
 624        udma_reset_counters(uc);
 625        switch (uc->config.dir) {
 626        case DMA_DEV_TO_MEM:
 627                udma_stop_dev2mem(uc, true);
 628                break;
 629        case DMA_MEM_TO_DEV:
 630                udma_stop_mem2dev(uc, true);
 631                break;
 632        case DMA_MEM_TO_MEM:
 633                udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
 634                udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
 635                break;
 636        default:
 637                return -EINVAL;
 638        }
 639
 640        return 0;
 641}
 642
 643static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
 644{
 645        int i = 1;
 646
 647        while (udma_pop_from_ring(uc, paddr)) {
 648                udelay(1);
 649                if (!(i % 1000000))
 650                        printf(".");
 651                i++;
 652        }
 653}
 654
 655static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
 656{
 657        DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
 658
 659        if (id >= 0) {
 660                if (test_bit(id, ud->rflow_map)) {
 661                        dev_err(ud->dev, "rflow%d is in use\n", id);
 662                        return ERR_PTR(-ENOENT);
 663                }
 664        } else {
 665                bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
 666                          ud->rflow_cnt);
 667
 668                id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
 669                if (id >= ud->rflow_cnt)
 670                        return ERR_PTR(-ENOENT);
 671        }
 672
 673        __set_bit(id, ud->rflow_map);
 674        return &ud->rflows[id];
 675}
 676
 677#define UDMA_RESERVE_RESOURCE(res)                                      \
 678static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,     \
 679                                               int id)                  \
 680{                                                                       \
 681        if (id >= 0) {                                                  \
 682                if (test_bit(id, ud->res##_map)) {                      \
 683                        dev_err(ud->dev, "res##%d is in use\n", id);    \
 684                        return ERR_PTR(-ENOENT);                        \
 685                }                                                       \
 686        } else {                                                        \
 687                id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
 688                if (id == ud->res##_cnt) {                              \
 689                        return ERR_PTR(-ENOENT);                        \
 690                }                                                       \
 691        }                                                               \
 692                                                                        \
 693        __set_bit(id, ud->res##_map);                                   \
 694        return &ud->res##s[id];                                         \
 695}
 696
 697UDMA_RESERVE_RESOURCE(tchan);
 698UDMA_RESERVE_RESOURCE(rchan);
 699
 700static int udma_get_tchan(struct udma_chan *uc)
 701{
 702        struct udma_dev *ud = uc->ud;
 703
 704        if (uc->tchan) {
 705                dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
 706                        uc->id, uc->tchan->id);
 707                return 0;
 708        }
 709
 710        uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
 711        if (IS_ERR(uc->tchan))
 712                return PTR_ERR(uc->tchan);
 713
 714        if (ud->tflow_cnt) {
 715                int tflow_id;
 716
 717                /* Only PKTDMA have support for tx flows */
 718                if (uc->config.default_flow_id >= 0)
 719                        tflow_id = uc->config.default_flow_id;
 720                else
 721                        tflow_id = uc->tchan->id;
 722
 723                if (test_bit(tflow_id, ud->tflow_map)) {
 724                        dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
 725                        __clear_bit(uc->tchan->id, ud->tchan_map);
 726                        uc->tchan = NULL;
 727                        return -ENOENT;
 728                }
 729
 730                uc->tchan->tflow_id = tflow_id;
 731                __set_bit(tflow_id, ud->tflow_map);
 732        } else {
 733                uc->tchan->tflow_id = -1;
 734        }
 735
 736        pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);
 737
 738        return 0;
 739}
 740
 741static int udma_get_rchan(struct udma_chan *uc)
 742{
 743        struct udma_dev *ud = uc->ud;
 744
 745        if (uc->rchan) {
 746                dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
 747                        uc->id, uc->rchan->id);
 748                return 0;
 749        }
 750
 751        uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
 752        if (IS_ERR(uc->rchan))
 753                return PTR_ERR(uc->rchan);
 754
 755        pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);
 756
 757        return 0;
 758}
 759
 760static int udma_get_chan_pair(struct udma_chan *uc)
 761{
 762        struct udma_dev *ud = uc->ud;
 763        int chan_id, end;
 764
 765        if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
 766                dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
 767                         uc->id, uc->tchan->id);
 768                return 0;
 769        }
 770
 771        if (uc->tchan) {
 772                dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
 773                        uc->id, uc->tchan->id);
 774                return -EBUSY;
 775        } else if (uc->rchan) {
 776                dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
 777                        uc->id, uc->rchan->id);
 778                return -EBUSY;
 779        }
 780
 781        /* Can be optimized, but let's have it like this for now */
 782        end = min(ud->tchan_cnt, ud->rchan_cnt);
 783        for (chan_id = 0; chan_id < end; chan_id++) {
 784                if (!test_bit(chan_id, ud->tchan_map) &&
 785                    !test_bit(chan_id, ud->rchan_map))
 786                        break;
 787        }
 788
 789        if (chan_id == end)
 790                return -ENOENT;
 791
 792        __set_bit(chan_id, ud->tchan_map);
 793        __set_bit(chan_id, ud->rchan_map);
 794        uc->tchan = &ud->tchans[chan_id];
 795        uc->rchan = &ud->rchans[chan_id];
 796
 797        pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);
 798
 799        return 0;
 800}
 801
 802static int udma_get_rflow(struct udma_chan *uc, int flow_id)
 803{
 804        struct udma_dev *ud = uc->ud;
 805
 806        if (uc->rflow) {
 807                dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
 808                        uc->id, uc->rflow->id);
 809                return 0;
 810        }
 811
 812        if (!uc->rchan)
 813                dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);
 814
 815        uc->rflow = __udma_reserve_rflow(ud, flow_id);
 816        if (IS_ERR(uc->rflow))
 817                return PTR_ERR(uc->rflow);
 818
 819        pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
 820        return 0;
 821}
 822
 823static void udma_put_rchan(struct udma_chan *uc)
 824{
 825        struct udma_dev *ud = uc->ud;
 826
 827        if (uc->rchan) {
 828                dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
 829                        uc->rchan->id);
 830                __clear_bit(uc->rchan->id, ud->rchan_map);
 831                uc->rchan = NULL;
 832        }
 833}
 834
 835static void udma_put_tchan(struct udma_chan *uc)
 836{
 837        struct udma_dev *ud = uc->ud;
 838
 839        if (uc->tchan) {
 840                dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
 841                        uc->tchan->id);
 842                __clear_bit(uc->tchan->id, ud->tchan_map);
 843                if (uc->tchan->tflow_id >= 0)
 844                        __clear_bit(uc->tchan->tflow_id, ud->tflow_map);
 845                uc->tchan = NULL;
 846        }
 847}
 848
 849static void udma_put_rflow(struct udma_chan *uc)
 850{
 851        struct udma_dev *ud = uc->ud;
 852
 853        if (uc->rflow) {
 854                dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
 855                        uc->rflow->id);
 856                __clear_bit(uc->rflow->id, ud->rflow_map);
 857                uc->rflow = NULL;
 858        }
 859}
 860
 861static void udma_free_tx_resources(struct udma_chan *uc)
 862{
 863        if (!uc->tchan)
 864                return;
 865
 866        k3_nav_ringacc_ring_free(uc->tchan->t_ring);
 867        k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
 868        uc->tchan->t_ring = NULL;
 869        uc->tchan->tc_ring = NULL;
 870
 871        udma_put_tchan(uc);
 872}
 873
 874static int udma_alloc_tx_resources(struct udma_chan *uc)
 875{
 876        struct k3_nav_ring_cfg ring_cfg;
 877        struct udma_dev *ud = uc->ud;
 878        int ret;
 879
 880        ret = udma_get_tchan(uc);
 881        if (ret)
 882                return ret;
 883
 884        ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
 885                                                &uc->tchan->t_ring,
 886                                                &uc->tchan->tc_ring);
 887        if (ret) {
 888                ret = -EBUSY;
 889                goto err_tx_ring;
 890        }
 891
 892        memset(&ring_cfg, 0, sizeof(ring_cfg));
 893        ring_cfg.size = 16;
 894        ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
 895        ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
 896
 897        ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
 898        ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
 899
 900        if (ret)
 901                goto err_ringcfg;
 902
 903        return 0;
 904
 905err_ringcfg:
 906        k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
 907        uc->tchan->tc_ring = NULL;
 908        k3_nav_ringacc_ring_free(uc->tchan->t_ring);
 909        uc->tchan->t_ring = NULL;
 910err_tx_ring:
 911        udma_put_tchan(uc);
 912
 913        return ret;
 914}
 915
 916static void udma_free_rx_resources(struct udma_chan *uc)
 917{
 918        if (!uc->rchan)
 919                return;
 920
 921        if (uc->rflow) {
 922                k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
 923                k3_nav_ringacc_ring_free(uc->rflow->r_ring);
 924                uc->rflow->fd_ring = NULL;
 925                uc->rflow->r_ring = NULL;
 926
 927                udma_put_rflow(uc);
 928        }
 929
 930        udma_put_rchan(uc);
 931}
 932
 933static int udma_alloc_rx_resources(struct udma_chan *uc)
 934{
 935        struct k3_nav_ring_cfg ring_cfg;
 936        struct udma_dev *ud = uc->ud;
 937        struct udma_rflow *rflow;
 938        int fd_ring_id;
 939        int ret;
 940
 941        ret = udma_get_rchan(uc);
 942        if (ret)
 943                return ret;
 944
 945        /* For MEM_TO_MEM we don't need rflow or rings */
 946        if (uc->config.dir == DMA_MEM_TO_MEM)
 947                return 0;
 948
 949        if (uc->config.default_flow_id >= 0)
 950                ret = udma_get_rflow(uc, uc->config.default_flow_id);
 951        else
 952                ret = udma_get_rflow(uc, uc->rchan->id);
 953
 954        if (ret) {
 955                ret = -EBUSY;
 956                goto err_rflow;
 957        }
 958
 959        rflow = uc->rflow;
 960        if (ud->tflow_cnt) {
 961                fd_ring_id = ud->tflow_cnt + rflow->id;
 962        } else {
 963                fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
 964                        uc->rchan->id;
 965        }
 966
 967        ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
 968                                                &rflow->fd_ring, &rflow->r_ring);
 969        if (ret) {
 970                ret = -EBUSY;
 971                goto err_rx_ring;
 972        }
 973
 974        memset(&ring_cfg, 0, sizeof(ring_cfg));
 975        ring_cfg.size = 16;
 976        ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
 977        ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
 978
 979        ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
 980        ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
 981        if (ret)
 982                goto err_ringcfg;
 983
 984        return 0;
 985
 986err_ringcfg:
 987        k3_nav_ringacc_ring_free(rflow->r_ring);
 988        rflow->r_ring = NULL;
 989        k3_nav_ringacc_ring_free(rflow->fd_ring);
 990        rflow->fd_ring = NULL;
 991err_rx_ring:
 992        udma_put_rflow(uc);
 993err_rflow:
 994        udma_put_rchan(uc);
 995
 996        return ret;
 997}
 998
 999static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
1000{
1001        struct udma_dev *ud = uc->ud;
1002        int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1003        struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
1004        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1005        u32 mode;
1006        int ret;
1007
1008        if (uc->config.pkt_mode)
1009                mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1010        else
1011                mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1012
1013        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
1014                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
1015                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
1016        req.nav_id = tisci_rm->tisci_dev_id;
1017        req.index = uc->tchan->id;
1018        req.tx_chan_type = mode;
1019        if (uc->config.dir == DMA_MEM_TO_MEM)
1020                req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1021        else
1022                req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1023                                                          uc->config.psd_size,
1024                                                          0) >> 2;
1025        req.txcq_qnum = tc_ring;
1026
1027        ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
1028        if (ret) {
1029                dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
1030                return ret;
1031        }
1032
1033        /*
1034         * Above TI SCI call handles firewall configuration, cfg
1035         * register configuration still has to be done locally in
1036         * absence of RM services.
1037         */
1038        if (IS_ENABLED(CONFIG_K3_DM_FW))
1039                udma_alloc_tchan_raw(uc);
1040
1041        return 0;
1042}
1043
1044static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
1045{
1046        struct udma_dev *ud = uc->ud;
1047        int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
1048        int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
1049        int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1050        struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
1051        struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
1052        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1053        u32 mode;
1054        int ret;
1055
1056        if (uc->config.pkt_mode)
1057                mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1058        else
1059                mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1060
1061        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
1062                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
1063                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
1064        req.nav_id = tisci_rm->tisci_dev_id;
1065        req.index = uc->rchan->id;
1066        req.rx_chan_type = mode;
1067        if (uc->config.dir == DMA_MEM_TO_MEM) {
1068                req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1069                req.rxcq_qnum = tc_ring;
1070        } else {
1071                req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1072                                                          uc->config.psd_size,
1073                                                          0) >> 2;
1074                req.rxcq_qnum = rx_ring;
1075        }
1076        if (ud->match_data->type == DMA_TYPE_UDMA &&
1077            uc->rflow->id != uc->rchan->id &&
1078            uc->config.dir != DMA_MEM_TO_MEM) {
1079                req.flowid_start = uc->rflow->id;
1080                req.flowid_cnt = 1;
1081                req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
1082                                    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
1083        }
1084
1085        ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
1086        if (ret) {
1087                dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
1088                        uc->rchan->id, ret);
1089                return ret;
1090        }
1091        if (uc->config.dir == DMA_MEM_TO_MEM)
1092                return ret;
1093
1094        flow_req.valid_params =
1095                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
1096                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
1097                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
1098                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
1099                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
1100                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
1101                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
1102                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
1103                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
1104                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
1105                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
1106                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
1107                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
1108                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;
1109
1110        flow_req.nav_id = tisci_rm->tisci_dev_id;
1111        flow_req.flow_index = uc->rflow->id;
1112
1113        if (uc->config.needs_epib)
1114                flow_req.rx_einfo_present = 1;
1115        else
1116                flow_req.rx_einfo_present = 0;
1117
1118        if (uc->config.psd_size)
1119                flow_req.rx_psinfo_present = 1;
1120        else
1121                flow_req.rx_psinfo_present = 0;
1122
1123        flow_req.rx_error_handling = 0;
1124        flow_req.rx_desc_type = 0;
1125        flow_req.rx_dest_qnum = rx_ring;
1126        flow_req.rx_src_tag_hi_sel = 2;
1127        flow_req.rx_src_tag_lo_sel = 4;
1128        flow_req.rx_dest_tag_hi_sel = 5;
1129        flow_req.rx_dest_tag_lo_sel = 4;
1130        flow_req.rx_fdq0_sz0_qnum = fd_ring;
1131        flow_req.rx_fdq1_qnum = fd_ring;
1132        flow_req.rx_fdq2_qnum = fd_ring;
1133        flow_req.rx_fdq3_qnum = fd_ring;
1134        flow_req.rx_ps_location = 0;
1135
1136        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
1137                                                     &flow_req);
1138        if (ret) {
1139                dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
1140                        uc->rchan->id, uc->rflow->id, ret);
1141                return ret;
1142        }
1143
1144        /*
1145         * Above TI SCI call handles firewall configuration, cfg
1146         * register configuration still has to be done locally in
1147         * absence of RM services.
1148         */
1149        if (IS_ENABLED(CONFIG_K3_DM_FW))
1150                udma_alloc_rchan_raw(uc);
1151
1152        return 0;
1153}
1154
1155static int udma_alloc_chan_resources(struct udma_chan *uc)
1156{
1157        struct udma_dev *ud = uc->ud;
1158        int ret;
1159
1160        pr_debug("%s: chan:%d as %s\n",
1161                 __func__, uc->id, udma_get_dir_text(uc->config.dir));
1162
1163        switch (uc->config.dir) {
1164        case DMA_MEM_TO_MEM:
1165                /* Non synchronized - mem to mem type of transfer */
1166                uc->config.pkt_mode = false;
1167                ret = udma_get_chan_pair(uc);
1168                if (ret)
1169                        return ret;
1170
1171                ret = udma_alloc_tx_resources(uc);
1172                if (ret)
1173                        goto err_free_res;
1174
1175                ret = udma_alloc_rx_resources(uc);
1176                if (ret)
1177                        goto err_free_res;
1178
1179                uc->config.src_thread = ud->psil_base + uc->tchan->id;
1180                uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1181                break;
1182        case DMA_MEM_TO_DEV:
1183                /* Slave transfer synchronized - mem to dev (TX) trasnfer */
1184                ret = udma_alloc_tx_resources(uc);
1185                if (ret)
1186                        goto err_free_res;
1187
1188                uc->config.src_thread = ud->psil_base + uc->tchan->id;
1189                uc->config.dst_thread = uc->config.remote_thread_id;
1190                uc->config.dst_thread |= 0x8000;
1191
1192                break;
1193        case DMA_DEV_TO_MEM:
1194                /* Slave transfer synchronized - dev to mem (RX) trasnfer */
1195                ret = udma_alloc_rx_resources(uc);
1196                if (ret)
1197                        goto err_free_res;
1198
1199                uc->config.src_thread = uc->config.remote_thread_id;
1200                uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
1201
1202                break;
1203        default:
1204                /* Can not happen */
1205                pr_debug("%s: chan:%d invalid direction (%u)\n",
1206                         __func__, uc->id, uc->config.dir);
1207                return -EINVAL;
1208        }
1209
1210        /* We have channel indexes and rings */
1211        if (uc->config.dir == DMA_MEM_TO_MEM) {
1212                ret = udma_alloc_tchan_sci_req(uc);
1213                if (ret)
1214                        goto err_free_res;
1215
1216                ret = udma_alloc_rchan_sci_req(uc);
1217                if (ret)
1218                        goto err_free_res;
1219        } else {
1220                /* Slave transfer */
1221                if (uc->config.dir == DMA_MEM_TO_DEV) {
1222                        ret = udma_alloc_tchan_sci_req(uc);
1223                        if (ret)
1224                                goto err_free_res;
1225                } else {
1226                        ret = udma_alloc_rchan_sci_req(uc);
1227                        if (ret)
1228                                goto err_free_res;
1229                }
1230        }
1231
1232        if (udma_is_chan_running(uc)) {
1233                dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
1234                udma_stop(uc);
1235                if (udma_is_chan_running(uc)) {
1236                        dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
1237                        goto err_free_res;
1238                }
1239        }
1240
1241        /* PSI-L pairing */
1242        ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
1243        if (ret) {
1244                dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
1245                goto err_free_res;
1246        }
1247
1248        return 0;
1249
1250err_free_res:
1251        udma_free_tx_resources(uc);
1252        udma_free_rx_resources(uc);
1253        uc->config.remote_thread_id = -1;
1254        return ret;
1255}
1256
1257static void udma_free_chan_resources(struct udma_chan *uc)
1258{
1259        /* Hard reset UDMA channel */
1260        udma_stop_hard(uc);
1261        udma_reset_counters(uc);
1262
1263        /* Release PSI-L pairing */
1264        udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);
1265
1266        /* Reset the rings for a new start */
1267        udma_reset_rings(uc);
1268        udma_free_tx_resources(uc);
1269        udma_free_rx_resources(uc);
1270
1271        uc->config.remote_thread_id = -1;
1272        uc->config.dir = DMA_MEM_TO_MEM;
1273}
1274
1275static const char * const range_names[] = {
1276        [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
1277        [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
1278        [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
1279        [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
1280        [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
1281};
1282
1283static int udma_get_mmrs(struct udevice *dev)
1284{
1285        struct udma_dev *ud = dev_get_priv(dev);
1286        u32 cap2, cap3, cap4;
1287        int i;
1288
1289        ud->mmrs[MMR_GCFG] = (uint32_t *)devfdt_get_addr_name(dev, mmr_names[MMR_GCFG]);
1290        if (!ud->mmrs[MMR_GCFG])
1291                return -EINVAL;
1292
1293        cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
1294        cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
1295
1296        switch (ud->match_data->type) {
1297        case DMA_TYPE_UDMA:
1298                ud->rflow_cnt = cap3 & 0x3fff;
1299                ud->tchan_cnt = cap2 & 0x1ff;
1300                ud->echan_cnt = (cap2 >> 9) & 0x1ff;
1301                ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1302                break;
1303        case DMA_TYPE_BCDMA:
1304                ud->bchan_cnt = cap2 & 0x1ff;
1305                ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
1306                ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1307                break;
1308        case DMA_TYPE_PKTDMA:
1309                cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
1310                ud->tchan_cnt = cap2 & 0x1ff;
1311                ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
1312                ud->rflow_cnt = cap3 & 0x3fff;
1313                ud->tflow_cnt = cap4 & 0x3fff;
1314                break;
1315        default:
1316                return -EINVAL;
1317        }
1318
1319        for (i = 1; i < MMR_LAST; i++) {
1320                if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
1321                        continue;
1322                if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
1323                        continue;
1324                if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
1325                        continue;
1326
1327                ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
1328                                mmr_names[i]);
1329                if (!ud->mmrs[i])
1330                        return -EINVAL;
1331        }
1332
1333        return 0;
1334}
1335
1336static int udma_setup_resources(struct udma_dev *ud)
1337{
1338        struct udevice *dev = ud->dev;
1339        int i;
1340        struct ti_sci_resource_desc *rm_desc;
1341        struct ti_sci_resource *rm_res;
1342        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1343
1344        ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1345                                           sizeof(unsigned long), GFP_KERNEL);
1346        ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1347                                  GFP_KERNEL);
1348        ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1349                                           sizeof(unsigned long), GFP_KERNEL);
1350        ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1351                                  GFP_KERNEL);
1352        ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
1353                                           sizeof(unsigned long), GFP_KERNEL);
1354        ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1355                                              sizeof(unsigned long),
1356                                              GFP_KERNEL);
1357        ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1358                                  GFP_KERNEL);
1359
1360        if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
1361            !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
1362            !ud->rflows)
1363                return -ENOMEM;
1364
1365        /*
1366         * RX flows with the same Ids as RX channels are reserved to be used
1367         * as default flows if remote HW can't generate flow_ids. Those
1368         * RX flows can be requested only explicitly by id.
1369         */
1370        bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);
1371
1372        /* Get resource ranges from tisci */
1373        for (i = 0; i < RM_RANGE_LAST; i++) {
1374                if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
1375                        continue;
1376
1377                tisci_rm->rm_ranges[i] =
1378                        devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1379                                                    tisci_rm->tisci_dev_id,
1380                                                    (char *)range_names[i]);
1381        }
1382
1383        /* tchan ranges */
1384        rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1385        if (IS_ERR(rm_res)) {
1386                bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1387        } else {
1388                bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1389                for (i = 0; i < rm_res->sets; i++) {
1390                        rm_desc = &rm_res->desc[i];
1391                        bitmap_clear(ud->tchan_map, rm_desc->start,
1392                                     rm_desc->num);
1393                }
1394        }
1395
1396        /* rchan and matching default flow ranges */
1397        rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1398        if (IS_ERR(rm_res)) {
1399                bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1400                bitmap_zero(ud->rflow_map, ud->rchan_cnt);
1401        } else {
1402                bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1403                bitmap_fill(ud->rflow_map, ud->rchan_cnt);
1404                for (i = 0; i < rm_res->sets; i++) {
1405                        rm_desc = &rm_res->desc[i];
1406                        bitmap_clear(ud->rchan_map, rm_desc->start,
1407                                     rm_desc->num);
1408                        bitmap_clear(ud->rflow_map, rm_desc->start,
1409                                     rm_desc->num);
1410                }
1411        }
1412
1413        /* GP rflow ranges */
1414        rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1415        if (IS_ERR(rm_res)) {
1416                bitmap_clear(ud->rflow_map, ud->rchan_cnt,
1417                             ud->rflow_cnt - ud->rchan_cnt);
1418        } else {
1419                bitmap_set(ud->rflow_map, ud->rchan_cnt,
1420                           ud->rflow_cnt - ud->rchan_cnt);
1421                for (i = 0; i < rm_res->sets; i++) {
1422                        rm_desc = &rm_res->desc[i];
1423                        bitmap_clear(ud->rflow_map, rm_desc->start,
1424                                     rm_desc->num);
1425                }
1426        }
1427
1428        return 0;
1429}
1430
1431static int bcdma_setup_resources(struct udma_dev *ud)
1432{
1433        int i;
1434        struct udevice *dev = ud->dev;
1435        struct ti_sci_resource_desc *rm_desc;
1436        struct ti_sci_resource *rm_res;
1437        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1438
1439        ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
1440                                           sizeof(unsigned long), GFP_KERNEL);
1441        ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
1442                                  GFP_KERNEL);
1443        ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1444                                           sizeof(unsigned long), GFP_KERNEL);
1445        ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1446                                  GFP_KERNEL);
1447        ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1448                                           sizeof(unsigned long), GFP_KERNEL);
1449        ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1450                                  GFP_KERNEL);
1451        /* BCDMA do not really have flows, but the driver expect it */
1452        ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
1453                                        sizeof(unsigned long),
1454                                        GFP_KERNEL);
1455        ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
1456                                  GFP_KERNEL);
1457
1458        if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
1459            !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
1460            !ud->rflows)
1461                return -ENOMEM;
1462
1463        /* Get resource ranges from tisci */
1464        for (i = 0; i < RM_RANGE_LAST; i++) {
1465                if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
1466                        continue;
1467
1468                tisci_rm->rm_ranges[i] =
1469                        devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1470                                                    tisci_rm->tisci_dev_id,
1471                                                    (char *)range_names[i]);
1472        }
1473
1474        /* bchan ranges */
1475        rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
1476        if (IS_ERR(rm_res)) {
1477                bitmap_zero(ud->bchan_map, ud->bchan_cnt);
1478        } else {
1479                bitmap_fill(ud->bchan_map, ud->bchan_cnt);
1480                for (i = 0; i < rm_res->sets; i++) {
1481                        rm_desc = &rm_res->desc[i];
1482                        bitmap_clear(ud->bchan_map, rm_desc->start,
1483                                     rm_desc->num);
1484                        dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
1485                                rm_desc->start, rm_desc->num);
1486                }
1487        }
1488
1489        /* tchan ranges */
1490        rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1491        if (IS_ERR(rm_res)) {
1492                bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1493        } else {
1494                bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1495                for (i = 0; i < rm_res->sets; i++) {
1496                        rm_desc = &rm_res->desc[i];
1497                        bitmap_clear(ud->tchan_map, rm_desc->start,
1498                                     rm_desc->num);
1499                        dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1500                                rm_desc->start, rm_desc->num);
1501                }
1502        }
1503
1504        /* rchan ranges */
1505        rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1506        if (IS_ERR(rm_res)) {
1507                bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1508        } else {
1509                bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1510                for (i = 0; i < rm_res->sets; i++) {
1511                        rm_desc = &rm_res->desc[i];
1512                        bitmap_clear(ud->rchan_map, rm_desc->start,
1513                                     rm_desc->num);
1514                        dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1515                                rm_desc->start, rm_desc->num);
1516                }
1517        }
1518
1519        return 0;
1520}
1521
1522static int pktdma_setup_resources(struct udma_dev *ud)
1523{
1524        int i;
1525        struct udevice *dev = ud->dev;
1526        struct ti_sci_resource *rm_res;
1527        struct ti_sci_resource_desc *rm_desc;
1528        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1529
1530        ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
1531                                           sizeof(unsigned long), GFP_KERNEL);
1532        ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
1533                                  GFP_KERNEL);
1534        ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
1535                                           sizeof(unsigned long), GFP_KERNEL);
1536        ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
1537                                  GFP_KERNEL);
1538        ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
1539                                        sizeof(unsigned long),
1540                                        GFP_KERNEL);
1541        ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
1542                                  GFP_KERNEL);
1543        ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
1544                                           sizeof(unsigned long), GFP_KERNEL);
1545
1546        if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
1547            !ud->rchans || !ud->rflows || !ud->rflow_in_use)
1548                return -ENOMEM;
1549
1550        /* Get resource ranges from tisci */
1551        for (i = 0; i < RM_RANGE_LAST; i++) {
1552                if (i == RM_RANGE_BCHAN)
1553                        continue;
1554
1555                tisci_rm->rm_ranges[i] =
1556                        devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
1557                                                    tisci_rm->tisci_dev_id,
1558                                                    (char *)range_names[i]);
1559        }
1560
1561        /* tchan ranges */
1562        rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
1563        if (IS_ERR(rm_res)) {
1564                bitmap_zero(ud->tchan_map, ud->tchan_cnt);
1565        } else {
1566                bitmap_fill(ud->tchan_map, ud->tchan_cnt);
1567                for (i = 0; i < rm_res->sets; i++) {
1568                        rm_desc = &rm_res->desc[i];
1569                        bitmap_clear(ud->tchan_map, rm_desc->start,
1570                                     rm_desc->num);
1571                        dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
1572                                rm_desc->start, rm_desc->num);
1573                }
1574        }
1575
1576        /* rchan ranges */
1577        rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
1578        if (IS_ERR(rm_res)) {
1579                bitmap_zero(ud->rchan_map, ud->rchan_cnt);
1580        } else {
1581                bitmap_fill(ud->rchan_map, ud->rchan_cnt);
1582                for (i = 0; i < rm_res->sets; i++) {
1583                        rm_desc = &rm_res->desc[i];
1584                        bitmap_clear(ud->rchan_map, rm_desc->start,
1585                                     rm_desc->num);
1586                        dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
1587                                rm_desc->start, rm_desc->num);
1588                }
1589        }
1590
1591        /* rflow ranges */
1592        rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
1593        if (IS_ERR(rm_res)) {
1594                /* all rflows are assigned exclusively to Linux */
1595                bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
1596        } else {
1597                bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
1598                for (i = 0; i < rm_res->sets; i++) {
1599                        rm_desc = &rm_res->desc[i];
1600                        bitmap_clear(ud->rflow_in_use, rm_desc->start,
1601                                     rm_desc->num);
1602                        dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
1603                                rm_desc->start, rm_desc->num);
1604                }
1605        }
1606
1607        /* tflow ranges */
1608        rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
1609        if (IS_ERR(rm_res)) {
1610                /* all tflows are assigned exclusively to Linux */
1611                bitmap_zero(ud->tflow_map, ud->tflow_cnt);
1612        } else {
1613                bitmap_fill(ud->tflow_map, ud->tflow_cnt);
1614                for (i = 0; i < rm_res->sets; i++) {
1615                        rm_desc = &rm_res->desc[i];
1616                        bitmap_clear(ud->tflow_map, rm_desc->start,
1617                                     rm_desc->num);
1618                        dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
1619                                rm_desc->start, rm_desc->num);
1620                }
1621        }
1622
1623        return 0;
1624}
1625
1626static int setup_resources(struct udma_dev *ud)
1627{
1628        struct udevice *dev = ud->dev;
1629        int ch_count, ret;
1630
1631        switch (ud->match_data->type) {
1632        case DMA_TYPE_UDMA:
1633                ret = udma_setup_resources(ud);
1634                break;
1635        case DMA_TYPE_BCDMA:
1636                ret = bcdma_setup_resources(ud);
1637                break;
1638        case DMA_TYPE_PKTDMA:
1639                ret = pktdma_setup_resources(ud);
1640                break;
1641        default:
1642                return -EINVAL;
1643        }
1644
1645        if (ret)
1646                return ret;
1647
1648        ch_count  = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
1649        if (ud->bchan_cnt)
1650                ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
1651        ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
1652        ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
1653        if (!ch_count)
1654                return -ENODEV;
1655
1656        ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
1657                                    GFP_KERNEL);
1658        if (!ud->channels)
1659                return -ENOMEM;
1660
1661        switch (ud->match_data->type) {
1662        case DMA_TYPE_UDMA:
1663                dev_dbg(dev,
1664                        "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
1665                        ch_count,
1666                        ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1667                                                      ud->tchan_cnt),
1668                        ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1669                                                      ud->rchan_cnt),
1670                        ud->rflow_cnt - bitmap_weight(ud->rflow_map,
1671                                                      ud->rflow_cnt));
1672                break;
1673        case DMA_TYPE_BCDMA:
1674                dev_dbg(dev,
1675                        "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
1676                        ch_count,
1677                        ud->bchan_cnt - bitmap_weight(ud->bchan_map,
1678                                                      ud->bchan_cnt),
1679                        ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1680                                                      ud->tchan_cnt),
1681                        ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1682                                                      ud->rchan_cnt));
1683                break;
1684        case DMA_TYPE_PKTDMA:
1685                dev_dbg(dev,
1686                        "Channels: %d (tchan: %u, rchan: %u)\n",
1687                        ch_count,
1688                        ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1689                                                      ud->tchan_cnt),
1690                        ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1691                                                      ud->rchan_cnt));
1692                break;
1693        default:
1694                break;
1695        }
1696
1697        return ch_count;
1698}
1699
1700static int udma_probe(struct udevice *dev)
1701{
1702        struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
1703        struct udma_dev *ud = dev_get_priv(dev);
1704        int i, ret;
1705        struct udevice *tmp;
1706        struct udevice *tisci_dev = NULL;
1707        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1708        ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
1709
1710
1711        ud->match_data = (void *)dev_get_driver_data(dev);
1712        ret = udma_get_mmrs(dev);
1713        if (ret)
1714                return ret;
1715
1716        ud->psil_base = ud->match_data->psil_base;
1717
1718        ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
1719                                           "ti,sci", &tisci_dev);
1720        if (ret) {
1721                debug("Failed to get TISCI phandle (%d)\n", ret);
1722                tisci_rm->tisci = NULL;
1723                return -EINVAL;
1724        }
1725        tisci_rm->tisci = (struct ti_sci_handle *)
1726                          (ti_sci_get_handle_from_sysfw(tisci_dev));
1727
1728        tisci_rm->tisci_dev_id = -1;
1729        ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
1730        if (ret) {
1731                dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1732                return ret;
1733        }
1734
1735        tisci_rm->tisci_navss_dev_id = -1;
1736        ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
1737                              &tisci_rm->tisci_navss_dev_id);
1738        if (ret) {
1739                dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
1740                return ret;
1741        }
1742
1743        tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
1744        tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
1745
1746        if (ud->match_data->type == DMA_TYPE_UDMA) {
1747                ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
1748                                                   "ti,ringacc", &tmp);
1749                ud->ringacc = dev_get_priv(tmp);
1750        } else {
1751                struct k3_ringacc_init_data ring_init_data;
1752
1753                ring_init_data.tisci = ud->tisci_rm.tisci;
1754                ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
1755                if (ud->match_data->type == DMA_TYPE_BCDMA) {
1756                        ring_init_data.num_rings = ud->bchan_cnt +
1757                                                   ud->tchan_cnt +
1758                                                   ud->rchan_cnt;
1759                } else {
1760                        ring_init_data.num_rings = ud->rflow_cnt +
1761                                                   ud->tflow_cnt;
1762                }
1763
1764                ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
1765        }
1766        if (IS_ERR(ud->ringacc))
1767                return PTR_ERR(ud->ringacc);
1768
1769        ud->dev = dev;
1770        ud->ch_count = setup_resources(ud);
1771        if (ud->ch_count <= 0)
1772                return ud->ch_count;
1773
1774        for (i = 0; i < ud->bchan_cnt; i++) {
1775                struct udma_bchan *bchan = &ud->bchans[i];
1776
1777                bchan->id = i;
1778                bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
1779        }
1780
1781        for (i = 0; i < ud->tchan_cnt; i++) {
1782                struct udma_tchan *tchan = &ud->tchans[i];
1783
1784                tchan->id = i;
1785                tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i);
1786                tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
1787        }
1788
1789        for (i = 0; i < ud->rchan_cnt; i++) {
1790                struct udma_rchan *rchan = &ud->rchans[i];
1791
1792                rchan->id = i;
1793                rchan->reg_chan = ud->mmrs[MMR_RCHAN] + UDMA_CH_100(i);
1794                rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
1795        }
1796
1797        for (i = 0; i < ud->rflow_cnt; i++) {
1798                struct udma_rflow *rflow = &ud->rflows[i];
1799
1800                rflow->id = i;
1801                rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
1802        }
1803
1804        for (i = 0; i < ud->ch_count; i++) {
1805                struct udma_chan *uc = &ud->channels[i];
1806
1807                uc->ud = ud;
1808                uc->id = i;
1809                uc->config.remote_thread_id = -1;
1810                uc->bchan = NULL;
1811                uc->tchan = NULL;
1812                uc->rchan = NULL;
1813                uc->config.mapped_channel_id = -1;
1814                uc->config.default_flow_id = -1;
1815                uc->config.dir = DMA_MEM_TO_MEM;
1816                sprintf(uc->name, "UDMA chan%d\n", i);
1817                if (!i)
1818                        uc->in_use = true;
1819        }
1820
1821        pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1822                 dev->name,
1823                 udma_read(ud->mmrs[MMR_GCFG], 0),
1824                 udma_read(ud->mmrs[MMR_GCFG], 0x20),
1825                 udma_read(ud->mmrs[MMR_GCFG], 0x24),
1826                 udma_read(ud->mmrs[MMR_GCFG], 0x28),
1827                 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
1828
1829        uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
1830
1831        return ret;
1832}
1833
1834static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
1835{
1836        u64 addr = 0;
1837
1838        memcpy(&addr, &elem, sizeof(elem));
1839        return k3_nav_ringacc_ring_push(ring, &addr);
1840}
1841
1842static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1843                                 dma_addr_t src, size_t len)
1844{
1845        u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1846        struct cppi5_tr_type15_t *tr_req;
1847        int num_tr;
1848        size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1849        u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
1850        unsigned long dummy;
1851        void *tr_desc;
1852        size_t desc_size;
1853
1854        if (len < SZ_64K) {
1855                num_tr = 1;
1856                tr0_cnt0 = len;
1857                tr0_cnt1 = 1;
1858        } else {
1859                unsigned long align_to = __ffs(src | dest);
1860
1861                if (align_to > 3)
1862                        align_to = 3;
1863                /*
1864                 * Keep simple: tr0: SZ_64K-alignment blocks,
1865                 *              tr1: the remaining
1866                 */
1867                num_tr = 2;
1868                tr0_cnt0 = (SZ_64K - BIT(align_to));
1869                if (len / tr0_cnt0 >= SZ_64K) {
1870                        dev_err(uc->ud->dev, "size %zu is not supported\n",
1871                                len);
1872                        return NULL;
1873                }
1874
1875                tr0_cnt1 = len / tr0_cnt0;
1876                tr1_cnt0 = len % tr0_cnt0;
1877        }
1878
1879        desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
1880        tr_desc = dma_alloc_coherent(desc_size, &dummy);
1881        if (!tr_desc)
1882                return NULL;
1883        memset(tr_desc, 0, desc_size);
1884
1885        cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1886        cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1887        cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1888
1889        tr_req = tr_desc + tr_size;
1890
1891        cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1892                      CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1893        cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1894
1895        tr_req[0].addr = src;
1896        tr_req[0].icnt0 = tr0_cnt0;
1897        tr_req[0].icnt1 = tr0_cnt1;
1898        tr_req[0].icnt2 = 1;
1899        tr_req[0].icnt3 = 1;
1900        tr_req[0].dim1 = tr0_cnt0;
1901
1902        tr_req[0].daddr = dest;
1903        tr_req[0].dicnt0 = tr0_cnt0;
1904        tr_req[0].dicnt1 = tr0_cnt1;
1905        tr_req[0].dicnt2 = 1;
1906        tr_req[0].dicnt3 = 1;
1907        tr_req[0].ddim1 = tr0_cnt0;
1908
1909        if (num_tr == 2) {
1910                cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1911                              CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1912                cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1913
1914                tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1915                tr_req[1].icnt0 = tr1_cnt0;
1916                tr_req[1].icnt1 = 1;
1917                tr_req[1].icnt2 = 1;
1918                tr_req[1].icnt3 = 1;
1919
1920                tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1921                tr_req[1].dicnt0 = tr1_cnt0;
1922                tr_req[1].dicnt1 = 1;
1923                tr_req[1].dicnt2 = 1;
1924                tr_req[1].dicnt3 = 1;
1925        }
1926
1927        cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1928
1929        flush_dcache_range((unsigned long)tr_desc,
1930                           ALIGN((unsigned long)tr_desc + desc_size,
1931                                 ARCH_DMA_MINALIGN));
1932
1933        udma_push_to_ring(uc->tchan->t_ring, tr_desc);
1934
1935        return 0;
1936}
1937
1938#define TISCI_BCDMA_BCHAN_VALID_PARAMS (                        \
1939        TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1940        TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1941
1942#define TISCI_BCDMA_TCHAN_VALID_PARAMS (                        \
1943        TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1944        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1945
1946#define TISCI_BCDMA_RCHAN_VALID_PARAMS (                        \
1947        TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1948
1949#define TISCI_UDMA_TCHAN_VALID_PARAMS (                         \
1950        TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1951        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |      \
1952        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |    \
1953        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
1954        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |      \
1955        TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
1956        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
1957        TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1958
1959#define TISCI_UDMA_RCHAN_VALID_PARAMS (                         \
1960        TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1961        TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
1962        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
1963        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
1964        TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |    \
1965        TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |     \
1966        TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |    \
1967        TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |      \
1968        TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1969
1970static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1971{
1972        struct udma_dev *ud = uc->ud;
1973        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1974        const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1975        struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1976        struct udma_bchan *bchan = uc->bchan;
1977        int ret = 0;
1978
1979        req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1980        req_tx.nav_id = tisci_rm->tisci_dev_id;
1981        req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1982        req_tx.index = bchan->id;
1983
1984        ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1985        if (ret)
1986                dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1987
1988        return ret;
1989}
1990
1991static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
1992{
1993        if (id >= 0) {
1994                if (test_bit(id, ud->bchan_map)) {
1995                        dev_err(ud->dev, "bchan%d is in use\n", id);
1996                        return ERR_PTR(-ENOENT);
1997                }
1998        } else {
1999                id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
2000                if (id == ud->bchan_cnt)
2001                        return ERR_PTR(-ENOENT);
2002        }
2003        __set_bit(id, ud->bchan_map);
2004        return &ud->bchans[id];
2005}
2006
2007static int bcdma_get_bchan(struct udma_chan *uc)
2008{
2009        struct udma_dev *ud = uc->ud;
2010
2011        if (uc->bchan) {
2012                dev_err(ud->dev, "chan%d: already have bchan%d allocated\n",
2013                        uc->id, uc->bchan->id);
2014                return 0;
2015        }
2016
2017        uc->bchan = __bcdma_reserve_bchan(ud, -1);
2018        if (IS_ERR(uc->bchan))
2019                return PTR_ERR(uc->bchan);
2020
2021        uc->tchan = uc->bchan;
2022
2023        return 0;
2024}
2025
2026static void bcdma_put_bchan(struct udma_chan *uc)
2027{
2028        struct udma_dev *ud = uc->ud;
2029
2030        if (uc->bchan) {
2031                dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
2032                        uc->bchan->id);
2033                __clear_bit(uc->bchan->id, ud->bchan_map);
2034                uc->bchan = NULL;
2035                uc->tchan = NULL;
2036        }
2037}
2038
2039static void bcdma_free_bchan_resources(struct udma_chan *uc)
2040{
2041        if (!uc->bchan)
2042                return;
2043
2044        k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
2045        k3_nav_ringacc_ring_free(uc->bchan->t_ring);
2046        uc->bchan->tc_ring = NULL;
2047        uc->bchan->t_ring = NULL;
2048
2049        bcdma_put_bchan(uc);
2050}
2051
2052static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
2053{
2054        struct k3_nav_ring_cfg ring_cfg;
2055        struct udma_dev *ud = uc->ud;
2056        int ret;
2057
2058        ret = bcdma_get_bchan(uc);
2059        if (ret)
2060                return ret;
2061
2062        ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
2063                                                &uc->bchan->t_ring,
2064                                                &uc->bchan->tc_ring);
2065        if (ret) {
2066                ret = -EBUSY;
2067                goto err_ring;
2068        }
2069
2070        memset(&ring_cfg, 0, sizeof(ring_cfg));
2071        ring_cfg.size = 16;
2072        ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
2073        ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
2074
2075        ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
2076        if (ret)
2077                goto err_ringcfg;
2078
2079        return 0;
2080
2081err_ringcfg:
2082        k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
2083        uc->bchan->tc_ring = NULL;
2084        k3_nav_ringacc_ring_free(uc->bchan->t_ring);
2085        uc->bchan->t_ring = NULL;
2086err_ring:
2087        bcdma_put_bchan(uc);
2088
2089        return ret;
2090}
2091
2092static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
2093{
2094        struct udma_dev *ud = uc->ud;
2095        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2096        const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2097        struct udma_tchan *tchan = uc->tchan;
2098        struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2099        int ret = 0;
2100
2101        req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2102        req_tx.nav_id = tisci_rm->tisci_dev_id;
2103        req_tx.index = tchan->id;
2104        req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2105        if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
2106            ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2107                /* wait for peer to complete the teardown for PDMAs */
2108                req_tx.valid_params |=
2109                                TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2110                req_tx.tx_tdtype = 1;
2111        }
2112
2113        ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2114        if (ret)
2115                dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2116
2117        return ret;
2118}
2119
2120#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2121
2122static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2123{
2124        struct udma_dev *ud = uc->ud;
2125        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2126        const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2127        struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2128        struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2129        int ret = 0;
2130
2131        req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2132        req_rx.nav_id = tisci_rm->tisci_dev_id;
2133        req_rx.index = uc->rchan->id;
2134
2135        ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2136        if (ret) {
2137                dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2138                return ret;
2139        }
2140
2141        flow_req.valid_params =
2142                TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2143                TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2144                TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2145
2146        flow_req.nav_id = tisci_rm->tisci_dev_id;
2147        flow_req.flow_index = uc->rflow->id;
2148
2149        if (uc->config.needs_epib)
2150                flow_req.rx_einfo_present = 1;
2151        else
2152                flow_req.rx_einfo_present = 0;
2153        if (uc->config.psd_size)
2154                flow_req.rx_psinfo_present = 1;
2155        else
2156                flow_req.rx_psinfo_present = 0;
2157        flow_req.rx_error_handling = 1;
2158
2159        ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2160
2161        if (ret)
2162                dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2163                        ret);
2164
2165        return ret;
2166}
2167
2168static int bcdma_alloc_chan_resources(struct udma_chan *uc)
2169{
2170        int ret;
2171
2172        uc->config.pkt_mode = false;
2173
2174        switch (uc->config.dir) {
2175        case DMA_MEM_TO_MEM:
2176                /* Non synchronized - mem to mem type of transfer */
2177                dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2178                        uc->id);
2179
2180                ret = bcdma_alloc_bchan_resources(uc);
2181                if (ret)
2182                        return ret;
2183
2184                ret = bcdma_tisci_m2m_channel_config(uc);
2185                break;
2186        default:
2187                /* Can not happen */
2188                dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2189                        __func__, uc->id, uc->config.dir);
2190                return -EINVAL;
2191        }
2192
2193        /* check if the channel configuration was successful */
2194        if (ret)
2195                goto err_res_free;
2196
2197        if (udma_is_chan_running(uc)) {
2198                dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
2199                udma_stop(uc);
2200                if (udma_is_chan_running(uc)) {
2201                        dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
2202                        goto err_res_free;
2203                }
2204        }
2205
2206        udma_reset_rings(uc);
2207
2208        return 0;
2209
2210err_res_free:
2211        bcdma_free_bchan_resources(uc);
2212        udma_free_tx_resources(uc);
2213        udma_free_rx_resources(uc);
2214
2215        udma_reset_uchan(uc);
2216
2217        return ret;
2218}
2219
2220static int pktdma_alloc_chan_resources(struct udma_chan *uc)
2221{
2222        struct udma_dev *ud = uc->ud;
2223        int ret;
2224
2225        switch (uc->config.dir) {
2226        case DMA_MEM_TO_DEV:
2227                /* Slave transfer synchronized - mem to dev (TX) trasnfer */
2228                dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2229                        uc->id);
2230
2231                ret = udma_alloc_tx_resources(uc);
2232                if (ret) {
2233                        uc->config.remote_thread_id = -1;
2234                        return ret;
2235                }
2236
2237                uc->config.src_thread = ud->psil_base + uc->tchan->id;
2238                uc->config.dst_thread = uc->config.remote_thread_id;
2239                uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2240
2241                ret = pktdma_tisci_tx_channel_config(uc);
2242                break;
2243        case DMA_DEV_TO_MEM:
2244                /* Slave transfer synchronized - dev to mem (RX) trasnfer */
2245                dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2246                        uc->id);
2247
2248                ret = udma_alloc_rx_resources(uc);
2249                if (ret) {
2250                        uc->config.remote_thread_id = -1;
2251                        return ret;
2252                }
2253
2254                uc->config.src_thread = uc->config.remote_thread_id;
2255                uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2256                                        K3_PSIL_DST_THREAD_ID_OFFSET;
2257
2258                ret = pktdma_tisci_rx_channel_config(uc);
2259                break;
2260        default:
2261                /* Can not happen */
2262                dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2263                        __func__, uc->id, uc->config.dir);
2264                return -EINVAL;
2265        }
2266
2267        /* check if the channel configuration was successful */
2268        if (ret)
2269                goto err_res_free;
2270
2271        /* PSI-L pairing */
2272        ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2273        if (ret) {
2274                dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2275                        uc->config.src_thread, uc->config.dst_thread);
2276                goto err_res_free;
2277        }
2278
2279        if (udma_is_chan_running(uc)) {
2280                dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2281                udma_stop(uc);
2282                if (udma_is_chan_running(uc)) {
2283                        dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2284                        goto err_res_free;
2285                }
2286        }
2287
2288        udma_reset_rings(uc);
2289
2290        if (uc->tchan)
2291                dev_dbg(ud->dev,
2292                        "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2293                        uc->id, uc->tchan->id, uc->tchan->tflow_id,
2294                        uc->config.remote_thread_id);
2295        else if (uc->rchan)
2296                dev_dbg(ud->dev,
2297                        "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2298                        uc->id, uc->rchan->id, uc->rflow->id,
2299                        uc->config.remote_thread_id);
2300        return 0;
2301
2302err_res_free:
2303        udma_free_tx_resources(uc);
2304        udma_free_rx_resources(uc);
2305
2306        udma_reset_uchan(uc);
2307
2308        return ret;
2309}
2310
2311static int udma_transfer(struct udevice *dev, int direction,
2312                         void *dst, void *src, size_t len)
2313{
2314        struct udma_dev *ud = dev_get_priv(dev);
2315        /* Channel0 is reserved for memcpy */
2316        struct udma_chan *uc = &ud->channels[0];
2317        dma_addr_t paddr = 0;
2318        int ret;
2319
2320        switch (ud->match_data->type) {
2321        case DMA_TYPE_UDMA:
2322                ret = udma_alloc_chan_resources(uc);
2323                break;
2324        case DMA_TYPE_BCDMA:
2325                ret = bcdma_alloc_chan_resources(uc);
2326                break;
2327        default:
2328                return -EINVAL;
2329        };
2330        if (ret)
2331                return ret;
2332
2333        udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
2334        udma_start(uc);
2335        udma_poll_completion(uc, &paddr);
2336        udma_stop(uc);
2337
2338        switch (ud->match_data->type) {
2339        case DMA_TYPE_UDMA:
2340                udma_free_chan_resources(uc);
2341                break;
2342        case DMA_TYPE_BCDMA:
2343                bcdma_free_bchan_resources(uc);
2344                break;
2345        default:
2346                return -EINVAL;
2347        };
2348
2349        return 0;
2350}
2351
2352static int udma_request(struct dma *dma)
2353{
2354        struct udma_dev *ud = dev_get_priv(dma->dev);
2355        struct udma_chan_config *ucc;
2356        struct udma_chan *uc;
2357        unsigned long dummy;
2358        int ret;
2359
2360        if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2361                dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2362                return -EINVAL;
2363        }
2364
2365        uc = &ud->channels[dma->id];
2366        ucc = &uc->config;
2367        switch (ud->match_data->type) {
2368        case DMA_TYPE_UDMA:
2369                ret = udma_alloc_chan_resources(uc);
2370                break;
2371        case DMA_TYPE_BCDMA:
2372                ret = bcdma_alloc_chan_resources(uc);
2373                break;
2374        case DMA_TYPE_PKTDMA:
2375                ret = pktdma_alloc_chan_resources(uc);
2376                break;
2377        default:
2378                return -EINVAL;
2379        }
2380        if (ret) {
2381                dev_err(dma->dev, "alloc dma res failed %d\n", ret);
2382                return -EINVAL;
2383        }
2384
2385        if (uc->config.dir == DMA_MEM_TO_DEV) {
2386                uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
2387                memset(uc->desc_tx, 0, ucc->hdesc_size);
2388        } else {
2389                uc->desc_rx = dma_alloc_coherent(
2390                                ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
2391                memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
2392        }
2393
2394        uc->in_use = true;
2395        uc->desc_rx_cur = 0;
2396        uc->num_rx_bufs = 0;
2397
2398        if (uc->config.dir == DMA_DEV_TO_MEM) {
2399                uc->cfg_data.flow_id_base = uc->rflow->id;
2400                uc->cfg_data.flow_id_cnt = 1;
2401        }
2402
2403        return 0;
2404}
2405
2406static int udma_rfree(struct dma *dma)
2407{
2408        struct udma_dev *ud = dev_get_priv(dma->dev);
2409        struct udma_chan *uc;
2410
2411        if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2412                dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2413                return -EINVAL;
2414        }
2415        uc = &ud->channels[dma->id];
2416
2417        if (udma_is_chan_running(uc))
2418                udma_stop(uc);
2419
2420        udma_navss_psil_unpair(ud, uc->config.src_thread,
2421                               uc->config.dst_thread);
2422
2423        bcdma_free_bchan_resources(uc);
2424        udma_free_tx_resources(uc);
2425        udma_free_rx_resources(uc);
2426        udma_reset_uchan(uc);
2427
2428        uc->in_use = false;
2429
2430        return 0;
2431}
2432
2433static int udma_enable(struct dma *dma)
2434{
2435        struct udma_dev *ud = dev_get_priv(dma->dev);
2436        struct udma_chan *uc;
2437        int ret;
2438
2439        if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2440                dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2441                return -EINVAL;
2442        }
2443        uc = &ud->channels[dma->id];
2444
2445        ret = udma_start(uc);
2446
2447        return ret;
2448}
2449
2450static int udma_disable(struct dma *dma)
2451{
2452        struct udma_dev *ud = dev_get_priv(dma->dev);
2453        struct udma_chan *uc;
2454        int ret = 0;
2455
2456        if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2457                dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2458                return -EINVAL;
2459        }
2460        uc = &ud->channels[dma->id];
2461
2462        if (udma_is_chan_running(uc))
2463                ret = udma_stop(uc);
2464        else
2465                dev_err(dma->dev, "%s not running\n", __func__);
2466
2467        return ret;
2468}
2469
2470static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
2471{
2472        struct udma_dev *ud = dev_get_priv(dma->dev);
2473        struct cppi5_host_desc_t *desc_tx;
2474        dma_addr_t dma_src = (dma_addr_t)src;
2475        struct ti_udma_drv_packet_data packet_data = { 0 };
2476        dma_addr_t paddr;
2477        struct udma_chan *uc;
2478        u32 tc_ring_id;
2479        int ret;
2480
2481        if (metadata)
2482                packet_data = *((struct ti_udma_drv_packet_data *)metadata);
2483
2484        if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2485                dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2486                return -EINVAL;
2487        }
2488        uc = &ud->channels[dma->id];
2489
2490        if (uc->config.dir != DMA_MEM_TO_DEV)
2491                return -EINVAL;
2492
2493        tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
2494
2495        desc_tx = uc->desc_tx;
2496
2497        cppi5_hdesc_reset_hbdesc(desc_tx);
2498
2499        cppi5_hdesc_init(desc_tx,
2500                         uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2501                         uc->config.psd_size);
2502        cppi5_hdesc_set_pktlen(desc_tx, len);
2503        cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
2504        cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
2505        cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
2506        /* pass below information from caller */
2507        cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
2508        cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
2509
2510        flush_dcache_range((unsigned long)dma_src,
2511                           ALIGN((unsigned long)dma_src + len,
2512                                 ARCH_DMA_MINALIGN));
2513        flush_dcache_range((unsigned long)desc_tx,
2514                           ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
2515                                 ARCH_DMA_MINALIGN));
2516
2517        ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
2518        if (ret) {
2519                dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
2520                        dma->id, ret);
2521                return ret;
2522        }
2523
2524        udma_poll_completion(uc, &paddr);
2525
2526        return 0;
2527}
2528
2529static int udma_receive(struct dma *dma, void **dst, void *metadata)
2530{
2531        struct udma_dev *ud = dev_get_priv(dma->dev);
2532        struct udma_chan_config *ucc;
2533        struct cppi5_host_desc_t *desc_rx;
2534        dma_addr_t buf_dma;
2535        struct udma_chan *uc;
2536        u32 buf_dma_len, pkt_len;
2537        u32 port_id = 0;
2538        int ret;
2539
2540        if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2541                dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2542                return -EINVAL;
2543        }
2544        uc = &ud->channels[dma->id];
2545        ucc = &uc->config;
2546
2547        if (uc->config.dir != DMA_DEV_TO_MEM)
2548                return -EINVAL;
2549        if (!uc->num_rx_bufs)
2550                return -EINVAL;
2551
2552        ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
2553        if (ret && ret != -ENODATA) {
2554                dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
2555                return ret;
2556        } else if (ret == -ENODATA) {
2557                return 0;
2558        }
2559
2560        /* invalidate cache data */
2561        invalidate_dcache_range((ulong)desc_rx,
2562                                (ulong)(desc_rx + ucc->hdesc_size));
2563
2564        cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
2565        pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
2566
2567        /* invalidate cache data */
2568        invalidate_dcache_range((ulong)buf_dma,
2569                                (ulong)(buf_dma + buf_dma_len));
2570
2571        cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
2572
2573        *dst = (void *)buf_dma;
2574        uc->num_rx_bufs--;
2575
2576        return pkt_len;
2577}
2578
2579static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
2580{
2581        struct udma_chan_config *ucc;
2582        struct udma_dev *ud = dev_get_priv(dma->dev);
2583        struct udma_chan *uc = &ud->channels[0];
2584        struct psil_endpoint_config *ep_config;
2585        u32 val;
2586
2587        for (val = 0; val < ud->ch_count; val++) {
2588                uc = &ud->channels[val];
2589                if (!uc->in_use)
2590                        break;
2591        }
2592
2593        if (val == ud->ch_count)
2594                return -EBUSY;
2595
2596        ucc = &uc->config;
2597        ucc->remote_thread_id = args->args[0];
2598        if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
2599                ucc->dir = DMA_MEM_TO_DEV;
2600        else
2601                ucc->dir = DMA_DEV_TO_MEM;
2602
2603        ep_config = psil_get_ep_config(ucc->remote_thread_id);
2604        if (IS_ERR(ep_config)) {
2605                dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
2606                        uc->config.remote_thread_id);
2607                ucc->dir = DMA_MEM_TO_MEM;
2608                ucc->remote_thread_id = -1;
2609                return false;
2610        }
2611
2612        ucc->pkt_mode = ep_config->pkt_mode;
2613        ucc->channel_tpl = ep_config->channel_tpl;
2614        ucc->notdpkt = ep_config->notdpkt;
2615        ucc->ep_type = ep_config->ep_type;
2616
2617        if (ud->match_data->type == DMA_TYPE_PKTDMA &&
2618            ep_config->mapped_channel_id >= 0) {
2619                ucc->mapped_channel_id = ep_config->mapped_channel_id;
2620                ucc->default_flow_id = ep_config->default_flow_id;
2621        } else {
2622                ucc->mapped_channel_id = -1;
2623                ucc->default_flow_id = -1;
2624        }
2625
2626        ucc->needs_epib = ep_config->needs_epib;
2627        ucc->psd_size = ep_config->psd_size;
2628        ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;
2629
2630        ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
2631                                                ucc->psd_size, 0);
2632        ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);
2633
2634        dma->id = uc->id;
2635        pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
2636                 dma->id, ucc->needs_epib,
2637                 ucc->psd_size, ucc->metadata_size,
2638                 ucc->remote_thread_id);
2639
2640        return 0;
2641}
2642
2643int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
2644{
2645        struct udma_dev *ud = dev_get_priv(dma->dev);
2646        struct cppi5_host_desc_t *desc_rx;
2647        dma_addr_t dma_dst;
2648        struct udma_chan *uc;
2649        u32 desc_num;
2650
2651        if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2652                dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2653                return -EINVAL;
2654        }
2655        uc = &ud->channels[dma->id];
2656
2657        if (uc->config.dir != DMA_DEV_TO_MEM)
2658                return -EINVAL;
2659
2660        if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
2661                return -EINVAL;
2662
2663        desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
2664        desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
2665        dma_dst = (dma_addr_t)dst;
2666
2667        cppi5_hdesc_reset_hbdesc(desc_rx);
2668
2669        cppi5_hdesc_init(desc_rx,
2670                         uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2671                         uc->config.psd_size);
2672        cppi5_hdesc_set_pktlen(desc_rx, size);
2673        cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);
2674
2675        flush_dcache_range((unsigned long)desc_rx,
2676                           ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
2677                                 ARCH_DMA_MINALIGN));
2678
2679        udma_push_to_ring(uc->rflow->fd_ring, desc_rx);
2680
2681        uc->num_rx_bufs++;
2682        uc->desc_rx_cur++;
2683
2684        return 0;
2685}
2686
2687static int udma_get_cfg(struct dma *dma, u32 id, void **data)
2688{
2689        struct udma_dev *ud = dev_get_priv(dma->dev);
2690        struct udma_chan *uc;
2691
2692        if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2693                dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2694                return -EINVAL;
2695        }
2696
2697        switch (id) {
2698        case TI_UDMA_CHAN_PRIV_INFO:
2699                uc = &ud->channels[dma->id];
2700                *data = &uc->cfg_data;
2701                return 0;
2702        }
2703
2704        return -EINVAL;
2705}
2706
2707static const struct dma_ops udma_ops = {
2708        .transfer       = udma_transfer,
2709        .of_xlate       = udma_of_xlate,
2710        .request        = udma_request,
2711        .rfree          = udma_rfree,
2712        .enable         = udma_enable,
2713        .disable        = udma_disable,
2714        .send           = udma_send,
2715        .receive        = udma_receive,
2716        .prepare_rcv_buf = udma_prepare_rcv_buf,
2717        .get_cfg        = udma_get_cfg,
2718};
2719
2720static struct udma_match_data am654_main_data = {
2721        .type = DMA_TYPE_UDMA,
2722        .psil_base = 0x1000,
2723        .enable_memcpy_support = true,
2724        .statictr_z_mask = GENMASK(11, 0),
2725        .oes = {
2726                .udma_rchan = 0x200,
2727        },
2728        .tpl_levels = 2,
2729        .level_start_idx = {
2730                [0] = 8, /* Normal channels */
2731                [1] = 0, /* High Throughput channels */
2732        },
2733};
2734
2735static struct udma_match_data am654_mcu_data = {
2736        .type = DMA_TYPE_UDMA,
2737        .psil_base = 0x6000,
2738        .enable_memcpy_support = true,
2739        .statictr_z_mask = GENMASK(11, 0),
2740        .oes = {
2741                .udma_rchan = 0x200,
2742        },
2743        .tpl_levels = 2,
2744        .level_start_idx = {
2745                [0] = 2, /* Normal channels */
2746                [1] = 0, /* High Throughput channels */
2747        },
2748};
2749
2750static struct udma_match_data j721e_main_data = {
2751        .type = DMA_TYPE_UDMA,
2752        .psil_base = 0x1000,
2753        .enable_memcpy_support = true,
2754        .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2755        .statictr_z_mask = GENMASK(23, 0),
2756        .oes = {
2757                .udma_rchan = 0x400,
2758        },
2759        .tpl_levels = 3,
2760        .level_start_idx = {
2761                [0] = 16, /* Normal channels */
2762                [1] = 4, /* High Throughput channels */
2763                [2] = 0, /* Ultra High Throughput channels */
2764        },
2765};
2766
2767static struct udma_match_data j721e_mcu_data = {
2768        .type = DMA_TYPE_UDMA,
2769        .psil_base = 0x6000,
2770        .enable_memcpy_support = true,
2771        .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2772        .statictr_z_mask = GENMASK(23, 0),
2773        .oes = {
2774                .udma_rchan = 0x400,
2775        },
2776        .tpl_levels = 2,
2777        .level_start_idx = {
2778                [0] = 2, /* Normal channels */
2779                [1] = 0, /* High Throughput channels */
2780        },
2781};
2782
2783static struct udma_match_data am64_bcdma_data = {
2784        .type = DMA_TYPE_BCDMA,
2785        .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
2786        .enable_memcpy_support = true, /* Supported via bchan */
2787        .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2788        .statictr_z_mask = GENMASK(23, 0),
2789        .oes = {
2790                .bcdma_bchan_data = 0x2200,
2791                .bcdma_bchan_ring = 0x2400,
2792                .bcdma_tchan_data = 0x2800,
2793                .bcdma_tchan_ring = 0x2a00,
2794                .bcdma_rchan_data = 0x2e00,
2795                .bcdma_rchan_ring = 0x3000,
2796        },
2797        /* No throughput levels */
2798};
2799
2800static struct udma_match_data am64_pktdma_data = {
2801        .type = DMA_TYPE_PKTDMA,
2802        .psil_base = 0x1000,
2803        .enable_memcpy_support = false,
2804        .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2805        .statictr_z_mask = GENMASK(23, 0),
2806        .oes = {
2807                .pktdma_tchan_flow = 0x1200,
2808                .pktdma_rchan_flow = 0x1600,
2809        },
2810        /* No throughput levels */
2811};
2812
2813static const struct udevice_id udma_ids[] = {
2814        {
2815                .compatible = "ti,am654-navss-main-udmap",
2816                .data = (ulong)&am654_main_data,
2817        },
2818        {
2819                .compatible = "ti,am654-navss-mcu-udmap",
2820                .data = (ulong)&am654_mcu_data,
2821        }, {
2822                .compatible = "ti,j721e-navss-main-udmap",
2823                .data = (ulong)&j721e_main_data,
2824        }, {
2825                .compatible = "ti,j721e-navss-mcu-udmap",
2826                .data = (ulong)&j721e_mcu_data,
2827        },
2828        {
2829                .compatible = "ti,am64-dmss-bcdma",
2830                .data = (ulong)&am64_bcdma_data,
2831        },
2832        {
2833                .compatible = "ti,am64-dmss-pktdma",
2834                .data = (ulong)&am64_pktdma_data,
2835        },
2836        { /* Sentinel */ },
2837};
2838
2839U_BOOT_DRIVER(ti_edma3) = {
2840        .name   = "ti-udma",
2841        .id     = UCLASS_DMA,
2842        .of_match = udma_ids,
2843        .ops    = &udma_ops,
2844        .probe  = udma_probe,
2845        .priv_auto      = sizeof(struct udma_dev),
2846};
2847