linux/drivers/dma/ti/k3-udma-glue.c
// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
        struct device *dev;
        struct device chan_dev;
        struct udma_dev *udmax;
        const struct udma_tisci_rm *tisci_rm;
        struct k3_ringacc *ringacc;
        u32 src_thread;
        u32 dst_thread;

        u32  hdesc_size;
        bool epib;
        u32  psdata_size;
        u32  swdata_size;
        u32  atype_asel;
        struct psil_endpoint_config *ep_config;
};

struct k3_udma_glue_tx_channel {
        struct k3_udma_glue_common common;

        struct udma_tchan *udma_tchanx;
        int udma_tchan_id;

        struct k3_ring *ringtx;
        struct k3_ring *ringtxcq;

        bool psil_paired;

        int virq;

        atomic_t free_pkts;
        bool tx_pause_on_err;
        bool tx_filt_einfo;
        bool tx_filt_pswords;
        bool tx_supr_tdpkt;

        int udma_tflow_id;
};

struct k3_udma_glue_rx_flow {
        struct udma_rflow *udma_rflow;
        int udma_rflow_id;
        struct k3_ring *ringrx;
        struct k3_ring *ringrxfdq;

        int virq;
};

struct k3_udma_glue_rx_channel {
        struct k3_udma_glue_common common;

        struct udma_rchan *udma_rchanx;
        int udma_rchan_id;
        bool remote;

        bool psil_paired;

        u32  swdata_size;
        int  flow_id_base;

        struct k3_udma_glue_rx_flow *flows;
        u32 flow_num;
        u32 flows_ready;
};

static void k3_udma_chan_dev_release(struct device *dev)
{
        /* The struct containing the device is devm managed */
}

static struct class k3_udma_glue_devclass = {
        .name           = "k3_udma_glue_chan",
        .dev_release    = k3_udma_chan_dev_release,
};

#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
                                 struct k3_udma_glue_common *common)
{
        common->udmax = of_xudma_dev_get(udmax_np, NULL);
        if (IS_ERR(common->udmax))
                return PTR_ERR(common->udmax);

        common->ringacc = xudma_get_ringacc(common->udmax);
        common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

        return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
                const char *name, struct k3_udma_glue_common *common,
                bool tx_chn)
{
        struct of_phandle_args dma_spec;
        u32 thread_id;
        int ret = 0;
        int index;

        if (unlikely(!name))
                return -EINVAL;

        index = of_property_match_string(chn_np, "dma-names", name);
        if (index < 0)
                return index;

        if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
                                       &dma_spec))
                return -ENOENT;

        ret = of_k3_udma_glue_parse(dma_spec.np, common);
        if (ret)
                goto out_put_spec;

        thread_id = dma_spec.args[0];
        if (dma_spec.args_count == 2) {
                if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
                        dev_err(common->dev, "Invalid channel atype: %u\n",
                                dma_spec.args[1]);
                        ret = -EINVAL;
                        goto out_put_spec;
                }
                if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
                        dev_err(common->dev, "Invalid channel asel: %u\n",
                                dma_spec.args[1]);
                        ret = -EINVAL;
                        goto out_put_spec;
                }

                common->atype_asel = dma_spec.args[1];
        }

        if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
                ret = -EINVAL;
                goto out_put_spec;
        }

        if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
                ret = -EINVAL;
                goto out_put_spec;
        }

        /* get psil endpoint config */
        common->ep_config = psil_get_ep_config(thread_id);
        if (IS_ERR(common->ep_config)) {
                dev_err(common->dev,
                        "No configuration for psi-l thread 0x%04x\n",
                        thread_id);
                ret = PTR_ERR(common->ep_config);
                goto out_put_spec;
        }

        common->epib = common->ep_config->needs_epib;
        common->psdata_size = common->ep_config->psd_size;

        if (tx_chn)
                common->dst_thread = thread_id;
        else
                common->src_thread = thread_id;

out_put_spec:
        of_node_put(dma_spec.np);
        return ret;
};

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        struct device *dev = tx_chn->common.dev;

        dev_dbg(dev, "dump_tx_chn:\n"
                "udma_tchan_id: %d\n"
                "src_thread: %08x\n"
                "dst_thread: %08x\n",
                tx_chn->udma_tchan_id,
                tx_chn->common.src_thread,
                tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
                                        char *mark)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "=== dump ===> %s\n", mark);
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
                xudma_tchanrt_read(chn->udma_tchanx,
                                   UDMA_CHAN_RT_PEER_RT_EN_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
                xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
        struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

        memset(&req, 0, sizeof(req));

        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = tx_chn->udma_tchan_id;
        if (tx_chn->tx_pause_on_err)
                req.tx_pause_on_err = 1;
        if (tx_chn->tx_filt_einfo)
                req.tx_filt_einfo = 1;
        if (tx_chn->tx_filt_pswords)
                req.tx_filt_pswords = 1;
        req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
        if (tx_chn->tx_supr_tdpkt)
                req.tx_supr_tdpkt = 1;
        req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
        req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
        req.tx_atype = tx_chn->common.atype_asel;

        return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

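/*
 * k3_udma_glue_request_tx_chn - request and configure a TX channel
 *
 * Parses the "dmas"/"dma-names" entry @name of @dev, requests the UDMAP or
 * PKTDMA TX channel and the TX/TX-completion ring pair, and configures the
 * channel through TISCI. Returns the channel handle or an ERR_PTR() value.
 *
 * A minimal usage sketch; the "tx0" dma-name and the ring sizes are
 * illustrative only and are not taken from this file:
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { };
 *
 *	cfg.swdata_size = 16;
 *	cfg.tx_cfg.size = 128;
 *	cfg.txcq_cfg.size = 128;
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 */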
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
                const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
        struct k3_udma_glue_tx_channel *tx_chn;
        int ret;

        tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
        if (!tx_chn)
                return ERR_PTR(-ENOMEM);

        tx_chn->common.dev = dev;
        tx_chn->common.swdata_size = cfg->swdata_size;
        tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
        tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
        tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
        tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

        /* parse the udmap channel */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &tx_chn->common, true);
        if (ret)
                goto err;

        tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
                                                tx_chn->common.psdata_size,
                                                tx_chn->common.swdata_size);

        if (xudma_is_pktdma(tx_chn->common.udmax))
                tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
        else
                tx_chn->udma_tchan_id = -1;

        /* request and cfg UDMAP TX channel */
        tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
                                              tx_chn->udma_tchan_id);
        if (IS_ERR(tx_chn->udma_tchanx)) {
                ret = PTR_ERR(tx_chn->udma_tchanx);
                dev_err(dev, "UDMAX tchanx get err %d\n", ret);
                goto err;
        }
        tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

        tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
        tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
        dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
                     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
        ret = device_register(&tx_chn->common.chan_dev);
        if (ret) {
                dev_err(dev, "Channel Device registration failed %d\n", ret);
                tx_chn->common.chan_dev.parent = NULL;
                goto err;
        }

        if (xudma_is_pktdma(tx_chn->common.udmax)) {
                /* prepare the channel device as coherent */
                tx_chn->common.chan_dev.dma_coherent = true;
                dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
                                             DMA_BIT_MASK(48));
        }

        atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

        if (xudma_is_pktdma(tx_chn->common.udmax))
                tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
        else
                tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;

        /* request and cfg rings */
        ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
                                            tx_chn->udma_tflow_id, -1,
                                            &tx_chn->ringtx,
                                            &tx_chn->ringtxcq);
        if (ret) {
                dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
                goto err;
        }

        /* Set the dma_dev for the rings to be configured */
        cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
        cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;

        /* Set the ASEL value for DMA rings of PKTDMA */
        if (xudma_is_pktdma(tx_chn->common.udmax)) {
                cfg->tx_cfg.asel = tx_chn->common.atype_asel;
                cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
        }

        ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringtx %d\n", ret);
                goto err;
        }

        ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
                goto err;
        }

        /* request and cfg psi-l */
        tx_chn->common.src_thread =
                        xudma_dev_get_psil_base(tx_chn->common.udmax) +
                        tx_chn->udma_tchan_id;

        ret = k3_udma_glue_cfg_tx_chn(tx_chn);
        if (ret) {
                dev_err(dev, "Failed to cfg tchan %d\n", ret);
                goto err;
        }

        k3_udma_glue_dump_tx_chn(tx_chn);

        return tx_chn;

err:
        k3_udma_glue_release_tx_chn(tx_chn);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        if (tx_chn->psil_paired) {
                xudma_navss_psil_unpair(tx_chn->common.udmax,
                                        tx_chn->common.src_thread,
                                        tx_chn->common.dst_thread);
                tx_chn->psil_paired = false;
        }

        if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
                xudma_tchan_put(tx_chn->common.udmax,
                                tx_chn->udma_tchanx);

        if (tx_chn->ringtxcq)
                k3_ringacc_ring_free(tx_chn->ringtxcq);

        if (tx_chn->ringtx)
                k3_ringacc_ring_free(tx_chn->ringtx);

        if (tx_chn->common.chan_dev.parent) {
                device_unregister(&tx_chn->common.chan_dev);
                tx_chn->common.chan_dev.parent = NULL;
        }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

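/*
 * k3_udma_glue_push_tx_chn - queue a filled TX descriptor on the TX ring
 *
 * Sets the descriptor return policy so the completed descriptor comes back on
 * the TX completion ring, then pushes it to the TX submit ring. Returns
 * -ENOMEM when the completion ring budget (free_pkts) is exhausted.
 */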
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                             struct cppi5_host_desc_t *desc_tx,
                             dma_addr_t desc_dma)
{
        u32 ringtxcq_id;

        if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
                return -ENOMEM;

        ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
        cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

        return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                            dma_addr_t *desc_dma)
{
        int ret;

        ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
        if (!ret)
                atomic_inc(&tx_chn->free_pkts);

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

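/*
 * k3_udma_glue_enable_tx_chn - pair the PSI-L threads and start the channel
 *
 * Pairs the source and destination PSI-L threads, then enables the peer and
 * the channel real-time control registers.
 */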
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        int ret;

        ret = xudma_navss_psil_pair(tx_chn->common.udmax,
                                    tx_chn->common.src_thread,
                                    tx_chn->common.dst_thread);
        if (ret) {
                dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
                return ret;
        }

        tx_chn->psil_paired = true;

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE);

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN);

        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
        return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

        xudma_tchanrt_write(tx_chn->udma_tchanx,
                            UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

        if (tx_chn->psil_paired) {
                xudma_navss_psil_unpair(tx_chn->common.udmax,
                                        tx_chn->common.src_thread,
                                        tx_chn->common.dst_thread);
                tx_chn->psil_paired = false;
        }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

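/*
 * k3_udma_glue_tdown_tx_chn - initiate TX channel teardown
 *
 * Requests teardown through the channel RT control register. When @sync is
 * true, polls for up to K3_UDMAX_TDOWN_TIMEOUT_US until the channel enable
 * bit clears and warns if the peer did not stop.
 */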
void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                               bool sync)
{
        int i = 0;
        u32 val;

        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

        xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

        val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
                val = xudma_tchanrt_read(tx_chn->udma_tchanx,
                                         UDMA_CHAN_RT_CTL_REG);
                udelay(1);
                if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
                        dev_err(tx_chn->common.dev, "TX tdown timeout\n");
                        break;
                }
                i++;
        }

        val = xudma_tchanrt_read(tx_chn->udma_tchanx,
                                 UDMA_CHAN_RT_PEER_RT_EN_REG);
        if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
                dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
        k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

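/*
 * k3_udma_glue_reset_tx_chn - drain and reset the TX rings after disable
 *
 * @cleanup is called for every descriptor still sitting on the TX submit ring
 * so the caller can unmap and free it. A minimal callback sketch; the names
 * below are illustrative only:
 *
 *	static void my_tx_cleanup(void *data, dma_addr_t desc_dma)
 *	{
 *		struct my_priv *priv = data;
 *
 *		// look up the CPU descriptor from desc_dma, unmap and free it
 *	}
 */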
void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
                               void *data,
                               void (*cleanup)(void *data, dma_addr_t desc_dma))
{
        struct device *dev = tx_chn->common.dev;
        dma_addr_t desc_dma;
        int occ_tx, i, ret;

        /*
         * The TXQ has to be reset in a special way as it is an input ring for
         * UDMA and its state is cached by UDMA, so:
         * 1) save TXQ occ
         * 2) clean up TXQ and call the .cleanup() callback for each desc
         * 3) reset TXQ in a special way
         */
        occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
        dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);

        for (i = 0; i < occ_tx; i++) {
                ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
                if (ret) {
                        if (ret != -ENODATA)
                                dev_err(dev, "TX reset pop %d\n", ret);
                        break;
                }
                cleanup(data, desc_dma);
        }

        /* reset TXCQ as it is not an input for UDMA - expected to be empty */
        k3_ringacc_ring_reset(tx_chn->ringtxcq);
        k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
        return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
        return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
        if (xudma_is_pktdma(tx_chn->common.udmax)) {
                tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
                                                          tx_chn->udma_tflow_id);
        } else {
                tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
        }

        return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

struct device *
        k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
{
        if (xudma_is_pktdma(tx_chn->common.udmax) &&
            (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
                return &tx_chn->common.chan_dev;

        return xudma_get_device(tx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);

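/*
 * For PKTDMA with a non-zero ASEL value the DMA addresses written into CPPI5
 * descriptors must carry the ASEL bits above K3_ADDRESS_ASEL_SHIFT. The two
 * helpers below respectively add and strip those bits.
 */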
void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
                                       dma_addr_t *addr)
{
        if (!xudma_is_pktdma(tx_chn->common.udmax) ||
            !tx_chn->common.atype_asel)
                return;

        *addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);

void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
                                       dma_addr_t *addr)
{
        if (!xudma_is_pktdma(tx_chn->common.udmax) ||
            !tx_chn->common.atype_asel)
                return;

        *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);

static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
        int ret;

        memset(&req, 0, sizeof(req));

        req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
                           TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

        req.nav_id = tisci_rm->tisci_dev_id;
        req.index = rx_chn->udma_rchan_id;
        req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
        /*
         * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
         * and udmax impl, so just configure it to an invalid value.
         * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
         */
        req.rxcq_qnum = 0xFFFF;
        if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
            rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
                /* Default flow + extra ones */
                req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
                                    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
                req.flowid_start = rx_chn->flow_id_base;
                req.flowid_cnt = rx_chn->flow_num;
        }
        req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
        req.rx_atype = rx_chn->common.atype_asel;

        ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
        if (ret)
                dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
                        rx_chn->udma_rchan_id, ret);

        return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
                                         u32 flow_num)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        if (IS_ERR_OR_NULL(flow->udma_rflow))
                return;

        if (flow->ringrxfdq)
                k3_ringacc_ring_free(flow->ringrxfdq);

        if (flow->ringrx)
                k3_ringacc_ring_free(flow->ringrx);

        xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
        flow->udma_rflow = NULL;
        rx_chn->flows_ready--;
}

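/*
 * k3_udma_glue_cfg_rx_flow - request rings and configure a single RX flow
 *
 * Requests the rflow and the RX/RX free-descriptor (FDQ) ring pair, then
 * programs the flow through TISCI so received packets land on the RX ring and
 * free descriptors are taken from the FDQ ring.
 */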
static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow_idx,
                                    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int rx_ring_id;
        int rx_ringfdq_id;
        int ret = 0;

        flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
                                           flow->udma_rflow_id);
        if (IS_ERR(flow->udma_rflow)) {
                ret = PTR_ERR(flow->udma_rflow);
                dev_err(dev, "UDMAX rflow get err %d\n", ret);
                return ret;
        }

        if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
                ret = -ENODEV;
                goto err_rflow_put;
        }

        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                rx_ringfdq_id = flow->udma_rflow_id +
                                xudma_get_rflow_ring_offset(rx_chn->common.udmax);
                rx_ring_id = 0;
        } else {
                rx_ring_id = flow_cfg->ring_rxq_id;
                rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
        }

        /* request and cfg rings */
        ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
                                            rx_ringfdq_id, rx_ring_id,
                                            &flow->ringrxfdq,
                                            &flow->ringrx);
        if (ret) {
                dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
                goto err_rflow_put;
        }

        /* Set the dma_dev for the rings to be configured */
        flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
        flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;

        /* Set the ASEL value for DMA rings of PKTDMA */
        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
                flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
        }

        ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrx %d\n", ret);
                goto err_ringrxfdq_free;
        }

        ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
                goto err_ringrxfdq_free;
        }

        if (rx_chn->remote) {
                rx_ring_id = TI_SCI_RESOURCE_NULL;
                rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
        } else {
                rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
                rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
        }

        memset(&req, 0, sizeof(req));

        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        if (rx_chn->common.epib)
                req.rx_einfo_present = 1;
        if (rx_chn->common.psdata_size)
                req.rx_psinfo_present = 1;
        if (flow_cfg->rx_error_handling)
                req.rx_error_handling = 1;
        req.rx_desc_type = 0;
        req.rx_dest_qnum = rx_ring_id;
        req.rx_src_tag_hi_sel = 0;
        req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
        req.rx_dest_tag_hi_sel = 0;
        req.rx_dest_tag_lo_sel = 0;
        req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
        req.rx_fdq1_qnum = rx_ringfdq_id;
        req.rx_fdq2_qnum = rx_ringfdq_id;
        req.rx_fdq3_qnum = rx_ringfdq_id;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
                        ret);
                goto err_ringrxfdq_free;
        }

        rx_chn->flows_ready++;
        dev_dbg(dev, "flow%d config done. ready:%d\n",
                flow->udma_rflow_id, rx_chn->flows_ready);

        return 0;

err_ringrxfdq_free:
        k3_ringacc_ring_free(flow->ringrxfdq);
        k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
        xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
        flow->udma_rflow = NULL;

        return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "dump_rx_chn:\n"
                "udma_rchan_id: %d\n"
                "src_thread: %08x\n"
                "dst_thread: %08x\n"
                "epib: %d\n"
                "hdesc_size: %u\n"
                "psdata_size: %u\n"
                "swdata_size: %u\n"
                "flow_id_base: %d\n"
                "flow_num: %d\n",
                chn->udma_rchan_id,
                chn->common.src_thread,
                chn->common.dst_thread,
                chn->common.epib,
                chn->common.hdesc_size,
                chn->common.psdata_size,
                chn->common.swdata_size,
                chn->flow_id_base,
                chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
                                        char *mark)
{
        struct device *dev = chn->common.dev;

        dev_dbg(dev, "=== dump ===> %s\n", mark);

        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
                xudma_rchanrt_read(chn->udma_rchanx,
                                   UDMA_CHAN_RT_PEER_RT_EN_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
        dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
                xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
                               struct k3_udma_glue_rx_channel_cfg *cfg)
{
        int ret;

        /* default rflow */
        if (cfg->flow_id_use_rxchan_id)
                return 0;

        /* not GP rflows */
        if (rx_chn->flow_id_base != -1 &&
            !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
                return 0;

        /* Allocate range of GP rflows */
        ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
                                         rx_chn->flow_id_base,
                                         rx_chn->flow_num);
        if (ret < 0) {
                dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
                        rx_chn->flow_id_base, rx_chn->flow_num, ret);
                return ret;
        }
        rx_chn->flow_id_base = ret;

        return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
                                 struct k3_udma_glue_rx_channel_cfg *cfg)
{
        struct k3_udma_glue_rx_channel *rx_chn;
        struct psil_endpoint_config *ep_cfg;
        int ret, i;

        if (cfg->flow_id_num <= 0)
                return ERR_PTR(-EINVAL);

        if (cfg->flow_id_num != 1 &&
            (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
                return ERR_PTR(-EINVAL);

        rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
        if (!rx_chn)
                return ERR_PTR(-ENOMEM);

        rx_chn->common.dev = dev;
        rx_chn->common.swdata_size = cfg->swdata_size;
        rx_chn->remote = false;

        /* parse the udmap channel */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &rx_chn->common, false);
        if (ret)
                goto err;

        rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
                                                rx_chn->common.psdata_size,
                                                rx_chn->common.swdata_size);

        ep_cfg = rx_chn->common.ep_config;

        if (xudma_is_pktdma(rx_chn->common.udmax))
                rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
        else
                rx_chn->udma_rchan_id = -1;

        /* request and cfg UDMAP RX channel */
        rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
                                              rx_chn->udma_rchan_id);
        if (IS_ERR(rx_chn->udma_rchanx)) {
                ret = PTR_ERR(rx_chn->udma_rchanx);
                dev_err(dev, "UDMAX rchanx get err %d\n", ret);
                goto err;
        }
        rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

        rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
        rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
        dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
                     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
        ret = device_register(&rx_chn->common.chan_dev);
        if (ret) {
                dev_err(dev, "Channel Device registration failed %d\n", ret);
                rx_chn->common.chan_dev.parent = NULL;
                goto err;
        }

        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                /* prepare the channel device as coherent */
                rx_chn->common.chan_dev.dma_coherent = true;
                dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
                                             DMA_BIT_MASK(48));
        }

        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                int flow_start = cfg->flow_id_base;
                int flow_end;

                if (flow_start == -1)
                        flow_start = ep_cfg->flow_start;

                flow_end = flow_start + cfg->flow_id_num - 1;
                if (flow_start < ep_cfg->flow_start ||
                    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
                        dev_err(dev, "Invalid flow range requested\n");
                        ret = -EINVAL;
                        goto err;
                }
                rx_chn->flow_id_base = flow_start;
        } else {
                rx_chn->flow_id_base = cfg->flow_id_base;

                /* Use RX channel id as flow id: target dev can't generate flow_id */
                if (cfg->flow_id_use_rxchan_id)
                        rx_chn->flow_id_base = rx_chn->udma_rchan_id;
        }

        rx_chn->flow_num = cfg->flow_id_num;

        rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
                                     sizeof(*rx_chn->flows), GFP_KERNEL);
        if (!rx_chn->flows) {
                ret = -ENOMEM;
                goto err;
        }

        ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
        if (ret)
                goto err;

        for (i = 0; i < rx_chn->flow_num; i++)
                rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

        /* request and cfg psi-l */
        rx_chn->common.dst_thread =
                        xudma_dev_get_psil_base(rx_chn->common.udmax) +
                        rx_chn->udma_rchan_id;

        ret = k3_udma_glue_cfg_rx_chn(rx_chn);
        if (ret) {
                dev_err(dev, "Failed to cfg rchan %d\n", ret);
                goto err;
        }

        /* init default RX flow only if flow_num = 1 */
        if (cfg->def_flow_cfg) {
                ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
                if (ret)
                        goto err;
        }

        k3_udma_glue_dump_rx_chn(rx_chn);

        return rx_chn;

err:
        k3_udma_glue_release_rx_chn(rx_chn);
        return ERR_PTR(ret);
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
                                   struct k3_udma_glue_rx_channel_cfg *cfg)
{
        struct k3_udma_glue_rx_channel *rx_chn;
        int ret, i;

        if (cfg->flow_id_num <= 0 ||
            cfg->flow_id_use_rxchan_id ||
            cfg->def_flow_cfg ||
            cfg->flow_id_base < 0)
                return ERR_PTR(-EINVAL);

        /*
         * A remote RX channel is under control of a remote CPU core, so
         * Linux can only request it and manipulate it through its dedicated
         * RX flows.
         */

        rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
        if (!rx_chn)
                return ERR_PTR(-ENOMEM);

        rx_chn->common.dev = dev;
        rx_chn->common.swdata_size = cfg->swdata_size;
        rx_chn->remote = true;
        rx_chn->udma_rchan_id = -1;
        rx_chn->flow_num = cfg->flow_id_num;
        rx_chn->flow_id_base = cfg->flow_id_base;
        rx_chn->psil_paired = false;

        /* parse the udmap channel */
        ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
                                        &rx_chn->common, false);
        if (ret)
                goto err;

        rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
                                                rx_chn->common.psdata_size,
                                                rx_chn->common.swdata_size);

        rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
                                     sizeof(*rx_chn->flows), GFP_KERNEL);
        if (!rx_chn->flows) {
                ret = -ENOMEM;
                goto err;
        }

        rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
        rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
        dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
                     rx_chn->common.src_thread);
        ret = device_register(&rx_chn->common.chan_dev);
        if (ret) {
                dev_err(dev, "Channel Device registration failed %d\n", ret);
                rx_chn->common.chan_dev.parent = NULL;
                goto err;
        }

        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                /* prepare the channel device as coherent */
                rx_chn->common.chan_dev.dma_coherent = true;
                dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
                                             DMA_BIT_MASK(48));
        }

        ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
        if (ret)
                goto err;

        for (i = 0; i < rx_chn->flow_num; i++)
                rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

        k3_udma_glue_dump_rx_chn(rx_chn);

        return rx_chn;

err:
        k3_udma_glue_release_rx_chn(rx_chn);
        return ERR_PTR(ret);
}

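/*
 * k3_udma_glue_request_rx_chn - request and configure an RX channel
 *
 * Dispatches to the remote variant when cfg->remote is set; otherwise a local
 * UDMAP/PKTDMA RX channel is requested and configured.
 *
 * A minimal usage sketch for a single-flow local channel; the "rx0" dma-name,
 * ring sizes and flow settings are illustrative only and are not taken from
 * this file:
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
 *
 *	flow_cfg.rx_cfg.size = 128;
 *	flow_cfg.rxfdq_cfg.size = 128;
 *	flow_cfg.ring_rxq_id = -1;
 *	flow_cfg.ring_rxfdq0_id = -1;
 *	cfg.swdata_size = 16;
 *	cfg.flow_id_num = 1;
 *	cfg.flow_id_base = -1;
 *	cfg.def_flow_cfg = &flow_cfg;
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 */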
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
                            struct k3_udma_glue_rx_channel_cfg *cfg)
{
        if (cfg->remote)
                return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
        else
                return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        int i;

        if (IS_ERR_OR_NULL(rx_chn->common.udmax))
                return;

        if (rx_chn->psil_paired) {
                xudma_navss_psil_unpair(rx_chn->common.udmax,
                                        rx_chn->common.src_thread,
                                        rx_chn->common.dst_thread);
                rx_chn->psil_paired = false;
        }

        for (i = 0; i < rx_chn->flow_num; i++)
                k3_udma_glue_release_rx_flow(rx_chn, i);

        if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
                xudma_free_gp_rflow_range(rx_chn->common.udmax,
                                          rx_chn->flow_id_base,
                                          rx_chn->flow_num);

        if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
                xudma_rchan_put(rx_chn->common.udmax,
                                rx_chn->udma_rchanx);

        if (rx_chn->common.chan_dev.parent) {
                device_unregister(&rx_chn->common.chan_dev);
                rx_chn->common.chan_dev.parent = NULL;
        }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
                              u32 flow_idx,
                              struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
        if (flow_idx >= rx_chn->flow_num)
                return -EINVAL;

        return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
                                    u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow;

        if (flow_idx >= rx_chn->flow_num)
                return -EINVAL;

        flow = &rx_chn->flows[flow_idx];

        return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
        return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

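/*
 * The two helpers below are only valid for remote RX channels: they point a
 * flow at its RX/FDQ rings (enable) or at TI_SCI_RESOURCE_NULL (disable)
 * through TISCI.
 */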
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
                                u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int rx_ring_id;
        int rx_ringfdq_id;
        int ret = 0;

        if (!rx_chn->remote)
                return -EINVAL;

        rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
        rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

        memset(&req, 0, sizeof(req));

        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        req.rx_dest_qnum = rx_ring_id;
        req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
        req.rx_fdq1_qnum = rx_ringfdq_id;
        req.rx_fdq2_qnum = rx_ringfdq_id;
        req.rx_fdq3_qnum = rx_ringfdq_id;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
                        ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
                                 u32 flow_idx)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
        const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
        struct device *dev = rx_chn->common.dev;
        struct ti_sci_msg_rm_udmap_flow_cfg req;
        int ret = 0;

        if (!rx_chn->remote)
                return -EINVAL;

        memset(&req, 0, sizeof(req));
        req.valid_params =
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
        req.nav_id = tisci_rm->tisci_dev_id;
        req.flow_index = flow->udma_rflow_id;
        req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
        req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

        ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
        if (ret) {
                dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
                        ret);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        int ret;

        if (rx_chn->remote)
                return -EINVAL;

        if (rx_chn->flows_ready < rx_chn->flow_num)
                return -EINVAL;

        ret = xudma_navss_psil_pair(rx_chn->common.udmax,
                                    rx_chn->common.src_thread,
                                    rx_chn->common.dst_thread);
        if (ret) {
                dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
                return ret;
        }

        rx_chn->psil_paired = true;

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
                            UDMA_CHAN_RT_CTL_EN);

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE);

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
        return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

        xudma_rchanrt_write(rx_chn->udma_rchanx,
                            UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

        if (rx_chn->psil_paired) {
                xudma_navss_psil_unpair(rx_chn->common.udmax,
                                        rx_chn->common.src_thread,
                                        rx_chn->common.dst_thread);
                rx_chn->psil_paired = false;
        }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                               bool sync)
{
        int i = 0;
        u32 val;

        if (rx_chn->remote)
                return;

        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

        xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
                            UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

        val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

        while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
                val = xudma_rchanrt_read(rx_chn->udma_rchanx,
                                         UDMA_CHAN_RT_CTL_REG);
                udelay(1);
                if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
                        dev_err(rx_chn->common.dev, "RX tdown timeout\n");
                        break;
                }
                i++;
        }

        val = xudma_rchanrt_read(rx_chn->udma_rchanx,
                                 UDMA_CHAN_RT_PEER_RT_EN_REG);
        if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
                dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
        k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

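/*
 * k3_udma_glue_reset_rx_chn - drain and reset the rings of one RX flow
 *
 * @cleanup is called for every descriptor still queued on the RX FDQ ring so
 * the caller can unmap and free it; pass @skip_fdq as true when a shared FDQ
 * was already drained for another flow.
 */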
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                u32 flow_num, void *data,
                void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
        struct device *dev = rx_chn->common.dev;
        dma_addr_t desc_dma;
        int occ_rx, i, ret;

        /* reset RXCQ as it is not an input for UDMA - expected to be empty */
        occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
        dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);

        /* Skip RX FDQ in case one FDQ is used for the set of flows */
        if (skip_fdq)
                goto do_reset;

        /*
         * The RX FDQ has to be reset in a special way as it is an input ring
         * for UDMA and its state is cached by UDMA, so:
         * 1) save RX FDQ occ
         * 2) clean up RX FDQ and call the .cleanup() callback for each desc
         * 3) reset RX FDQ in a special way
         */
        occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
        dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

        for (i = 0; i < occ_rx; i++) {
                ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
                if (ret) {
                        if (ret != -ENODATA)
                                dev_err(dev, "RX reset pop %d\n", ret);
                        break;
                }
                cleanup(data, desc_dma);
        }

        k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);

do_reset:
        k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);

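/*
 * k3_udma_glue_push_rx_chn and k3_udma_glue_pop_rx_chn feed free descriptors
 * to the flow's FDQ ring and pop completed descriptors from its RX ring.
 */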
int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                             u32 flow_num, struct cppi5_host_desc_t *desc_rx,
                             dma_addr_t desc_dma)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
                            u32 flow_num, dma_addr_t *desc_dma)
{
        struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

        return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

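/*
 * k3_udma_glue_rx_get_irq - get the virtual IRQ for an RX flow
 *
 * For PKTDMA the interrupt belongs to the mapped rflow; otherwise it is the
 * RX ring's interrupt.
 */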
int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
                            u32 flow_num)
{
        struct k3_udma_glue_rx_flow *flow;

        flow = &rx_chn->flows[flow_num];

        if (xudma_is_pktdma(rx_chn->common.udmax)) {
                flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
                                                        flow->udma_rflow_id);
        } else {
                flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
        }

        return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);

struct device *
        k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
{
        if (xudma_is_pktdma(rx_chn->common.udmax) &&
            (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
                return &rx_chn->common.chan_dev;

        return xudma_get_device(rx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);

void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
                                       dma_addr_t *addr)
{
        if (!xudma_is_pktdma(rx_chn->common.udmax) ||
            !rx_chn->common.atype_asel)
                return;

        *addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);

void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
                                       dma_addr_t *addr)
{
        if (!xudma_is_pktdma(rx_chn->common.udmax) ||
            !rx_chn->common.atype_asel)
                return;

        *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);

static int __init k3_udma_glue_class_init(void)
{
        return class_register(&k3_udma_glue_devclass);
}
arch_initcall(k3_udma_glue_class_init);