/* dpdk/drivers/common/cnxk/roc_cpt.c */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(C) 2021 Marvell.
   3 */
   4
   5#include "roc_api.h"
   6#include "roc_priv.h"
   7
/* Size in bytes of the flow-control (FC) region of the IQ memory */
#define CPT_IQ_FC_LEN  128
/* Bytes of instruction-group memory per 40-descriptor unit */
#define CPT_IQ_GRP_LEN 16

/* CPT_LF_Q_SIZE[SIZE_DIV40] expresses the queue length in units of 40 */
#define CPT_IQ_NB_DESC_MULTIPLIER 40

/* The effective queue size to software is (CPT_LF_Q_SIZE[SIZE_DIV40] - 1 - 8).
 *
 * CPT requires 320 free entries (+8). And 40 entries are required for
 * allowing CPT to discard packet when the queues are full (+1).
 */
#define CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc)                                     \
	(PLT_DIV_CEIL(nb_desc, CPT_IQ_NB_DESC_MULTIPLIER) + 1 + 8)

/* Instruction-group memory required for a queue of nb_desc entries */
#define CPT_IQ_GRP_SIZE(nb_desc)                                               \
	(CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) * CPT_IQ_GRP_LEN)

/* Bounds and default for the per-LF descriptor count (see cpt_lf_init()) */
#define CPT_LF_MAX_NB_DESC     128000
#define CPT_LF_DEFAULT_NB_DESC 1024
  26
  27static void
  28cpt_lf_misc_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
  29{
  30        /* Enable all cpt lf error irqs except RQ_DISABLED and CQ_DISABLED */
  31        if (enb)
  32                plt_write64((BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) | BIT_ULL(2) |
  33                             BIT_ULL(1)),
  34                            lf->rbase + CPT_LF_MISC_INT_ENA_W1S);
  35        else
  36                plt_write64((BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) | BIT_ULL(2) |
  37                             BIT_ULL(1)),
  38                            lf->rbase + CPT_LF_MISC_INT_ENA_W1C);
  39}
  40
  41static void
  42cpt_lf_misc_irq(void *param)
  43{
  44        struct roc_cpt_lf *lf = (struct roc_cpt_lf *)param;
  45        struct dev *dev = lf->dev;
  46        uint64_t intr;
  47
  48        intr = plt_read64(lf->rbase + CPT_LF_MISC_INT);
  49        if (intr == 0)
  50                return;
  51
  52        plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
  53
  54        /* Clear interrupt */
  55        plt_write64(intr, lf->rbase + CPT_LF_MISC_INT);
  56}
  57
  58static int
  59cpt_lf_register_misc_irq(struct roc_cpt_lf *lf)
  60{
  61        struct plt_pci_device *pci_dev = lf->pci_dev;
  62        struct plt_intr_handle *handle;
  63        int rc, vec;
  64
  65        handle = &pci_dev->intr_handle;
  66
  67        vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
  68        /* Clear err interrupt */
  69        cpt_lf_misc_intr_enb_dis(lf, false);
  70        /* Set used interrupt vectors */
  71        rc = dev_irq_register(handle, cpt_lf_misc_irq, lf, vec);
  72        /* Enable all dev interrupt except for RQ_DISABLED */
  73        cpt_lf_misc_intr_enb_dis(lf, true);
  74
  75        return rc;
  76}
  77
  78static void
  79cpt_lf_unregister_misc_irq(struct roc_cpt_lf *lf)
  80{
  81        struct plt_pci_device *pci_dev = lf->pci_dev;
  82        struct plt_intr_handle *handle;
  83        int vec;
  84
  85        handle = &pci_dev->intr_handle;
  86
  87        vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
  88        /* Clear err interrupt */
  89        cpt_lf_misc_intr_enb_dis(lf, false);
  90        dev_irq_unregister(handle, cpt_lf_misc_irq, lf, vec);
  91}
  92
  93static void
  94cpt_lf_done_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
  95{
  96        if (enb)
  97                plt_write64(0x1, lf->rbase + CPT_LF_DONE_INT_ENA_W1S);
  98        else
  99                plt_write64(0x1, lf->rbase + CPT_LF_DONE_INT_ENA_W1C);
 100}
 101
 102static void
 103cpt_lf_done_irq(void *param)
 104{
 105        struct roc_cpt_lf *lf = param;
 106        uint64_t done_wait;
 107        uint64_t intr;
 108
 109        /* Read the number of completed requests */
 110        intr = plt_read64(lf->rbase + CPT_LF_DONE);
 111        if (intr == 0)
 112                return;
 113
 114        done_wait = plt_read64(lf->rbase + CPT_LF_DONE_WAIT);
 115
 116        /* Acknowledge the number of completed requests */
 117        plt_write64(intr, lf->rbase + CPT_LF_DONE_ACK);
 118
 119        plt_write64(done_wait, lf->rbase + CPT_LF_DONE_WAIT);
 120}
 121
 122static int
 123cpt_lf_register_done_irq(struct roc_cpt_lf *lf)
 124{
 125        struct plt_pci_device *pci_dev = lf->pci_dev;
 126        struct plt_intr_handle *handle;
 127        int rc, vec;
 128
 129        handle = &pci_dev->intr_handle;
 130
 131        vec = lf->msixoff + CPT_LF_INT_VEC_DONE;
 132
 133        /* Clear done interrupt */
 134        cpt_lf_done_intr_enb_dis(lf, false);
 135
 136        /* Set used interrupt vectors */
 137        rc = dev_irq_register(handle, cpt_lf_done_irq, lf, vec);
 138
 139        /* Enable done interrupt */
 140        cpt_lf_done_intr_enb_dis(lf, true);
 141
 142        return rc;
 143}
 144
 145static void
 146cpt_lf_unregister_done_irq(struct roc_cpt_lf *lf)
 147{
 148        struct plt_pci_device *pci_dev = lf->pci_dev;
 149        struct plt_intr_handle *handle;
 150        int vec;
 151
 152        handle = &pci_dev->intr_handle;
 153
 154        vec = lf->msixoff + CPT_LF_INT_VEC_DONE;
 155
 156        /* Clear done interrupt */
 157        cpt_lf_done_intr_enb_dis(lf, false);
 158        dev_irq_unregister(handle, cpt_lf_done_irq, lf, vec);
 159}
 160
 161static int
 162cpt_lf_register_irqs(struct roc_cpt_lf *lf)
 163{
 164        int rc;
 165
 166        if (lf->msixoff == MSIX_VECTOR_INVALID) {
 167                plt_err("Invalid CPTLF MSIX vector offset vector: 0x%x",
 168                        lf->msixoff);
 169                return -EINVAL;
 170        }
 171
 172        /* Register lf err interrupt */
 173        rc = cpt_lf_register_misc_irq(lf);
 174        if (rc)
 175                plt_err("Error registering IRQs");
 176
 177        rc = cpt_lf_register_done_irq(lf);
 178        if (rc)
 179                plt_err("Error registering IRQs");
 180
 181        return rc;
 182}
 183
/* Tear down both interrupt handlers (misc/error and done) of an LF. */
static void
cpt_lf_unregister_irqs(struct roc_cpt_lf *lf)
{
	cpt_lf_unregister_misc_irq(lf);
	cpt_lf_unregister_done_irq(lf);
}
 190
 191static void
 192cpt_lf_dump(struct roc_cpt_lf *lf)
 193{
 194        plt_cpt_dbg("CPT LF");
 195        plt_cpt_dbg("RBASE: 0x%016" PRIx64, lf->rbase);
 196        plt_cpt_dbg("LMT_BASE: 0x%016" PRIx64, lf->lmt_base);
 197        plt_cpt_dbg("MSIXOFF: 0x%x", lf->msixoff);
 198        plt_cpt_dbg("LF_ID: 0x%x", lf->lf_id);
 199        plt_cpt_dbg("NB DESC: %d", lf->nb_desc);
 200        plt_cpt_dbg("FC_ADDR: 0x%016" PRIx64, (uintptr_t)lf->fc_addr);
 201        plt_cpt_dbg("CQ.VADDR: 0x%016" PRIx64, (uintptr_t)lf->iq_vaddr);
 202
 203        plt_cpt_dbg("CPT LF REG:");
 204        plt_cpt_dbg("LF_CTL[0x%016llx]: 0x%016" PRIx64, CPT_LF_CTL,
 205                    plt_read64(lf->rbase + CPT_LF_CTL));
 206        plt_cpt_dbg("Q_SIZE[0x%016llx]: 0x%016" PRIx64, CPT_LF_INPROG,
 207                    plt_read64(lf->rbase + CPT_LF_INPROG));
 208
 209        plt_cpt_dbg("Q_BASE[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_BASE,
 210                    plt_read64(lf->rbase + CPT_LF_Q_BASE));
 211        plt_cpt_dbg("Q_SIZE[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_SIZE,
 212                    plt_read64(lf->rbase + CPT_LF_Q_SIZE));
 213        plt_cpt_dbg("Q_INST_PTR[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_INST_PTR,
 214                    plt_read64(lf->rbase + CPT_LF_Q_INST_PTR));
 215        plt_cpt_dbg("Q_GRP_PTR[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_GRP_PTR,
 216                    plt_read64(lf->rbase + CPT_LF_Q_GRP_PTR));
 217}
 218
 219int
 220cpt_lf_outb_cfg(struct dev *dev, uint16_t sso_pf_func, uint16_t nix_pf_func,
 221                uint8_t lf_id, bool ena)
 222{
 223        struct cpt_inline_ipsec_cfg_msg *req;
 224        struct mbox *mbox = dev->mbox;
 225
 226        req = mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
 227        if (req == NULL)
 228                return -ENOSPC;
 229
 230        req->dir = CPT_INLINE_OUTBOUND;
 231        req->slot = lf_id;
 232        if (ena) {
 233                req->enable = 1;
 234                req->sso_pf_func = sso_pf_func;
 235                req->nix_pf_func = nix_pf_func;
 236        } else {
 237                req->enable = 0;
 238        }
 239
 240        return mbox_process(mbox);
 241}
 242
 243int
 244roc_cpt_inline_ipsec_cfg(struct dev *cpt_dev, uint8_t lf_id,
 245                         struct roc_nix *roc_nix)
 246{
 247        bool ena = roc_nix ? true : false;
 248        uint16_t nix_pf_func = 0;
 249        uint16_t sso_pf_func = 0;
 250
 251        if (ena) {
 252                nix_pf_func = roc_nix_get_pf_func(roc_nix);
 253                sso_pf_func = idev_sso_pffunc_get();
 254        }
 255
 256        return cpt_lf_outb_cfg(cpt_dev, sso_pf_func, nix_pf_func, lf_id, ena);
 257}
 258
 259int
 260roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
 261                             uint16_t param2)
 262{
 263        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 264        struct cpt_rx_inline_lf_cfg_msg *req;
 265        struct mbox *mbox;
 266
 267        mbox = cpt->dev.mbox;
 268
 269        req = mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
 270        if (req == NULL)
 271                return -ENOSPC;
 272
 273        req->sso_pf_func = idev_sso_pffunc_get();
 274        req->param1 = param1;
 275        req->param2 = param2;
 276
 277        return mbox_process(mbox);
 278}
 279
/* Program the RXC (reassembly context) aging parameters via mailbox. */
int
roc_cpt_rxc_time_cfg(struct roc_cpt *roc_cpt, struct roc_cpt_rxc_time_cfg *cfg)
{
	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
	struct cpt_rxc_time_cfg_req *req;
	struct dev *dev = &cpt->dev;

	req = mbox_alloc_msg_cpt_rxc_time_cfg(dev->mbox);
	if (req == NULL)
		return -ENOSPC;

	req->blkaddr = 0;

	/* The step value is in microseconds. */
	req->step = cfg->step;

	/* Zombie timeout: zombie_limit * step microseconds */
	req->zombie_limit = cfg->zombie_limit;
	req->zombie_thres = cfg->zombie_thres;

	/* Active timeout: active_limit * step microseconds */
	req->active_limit = cfg->active_limit;
	req->active_thres = cfg->active_thres;

	return mbox_process(dev->mbox);
}
 306
 307int
 308cpt_get_msix_offset(struct dev *dev, struct msix_offset_rsp **msix_rsp)
 309{
 310        struct mbox *mbox = dev->mbox;
 311        int rc;
 312
 313        /* Get MSIX vector offsets */
 314        mbox_alloc_msg_msix_offset(mbox);
 315        rc = mbox_process_msg(mbox, (void *)msix_rsp);
 316
 317        return rc;
 318}
 319
 320int
 321cpt_lfs_attach(struct dev *dev, uint8_t blkaddr, bool modify, uint16_t nb_lf)
 322{
 323        struct mbox *mbox = dev->mbox;
 324        struct rsrc_attach_req *req;
 325
 326        if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
 327                return -EINVAL;
 328
 329        /* Attach CPT(lf) */
 330        req = mbox_alloc_msg_attach_resources(mbox);
 331        if (req == NULL)
 332                return -ENOSPC;
 333
 334        req->cptlfs = nb_lf;
 335        req->modify = modify;
 336        req->cpt_blkaddr = blkaddr;
 337
 338        return mbox_process(mbox);
 339}
 340
 341int
 342cpt_lfs_detach(struct dev *dev)
 343{
 344        struct mbox *mbox = dev->mbox;
 345        struct rsrc_detach_req *req;
 346
 347        req = mbox_alloc_msg_detach_resources(mbox);
 348        if (req == NULL)
 349                return -ENOSPC;
 350
 351        req->cptlfs = 1;
 352        req->partial = 1;
 353
 354        return mbox_process(mbox);
 355}
 356
 357static int
 358cpt_available_lfs_get(struct dev *dev, uint16_t *nb_lf)
 359{
 360        struct mbox *mbox = dev->mbox;
 361        struct free_rsrcs_rsp *rsp;
 362        int rc;
 363
 364        mbox_alloc_msg_free_rsrc_cnt(mbox);
 365
 366        rc = mbox_process_msg(mbox, (void *)&rsp);
 367        if (rc)
 368                return -EIO;
 369
 370        *nb_lf = rsp->cpt;
 371        return 0;
 372}
 373
 374int
 375cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr,
 376              bool inl_dev_sso)
 377{
 378        struct cpt_lf_alloc_req_msg *req;
 379        struct mbox *mbox = dev->mbox;
 380
 381        if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
 382                return -EINVAL;
 383
 384        PLT_SET_USED(inl_dev_sso);
 385
 386        req = mbox_alloc_msg_cpt_lf_alloc(mbox);
 387        req->nix_pf_func = 0;
 388        req->sso_pf_func = idev_sso_pffunc_get();
 389        req->eng_grpmsk = eng_grpmsk;
 390        req->blkaddr = blkaddr;
 391
 392        return mbox_process(mbox);
 393}
 394
 395int
 396cpt_lfs_free(struct dev *dev)
 397{
 398        mbox_alloc_msg_cpt_lf_free(dev->mbox);
 399
 400        return mbox_process(dev->mbox);
 401}
 402
 403static int
 404cpt_hardware_caps_get(struct dev *dev, union cpt_eng_caps *hw_caps)
 405{
 406        struct cpt_caps_rsp_msg *rsp;
 407        int ret;
 408
 409        mbox_alloc_msg_cpt_caps_get(dev->mbox);
 410
 411        ret = mbox_process_msg(dev->mbox, (void *)&rsp);
 412        if (ret)
 413                return -EIO;
 414
 415        mbox_memcpy(hw_caps, rsp->eng_caps,
 416                    sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);
 417
 418        return 0;
 419}
 420
 421static uint32_t
 422cpt_lf_iq_mem_calc(uint32_t nb_desc)
 423{
 424        uint32_t len;
 425
 426        /* Space for instruction group memory */
 427        len = CPT_IQ_GRP_SIZE(nb_desc);
 428
 429        /* Align to 128B */
 430        len = PLT_ALIGN(len, ROC_ALIGN);
 431
 432        /* Space for FC */
 433        len += CPT_IQ_FC_LEN;
 434
 435        /* For instruction queues */
 436        len += CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) * CPT_IQ_NB_DESC_MULTIPLIER *
 437               sizeof(struct cpt_inst_s);
 438
 439        return len;
 440}
 441
/* Program an LF's instruction-queue registers from its pre-allocated
 * IQ memory (lf->iq_vaddr, sized by cpt_lf_iq_mem_calc()).
 */
static inline void
cpt_iq_init(struct roc_cpt_lf *lf)
{
	union cpt_lf_q_size lf_q_size = {.u = 0x0};
	union cpt_lf_q_base lf_q_base = {.u = 0x0};
	uintptr_t addr;

	/* Enqueue (NQX) register address for this LF */
	lf->io_addr = lf->rbase + CPT_LF_NQX(0);

	/* Disable command queue */
	roc_cpt_iq_disable(lf);

	/* Set command queue base address: the queue proper starts right
	 * after the 128B-aligned instruction-group region.
	 */
	addr = (uintptr_t)lf->iq_vaddr +
	       PLT_ALIGN(CPT_IQ_GRP_SIZE(lf->nb_desc), ROC_ALIGN);

	lf_q_base.u = addr;

	plt_write64(lf_q_base.u, lf->rbase + CPT_LF_Q_BASE);

	/* Set command queue size */
	lf_q_size.s.size_div40 = CPT_IQ_NB_DESC_SIZE_DIV40(lf->nb_desc);
	plt_write64(lf_q_size.u, lf->rbase + CPT_LF_Q_SIZE);

	/* Flow-control region lives at the queue base address */
	lf->fc_addr = (uint64_t *)addr;
}
 468
/* Attach and allocate nb_lf CPT LFs for the device and record each
 * LF's MSIX offset. On failure, already-acquired resources are rolled
 * back via the goto chain (free, then detach).
 */
int
roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf)
{
	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
	uint8_t blkaddr = RVU_BLOCK_ADDR_CPT0;
	struct msix_offset_rsp *rsp;
	uint8_t eng_grpmsk;
	int rc, i;

	/* Request LF resources */
	rc = cpt_lfs_attach(&cpt->dev, blkaddr, false, nb_lf);
	if (rc)
		return rc;

	/* Enable the AE, SE and IE engine groups for these LFs */
	eng_grpmsk = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |
		     (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]) |
		     (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_IE]);

	rc = cpt_lfs_alloc(&cpt->dev, eng_grpmsk, blkaddr, false);
	if (rc)
		goto lfs_detach;

	rc = cpt_get_msix_offset(&cpt->dev, &rsp);
	if (rc)
		goto lfs_free;

	/* Pick the MSIX offset from the block the LF actually lives on */
	for (i = 0; i < nb_lf; i++)
		cpt->lf_msix_off[i] =
			(cpt->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
				rsp->cpt1_lf_msixoff[i] :
				rsp->cptlf_msixoff[i];

	roc_cpt->nb_lf = nb_lf;

	return 0;

lfs_free:
	cpt_lfs_free(&cpt->dev);
lfs_detach:
	cpt_lfs_detach(&cpt->dev);
	return rc;
}
 511
 512uint64_t
 513cpt_get_blkaddr(struct dev *dev)
 514{
 515        uint64_t reg;
 516        uint64_t off;
 517
 518        /* Reading the discovery register to know which CPT is the LF
 519         * attached to. Assume CPT LF's of only one block are attached
 520         * to a pffunc.
 521         */
 522        if (dev_is_vf(dev))
 523                off = RVU_VF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);
 524        else
 525                off = RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);
 526
 527        reg = plt_read64(dev->bar2 + off);
 528
 529        return reg & 0x1FFULL ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;
 530}
 531
/* Initialize a single CPT LF: allocate its IQ memory, derive its
 * register base from the owning block, program the instruction queue
 * and register its IRQ handlers. Returns 0 or a negative errno; on
 * failure the IQ is disabled and the memory freed.
 */
int
cpt_lf_init(struct roc_cpt_lf *lf)
{
	struct dev *dev = lf->dev;
	uint64_t blkaddr;
	void *iq_mem;
	int rc;

	/* Clamp an unset or oversized descriptor count to the default */
	if (lf->nb_desc == 0 || lf->nb_desc > CPT_LF_MAX_NB_DESC)
		lf->nb_desc = CPT_LF_DEFAULT_NB_DESC;

	/* Allocate memory for instruction queue for CPT LF. */
	iq_mem = plt_zmalloc(cpt_lf_iq_mem_calc(lf->nb_desc), ROC_ALIGN);
	if (iq_mem == NULL)
		return -ENOMEM;

	/* Register base: BAR2 + (blkaddr << 20 | lf_id << 12) */
	blkaddr = cpt_get_blkaddr(dev);
	lf->rbase = dev->bar2 + ((blkaddr << 20) | (lf->lf_id << 12));
	lf->iq_vaddr = iq_mem;
	lf->lmt_base = dev->lmt_base;
	lf->pf_func = dev->pf_func;

	/* Initialize instruction queue */
	cpt_iq_init(lf);

	rc = cpt_lf_register_irqs(lf);
	if (rc)
		goto disable_iq;

	return 0;

disable_iq:
	roc_cpt_iq_disable(lf);
	plt_free(iq_mem);
	return rc;
}
 568
 569int
 570roc_cpt_lf_init(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf)
 571{
 572        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 573        int rc;
 574
 575        lf->dev = &cpt->dev;
 576        lf->roc_cpt = roc_cpt;
 577        lf->msixoff = cpt->lf_msix_off[lf->lf_id];
 578        lf->pci_dev = cpt->pci_dev;
 579
 580        rc = cpt_lf_init(lf);
 581        if (rc)
 582                return rc;
 583
 584        /* LF init successful */
 585        roc_cpt->lf[lf->lf_id] = lf;
 586        return rc;
 587}
 588
/* Initialize the ROC CPT device: init the underlying RVU device, query
 * hardware capabilities and available LF count, and register the
 * device with idev if no CPT device is registered yet.
 * Returns 0 or a negative errno.
 */
int
roc_cpt_dev_init(struct roc_cpt *roc_cpt)
{
	struct plt_pci_device *pci_dev;
	uint16_t nb_lf_avail;
	struct dev *dev;
	struct cpt *cpt;
	int rc;

	if (roc_cpt == NULL || roc_cpt->pci_dev == NULL)
		return -EINVAL;

	/* The private struct must fit in the caller-reserved area */
	PLT_STATIC_ASSERT(sizeof(struct cpt) <= ROC_CPT_MEM_SZ);

	cpt = roc_cpt_to_cpt_priv(roc_cpt);
	memset(cpt, 0, sizeof(*cpt));
	pci_dev = roc_cpt->pci_dev;
	dev = &cpt->dev;

	/* Initialize device  */
	rc = dev_init(dev, pci_dev);
	if (rc) {
		plt_err("Failed to init roc device");
		goto fail;
	}

	cpt->pci_dev = pci_dev;
	roc_cpt->lmt_base = dev->lmt_base;

	rc = cpt_hardware_caps_get(dev, roc_cpt->hw_caps);
	if (rc) {
		plt_err("Could not determine hardware capabilities");
		goto fail;
	}

	rc = cpt_available_lfs_get(&cpt->dev, &nb_lf_avail);
	if (rc) {
		plt_err("Could not get available lfs");
		goto fail;
	}

	/* Reserve 1 CPT LF for inline inbound */
	nb_lf_avail = PLT_MIN(nb_lf_avail, ROC_CPT_MAX_LFS - 1);

	roc_cpt->nb_lf_avail = nb_lf_avail;

	dev->roc_cpt = roc_cpt;

	/* Set it to idev if not already present */
	if (!roc_idev_cpt_get())
		roc_idev_cpt_set(roc_cpt);

	return 0;

fail:
	return rc;
}
 646
 647int
 648roc_cpt_lf_ctx_flush(struct roc_cpt_lf *lf, uint64_t cptr)
 649{
 650        union cpt_lf_ctx_flush reg;
 651
 652        if (lf == NULL)
 653                return -ENOTSUP;
 654
 655        reg.u = 0;
 656        reg.s.pf_func = lf->pf_func;
 657        reg.s.inval = 1;
 658        reg.s.cptr = cptr;
 659
 660        plt_write64(reg.u, lf->rbase + CPT_LF_CTX_FLUSH);
 661
 662        return 0;
 663}
 664
/* Release all resources of an initialized LF: IRQ handlers, the
 * instruction queue, and the IQ memory allocated by cpt_lf_init().
 */
void
cpt_lf_fini(struct roc_cpt_lf *lf)
{
	/* Unregister IRQ's */
	cpt_lf_unregister_irqs(lf);

	/* Disable IQ */
	roc_cpt_iq_disable(lf);

	/* Free memory */
	plt_free(lf->iq_vaddr);
	lf->iq_vaddr = NULL;
}
 678
 679void
 680roc_cpt_lf_fini(struct roc_cpt_lf *lf)
 681{
 682        if (lf == NULL)
 683                return;
 684        lf->roc_cpt->lf[lf->lf_id] = NULL;
 685        cpt_lf_fini(lf);
 686}
 687
 688int
 689roc_cpt_dev_fini(struct roc_cpt *roc_cpt)
 690{
 691        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 692
 693        if (cpt == NULL)
 694                return -EINVAL;
 695
 696        /* Remove idev references */
 697        if (roc_idev_cpt_get() == roc_cpt)
 698                roc_idev_cpt_set(NULL);
 699
 700        roc_cpt->nb_lf_avail = 0;
 701
 702        roc_cpt->lmt_base = 0;
 703
 704        return dev_fini(&cpt->dev, cpt->pci_dev);
 705}
 706
 707void
 708roc_cpt_dev_clear(struct roc_cpt *roc_cpt)
 709{
 710        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 711        int i;
 712
 713        if (cpt == NULL)
 714                return;
 715
 716        for (i = 0; i < roc_cpt->nb_lf; i++)
 717                cpt->lf_msix_off[i] = 0;
 718
 719        roc_cpt->nb_lf = 0;
 720
 721        cpt_lfs_free(&cpt->dev);
 722
 723        cpt_lfs_detach(&cpt->dev);
 724}
 725
 726int
 727roc_cpt_eng_grp_add(struct roc_cpt *roc_cpt, enum cpt_eng_type eng_type)
 728{
 729        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 730        struct dev *dev = &cpt->dev;
 731        struct cpt_eng_grp_req *req;
 732        struct cpt_eng_grp_rsp *rsp;
 733        int ret;
 734
 735        req = mbox_alloc_msg_cpt_eng_grp_get(dev->mbox);
 736        if (req == NULL)
 737                return -EIO;
 738
 739        switch (eng_type) {
 740        case CPT_ENG_TYPE_AE:
 741        case CPT_ENG_TYPE_SE:
 742        case CPT_ENG_TYPE_IE:
 743                break;
 744        default:
 745                return -EINVAL;
 746        }
 747
 748        req->eng_type = eng_type;
 749        ret = mbox_process_msg(dev->mbox, (void *)&rsp);
 750        if (ret)
 751                return -EIO;
 752
 753        if (rsp->eng_grp_num > 8) {
 754                plt_err("Invalid CPT engine group");
 755                return -ENOTSUP;
 756        }
 757
 758        roc_cpt->eng_grp[eng_type] = rsp->eng_grp_num;
 759
 760        return rsp->eng_grp_num;
 761}
 762
/* Quiesce an LF's instruction queue: stop enqueues, wait (up to
 * roughly 400 ms: 20 iterations of 20 ms) for in-flight instructions
 * to drain, then disable execution. Logs but does not fail if the LF
 * remains busy after the timeout.
 */
void
roc_cpt_iq_disable(struct roc_cpt_lf *lf)
{
	union cpt_lf_ctl lf_ctl = {.u = 0x0};
	union cpt_lf_inprog lf_inprog;
	int timeout = 20;

	/* Disable instructions enqueuing */
	plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);

	/* Wait for instruction queue to become empty */
	do {
		lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
		if (!lf_inprog.s.inflight)
			break;

		plt_delay_ms(20);
		if (timeout-- < 0) {
			plt_err("CPT LF %d is still busy", lf->lf_id);
			break;
		}

	} while (1);

	/* Disable executions in the LF's queue.
	 * The queue should be empty at this point
	 */
	lf_inprog.s.eena = 0x0;
	plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG);
}
 793
/* (Re)enable an LF's instruction queue: quiesce it first, enable
 * execution, then enable enqueuing with flow control; finally dump the
 * LF state to the debug log.
 */
void
roc_cpt_iq_enable(struct roc_cpt_lf *lf)
{
	union cpt_lf_inprog lf_inprog;
	union cpt_lf_ctl lf_ctl;

	/* Disable command queue */
	roc_cpt_iq_disable(lf);

	/* Enable command queue execution */
	lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
	lf_inprog.s.eena = 1;
	plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG);

	/* Enable instruction queue enqueuing, with flow control and
	 * up-crossing notification using CPT_FC_NUM_HYST_BITS hysteresis
	 */
	lf_ctl.u = plt_read64(lf->rbase + CPT_LF_CTL);
	lf_ctl.s.ena = 1;
	lf_ctl.s.fc_ena = 1;
	lf_ctl.s.fc_up_crossing = 1;
	lf_ctl.s.fc_hyst_bits = CPT_FC_NUM_HYST_BITS;
	plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);

	cpt_lf_dump(lf);
}
 818
 819int
 820roc_cpt_lmtline_init(struct roc_cpt *roc_cpt, struct roc_cpt_lmtline *lmtline,
 821                     int lf_id)
 822{
 823        struct roc_cpt_lf *lf;
 824
 825        lf = roc_cpt->lf[lf_id];
 826        if (lf == NULL)
 827                return -ENOTSUP;
 828
 829        lmtline->io_addr = lf->io_addr;
 830        if (roc_model_is_cn10k())
 831                lmtline->io_addr |= ROC_CN10K_CPT_INST_DW_M1 << 4;
 832
 833        lmtline->fc_addr = lf->fc_addr;
 834        lmtline->lmt_base = lf->lmt_base;
 835
 836        return 0;
 837}
 838