dpdk/drivers/common/cnxk/roc_cpt.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "roc_api.h"
#include "roc_priv.h"

#define CPT_IQ_FC_LEN  128
#define CPT_IQ_GRP_LEN 16

#define CPT_IQ_NB_DESC_MULTIPLIER 40

/* The effective queue size to software is (CPT_LF_Q_SIZE[SIZE_DIV40] - 1 - 8).
 *
 * CPT requires 320 free entries in the queue, i.e. 8 chunks of 40 (the +8).
 * One more chunk of 40 entries is reserved so that CPT can discard packets
 * when the queue is full (the +1).
 */
#define CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc)                                     \
        (PLT_DIV_CEIL(nb_desc, CPT_IQ_NB_DESC_MULTIPLIER) + 1 + 8)

#define CPT_IQ_GRP_SIZE(nb_desc)                                               \
        (CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) * CPT_IQ_GRP_LEN)
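
/*
 * Worked example (illustrative, not part of the driver): for the default
 * queue depth of 1024 descriptors,
 *   CPT_IQ_NB_DESC_SIZE_DIV40(1024) = DIV_CEIL(1024, 40) + 1 + 8
 *                                   = 26 + 9 = 35
 * so hardware is programmed with SIZE_DIV40 = 35 chunks of 40 entries, and
 * CPT_IQ_GRP_SIZE(1024) = 35 * 16 = 560 bytes of group memory before
 * alignment.
 */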

#define CPT_LF_MAX_NB_DESC      128000
#define CPT_LF_DEFAULT_NB_DESC  1024
#define CPT_LF_FC_MIN_THRESHOLD 32

static void
cpt_lf_misc_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
{
        /* Enable or disable the CPT LF error interrupts (MISC_INT bits 1-3, 5 and 6) */
        if (enb)
                plt_write64((BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) | BIT_ULL(2) |
                             BIT_ULL(1)),
                            lf->rbase + CPT_LF_MISC_INT_ENA_W1S);
        else
                plt_write64((BIT_ULL(6) | BIT_ULL(5) | BIT_ULL(3) | BIT_ULL(2) |
                             BIT_ULL(1)),
                            lf->rbase + CPT_LF_MISC_INT_ENA_W1C);
}

static void
cpt_lf_misc_irq(void *param)
{
        struct roc_cpt_lf *lf = (struct roc_cpt_lf *)param;
        struct dev *dev = lf->dev;
        uint64_t intr;

        intr = plt_read64(lf->rbase + CPT_LF_MISC_INT);
        if (intr == 0)
                return;

        plt_err("Err_irq=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);

        /* Dump lf registers */
        cpt_lf_print(lf);

        /* Clear interrupt */
        plt_write64(intr, lf->rbase + CPT_LF_MISC_INT);
}

static int
cpt_lf_register_misc_irq(struct roc_cpt_lf *lf)
{
        struct plt_pci_device *pci_dev = lf->pci_dev;
        struct plt_intr_handle *handle;
        int rc, vec;

        handle = pci_dev->intr_handle;

        vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
        /* Clear err interrupt */
        cpt_lf_misc_intr_enb_dis(lf, false);
        /* Set used interrupt vectors */
        rc = dev_irq_register(handle, cpt_lf_misc_irq, lf, vec);
        /* Enable the CPT LF error interrupts */
        cpt_lf_misc_intr_enb_dis(lf, true);

        return rc;
}

static void
cpt_lf_unregister_misc_irq(struct roc_cpt_lf *lf)
{
        struct plt_pci_device *pci_dev = lf->pci_dev;
        struct plt_intr_handle *handle;
        int vec;

        handle = pci_dev->intr_handle;

        vec = lf->msixoff + CPT_LF_INT_VEC_MISC;
        /* Clear err interrupt */
        cpt_lf_misc_intr_enb_dis(lf, false);
        dev_irq_unregister(handle, cpt_lf_misc_irq, lf, vec);
}

static void
cpt_lf_done_intr_enb_dis(struct roc_cpt_lf *lf, bool enb)
{
        if (enb)
                plt_write64(0x1, lf->rbase + CPT_LF_DONE_INT_ENA_W1S);
        else
                plt_write64(0x1, lf->rbase + CPT_LF_DONE_INT_ENA_W1C);
}

static void
cpt_lf_done_irq(void *param)
{
        struct roc_cpt_lf *lf = param;
        uint64_t done_wait;
        uint64_t intr;

        /* Read the number of completed requests */
        intr = plt_read64(lf->rbase + CPT_LF_DONE);
        if (intr == 0)
                return;

        done_wait = plt_read64(lf->rbase + CPT_LF_DONE_WAIT);

        /* Acknowledge the number of completed requests */
        plt_write64(intr, lf->rbase + CPT_LF_DONE_ACK);

        plt_write64(done_wait, lf->rbase + CPT_LF_DONE_WAIT);
}

static int
cpt_lf_register_done_irq(struct roc_cpt_lf *lf)
{
        struct plt_pci_device *pci_dev = lf->pci_dev;
        struct plt_intr_handle *handle;
        int rc, vec;

        handle = pci_dev->intr_handle;

        vec = lf->msixoff + CPT_LF_INT_VEC_DONE;

        /* Clear done interrupt */
        cpt_lf_done_intr_enb_dis(lf, false);

        /* Set used interrupt vectors */
        rc = dev_irq_register(handle, cpt_lf_done_irq, lf, vec);

        /* Enable done interrupt */
        cpt_lf_done_intr_enb_dis(lf, true);

        return rc;
}

static void
cpt_lf_unregister_done_irq(struct roc_cpt_lf *lf)
{
        struct plt_pci_device *pci_dev = lf->pci_dev;
        struct plt_intr_handle *handle;
        int vec;

        handle = pci_dev->intr_handle;

        vec = lf->msixoff + CPT_LF_INT_VEC_DONE;

        /* Clear done interrupt */
        cpt_lf_done_intr_enb_dis(lf, false);
        dev_irq_unregister(handle, cpt_lf_done_irq, lf, vec);
}

static int
cpt_lf_register_irqs(struct roc_cpt_lf *lf)
{
        int rc;

        if (lf->msixoff == MSIX_VECTOR_INVALID) {
                plt_err("Invalid CPTLF MSIX vector offset: 0x%x",
                        lf->msixoff);
                return -EINVAL;
        }

        /* Register lf err interrupt */
        rc = cpt_lf_register_misc_irq(lf);
        if (rc) {
                plt_err("Error registering misc IRQ");
                return rc;
        }

        rc = cpt_lf_register_done_irq(lf);
        if (rc) {
                plt_err("Error registering done IRQ");
                cpt_lf_unregister_misc_irq(lf);
        }

        return rc;
}

static void
cpt_lf_unregister_irqs(struct roc_cpt_lf *lf)
{
        cpt_lf_unregister_misc_irq(lf);
        cpt_lf_unregister_done_irq(lf);
}

static void
cpt_lf_dump(struct roc_cpt_lf *lf)
{
        plt_cpt_dbg("CPT LF");
        plt_cpt_dbg("RBASE: 0x%016" PRIx64, lf->rbase);
        plt_cpt_dbg("LMT_BASE: 0x%016" PRIx64, lf->lmt_base);
        plt_cpt_dbg("MSIXOFF: 0x%x", lf->msixoff);
        plt_cpt_dbg("LF_ID: 0x%x", lf->lf_id);
        plt_cpt_dbg("NB DESC: %d", lf->nb_desc);
        plt_cpt_dbg("FC_ADDR: 0x%016" PRIx64, (uintptr_t)lf->fc_addr);
        plt_cpt_dbg("IQ.VADDR: 0x%016" PRIx64, (uintptr_t)lf->iq_vaddr);

        plt_cpt_dbg("CPT LF REG:");
        plt_cpt_dbg("LF_CTL[0x%016llx]: 0x%016" PRIx64, CPT_LF_CTL,
                    plt_read64(lf->rbase + CPT_LF_CTL));
        plt_cpt_dbg("LF_INPROG[0x%016llx]: 0x%016" PRIx64, CPT_LF_INPROG,
                    plt_read64(lf->rbase + CPT_LF_INPROG));

        plt_cpt_dbg("Q_BASE[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_BASE,
                    plt_read64(lf->rbase + CPT_LF_Q_BASE));
        plt_cpt_dbg("Q_SIZE[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_SIZE,
                    plt_read64(lf->rbase + CPT_LF_Q_SIZE));
        plt_cpt_dbg("Q_INST_PTR[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_INST_PTR,
                    plt_read64(lf->rbase + CPT_LF_Q_INST_PTR));
        plt_cpt_dbg("Q_GRP_PTR[0x%016llx]: 0x%016" PRIx64, CPT_LF_Q_GRP_PTR,
                    plt_read64(lf->rbase + CPT_LF_Q_GRP_PTR));
}

int
cpt_lf_outb_cfg(struct dev *dev, uint16_t sso_pf_func, uint16_t nix_pf_func,
                uint8_t lf_id, bool ena)
{
        struct cpt_inline_ipsec_cfg_msg *req;
        struct mbox *mbox = dev->mbox;

        req = mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
        if (req == NULL)
                return -ENOSPC;

        req->dir = CPT_INLINE_OUTBOUND;
        req->slot = lf_id;
        if (ena) {
                req->enable = 1;
                req->sso_pf_func = sso_pf_func;
                req->nix_pf_func = nix_pf_func;
        } else {
                req->enable = 0;
        }

        return mbox_process(mbox);
}

int
roc_cpt_inline_ipsec_cfg(struct dev *cpt_dev, uint8_t lf_id,
                         struct roc_nix *roc_nix)
{
        bool ena = roc_nix ? true : false;
        uint16_t nix_pf_func = 0;
        uint16_t sso_pf_func = 0;

        if (ena) {
                nix_pf_func = roc_nix_get_pf_func(roc_nix);
                sso_pf_func = idev_sso_pffunc_get();
        }

        return cpt_lf_outb_cfg(cpt_dev, sso_pf_func, nix_pf_func, lf_id, ena);
}

int
roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
                             uint16_t param2)
{
        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
        struct cpt_rx_inline_lf_cfg_msg *req;
        struct mbox *mbox;

        mbox = cpt->dev.mbox;

        req = mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
        if (req == NULL)
                return -ENOSPC;

        req->sso_pf_func = idev_sso_pffunc_get();
        req->param1 = param1;
        req->param2 = param2;

        return mbox_process(mbox);
}

int
roc_cpt_rxc_time_cfg(struct roc_cpt *roc_cpt, struct roc_cpt_rxc_time_cfg *cfg)
{
        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
        struct cpt_rxc_time_cfg_req *req;
        struct dev *dev = &cpt->dev;

        req = mbox_alloc_msg_cpt_rxc_time_cfg(dev->mbox);
        if (req == NULL)
                return -ENOSPC;

        req->blkaddr = 0;

        /* The step value is in microseconds. */
        req->step = cfg->step;

        /* Zombie timeout is zombie_limit * step microseconds */
        req->zombie_limit = cfg->zombie_limit;
        req->zombie_thres = cfg->zombie_thres;

        /* Active timeout is active_limit * step microseconds */
        req->active_limit = cfg->active_limit;
        req->active_thres = cfg->active_thres;

        return mbox_process(dev->mbox);
}
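
/*
 * Illustrative configuration (the values are hypothetical, not driver
 * defaults): with step = 10 us, zombie_limit = 100 and active_limit = 100,
 * both the zombie and active reassembly timeouts come to 100 * 10 us = 1 ms.
 *
 *	struct roc_cpt_rxc_time_cfg cfg = {
 *		.step = 10,
 *		.zombie_limit = 100, .zombie_thres = 10,
 *		.active_limit = 100, .active_thres = 10,
 *	};
 *	rc = roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
 */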

int
cpt_get_msix_offset(struct dev *dev, struct msix_offset_rsp **msix_rsp)
{
        struct mbox *mbox = dev->mbox;
        int rc;

        /* Get MSIX vector offsets */
        mbox_alloc_msg_msix_offset(mbox);
        rc = mbox_process_msg(mbox, (void *)msix_rsp);

        return rc;
}

int
cpt_lfs_attach(struct dev *dev, uint8_t blkaddr, bool modify, uint16_t nb_lf)
{
        struct mbox *mbox = dev->mbox;
        struct rsrc_attach_req *req;

        if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
                return -EINVAL;

        /* Attach CPT(lf) */
        req = mbox_alloc_msg_attach_resources(mbox);
        if (req == NULL)
                return -ENOSPC;

        req->cptlfs = nb_lf;
        req->modify = modify;
        req->cpt_blkaddr = blkaddr;

        return mbox_process(mbox);
}

int
cpt_lfs_detach(struct dev *dev)
{
        struct mbox *mbox = dev->mbox;
        struct rsrc_detach_req *req;

        req = mbox_alloc_msg_detach_resources(mbox);
        if (req == NULL)
                return -ENOSPC;

        req->cptlfs = 1;
        req->partial = 1;

        return mbox_process(mbox);
}

static int
cpt_available_lfs_get(struct dev *dev, uint16_t *nb_lf)
{
        struct mbox *mbox = dev->mbox;
        struct free_rsrcs_rsp *rsp;
        int rc;

        mbox_alloc_msg_free_rsrc_cnt(mbox);

        rc = mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return -EIO;

        *nb_lf = PLT_MAX((uint16_t)rsp->cpt, (uint16_t)rsp->cpt1);
        return 0;
}

int
cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr,
              bool inl_dev_sso)
{
        struct cpt_lf_alloc_req_msg *req;
        struct mbox *mbox = dev->mbox;

        if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
                return -EINVAL;

        req = mbox_alloc_msg_cpt_lf_alloc(mbox);
        if (!req)
                return -ENOSPC;

        req->nix_pf_func = 0;
        if (inl_dev_sso && nix_inl_dev_pffunc_get())
                req->sso_pf_func = nix_inl_dev_pffunc_get();
        else
                req->sso_pf_func = idev_sso_pffunc_get();
        req->eng_grpmsk = eng_grpmsk;
        req->blkaddr = blkaddr;

        return mbox_process(mbox);
}

int
cpt_lfs_free(struct dev *dev)
{
        mbox_alloc_msg_cpt_lf_free(dev->mbox);

        return mbox_process(dev->mbox);
}

static int
cpt_hardware_caps_get(struct dev *dev, struct roc_cpt *roc_cpt)
{
        struct cpt_caps_rsp_msg *rsp;
        int ret;

        mbox_alloc_msg_cpt_caps_get(dev->mbox);

        ret = mbox_process_msg(dev->mbox, (void *)&rsp);
        if (ret)
                return -EIO;

        roc_cpt->cpt_revision = rsp->cpt_revision;
        mbox_memcpy(roc_cpt->hw_caps, rsp->eng_caps,
                    sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);

        return 0;
}

static uint32_t
cpt_lf_iq_mem_calc(uint32_t nb_desc)
{
        uint32_t len;

        /* Space for instruction group memory */
        len = CPT_IQ_GRP_SIZE(nb_desc);

        /* Align to 128B */
        len = PLT_ALIGN(len, ROC_ALIGN);

        /* Space for FC */
        len += CPT_IQ_FC_LEN;

        /* For instruction queues */
        len += PLT_ALIGN(CPT_IQ_NB_DESC_SIZE_DIV40(nb_desc) *
                                 CPT_IQ_NB_DESC_MULTIPLIER *
                                 sizeof(struct cpt_inst_s),
                         ROC_ALIGN);

        return len;
}
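
/*
 * Illustrative sizing (not computed by the driver, assumes a 64-byte
 * cpt_inst_s): for nb_desc = 1024, SIZE_DIV40 = 35, so cpt_lf_iq_mem_calc()
 * reserves
 *   group memory : ALIGN(35 * 16, 128)       =   576 B
 *   flow control : CPT_IQ_FC_LEN             =   128 B
 *   instructions : ALIGN(35 * 40 * 64, 128)  = 89600 B
 * i.e. roughly 90 KB of instruction queue memory per LF.
 */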

static inline void
cpt_iq_init(struct roc_cpt_lf *lf)
{
        union cpt_lf_q_size lf_q_size = {.u = 0x0};
        union cpt_lf_q_base lf_q_base = {.u = 0x0};
        uintptr_t addr;

        lf->io_addr = lf->rbase + CPT_LF_NQX(0);

        /* Disable command queue */
        roc_cpt_iq_disable(lf);

        /* Set command queue base address */
        addr = (uintptr_t)lf->iq_vaddr +
               PLT_ALIGN(CPT_IQ_GRP_SIZE(lf->nb_desc), ROC_ALIGN);

        lf_q_base.u = addr;

        plt_write64(lf_q_base.u, lf->rbase + CPT_LF_Q_BASE);

        /* Set command queue size */
        lf_q_size.s.size_div40 = CPT_IQ_NB_DESC_SIZE_DIV40(lf->nb_desc);
        plt_write64(lf_q_size.u, lf->rbase + CPT_LF_Q_SIZE);

        lf->fc_addr = (uint64_t *)addr;
}
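
/*
 * Layout of the LF queue memory, as implied by cpt_lf_iq_mem_calc() and
 * cpt_iq_init() above (a sketch inferred from the code, not an authoritative
 * hardware description):
 *
 *	iq_vaddr:           instruction group memory
 *	                    (CPT_IQ_GRP_SIZE bytes, 128B-aligned)
 *	Q_BASE == fc_addr:  flow-control region (CPT_IQ_FC_LEN bytes),
 *	                    followed by the instruction entries
 *
 * Note that lf->fc_addr aliases the address programmed into CPT_LF_Q_BASE.
 */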

int
roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf)
{
        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
        uint8_t blkaddr[ROC_CPT_MAX_BLKS];
        struct msix_offset_rsp *rsp;
        uint8_t eng_grpmsk;
        int blknum = 0;
        int rc, i;

        blkaddr[0] = RVU_BLOCK_ADDR_CPT0;
        blkaddr[1] = RVU_BLOCK_ADDR_CPT1;

        if ((roc_cpt->cpt_revision == ROC_CPT_REVISION_ID_98XX) &&
            (cpt->dev.pf_func & 0x1))
                blknum = (blknum + 1) % ROC_CPT_MAX_BLKS;

        /* Request LF resources */
        rc = cpt_lfs_attach(&cpt->dev, blkaddr[blknum], true, nb_lf);

        /* Request LFs from the other block if the current block has too few LFs */
        if (roc_cpt->cpt_revision == ROC_CPT_REVISION_ID_98XX && rc == ENOSPC) {
                blknum = (blknum + 1) % ROC_CPT_MAX_BLKS;
                rc = cpt_lfs_attach(&cpt->dev, blkaddr[blknum], true, nb_lf);
        }
        if (rc) {
                plt_err("Could not attach LFs");
                return rc;
        }

        for (i = 0; i < nb_lf; i++)
                cpt->lf_blkaddr[i] = blkaddr[blknum];

        eng_grpmsk = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |
                     (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]) |
                     (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_IE]);

        rc = cpt_lfs_alloc(&cpt->dev, eng_grpmsk, blkaddr[blknum], false);
        if (rc)
                goto lfs_detach;

        rc = cpt_get_msix_offset(&cpt->dev, &rsp);
        if (rc)
                goto lfs_free;

        for (i = 0; i < nb_lf; i++)
                cpt->lf_msix_off[i] =
                        (cpt->lf_blkaddr[i] == RVU_BLOCK_ADDR_CPT1) ?
                                rsp->cpt1_lf_msixoff[i] :
                                rsp->cptlf_msixoff[i];

        roc_cpt->nb_lf = nb_lf;

        return 0;

lfs_free:
        cpt_lfs_free(&cpt->dev);
lfs_detach:
        cpt_lfs_detach(&cpt->dev);
        return rc;
}
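
/*
 * Typical bring-up order, as a sketch (error handling elided; the helper
 * names are the real ROC API in this file, the flow itself is illustrative
 * and lfs[] is a caller-owned array):
 *
 *	roc_cpt_dev_init(roc_cpt);
 *	roc_cpt_eng_grp_add(roc_cpt, CPT_ENG_TYPE_SE);  // likewise AE and IE
 *	roc_cpt_dev_configure(roc_cpt, nb_lf);
 *	for (i = 0; i < nb_lf; i++)
 *		roc_cpt_lf_init(roc_cpt, lfs[i]);  // lf_id/nb_desc set by caller
 */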

uint64_t
cpt_get_blkaddr(struct dev *dev)
{
        uint64_t reg;
        uint64_t off;

        /* Read the discovery register to find out which CPT block the LF is
         * attached to. Assume that CPT LFs of only one block are attached
         * to a pf_func.
         */
        if (dev_is_vf(dev))
                off = RVU_VF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);
        else
                off = RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_CPT1);

        reg = plt_read64(dev->bar2 + off);

        return reg & 0x1FFULL ? RVU_BLOCK_ADDR_CPT1 : RVU_BLOCK_ADDR_CPT0;
}

int
cpt_lf_init(struct roc_cpt_lf *lf)
{
        struct dev *dev = lf->dev;
        uint64_t blkaddr;
        void *iq_mem;
        int rc;

        if (lf->nb_desc == 0 || lf->nb_desc > CPT_LF_MAX_NB_DESC)
                lf->nb_desc = CPT_LF_DEFAULT_NB_DESC;

        /* Allocate memory for instruction queue for CPT LF. */
        iq_mem = plt_zmalloc(cpt_lf_iq_mem_calc(lf->nb_desc), ROC_ALIGN);
        if (iq_mem == NULL)
                return -ENOMEM;
        plt_atomic_thread_fence(__ATOMIC_ACQ_REL);

        blkaddr = cpt_get_blkaddr(dev);
        lf->rbase = dev->bar2 + ((blkaddr << 20) | (lf->lf_id << 12));
        lf->iq_vaddr = iq_mem;
        lf->lmt_base = dev->lmt_base;
        lf->pf_func = dev->pf_func;

        /* Initialize instruction queue */
        cpt_iq_init(lf);

        rc = cpt_lf_register_irqs(lf);
        if (rc)
                goto disable_iq;

        return 0;

disable_iq:
        roc_cpt_iq_disable(lf);
        plt_free(iq_mem);
        return rc;
}

int
roc_cpt_lf_init(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf)
{
        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
        int rc;

        lf->dev = &cpt->dev;
        lf->roc_cpt = roc_cpt;
        lf->msixoff = cpt->lf_msix_off[lf->lf_id];
        lf->pci_dev = cpt->pci_dev;

        rc = cpt_lf_init(lf);
        if (rc)
                return rc;

        /* LF init successful */
        roc_cpt->lf[lf->lf_id] = lf;
        return rc;
}

int
roc_cpt_dev_init(struct roc_cpt *roc_cpt)
{
        struct plt_pci_device *pci_dev;
        uint16_t nb_lf_avail;
        struct dev *dev;
        struct cpt *cpt;
        int rc;

        if (roc_cpt == NULL || roc_cpt->pci_dev == NULL)
                return -EINVAL;

        PLT_STATIC_ASSERT(sizeof(struct cpt) <= ROC_CPT_MEM_SZ);

        cpt = roc_cpt_to_cpt_priv(roc_cpt);
        memset(cpt, 0, sizeof(*cpt));
        pci_dev = roc_cpt->pci_dev;
        dev = &cpt->dev;

        /* Initialize device */
        rc = dev_init(dev, pci_dev);
        if (rc) {
                plt_err("Failed to init roc device");
                goto fail;
        }

        cpt->pci_dev = pci_dev;
        roc_cpt->lmt_base = dev->lmt_base;

        rc = cpt_hardware_caps_get(dev, roc_cpt);
        if (rc) {
                plt_err("Could not determine hardware capabilities");
                goto fail;
        }

        rc = cpt_available_lfs_get(&cpt->dev, &nb_lf_avail);
        if (rc) {
                plt_err("Could not get available lfs");
                goto fail;
        }

        /* Reserve 1 CPT LF for inline inbound */
        nb_lf_avail = PLT_MIN(nb_lf_avail, (uint16_t)(ROC_CPT_MAX_LFS - 1));

        roc_cpt->nb_lf_avail = nb_lf_avail;

        dev->roc_cpt = roc_cpt;

        /* Set it to idev if not already present */
        if (!roc_idev_cpt_get())
                roc_idev_cpt_set(roc_cpt);

        return 0;

fail:
        return rc;
}

int
roc_cpt_lf_ctx_flush(struct roc_cpt_lf *lf, void *cptr, bool inval)
{
        union cpt_lf_ctx_flush reg;

        if (lf == NULL) {
                plt_err("Could not trigger CTX flush");
                return -ENOTSUP;
        }

        reg.u = 0;
        reg.s.inval = inval;
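        /* The CPTR field holds the context address in 128-byte units, hence the >> 7 */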
        reg.s.cptr = (uintptr_t)cptr >> 7;

        plt_write64(reg.u, lf->rbase + CPT_LF_CTX_FLUSH);

        return 0;
}

int
roc_cpt_lf_ctx_reload(struct roc_cpt_lf *lf, void *cptr)
{
        union cpt_lf_ctx_reload reg;

        if (lf == NULL) {
                plt_err("Could not trigger CTX reload");
                return -ENOTSUP;
        }

        reg.u = 0;
        reg.s.cptr = (uintptr_t)cptr >> 7;

        plt_write64(reg.u, lf->rbase + CPT_LF_CTX_RELOAD);

        return 0;
}

void
cpt_lf_fini(struct roc_cpt_lf *lf)
{
        /* Unregister IRQs */
        cpt_lf_unregister_irqs(lf);

        /* Disable IQ */
        roc_cpt_iq_disable(lf);

        /* Free memory */
        plt_free(lf->iq_vaddr);
        lf->iq_vaddr = NULL;
}

void
roc_cpt_lf_fini(struct roc_cpt_lf *lf)
{
        if (lf == NULL)
                return;
        lf->roc_cpt->lf[lf->lf_id] = NULL;
        cpt_lf_fini(lf);
}

int
roc_cpt_dev_fini(struct roc_cpt *roc_cpt)
{
        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);

        if (cpt == NULL)
                return -EINVAL;

        /* Remove idev references */
        if (roc_idev_cpt_get() == roc_cpt)
                roc_idev_cpt_set(NULL);

        roc_cpt->nb_lf_avail = 0;

        roc_cpt->lmt_base = 0;

        return dev_fini(&cpt->dev, cpt->pci_dev);
}

void
roc_cpt_dev_clear(struct roc_cpt *roc_cpt)
{
        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
        int i;

        if (cpt == NULL)
                return;

        for (i = 0; i < roc_cpt->nb_lf; i++)
                cpt->lf_msix_off[i] = 0;

        roc_cpt->nb_lf = 0;

        cpt_lfs_free(&cpt->dev);

        cpt_lfs_detach(&cpt->dev);
}

int
roc_cpt_eng_grp_add(struct roc_cpt *roc_cpt, enum cpt_eng_type eng_type)
{
        struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
        struct dev *dev = &cpt->dev;
        struct cpt_eng_grp_req *req;
        struct cpt_eng_grp_rsp *rsp;
        int ret;

        req = mbox_alloc_msg_cpt_eng_grp_get(dev->mbox);
        if (req == NULL)
                return -EIO;

        switch (eng_type) {
        case CPT_ENG_TYPE_AE:
        case CPT_ENG_TYPE_SE:
        case CPT_ENG_TYPE_IE:
                break;
        default:
                return -EINVAL;
        }

        req->eng_type = eng_type;
        ret = mbox_process_msg(dev->mbox, (void *)&rsp);
        if (ret)
                return -EIO;

        if (rsp->eng_grp_num > 8) {
                plt_err("Invalid CPT engine group");
                return -ENOTSUP;
        }

        roc_cpt->eng_grp[eng_type] = rsp->eng_grp_num;

        return rsp->eng_grp_num;
}
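
/*
 * Usage sketch (illustrative): query the group number the AF assigned to
 * each engine type before building the eng_grpmsk used at configure time.
 *
 *	int grp = roc_cpt_eng_grp_add(roc_cpt, CPT_ENG_TYPE_SE);
 *	if (grp < 0)
 *		return grp;  // mailbox failure or unsupported engine type
 */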

void
roc_cpt_iq_disable(struct roc_cpt_lf *lf)
{
        volatile union cpt_lf_q_grp_ptr grp_ptr = {.u = 0x0};
        volatile union cpt_lf_inprog lf_inprog = {.u = 0x0};
        union cpt_lf_ctl lf_ctl = {.u = 0x0};
        int timeout = 20;
        int cnt;

        /* Disable instructions enqueuing */
        plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);

        /* Wait for instruction queue to become empty */
        do {
                lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
                if (!lf_inprog.s.inflight)
                        break;

                plt_delay_ms(20);
                if (timeout-- < 0) {
                        plt_err("CPT LF %d is still busy", lf->lf_id);
                        break;
                }

        } while (1);

        /* Disable executions in the LF's queue.
         * The queue should be empty at this point.
         */
        lf_inprog.s.eena = 0x0;
        plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG);

        /* Wait until the enqueue and dequeue group pointers converge */
        cnt = 0;
        do {
                lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
                if (lf_inprog.s.grb_partial)
                        cnt = 0;
                else
                        cnt++;
                grp_ptr.u = plt_read64(lf->rbase + CPT_LF_Q_GRP_PTR);
        } while ((cnt < 10) && (grp_ptr.s.nq_ptr != grp_ptr.s.dq_ptr));

        cnt = 0;
        do {
                lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
                if ((lf_inprog.s.inflight == 0) && (lf_inprog.s.gwb_cnt < 40) &&
                    ((lf_inprog.s.grb_cnt == 0) || (lf_inprog.s.grb_cnt == 40)))
                        cnt++;
                else
                        cnt = 0;
        } while (cnt < 10);
}

void
roc_cpt_iq_enable(struct roc_cpt_lf *lf)
{
        union cpt_lf_inprog lf_inprog;
        union cpt_lf_ctl lf_ctl;

        /* Disable command queue */
        roc_cpt_iq_disable(lf);

        /* Enable instruction queue enqueuing */
        lf_ctl.u = plt_read64(lf->rbase + CPT_LF_CTL);
        lf_ctl.s.ena = 1;
        lf_ctl.s.fc_ena = 1;
        lf_ctl.s.fc_up_crossing = 0;
        lf_ctl.s.fc_hyst_bits = plt_log2_u32(CPT_LF_FC_MIN_THRESHOLD);
        plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);

        /* Enable command queue execution */
        lf_inprog.u = plt_read64(lf->rbase + CPT_LF_INPROG);
        lf_inprog.s.eena = 1;
        plt_write64(lf_inprog.u, lf->rbase + CPT_LF_INPROG);

        cpt_lf_dump(lf);
}

int
roc_cpt_lmtline_init(struct roc_cpt *roc_cpt, struct roc_cpt_lmtline *lmtline,
                     int lf_id)
{
        struct roc_cpt_lf *lf;

        lf = roc_cpt->lf[lf_id];
        if (lf == NULL)
                return -ENOTSUP;

        lmtline->io_addr = lf->io_addr;
        if (roc_model_is_cn10k())
                lmtline->io_addr |= ROC_CN10K_CPT_INST_DW_M1 << 4;

        lmtline->fc_addr = lf->fc_addr;
        lmtline->lmt_base = lf->lmt_base;
        lmtline->fc_thresh = lf->nb_desc - CPT_LF_FC_MIN_THRESHOLD;

        return 0;
}
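
/*
 * Illustrative numbers (not computed by the driver): with the default
 * nb_desc of 1024, fc_thresh = 1024 - 32 = 992, so the datapath treats the
 * queue as full once the hardware flow-control count at fc_addr reaches 992
 * outstanding instructions. On cn10k the encoded io_addr also carries the
 * instruction size: bits [6:4] hold ROC_CN10K_CPT_INST_DW_M1, the
 * instruction length in 8-byte words minus one.
 */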

int
roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
                  uint16_t sa_len)
{
        uintptr_t lmt_base = lf->lmt_base;
        union cpt_res_s res, *hw_res;
        uint64_t lmt_arg, io_addr;
        struct cpt_inst_s *inst;
        uint16_t lmt_id;
        uint64_t *dptr;
        int i;

        ROC_LMT_CPT_BASE_ID_GET(lmt_base, lmt_id);
        inst = (struct cpt_inst_s *)lmt_base;

        memset(inst, 0, sizeof(struct cpt_inst_s));

        hw_res = plt_zmalloc(sizeof(*hw_res), ROC_CPT_RES_ALIGN);
        if (hw_res == NULL) {
                plt_err("Couldn't allocate memory for result address");
                return -ENOMEM;
        }

        dptr = plt_zmalloc(sa_len, 8);
        if (dptr == NULL) {
                plt_err("Couldn't allocate memory for SA dptr");
                plt_free(hw_res);
                return -ENOMEM;
        }

        for (i = 0; i < (sa_len / 8); i++)
                dptr[i] = plt_cpu_to_be_64(((uint64_t *)sa_dptr)[i]);

        /* Fill CPT_INST_S for WRITE_SA microcode op */
        hw_res->cn10k.compcode = CPT_COMP_NOT_DONE;
        inst->res_addr = (uint64_t)hw_res;
        inst->dptr = (uint64_t)dptr;
        inst->w4.s.param2 = sa_len >> 3;
        inst->w4.s.dlen = sa_len;
        inst->w4.s.opcode_major = ROC_IE_OT_MAJOR_OP_WRITE_SA;
        inst->w4.s.opcode_minor = ROC_IE_OT_MINOR_OP_WRITE_SA;
        inst->w7.s.cptr = (uint64_t)sa_cptr;
        inst->w7.s.ctx_val = 1;
        inst->w7.s.egrp = ROC_CPT_DFLT_ENG_GRP_SE_IE;

        lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
        io_addr = lf->io_addr | ROC_CN10K_CPT_INST_DW_M1 << 4;

        roc_lmt_submit_steorl(lmt_arg, io_addr);
        plt_io_wmb();

        /* Use 1 min timeout for the poll */
        const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();

        /* Wait until CPT instruction completes */
        do {
                res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
                if (unlikely(plt_tsc_cycles() > timeout))
                        break;
        } while (res.cn10k.compcode == CPT_COMP_NOT_DONE);

        plt_free(dptr);
        plt_free(hw_res);

        if (res.cn10k.compcode != CPT_COMP_WARN) {
                plt_err("Write SA operation timed out");
                return -ETIMEDOUT;
        }

        return 0;
}
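
/*
 * A note on the LMTST submission above (behaviour inferred from this code,
 * not restated from the HRM): the instruction is staged in the LMT line,
 * lmt_arg selects the LMT line id, and the STEORL to io_addr (with the
 * instruction size encoded in bits [6:4]) hands it to CPT in one shot.
 * WRITE_SA is expected to complete with CPT_COMP_WARN, which is why the
 * success check above tests for that code rather than CPT_COMP_GOOD.
 */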

int
roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
                     uint16_t ctx_len, uint8_t egrp)
{
        union cpt_res_s res, *hw_res;
        struct cpt_inst_s inst;
        uint64_t lmt_status;
        int ret = 0;

        hw_res = plt_zmalloc(sizeof(*hw_res), ROC_CPT_RES_ALIGN);
        if (unlikely(hw_res == NULL)) {
                plt_err("Couldn't allocate memory for result address");
                return -ENOMEM;
        }

        hw_res->cn9k.compcode = CPT_COMP_NOT_DONE;

        inst.w4.s.opcode_major = opcode;
        inst.w4.s.opcode_minor = ctx_len >> 3;
        inst.w4.s.param1 = 0;
        inst.w4.s.param2 = 0;
        inst.w4.s.dlen = ctx_len;
        inst.dptr = rte_mempool_virt2iova(sa);
        inst.rptr = 0;
        inst.w7.s.cptr = rte_mempool_virt2iova(sa);
        inst.w7.s.egrp = egrp;

        inst.w0.u64 = 0;
        inst.w2.u64 = 0;
        inst.w3.u64 = 0;
        inst.res_addr = (uintptr_t)hw_res;

        rte_io_wmb();

        do {
                /* Copy CPT command to LMTLINE */
                roc_lmt_mov64((void *)lf->lmt_base, &inst);
                lmt_status = roc_lmt_submit_ldeor(lf->io_addr);
        } while (lmt_status == 0);

        const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();

        /* Wait until CPT instruction completes */
        do {
                res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
                if (unlikely(plt_tsc_cycles() > timeout)) {
                        plt_err("Request timed out");
                        ret = -ETIMEDOUT;
                        goto free;
                }
        } while (res.cn9k.compcode == CPT_COMP_NOT_DONE);

        if (unlikely(res.cn9k.compcode != CPT_COMP_GOOD)) {
                ret = res.cn9k.compcode;
                switch (ret) {
                case CPT_COMP_INSTERR:
                        plt_err("Request failed with instruction error");
                        break;
                case CPT_COMP_FAULT:
                        plt_err("Request failed with DMA fault");
                        break;
                case CPT_COMP_HWERR:
                        plt_err("Request failed with hardware error");
                        break;
                default:
                        plt_err("Request failed with unknown hardware completion code: 0x%x",
                                ret);
                }
                ret = -EINVAL;
                goto free;
        }

        if (unlikely(res.cn9k.uc_compcode != ROC_IE_ON_UCC_SUCCESS)) {
                ret = res.cn9k.uc_compcode;
                switch (ret) {
                case ROC_IE_ON_AUTH_UNSUPPORTED:
                        plt_err("Invalid auth type");
                        break;
                case ROC_IE_ON_ENCRYPT_UNSUPPORTED:
                        plt_err("Invalid encrypt type");
                        break;
                default:
                        plt_err("Request failed with unknown microcode completion code: 0x%x",
                                ret);
                }
                ret = -ENOTSUP;
        }

free:
        plt_free(hw_res);
        return ret;
}