linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"

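/* Append a single instruction to the NPA admin queue, ring the doorbell and
 * busy-wait (up to ~1ms) for hardware to post a completion code into the
 * shared result area. Called with the AQ lock held by rvu_npa_aq_enq_inst()
 * below, so only one instruction is outstanding at a time.
 */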
static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
                               struct npa_aq_inst_s *inst)
{
        struct admin_queue *aq = block->aq;
        struct npa_aq_res_s *result;
        int timeout = 1000;
        u64 reg, head;

        result = (struct npa_aq_res_s *)aq->res->base;

        /* Get current head pointer where to append this instruction */
        reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
        head = (reg >> 4) & AQ_PTR_MASK;

        memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
               (void *)inst, aq->inst->entry_sz);
        memset(result, 0, sizeof(*result));
        /* sync into memory */
        wmb();

        /* Ring the doorbell and wait for result */
        rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
        while (result->compcode == NPA_AQ_COMP_NOTDONE) {
                cpu_relax();
                udelay(1);
                timeout--;
                if (!timeout)
                        return -EBUSY;
        }

        if (result->compcode != NPA_AQ_COMP_GOOD)
                /* TODO: Replace this with some error code */
                return -EBUSY;

        return 0;
}

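/* Validate an AQ enqueue request from a PF/VF, build the instruction, submit
 * it with the AQ lock held and mirror the resulting aura/pool enable state
 * into the per-PFVF bitmaps. For READ operations the context returned by
 * hardware is copied back into the mailbox response.
 */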
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
                        struct npa_aq_enq_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        int blkaddr, npalf, rc = 0;
        struct npa_aq_inst_s inst;
        struct rvu_block *block;
        struct admin_queue *aq;
        struct rvu_pfvf *pfvf;
        void *ctx, *mask;
        bool ena;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
                return NPA_AF_ERR_AQ_ENQUEUE;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
        if (!pfvf->npalf || blkaddr < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        block = &hw->block[blkaddr];
        aq = block->aq;
        if (!aq) {
                dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
                return NPA_AF_ERR_AQ_ENQUEUE;
        }

        npalf = rvu_get_lf(rvu, block, pcifunc, 0);
        if (npalf < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        memset(&inst, 0, sizeof(struct npa_aq_inst_s));
        inst.cindex = req->aura_id;
        inst.lf = npalf;
        inst.ctype = req->ctype;
        inst.op = req->op;
        /* Enqueuing multiple instructions at once is not supported yet,
         * so always use the first entry in the result memory.
         */
        inst.res_addr = (u64)aq->res->iova;

        /* Hardware posts the result of the previous instruction into the
         * same aq->res->base, hence serialize on the AQ lock until that
         * instruction has completed.
         */
        spin_lock(&aq->lock);

        /* Clean result + context memory */
        memset(aq->res->base, 0, aq->res->entry_sz);
        /* Context needs to be written at RES_ADDR + 128 */
        ctx = aq->res->base + 128;
        /* Mask needs to be written at RES_ADDR + 256 */
        mask = aq->res->base + 256;

        switch (req->op) {
        case NPA_AQ_INSTOP_WRITE:
                /* Copy context and write mask */
                if (req->ctype == NPA_AQ_CTYPE_AURA) {
                        memcpy(mask, &req->aura_mask,
                               sizeof(struct npa_aura_s));
                        memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
                } else {
                        memcpy(mask, &req->pool_mask,
                               sizeof(struct npa_pool_s));
                        memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
                }
                break;
        case NPA_AQ_INSTOP_INIT:
                if (req->ctype == NPA_AQ_CTYPE_AURA) {
                        if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
                                rc = NPA_AF_ERR_AQ_FULL;
                                break;
                        }
                        /* Set pool's context address */
                        req->aura.pool_addr = pfvf->pool_ctx->iova +
                        (req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
                        memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
                } else { /* POOL's context */
                        memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
                }
                break;
        case NPA_AQ_INSTOP_NOP:
        case NPA_AQ_INSTOP_READ:
        case NPA_AQ_INSTOP_LOCK:
        case NPA_AQ_INSTOP_UNLOCK:
                break;
        default:
                rc = NPA_AF_ERR_AQ_FULL;
                break;
        }

        if (rc) {
                spin_unlock(&aq->lock);
                return rc;
        }

        /* Submit the instruction to AQ */
        rc = npa_aq_enqueue_wait(rvu, block, &inst);
        if (rc) {
                spin_unlock(&aq->lock);
                return rc;
        }

        /* Set aura bitmap if aura hw context is enabled */
        if (req->ctype == NPA_AQ_CTYPE_AURA) {
                if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
                        __set_bit(req->aura_id, pfvf->aura_bmap);
                if (req->op == NPA_AQ_INSTOP_WRITE) {
                        ena = (req->aura.ena & req->aura_mask.ena) |
                                (test_bit(req->aura_id, pfvf->aura_bmap) &
                                ~req->aura_mask.ena);
                        if (ena)
                                __set_bit(req->aura_id, pfvf->aura_bmap);
                        else
                                __clear_bit(req->aura_id, pfvf->aura_bmap);
                }
        }

        /* Set pool bitmap if pool hw context is enabled */
        if (req->ctype == NPA_AQ_CTYPE_POOL) {
                if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
                        __set_bit(req->aura_id, pfvf->pool_bmap);
                if (req->op == NPA_AQ_INSTOP_WRITE) {
                        ena = (req->pool.ena & req->pool_mask.ena) |
                                (test_bit(req->aura_id, pfvf->pool_bmap) &
                                ~req->pool_mask.ena);
                        if (ena)
                                __set_bit(req->aura_id, pfvf->pool_bmap);
                        else
                                __clear_bit(req->aura_id, pfvf->pool_bmap);
                }
        }
        spin_unlock(&aq->lock);

        if (rsp) {
                /* Copy read context into mailbox */
                if (req->op == NPA_AQ_INSTOP_READ) {
                        if (req->ctype == NPA_AQ_CTYPE_AURA)
                                memcpy(&rsp->aura, ctx,
                                       sizeof(struct npa_aura_s));
                        else
                                memcpy(&rsp->pool, ctx,
                                       sizeof(struct npa_pool_s));
                }
        }

        return 0;
}

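/* Walk this PF/VF's aura or pool bitmap and issue a context WRITE for every
 * enabled entry that clears its 'ena' bit (and backpressure enable in the
 * aura case), effectively disabling all HW contexts of that type.
 */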
static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
        struct npa_aq_enq_req aq_req;
        unsigned long *bmap;
        int id, cnt = 0;
        int err = 0, rc;

        if (!pfvf->pool_ctx || !pfvf->aura_ctx)
                return NPA_AF_ERR_AQ_ENQUEUE;

        memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
        aq_req.hdr.pcifunc = req->hdr.pcifunc;

        if (req->ctype == NPA_AQ_CTYPE_POOL) {
                aq_req.pool.ena = 0;
                aq_req.pool_mask.ena = 1;
                cnt = pfvf->pool_ctx->qsize;
                bmap = pfvf->pool_bmap;
        } else if (req->ctype == NPA_AQ_CTYPE_AURA) {
                aq_req.aura.ena = 0;
                aq_req.aura_mask.ena = 1;
                aq_req.aura.bp_ena = 0;
                aq_req.aura_mask.bp_ena = 1;
                cnt = pfvf->aura_ctx->qsize;
                bmap = pfvf->aura_bmap;
        }

        aq_req.ctype = req->ctype;
        aq_req.op = NPA_AQ_INSTOP_WRITE;

        for (id = 0; id < cnt; id++) {
                if (!test_bit(id, bmap))
                        continue;
                aq_req.aura_id = id;
                rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
                if (rc) {
                        err = rc;
                        dev_err(rvu->dev, "Failed to disable %s:%d context\n",
                                (req->ctype == NPA_AQ_CTYPE_AURA) ?
                                "Aura" : "Pool", id);
                }
        }

        return err;
}

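/* When CONFIG_NDC_DIS_DYNAMIC_CACHING is set, every aura/pool context that is
 * initialized through the AQ mailbox is also pinned with an explicit AQ LOCK
 * instruction (see npa_lf_hwctx_lockdown() below), since dynamic caching of
 * contexts in the NDC is not relied upon in this configuration.
 */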
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
{
        struct npa_aq_enq_req lock_ctx_req;
        int err;

        if (req->op != NPA_AQ_INSTOP_INIT)
                return 0;

        memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
        lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
        lock_ctx_req.ctype = req->ctype;
        lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
        lock_ctx_req.aura_id = req->aura_id;
        err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
        if (err)
                dev_err(rvu->dev,
                        "PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
                        req->hdr.pcifunc,
                        (req->ctype == NPA_AQ_CTYPE_AURA) ?
                        "Aura" : "Pool", req->aura_id);
        return err;
}

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
                                struct npa_aq_enq_req *req,
                                struct npa_aq_enq_rsp *rsp)
{
        int err;

        err = rvu_npa_aq_enq_inst(rvu, req, rsp);
        if (!err)
                err = npa_lf_hwctx_lockdown(rvu, req);
        return err;
}
#else

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
                                struct npa_aq_enq_req *req,
                                struct npa_aq_enq_rsp *rsp)
{
        return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
#endif

int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
                                       struct hwctx_disable_req *req,
                                       struct msg_rsp *rsp)
{
        return npa_lf_hwctx_disable(rvu, req);
}

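/* Free all HW context memory and bitmaps allocated for this PF/VF's NPA LF. */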
static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
        kfree(pfvf->aura_bmap);
        pfvf->aura_bmap = NULL;

        qmem_free(rvu->dev, pfvf->aura_ctx);
        pfvf->aura_ctx = NULL;

        kfree(pfvf->pool_bmap);
        pfvf->pool_bmap = NULL;

        qmem_free(rvu->dev, pfvf->pool_ctx);
        pfvf->pool_ctx = NULL;

        qmem_free(rvu->dev, pfvf->npa_qints_ctx);
        pfvf->npa_qints_ctx = NULL;
}

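/* Mailbox handler for NPA LF allocation: resets the LF, allocates memory for
 * aura, pool and queue-interrupt HW contexts (entry sizes taken from
 * NPA_AF_CONST1), programs their base IOVAs plus caching/way-mask settings,
 * and reports the stack page geometry back to the requester.
 */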
int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
                                  struct npa_lf_alloc_req *req,
                                  struct npa_lf_alloc_rsp *rsp)
{
        int npalf, qints, hwctx_size, err, rc = 0;
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_block *block;
        struct rvu_pfvf *pfvf;
        u64 cfg, ctx_cfg;
        int blkaddr;

        if (req->aura_sz > NPA_AURA_SZ_MAX ||
            req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
                return NPA_AF_ERR_PARAM;

        if (req->way_mask)
                req->way_mask &= 0xFFFF;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
        if (!pfvf->npalf || blkaddr < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        block = &hw->block[blkaddr];
        npalf = rvu_get_lf(rvu, block, pcifunc, 0);
        if (npalf < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        /* Reset this NPA LF */
        err = rvu_lf_reset(rvu, block, npalf);
        if (err) {
                dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
                return NPA_AF_ERR_LF_RESET;
        }

        ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);

        /* Alloc memory for aura HW contexts */
        hwctx_size = 1UL << (ctx_cfg & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
                         NPA_AURA_COUNT(req->aura_sz), hwctx_size);
        if (err)
                goto free_mem;

        pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
                                  GFP_KERNEL);
        if (!pfvf->aura_bmap)
                goto free_mem;

        /* Alloc memory for pool HW contexts */
        hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
        if (err)
                goto free_mem;

        pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
                                  GFP_KERNEL);
        if (!pfvf->pool_bmap)
                goto free_mem;

        /* Get the number of queue interrupts supported */
        cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
        qints = (cfg >> 28) & 0xFFF;

        /* Alloc memory for Qints HW contexts */
        hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
        err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
        if (err)
                goto free_mem;

        cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
        /* Clear way partition mask and set aura offset to '0' */
        cfg &= ~(BIT_ULL(34) - 1);
        /* Set aura size & enable caching of contexts */
        cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;

        rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);

        /* Configure aura HW context's base */
        rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
                    (u64)pfvf->aura_ctx->iova);

        /* Enable caching of qints hw context */
        rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
                    BIT_ULL(36) | req->way_mask << 20);
        rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
                    (u64)pfvf->npa_qints_ctx->iova);

        goto exit;

free_mem:
        npa_ctx_free(rvu, pfvf);
        rc = -ENOMEM;

exit:
        /* set stack page info */
        cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
        rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
        rsp->stack_pg_bytes = cfg & 0xFF;
        rsp->qints = (cfg >> 28) & 0xFFF;
        return rc;
}

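/* Mailbox handler for NPA LF free: resets the LF and releases the HW context
 * memory allocated at LF alloc time.
 */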
int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
                                 struct msg_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_block *block;
        struct rvu_pfvf *pfvf;
        int npalf, err;
        int blkaddr;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
        if (!pfvf->npalf || blkaddr < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        block = &hw->block[blkaddr];
        npalf = rvu_get_lf(rvu, block, pcifunc, 0);
        if (npalf < 0)
                return NPA_AF_ERR_AF_LF_INVALID;

        /* Reset this NPA LF */
        err = rvu_lf_reset(rvu, block, npalf);
        if (err) {
                dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
                return NPA_AF_ERR_LF_RESET;
        }

        npa_ctx_free(rvu, pfvf);

        return 0;
}

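/* One-time NPA admin queue setup: select AQ endianness, tune NDC caching
 * behaviour, allocate the instruction/result queues and program their base
 * address and size.
 */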
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
        u64 cfg;
        int err;

        /* Set admin queue endianness */
        cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
        cfg |= BIT_ULL(1);
        rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
        cfg &= ~BIT_ULL(1);
        rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif

        /* Do not bypass NDC cache */
        cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
        cfg &= ~0x03DULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
        /* Disable caching of stack pages */
        cfg |= 0x10ULL;
#endif
        rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);

        /* The result structure can be followed by an aura/pool context at
         * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
         * the operation type. Allocate sufficient result memory for all
         * operation types.
         */
        err = rvu_aq_alloc(rvu, &block->aq,
                           Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
                           ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
        if (err)
                return err;

        rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
        rvu_write64(rvu, block->addr,
                    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
        return 0;
}

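/* Block-level NPA initialization; currently this only brings up the admin
 * queue. Returns 0 when no NPA block is present.
 */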
int rvu_npa_init(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int blkaddr, err;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
        if (blkaddr < 0)
                return 0;

        /* Initialize admin queue */
        err = npa_aq_init(rvu, &hw->block[blkaddr]);
        if (err)
                return err;

        return 0;
}

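/* Release the NPA admin queue memory allocated in npa_aq_init(). */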
void rvu_npa_freemem(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
        if (blkaddr < 0)
                return;

        block = &hw->block[blkaddr];
        rvu_aq_free(rvu, block->aq);
}

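/* Tear down an NPA LF: disable every active pool and aura context and free
 * the PF/VF's context memory.
 */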
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct hwctx_disable_req ctx_req;

        /* Disable all pools */
        ctx_req.hdr.pcifunc = pcifunc;
        ctx_req.ctype = NPA_AQ_CTYPE_POOL;
        npa_lf_hwctx_disable(rvu, &ctx_req);

        /* Disable all auras */
        ctx_req.ctype = NPA_AQ_CTYPE_AURA;
        npa_lf_hwctx_disable(rvu, &ctx_req);

        npa_ctx_free(rvu, pfvf);
}