linux/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"

#define DRV_NAME        "octeontx2-af"
#define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
                         int type, int num,
                         void (mbox_handler)(struct work_struct *),
                         void (mbox_up_handler)(struct work_struct *));
enum {
        TYPE_AFVF,
        TYPE_AFPF,
};
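/* These tag which mailbox a work item belongs to: the AF <-> VF or the
 * AF <-> PF channel. __rvu_mbox_handler() below uses the tag to pick the
 * matching mbox_wq_info and to fix up the sender's pcifunc.
 */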

/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
        { 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;

        hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
        hw->cap.nix_fixed_txschq_mapping = false;
        hw->cap.nix_shaping = true;
        hw->cap.nix_tx_link_bp = true;
        hw->cap.nix_rx_multicast = true;

        if (is_rvu_96xx_B0(rvu)) {
                hw->cap.nix_fixed_txschq_mapping = true;
                hw->cap.nix_txsch_per_cgx_lmac = 4;
                hw->cap.nix_txsch_per_lbk_lmac = 132;
                hw->cap.nix_txsch_per_sdp_lmac = 76;
                hw->cap.nix_shaping = false;
                hw->cap.nix_tx_link_bp = false;
                if (is_rvu_96xx_A0(rvu))
                        hw->cap.nix_rx_multicast = false;
        }
}

/* Poll an RVU block's register 'offset' until the bits specified by
 * 'mask' read as all zero (zero == true) or any nonzero (zero == false).
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
        unsigned long timeout = jiffies + usecs_to_jiffies(10000);
        void __iomem *reg;
        u64 reg_val;

        reg = rvu->afreg_base + ((block << 28) | offset);
again:
        reg_val = readq(reg);
        if (zero && !(reg_val & mask))
                return 0;
        if (!zero && (reg_val & mask))
                return 0;
        if (time_before(jiffies, timeout)) {
                usleep_range(1, 5);
                goto again;
        }
        return -EBUSY;
}
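/* Example (see rvu_block_reset() below): after writing BIT_ULL(0) to a
 * block's reset register, the caller waits for the HW busy bit to clear,
 * roughly:
 *
 *      rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
 *
 * which returns 0 once the bit reads zero, or -EBUSY after ~10ms.
 */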

int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
        int id;

        if (!rsrc->bmap)
                return -EINVAL;

        id = find_first_zero_bit(rsrc->bmap, rsrc->max);
        if (id >= rsrc->max)
                return -ENOSPC;

        __set_bit(id, rsrc->bmap);

        return id;
}

int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
        int start;

        if (!rsrc->bmap)
                return -EINVAL;

        start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
        if (start >= rsrc->max)
                return -ENOSPC;

        bitmap_set(rsrc->bmap, start, nrsrc);
        return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
        if (!rsrc->bmap)
                return;
        if (start >= rsrc->max)
                return;

        bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
        int start;

        if (!rsrc->bmap)
                return false;

        start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
        if (start >= rsrc->max)
                return false;

        return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
        if (!rsrc->bmap)
                return;

        __clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
        int used;

        if (!rsrc->bmap)
                return 0;

        used = bitmap_weight(rsrc->bmap, rsrc->max);
        return (rsrc->max - used);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
        rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
                             sizeof(long), GFP_KERNEL);
        if (!rsrc->bmap)
                return -ENOMEM;
        return 0;
}
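/* Sketch of the rsrc_bmap lifecycle as used throughout this file
 * (illustrative only; 'max' must be set before rvu_alloc_bitmap()):
 *
 *      struct rsrc_bmap lf = { .max = 128 };
 *
 *      rvu_alloc_bitmap(&lf);                  // kcalloc the backing bitmap
 *      id = rvu_alloc_rsrc(&lf);               // a single LF
 *      off = rvu_alloc_rsrc_contig(&lf, 8);    // e.g. an MSIX vector range
 *      rvu_free_rsrc(&lf, id);
 *      rvu_free_rsrc_contig(&lf, 8, off);
 *      kfree(lf.bmap);                         // see rvu_free_hw_resources()
 */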

/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
        u16 match = 0;
        int lf;

        mutex_lock(&rvu->rsrc_lock);
        for (lf = 0; lf < block->lf.max; lf++) {
                if (block->fn_map[lf] == pcifunc) {
                        if (slot == match) {
                                mutex_unlock(&rvu->rsrc_lock);
                                return lf;
                        }
                        match++;
                }
        }
        mutex_unlock(&rvu->rsrc_lock);
        return -ENODEV;
}

/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
        int devnum, blkaddr = -ENODEV;
        u64 cfg, reg;
        bool is_pf;

        switch (blktype) {
        case BLKTYPE_NPC:
                blkaddr = BLKADDR_NPC;
                goto exit;
        case BLKTYPE_NPA:
                blkaddr = BLKADDR_NPA;
                goto exit;
        case BLKTYPE_NIX:
                /* For now assume NIX0 */
                if (!pcifunc) {
                        blkaddr = BLKADDR_NIX0;
                        goto exit;
                }
                break;
        case BLKTYPE_SSO:
                blkaddr = BLKADDR_SSO;
                goto exit;
        case BLKTYPE_SSOW:
                blkaddr = BLKADDR_SSOW;
                goto exit;
        case BLKTYPE_TIM:
                blkaddr = BLKADDR_TIM;
                goto exit;
        case BLKTYPE_CPT:
                /* For now assume CPT0 */
                if (!pcifunc) {
                        blkaddr = BLKADDR_CPT0;
                        goto exit;
                }
                break;
        }

        /* Check if this is an RVU PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK) {
                is_pf = false;
                devnum = rvu_get_hwvf(rvu, pcifunc);
        } else {
                is_pf = true;
                devnum = rvu_get_pf(pcifunc);
        }

        /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
        if (blktype == BLKTYPE_NIX) {
                reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg)
                        blkaddr = BLKADDR_NIX0;
        }

        /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
        if (blktype == BLKTYPE_CPT) {
                reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg)
                        blkaddr = BLKADDR_CPT0;
        }

exit:
        if (is_block_implemented(rvu->hw, blkaddr))
                return blkaddr;
        return -ENODEV;
}

static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, u16 pcifunc,
                                u16 lf, bool attach)
{
        int devnum, num_lfs = 0;
        bool is_pf;
        u64 reg;

        if (lf >= block->lf.max) {
                dev_err(&rvu->pdev->dev,
                        "%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
                        __func__, lf, block->name, block->lf.max);
                return;
        }

        /* Check if this is for an RVU PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK) {
                is_pf = false;
                devnum = rvu_get_hwvf(rvu, pcifunc);
        } else {
                is_pf = true;
                devnum = rvu_get_pf(pcifunc);
        }

        block->fn_map[lf] = attach ? pcifunc : 0;

        switch (block->type) {
        case BLKTYPE_NPA:
                pfvf->npalf = attach ? true : false;
                num_lfs = pfvf->npalf;
                break;
        case BLKTYPE_NIX:
                pfvf->nixlf = attach ? true : false;
                num_lfs = pfvf->nixlf;
                break;
        case BLKTYPE_SSO:
                attach ? pfvf->sso++ : pfvf->sso--;
                num_lfs = pfvf->sso;
                break;
        case BLKTYPE_SSOW:
                attach ? pfvf->ssow++ : pfvf->ssow--;
                num_lfs = pfvf->ssow;
                break;
        case BLKTYPE_TIM:
                attach ? pfvf->timlfs++ : pfvf->timlfs--;
                num_lfs = pfvf->timlfs;
                break;
        case BLKTYPE_CPT:
                attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
                num_lfs = pfvf->cptlfs;
                break;
        }

        reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
        rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}

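/* A 'pcifunc' is the 16-bit RVU function ID used all over this file:
 * the PF number sits at RVU_PFVF_PF_SHIFT (masked by RVU_PFVF_PF_MASK)
 * and the function number in the low RVU_PFVF_FUNC_MASK bits, where
 * FUNC == 0 is the PF itself and FUNC == N is its VF (N - 1), hence the
 * 'func - 1' in rvu_get_hwvf() below.
 */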
inline int rvu_get_pf(u16 pcifunc)
{
        return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
        u64 cfg;

        /* Get numVFs attached to this PF and first HWVF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        *numvfs = (cfg >> 12) & 0xFF;
        *hwvf = cfg & 0xFFF;
}

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
        int pf, func;
        u64 cfg;

        pf = rvu_get_pf(pcifunc);
        func = pcifunc & RVU_PFVF_FUNC_MASK;

        /* Get first HWVF attached to this PF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

        return ((cfg & 0xFFF) + func - 1);
}
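/* Illustrative example: if RVU_PRIV_PFX_CFG for PF2 reports its first
 * HWVF as 12, then PF2's VF2 (pcifunc FUNC field == 3) maps to
 * HWVF 12 + 3 - 1 = 14, i.e. rvu->hwvf[14].
 */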

struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
        /* Check if it is a PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK)
                return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
        else
                return &rvu->pf[rvu_get_pf(pcifunc)];
}

static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
        int pf, vf, nvfs;
        u64 cfg;

        pf = rvu_get_pf(pcifunc);
        if (pf >= rvu->hw->total_pfs)
                return false;

        if (!(pcifunc & RVU_PFVF_FUNC_MASK))
                return true;

        /* Check if VF is within number of VFs attached to this PF */
        vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        nvfs = (cfg >> 12) & 0xFF;
        if (vf >= nvfs)
                return false;

        return true;
}

bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
        struct rvu_block *block;

        if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
                return false;

        block = &hw->block[blkaddr];
        return block->implemented;
}

static void rvu_check_block_implemented(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid;
        u64 cfg;

        /* For each block check if 'implemented' bit is set */
        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
                if (cfg & BIT_ULL(11))
                        block->implemented = true;
        }
}

static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
        rvu_write64(rvu, BLKADDR_RVUM,
                    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
                    RVU_BLK_RVUM_REVID);
}

static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
        rvu_write64(rvu, BLKADDR_RVUM,
                    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}

int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
        int err;

        if (!block->implemented)
                return 0;

        rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
        err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
                           true);
        return err;
}
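/* The LF reset handshake: write the LF index with BIT(12) set to kick
 * off the reset, then poll the same register until HW clears BIT(12)
 * to signal completion (or rvu_poll_reg() times out with -EBUSY).
 */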

static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
        struct rvu_block *block = &rvu->hw->block[blkaddr];

        if (!block->implemented)
                return;

        rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
        rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
}

static void rvu_reset_all_blocks(struct rvu *rvu)
{
        /* Do a HW reset of all RVU blocks */
        rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}

static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
        struct rvu_pfvf *pfvf;
        u64 cfg;
        int lf;

        for (lf = 0; lf < block->lf.max; lf++) {
                cfg = rvu_read64(rvu, block->addr,
                                 block->lfcfg_reg | (lf << block->lfshift));
                if (!(cfg & BIT_ULL(63)))
                        continue;

                /* Set this resource as being used */
                __set_bit(lf, block->lf.bmap);

                /* Get the PF/VF to which this LF is attached */
                pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    (cfg >> 8) & 0xFFFF, lf, true);

                /* Set start MSIX vector for this LF within this PF/VF */
                rvu_set_msix_offset(rvu, pfvf, block, lf);
        }
}

static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
        int min_vecs;

        if (!vf)
                goto check_pf;

        if (!nvecs) {
                dev_warn(rvu->dev,
                         "PF%d:VF%d is configured with zero msix vectors, %d\n",
                         pf, vf - 1, nvecs);
        }
        return;

check_pf:
        if (pf == 0)
                min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
        else
                min_vecs = RVU_PF_INT_VEC_CNT;

        if (!(nvecs < min_vecs))
                return;
        dev_warn(rvu->dev,
                 "PF%d is configured with too few vectors, %d, min is %d\n",
                 pf, nvecs, min_vecs);
}

static int rvu_setup_msix_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int pf, vf, numvfs, hwvf, err;
        int nvecs, offset, max_msix;
        struct rvu_pfvf *pfvf;
        u64 cfg, phy_addr;
        dma_addr_t iova;

        for (pf = 0; pf < hw->total_pfs; pf++) {
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
                /* If PF is not enabled, nothing to do */
                if (!((cfg >> 20) & 0x01))
                        continue;

                rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

                pfvf = &rvu->pf[pf];
                /* Get num of MSIX vectors attached to this PF */
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
                pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
                rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

                /* Alloc msix bitmap for this PF */
                err = rvu_alloc_bitmap(&pfvf->msix);
                if (err)
                        return err;

                /* Allocate memory for MSIX vector to RVU block LF mapping */
                pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
                                                sizeof(u16), GFP_KERNEL);
                if (!pfvf->msix_lfmap)
                        return -ENOMEM;

                /* For PF0 (AF) firmware will set msix vector offsets for
                 * AF, block AF and PF0_INT vectors, so jump to VFs.
                 */
                if (!pf)
                        goto setup_vfmsix;

                /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
                 * These are allocated on driver init and never freed,
                 * so no need to set 'msix_lfmap' for these.
                 */
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
                nvecs = (cfg >> 12) & 0xFF;
                cfg &= ~0x7FFULL;
                offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
                rvu_write64(rvu, BLKADDR_RVUM,
                            RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
                /* Alloc msix bitmap for VFs */
                for (vf = 0; vf < numvfs; vf++) {
                        pfvf =  &rvu->hwvf[hwvf + vf];
                        /* Get num of MSIX vectors attached to this VF */
                        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                                         RVU_PRIV_PFX_MSIX_CFG(pf));
                        pfvf->msix.max = (cfg & 0xFFF) + 1;
                        rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

                        /* Alloc msix bitmap for this VF */
                        err = rvu_alloc_bitmap(&pfvf->msix);
                        if (err)
                                return err;

                        pfvf->msix_lfmap =
                                devm_kcalloc(rvu->dev, pfvf->msix.max,
                                             sizeof(u16), GFP_KERNEL);
                        if (!pfvf->msix_lfmap)
                                return -ENOMEM;

                        /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
                         * These are allocated on driver init and never freed,
                         * so no need to set 'msix_lfmap' for these.
                         */
                        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                                         RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
                        nvecs = (cfg >> 12) & 0xFF;
                        cfg &= ~0x7FFULL;
                        offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
                        rvu_write64(rvu, BLKADDR_RVUM,
                                    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
                                    cfg | offset);
                }
        }

        /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
         * create an IOMMU mapping for the physical address configured by
         * firmware and reconfigure RVU_AF_MSIXTR_BASE with the IOVA.
         */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        max_msix = cfg & 0xFFFFF;
        if (rvu->fwdata && rvu->fwdata->msixtr_base)
                phy_addr = rvu->fwdata->msixtr_base;
        else
                phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

        iova = dma_map_resource(rvu->dev, phy_addr,
                                max_msix * PCI_MSIX_ENTRY_SIZE,
                                DMA_BIDIRECTIONAL, 0);

        if (dma_mapping_error(rvu->dev, iova))
                return -ENOMEM;

        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
        rvu->msix_base_iova = iova;
        rvu->msixtr_base_phy = phy_addr;

        return 0;
}

static void rvu_reset_msix(struct rvu *rvu)
{
        /* Restore msixtr base register */
        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
                    rvu->msixtr_base_phy);
}

static void rvu_free_hw_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        struct rvu_pfvf  *pfvf;
        int id, max_msix;
        u64 cfg;

        rvu_npa_freemem(rvu);
        rvu_npc_freemem(rvu);
        rvu_nix_freemem(rvu);

        /* Free block LF bitmaps */
        for (id = 0; id < BLK_COUNT; id++) {
                block = &hw->block[id];
                kfree(block->lf.bmap);
        }

        /* Free MSIX bitmaps */
        for (id = 0; id < hw->total_pfs; id++) {
                pfvf = &rvu->pf[id];
                kfree(pfvf->msix.bmap);
        }

        for (id = 0; id < hw->total_vfs; id++) {
                pfvf = &rvu->hwvf[id];
                kfree(pfvf->msix.bmap);
        }

        /* Unmap MSIX vector base IOVA mapping */
        if (!rvu->msix_base_iova)
                return;
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        max_msix = cfg & 0xFFFFF;
        dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
                           max_msix * PCI_MSIX_ENTRY_SIZE,
                           DMA_BIDIRECTIONAL, 0);

        rvu_reset_msix(rvu);
        mutex_destroy(&rvu->rsrc_lock);
}

static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int pf, vf, numvfs, hwvf;
        struct rvu_pfvf *pfvf;
        u64 *mac;

        for (pf = 0; pf < hw->total_pfs; pf++) {
                if (!is_pf_cgxmapped(rvu, pf))
                        continue;
                /* Assign MAC address to PF */
                pfvf = &rvu->pf[pf];
                if (rvu->fwdata && pf < PF_MACNUM_MAX) {
                        mac = &rvu->fwdata->pf_macs[pf];
                        if (*mac)
                                u64_to_ether_addr(*mac, pfvf->mac_addr);
                        else
                                eth_random_addr(pfvf->mac_addr);
                } else {
                        eth_random_addr(pfvf->mac_addr);
                }

                /* Assign MAC address to VFs */
                rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
                for (vf = 0; vf < numvfs; vf++, hwvf++) {
                        pfvf = &rvu->hwvf[hwvf];
                        if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
                                mac = &rvu->fwdata->vf_macs[hwvf];
                                if (*mac)
                                        u64_to_ether_addr(*mac, pfvf->mac_addr);
                                else
                                        eth_random_addr(pfvf->mac_addr);
                        } else {
                                eth_random_addr(pfvf->mac_addr);
                        }
                }
        }
}

static int rvu_fwdata_init(struct rvu *rvu)
{
        u64 fwdbase;
        int err;

        /* Get firmware data base address */
        err = cgx_get_fwdata_base(&fwdbase);
        if (err)
                goto fail;
        rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
        if (!rvu->fwdata)
                goto fail;
        if (!is_rvu_fwdata_valid(rvu)) {
                dev_err(rvu->dev,
                        "Mismatch in 'fwdata' struct between kernel and firmware\n");
                iounmap(rvu->fwdata);
                rvu->fwdata = NULL;
                return -EINVAL;
        }
        return 0;
fail:
        dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
        return -EIO;
}

static void rvu_fwdata_exit(struct rvu *rvu)
{
        if (rvu->fwdata)
                iounmap(rvu->fwdata);
}

static int rvu_setup_hw_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid, err;
        u64 cfg;

        /* Get HW supported max RVU PF & VF count */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        hw->total_pfs = (cfg >> 32) & 0xFF;
        hw->total_vfs = (cfg >> 20) & 0xFFF;
        hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

        /* Init NPA LF's bitmap */
        block = &hw->block[BLKADDR_NPA];
        if (!block->implemented)
                goto nix;
        cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
        block->lf.max = (cfg >> 16) & 0xFFF;
        block->addr = BLKADDR_NPA;
        block->type = BLKTYPE_NPA;
        block->lfshift = 8;
        block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
        block->lfcfg_reg = NPA_PRIV_LFX_CFG;
        block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
        block->lfreset_reg = NPA_AF_LF_RST;
        sprintf(block->name, "NPA");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

nix:
        /* Init NIX LF's bitmap */
        block = &hw->block[BLKADDR_NIX0];
        if (!block->implemented)
                goto sso;
        cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
        block->lf.max = cfg & 0xFFF;
        block->addr = BLKADDR_NIX0;
        block->type = BLKTYPE_NIX;
        block->lfshift = 8;
        block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
        block->lfcfg_reg = NIX_PRIV_LFX_CFG;
        block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
        block->lfreset_reg = NIX_AF_LF_RST;
        sprintf(block->name, "NIX");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

sso:
        /* Init SSO group's bitmap */
        block = &hw->block[BLKADDR_SSO];
        if (!block->implemented)
                goto ssow;
        cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
        block->lf.max = cfg & 0xFFFF;
        block->addr = BLKADDR_SSO;
        block->type = BLKTYPE_SSO;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
        block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
        block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
        block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
        sprintf(block->name, "SSO GROUP");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

ssow:
        /* Init SSO workslot's bitmap */
        block = &hw->block[BLKADDR_SSOW];
        if (!block->implemented)
                goto tim;
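        /* Note: the workslot count lives in SSO_AF_CONST[63:56], so this
         * reuses 'cfg' from the SSO_AF_CONST read in the 'sso' section
         * above rather than issuing a fresh register read.
         */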
        block->lf.max = (cfg >> 56) & 0xFF;
        block->addr = BLKADDR_SSOW;
        block->type = BLKTYPE_SSOW;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
        block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
        block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
        block->lfreset_reg = SSOW_AF_LF_HWS_RST;
        sprintf(block->name, "SSOWS");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

tim:
        /* Init TIM LF's bitmap */
        block = &hw->block[BLKADDR_TIM];
        if (!block->implemented)
                goto cpt;
        cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
        block->lf.max = cfg & 0xFFFF;
        block->addr = BLKADDR_TIM;
        block->type = BLKTYPE_TIM;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
        block->lfcfg_reg = TIM_PRIV_LFX_CFG;
        block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
        block->lfreset_reg = TIM_AF_LF_RST;
        sprintf(block->name, "TIM");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

cpt:
        /* Init CPT LF's bitmap */
        block = &hw->block[BLKADDR_CPT0];
        if (!block->implemented)
                goto init;
        cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
        block->lf.max = cfg & 0xFF;
        block->addr = BLKADDR_CPT0;
        block->type = BLKTYPE_CPT;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
        block->lfcfg_reg = CPT_PRIV_LFX_CFG;
        block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
        block->lfreset_reg = CPT_AF_LF_RST;
        sprintf(block->name, "CPT");
        err = rvu_alloc_bitmap(&block->lf);
        if (err)
                return err;

init:
        /* Allocate memory for PFVF data */
        rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
                               sizeof(struct rvu_pfvf), GFP_KERNEL);
        if (!rvu->pf)
                return -ENOMEM;

        rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
                                 sizeof(struct rvu_pfvf), GFP_KERNEL);
        if (!rvu->hwvf)
                return -ENOMEM;

        mutex_init(&rvu->rsrc_lock);

        rvu_fwdata_init(rvu);

        err = rvu_setup_msix_resources(rvu);
        if (err)
                return err;

        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                if (!block->lf.bmap)
                        continue;

                /* Allocate memory for block LF/slot to pcifunc mapping info */
                block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
                                             sizeof(u16), GFP_KERNEL);
                if (!block->fn_map) {
                        err = -ENOMEM;
                        goto msix_err;
                }

                /* Scan all blocks to check if low level firmware has
                 * already provisioned any of the resources to a PF/VF.
                 */
                rvu_scan_block(rvu, block);
        }

        err = rvu_npc_init(rvu);
        if (err)
                goto npc_err;

        err = rvu_cgx_init(rvu);
        if (err)
                goto cgx_err;

        /* Assign MACs for CGX mapped functions */
        rvu_setup_pfvf_macaddress(rvu);

        err = rvu_npa_init(rvu);
        if (err)
                goto npa_err;

        err = rvu_nix_init(rvu);
        if (err)
                goto nix_err;

        return 0;

nix_err:
        rvu_nix_freemem(rvu);
npa_err:
        rvu_npa_freemem(rvu);
cgx_err:
        rvu_cgx_exit(rvu);
npc_err:
        rvu_npc_freemem(rvu);
        rvu_fwdata_exit(rvu);
msix_err:
        rvu_reset_msix(rvu);
        return err;
}

/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
        if (!aq)
                return;

        qmem_free(rvu->dev, aq->inst);
        qmem_free(rvu->dev, aq->res);
        devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
                 int qsize, int inst_size, int res_size)
{
        struct admin_queue *aq;
        int err;

        *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
        if (!*ad_queue)
                return -ENOMEM;
        aq = *ad_queue;

        /* Alloc memory for instructions, i.e. the AQ */
        err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
        if (err) {
                devm_kfree(rvu->dev, aq);
                return err;
        }

        /* Alloc memory for results */
        err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
        if (err) {
                rvu_aq_free(rvu, aq);
                return err;
        }

        spin_lock_init(&aq->lock);
        return 0;
}
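/* Typical usage (illustrative sketch; the real callers live in the
 * NPA/NIX init code and pass their block's own queue depth and
 * instruction/result structure sizes), roughly:
 *
 *      err = rvu_aq_alloc(rvu, &block->aq, qsize,
 *                         sizeof(struct npa_aq_inst_s),
 *                         sizeof(struct npa_aq_res_s));
 */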

int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
                           struct ready_msg_rsp *rsp)
{
        if (rvu->fwdata) {
                rsp->rclk_freq = rvu->fwdata->rclk;
                rsp->sclk_freq = rvu->fwdata->sclk;
        }
        return 0;
}

/* Get current count of an RVU block's LFs/slots
 * provisioned to a given RVU func.
 */
static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
{
        switch (blktype) {
        case BLKTYPE_NPA:
                return pfvf->npalf ? 1 : 0;
        case BLKTYPE_NIX:
                return pfvf->nixlf ? 1 : 0;
        case BLKTYPE_SSO:
                return pfvf->sso;
        case BLKTYPE_SSOW:
                return pfvf->ssow;
        case BLKTYPE_TIM:
                return pfvf->timlfs;
        case BLKTYPE_CPT:
                return pfvf->cptlfs;
        }
        return 0;
}

bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
        struct rvu_pfvf *pfvf;

        if (!is_pf_func_valid(rvu, pcifunc))
                return false;

        pfvf = rvu_get_pfvf(rvu, pcifunc);

        /* Check if this PFFUNC has a LF of type blktype attached */
        if (!rvu_get_rsrc_mapcount(pfvf, blktype))
                return false;

        return true;
}

static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
                           int pcifunc, int slot)
{
        u64 val;

        val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
        rvu_write64(rvu, block->addr, block->lookup_reg, val);
        /* Wait for the lookup to finish */
        /* TODO: put some timeout here */
        while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
                ;

        val = rvu_read64(rvu, block->addr, block->lookup_reg);

        /* Check LF valid bit */
        if (!(val & (1ULL << 12)))
                return -1;

        return (val & 0xFFF);
}
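/* The lookup register protocol used above: software writes the pcifunc
 * at bits [39:24], the slot at [23:16] and sets bit 13 to trigger the
 * lookup; hardware clears bit 13 when done, sets bit 12 if the LF is
 * valid and returns the LF index in bits [11:0].
 */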

static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int slot, lf, num_lfs;
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
        if (blkaddr < 0)
                return;

        block = &hw->block[blkaddr];

        num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
        if (!num_lfs)
                return;

        for (slot = 0; slot < num_lfs; slot++) {
                lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
                if (lf < 0) /* This should never happen */
                        continue;

                /* Disable the LF */
                rvu_write64(rvu, blkaddr, block->lfcfg_reg |
                            (lf << block->lfshift), 0x00ULL);

                /* Update SW maintained mapping info as well */
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    pcifunc, lf, false);

                /* Free the resource */
                rvu_free_rsrc(&block->lf, lf);

                /* Clear MSIX vector offset for this LF */
                rvu_clear_msix_offset(rvu, pfvf, block, lf);
        }
}

static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
                            u16 pcifunc)
{
        struct rvu_hwinfo *hw = rvu->hw;
        bool detach_all = true;
        struct rvu_block *block;
        int blkid;

        mutex_lock(&rvu->rsrc_lock);

        /* Check for partial resource detach */
        if (detach && detach->partial)
                detach_all = false;

        /* Check for RVU block's LFs attached to this func,
         * if so, detach them.
         */
        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                if (!block->lf.bmap)
                        continue;
                if (!detach_all && detach) {
                        if (blkid == BLKADDR_NPA && !detach->npalf)
                                continue;
                        else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
                                continue;
                        else if ((blkid == BLKADDR_SSO) && !detach->sso)
                                continue;
                        else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
                                continue;
                        else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
                                continue;
                        else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
                                continue;
                }
                rvu_detach_block(rvu, pcifunc, block->type);
        }

        mutex_unlock(&rvu->rsrc_lock);
        return 0;
}

int rvu_mbox_handler_detach_resources(struct rvu *rvu,
                                      struct rsrc_detach *detach,
                                      struct msg_rsp *rsp)
{
        return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}

static void rvu_attach_block(struct rvu *rvu, int pcifunc,
                             int blktype, int num_lfs)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int slot, lf;
        int blkaddr;
        u64 cfg;

        if (!num_lfs)
                return;

        blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
        if (blkaddr < 0)
                return;

        block = &hw->block[blkaddr];
        if (!block->lf.bmap)
                return;

        for (slot = 0; slot < num_lfs; slot++) {
                /* Allocate the resource */
                lf = rvu_alloc_rsrc(&block->lf);
                if (lf < 0)
                        return;

                cfg = (1ULL << 63) | (pcifunc << 8) | slot;
                rvu_write64(rvu, blkaddr, block->lfcfg_reg |
                            (lf << block->lfshift), cfg);
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    pcifunc, lf, true);

                /* Set start MSIX vector for this LF within this PF/VF */
                rvu_set_msix_offset(rvu, pfvf, block, lf);
        }
}
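/* The per-LF config written above mirrors what rvu_scan_block() decodes
 * at startup: bit 63 marks the LF as attached, bits [23:8] hold the
 * owning pcifunc and the low bits hold the slot number within that func.
 */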

static int rvu_check_rsrc_availability(struct rvu *rvu,
                                       struct rsrc_attach *req, u16 pcifunc)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int free_lfs, mappedlfs;

        /* Only one NPA LF can be attached */
        if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
                block = &hw->block[BLKADDR_NPA];
                free_lfs = rvu_rsrc_free_count(&block->lf);
                if (!free_lfs)
                        goto fail;
        } else if (req->npalf) {
                dev_err(&rvu->pdev->dev,
                        "Func 0x%x: Invalid req, already has NPA\n",
                         pcifunc);
                return -EINVAL;
        }

        /* Only one NIX LF can be attached */
        if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
                block = &hw->block[BLKADDR_NIX0];
                free_lfs = rvu_rsrc_free_count(&block->lf);
                if (!free_lfs)
                        goto fail;
        } else if (req->nixlf) {
                dev_err(&rvu->pdev->dev,
                        "Func 0x%x: Invalid req, already has NIX\n",
                        pcifunc);
                return -EINVAL;
        }

        if (req->sso) {
                block = &hw->block[BLKADDR_SSO];
                /* Is request within limits? */
                if (req->sso > block->lf.max) {
                        dev_err(&rvu->pdev->dev,
                                "Func 0x%x: Invalid SSO req, %d > max %d\n",
                                 pcifunc, req->sso, block->lf.max);
                        return -EINVAL;
                }
                mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
                free_lfs = rvu_rsrc_free_count(&block->lf);
                /* Check if additional resources are available */
                if (req->sso > mappedlfs &&
                    ((req->sso - mappedlfs) > free_lfs))
                        goto fail;
        }

        if (req->ssow) {
                block = &hw->block[BLKADDR_SSOW];
                if (req->ssow > block->lf.max) {
                        dev_err(&rvu->pdev->dev,
                                "Func 0x%x: Invalid SSOW req, %d > max %d\n",
                                 pcifunc, req->ssow, block->lf.max);
                        return -EINVAL;
                }
                mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
                free_lfs = rvu_rsrc_free_count(&block->lf);
                if (req->ssow > mappedlfs &&
                    ((req->ssow - mappedlfs) > free_lfs))
                        goto fail;
        }

        if (req->timlfs) {
                block = &hw->block[BLKADDR_TIM];
                if (req->timlfs > block->lf.max) {
                        dev_err(&rvu->pdev->dev,
                                "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
                                 pcifunc, req->timlfs, block->lf.max);
                        return -EINVAL;
                }
                mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
                free_lfs = rvu_rsrc_free_count(&block->lf);
                if (req->timlfs > mappedlfs &&
                    ((req->timlfs - mappedlfs) > free_lfs))
                        goto fail;
        }

        if (req->cptlfs) {
                block = &hw->block[BLKADDR_CPT0];
                if (req->cptlfs > block->lf.max) {
                        dev_err(&rvu->pdev->dev,
                                "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
                                 pcifunc, req->cptlfs, block->lf.max);
                        return -EINVAL;
                }
                mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
                free_lfs = rvu_rsrc_free_count(&block->lf);
                if (req->cptlfs > mappedlfs &&
                    ((req->cptlfs - mappedlfs) > free_lfs))
                        goto fail;
        }

        return 0;

fail:
        dev_info(rvu->dev, "Request for %s failed\n", block->name);
        return -ENOSPC;
}

int rvu_mbox_handler_attach_resources(struct rvu *rvu,
                                      struct rsrc_attach *attach,
                                      struct msg_rsp *rsp)
{
        u16 pcifunc = attach->hdr.pcifunc;
        int err;

        /* If first request, detach all existing attached resources */
        if (!attach->modify)
                rvu_detach_rsrcs(rvu, NULL, pcifunc);

        mutex_lock(&rvu->rsrc_lock);

        /* Check if the request can be accommodated */
        err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
        if (err)
                goto exit;

        /* Now attach the requested resources */
        if (attach->npalf)
                rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);

        if (attach->nixlf)
                rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);

        if (attach->sso) {
                /* An RVU func doesn't know which exact LFs or slots are
                 * attached to it; it always sees them as slots 0,1,2. So
                 * for a 'modify' request, simply detach all existing
                 * attached LFs/slots and attach afresh.
                 */
                if (attach->modify)
                        rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
                rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
        }

        if (attach->ssow) {
                if (attach->modify)
                        rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
                rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
        }

        if (attach->timlfs) {
                if (attach->modify)
                        rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
                rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
        }

        if (attach->cptlfs) {
                if (attach->modify)
                        rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
                rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
        }

exit:
        mutex_unlock(&rvu->rsrc_lock);
        return err;
}

static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                               int blkaddr, int lf)
{
        u16 vec;

        if (lf < 0)
                return MSIX_VECTOR_INVALID;

        for (vec = 0; vec < pfvf->msix.max; vec++) {
                if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
                        return vec;
        }
        return MSIX_VECTOR_INVALID;
}
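/* 'msix_lfmap' maps each MSIX vector of a PF/VF to the (block, LF) pair
 * it serves, packed via MSIX_BLKLF(); the linear search above recovers
 * the first vector assigned to a given LF, while rvu_set_msix_offset()
 * and rvu_clear_msix_offset() below keep the map in sync with HW.
 */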

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, int lf)
{
        u16 nvecs, vec, offset;
        u64 cfg;

        cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
                         (lf << block->lfshift));
        nvecs = (cfg >> 12) & 0xFF;

        /* Check and alloc MSIX vectors, must be contiguous */
        if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
                return;

        offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

        /* Config MSIX offset in LF */
        rvu_write64(rvu, block->addr, block->msixcfg_reg |
                    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

        /* Update the bitmap as well */
        for (vec = 0; vec < nvecs; vec++)
                pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}

static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                  struct rvu_block *block, int lf)
{
        u16 nvecs, vec, offset;
        u64 cfg;

        cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
                         (lf << block->lfshift));
        nvecs = (cfg >> 12) & 0xFF;

        /* Clear MSIX offset in LF */
        rvu_write64(rvu, block->addr, block->msixcfg_reg |
                    (lf << block->lfshift), cfg & ~0x7FFULL);

        offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

        /* Update the mapping */
        for (vec = 0; vec < nvecs; vec++)
                pfvf->msix_lfmap[offset + vec] = 0;

        /* Free the same in MSIX bitmap */
        rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}

int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
                                 struct msix_offset_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_pfvf *pfvf;
        int lf, slot;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        if (!pfvf->msix.bmap)
                return 0;

        /* Set MSIX offsets for each block's LFs attached to this PF/VF */
        lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
        rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

        lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
        rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);

        rsp->sso = pfvf->sso;
        for (slot = 0; slot < rsp->sso; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
                rsp->sso_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
        }

        rsp->ssow = pfvf->ssow;
        for (slot = 0; slot < rsp->ssow; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
                rsp->ssow_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
        }

        rsp->timlfs = pfvf->timlfs;
        for (slot = 0; slot < rsp->timlfs; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
                rsp->timlf_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
        }

        rsp->cptlfs = pfvf->cptlfs;
        for (slot = 0; slot < rsp->cptlfs; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
                rsp->cptlf_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
        }
        return 0;
}

int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
                            struct msg_rsp *rsp)
{
        u16 pcifunc = req->hdr.pcifunc;
        u16 vf, numvfs;
        u64 cfg;

        vf = pcifunc & RVU_PFVF_FUNC_MASK;
        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                         RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
        numvfs = (cfg >> 12) & 0xFF;

        if (vf && vf <= numvfs)
                __rvu_flr_handler(rvu, pcifunc);
        else
                return RVU_INVALID_VF_ID;

        return 0;
}

int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
                                struct get_hw_cap_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;

        rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
        rsp->nix_shaping = hw->cap.nix_shaping;

        return 0;
}
1512
1513static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
1514                                struct mbox_msghdr *req)
1515{
1516        struct rvu *rvu = pci_get_drvdata(mbox->pdev);
1517
1518        /* Check if valid, if not reply with a invalid msg */
1519        if (req->sig != OTX2_MBOX_REQ_SIG)
1520                goto bad_message;
1521
1522        switch (req->id) {
1523#define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
1524        case _id: {                                                     \
1525                struct _rsp_type *rsp;                                  \
1526                int err;                                                \
1527                                                                        \
1528                rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(          \
1529                        mbox, devid,                                    \
1530                        sizeof(struct _rsp_type));                      \
1531                /* Some handlers should complete even if the */        \
1532                /* reply could not be allocated. */                     \
1533                if (!rsp &&                                             \
1534                    _id != MBOX_MSG_DETACH_RESOURCES &&                 \
1535                    _id != MBOX_MSG_NIX_TXSCH_FREE &&                   \
1536                    _id != MBOX_MSG_VF_FLR)                             \
1537                        return -ENOMEM;                                 \
1538                if (rsp) {                                              \
1539                        rsp->hdr.id = _id;                              \
1540                        rsp->hdr.sig = OTX2_MBOX_RSP_SIG;               \
1541                        rsp->hdr.pcifunc = req->pcifunc;                \
1542                        rsp->hdr.rc = 0;                                \
1543                }                                                       \
1544                                                                        \
1545                err = rvu_mbox_handler_ ## _fn_name(rvu,                \
1546                                                    (struct _req_type *)req, \
1547                                                    rsp);               \
1548                if (rsp && err)                                         \
1549                        rsp->hdr.rc = err;                              \
1550                                                                        \
1551                return rsp ? err : -ENOMEM;                             \
1552        }
1553MBOX_MESSAGES
1554#undef M
1555
1556bad_message:
1557        default:
1558                otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
1559                return -ENODEV;
1560        }
1561}
1562
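/* For reference, each M() entry in MBOX_MESSAGES expands to one case of
 * the switch above. A hand-expanded sketch for a hypothetical entry
 * M(FOO, 0x123, foo, foo_req, foo_rsp):
 *
 *	case 0x123: {
 *		struct foo_rsp *rsp;
 *		int err;
 *
 *		rsp = (struct foo_rsp *)otx2_mbox_alloc_msg(mbox, devid,
 *							    sizeof(*rsp));
 *		...
 *		err = rvu_mbox_handler_foo(rvu, (struct foo_req *)req, rsp);
 *		return rsp ? err : -ENOMEM;
 *	}
 */
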
1563static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
1564{
1565        struct rvu *rvu = mwork->rvu;
1566        int offset, err, id, devid;
1567        struct otx2_mbox_dev *mdev;
1568        struct mbox_hdr *req_hdr;
1569        struct mbox_msghdr *msg;
1570        struct mbox_wq_info *mw;
1571        struct otx2_mbox *mbox;
1572
1573        switch (type) {
1574        case TYPE_AFPF:
1575                mw = &rvu->afpf_wq_info;
1576                break;
1577        case TYPE_AFVF:
1578                mw = &rvu->afvf_wq_info;
1579                break;
1580        default:
1581                return;
1582        }
1583
1584        devid = mwork - mw->mbox_wrk;
1585        mbox = &mw->mbox;
1586        mdev = &mbox->dev[devid];
1587
1588        /* Process received mbox messages */
1589        req_hdr = mdev->mbase + mbox->rx_start;
1590        if (mw->mbox_wrk[devid].num_msgs == 0)
1591                return;
1592
1593        offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
1594
1595        for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
1596                msg = mdev->mbase + offset;
1597
1598                /* Set which PF/VF sent this message based on mbox IRQ */
1599                switch (type) {
1600                case TYPE_AFPF:
1601                        msg->pcifunc &=
1602                                ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
1603                        msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
1604                        break;
1605                case TYPE_AFVF:
1606                        msg->pcifunc &=
1607                                ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
1608                        msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
1609                        break;
1610                }
1611
1612                err = rvu_process_mbox_msg(mbox, devid, msg);
1613                if (!err) {
1614                        offset = mbox->rx_start + msg->next_msgoff;
1615                        continue;
1616                }
1617
1618                if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
1619                        dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
1620                                 err, otx2_mbox_id2name(msg->id),
1621                                 msg->id, rvu_get_pf(msg->pcifunc),
1622                                 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1623                else
1624                        dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
1625                                 err, otx2_mbox_id2name(msg->id),
1626                                 msg->id, devid);
1627        }
1628        mw->mbox_wrk[devid].num_msgs = 0;
1629
1630        /* Send mbox responses to VF/PF */
1631        otx2_mbox_msg_send(mbox, devid);
1632}
1633
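/* Worked example of the pcifunc stamping above: on the AF-PF mailbox a
 * message arriving on devid 3 gets PF = 3 stamped into msg->pcifunc; on
 * the AF-VF mailbox devid 3 becomes FUNC = 4 (VF index + 1, with FUNC
 * occupying the low bits), since FUNC == 0 denotes the PF itself:
 *
 *	msg->pcifunc |= (3 << RVU_PFVF_PF_SHIFT);	(AF-PF case)
 *	msg->pcifunc |= (3 << RVU_PFVF_FUNC_SHIFT) + 1;	(AF-VF case)
 */
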
1634static inline void rvu_afpf_mbox_handler(struct work_struct *work)
1635{
1636        struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1637
1638        __rvu_mbox_handler(mwork, TYPE_AFPF);
1639}
1640
1641static inline void rvu_afvf_mbox_handler(struct work_struct *work)
1642{
1643        struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1644
1645        __rvu_mbox_handler(mwork, TYPE_AFVF);
1646}
1647
1648static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
1649{
1650        struct rvu *rvu = mwork->rvu;
1651        struct otx2_mbox_dev *mdev;
1652        struct mbox_hdr *rsp_hdr;
1653        struct mbox_msghdr *msg;
1654        struct mbox_wq_info *mw;
1655        struct otx2_mbox *mbox;
1656        int offset, id, devid;
1657
1658        switch (type) {
1659        case TYPE_AFPF:
1660                mw = &rvu->afpf_wq_info;
1661                break;
1662        case TYPE_AFVF:
1663                mw = &rvu->afvf_wq_info;
1664                break;
1665        default:
1666                return;
1667        }
1668
1669        devid = mwork - mw->mbox_wrk_up;
1670        mbox = &mw->mbox_up;
1671        mdev = &mbox->dev[devid];
1672
1673        rsp_hdr = mdev->mbase + mbox->rx_start;
1674        if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
1675                dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
1676                return;
1677        }
1678
1679        offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
1680
1681        for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
1682                msg = mdev->mbase + offset;
1683
1684                if (msg->id >= MBOX_MSG_MAX) {
1685                        dev_err(rvu->dev,
1686                                "Mbox msg with unknown ID 0x%x\n", msg->id);
1687                        goto end;
1688                }
1689
1690                if (msg->sig != OTX2_MBOX_RSP_SIG) {
1691                        dev_err(rvu->dev,
1692                                "Mbox msg with wrong signature %x, ID 0x%x\n",
1693                                msg->sig, msg->id);
1694                        goto end;
1695                }
1696
1697                switch (msg->id) {
1698                case MBOX_MSG_CGX_LINK_EVENT:
1699                        break;
1700                default:
1701                        if (msg->rc)
1702                                dev_err(rvu->dev,
1703                                        "Mbox msg response has err %d, ID 0x%x\n",
1704                                        msg->rc, msg->id);
1705                        break;
1706                }
1707end:
1708                offset = mbox->rx_start + msg->next_msgoff;
1709                mdev->msgs_acked++;
1710        }
1711        mw->mbox_wrk_up[devid].up_num_msgs = 0;
1712
1713        otx2_mbox_reset(mbox, devid);
1714}
1715
1716static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
1717{
1718        struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1719
1720        __rvu_mbox_up_handler(mwork, TYPE_AFPF);
1721}
1722
1723static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
1724{
1725        struct rvu_work *mwork = container_of(work, struct rvu_work, work);
1726
1727        __rvu_mbox_up_handler(mwork, TYPE_AFVF);
1728}
1729
1730static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
1731                         int type, int num,
1732                         void (mbox_handler)(struct work_struct *),
1733                         void (mbox_up_handler)(struct work_struct *))
1734{
1735        void __iomem *hwbase = NULL, *reg_base;
1736        int err, i, dir, dir_up;
1737        struct rvu_work *mwork;
1738        const char *name;
1739        u64 bar4_addr;
1740
1741        switch (type) {
1742        case TYPE_AFPF:
1743                name = "rvu_afpf_mailbox";
1744                bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
1745                dir = MBOX_DIR_AFPF;
1746                dir_up = MBOX_DIR_AFPF_UP;
1747                reg_base = rvu->afreg_base;
1748                break;
1749        case TYPE_AFVF:
1750                name = "rvu_afvf_mailbox";
1751                bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
1752                dir = MBOX_DIR_PFVF;
1753                dir_up = MBOX_DIR_PFVF_UP;
1754                reg_base = rvu->pfreg_base;
1755                break;
1756        default:
1757                return -EINVAL;
1758        }
1759
1760        mw->mbox_wq = alloc_workqueue(name,
1761                                      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
1762                                      num);
1763        if (!mw->mbox_wq)
1764                return -ENOMEM;
1765
1766        mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
1767                                    sizeof(struct rvu_work), GFP_KERNEL);
1768        if (!mw->mbox_wrk) {
1769                err = -ENOMEM;
1770                goto exit;
1771        }
1772
1773        mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
1774                                       sizeof(struct rvu_work), GFP_KERNEL);
1775        if (!mw->mbox_wrk_up) {
1776                err = -ENOMEM;
1777                goto exit;
1778        }
1779
1780        /* The mailbox is a reserved memory region (in RAM) shared between
1781         * RVU devices. It shouldn't be mapped as device memory, so that
1782         * unaligned accesses are allowed.
1783         */
1784        hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
1785        if (!hwbase) {
1786                dev_err(rvu->dev, "Unable to map mailbox region\n");
1787                err = -ENOMEM;
1788                goto exit;
1789        }
1790
1791        err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
1792        if (err)
1793                goto exit;
1794
1795        err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
1796                             reg_base, dir_up, num);
1797        if (err)
1798                goto exit;
1799
1800        for (i = 0; i < num; i++) {
1801                mwork = &mw->mbox_wrk[i];
1802                mwork->rvu = rvu;
1803                INIT_WORK(&mwork->work, mbox_handler);
1804
1805                mwork = &mw->mbox_wrk_up[i];
1806                mwork->rvu = rvu;
1807                INIT_WORK(&mwork->work, mbox_up_handler);
1808        }
1809
1810        return 0;
1811exit:
1812        if (hwbase)
1813                iounmap((void __iomem *)hwbase);
1814        destroy_workqueue(mw->mbox_wq);
1815        return err;
1816}
1817
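/* The region mapped above holds 'num' fixed-size per-device mailboxes.
 * A sketch of the expected slicing (an assumption for illustration; the
 * authoritative layout lives in the shared otx2 mbox code):
 *
 *	for (i = 0; i < num; i++)
 *		dev[i].mbase = hwbase + i * MBOX_SIZE;
 */
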
1818static void rvu_mbox_destroy(struct mbox_wq_info *mw)
1819{
1820        if (mw->mbox_wq) {
1821                flush_workqueue(mw->mbox_wq);
1822                destroy_workqueue(mw->mbox_wq);
1823                mw->mbox_wq = NULL;
1824        }
1825
1826        if (mw->mbox.hwbase)
1827                iounmap((void __iomem *)mw->mbox.hwbase);
1828
1829        otx2_mbox_destroy(&mw->mbox);
1830        otx2_mbox_destroy(&mw->mbox_up);
1831}
1832
1833static void rvu_queue_work(struct mbox_wq_info *mw, int first,
1834                           int mdevs, u64 intr)
1835{
1836        struct otx2_mbox_dev *mdev;
1837        struct otx2_mbox *mbox;
1838        struct mbox_hdr *hdr;
1839        int i;
1840
1841        for (i = first; i < mdevs; i++) {
1842                /* Interrupt bits are indexed from 0 relative to 'first' */
1843                if (!(intr & BIT_ULL(i - first)))
1844                        continue;
1845
1846                mbox = &mw->mbox;
1847                mdev = &mbox->dev[i];
1848                hdr = mdev->mbase + mbox->rx_start;
1849
1850                /* The hdr->num_msgs is set to zero immediately in the
1851                 * interrupt handler to ensure that it holds a correct value
1852                 * the next time the interrupt handler is called.
1853                 * mw->mbox_wrk[i].num_msgs holds the count used by the
1854                 * mbox handler and mw->mbox_wrk_up[i].up_num_msgs holds
1855                 * the count used by the mbox up handler.
1856                 */
1857
1858                if (hdr->num_msgs) {
1859                        mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
1860                        hdr->num_msgs = 0;
1861                        queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
1862                }
1863                mbox = &mw->mbox_up;
1864                mdev = &mbox->dev[i];
1865                hdr = mdev->mbase + mbox->rx_start;
1866                if (hdr->num_msgs) {
1867                        mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
1868                        hdr->num_msgs = 0;
1869                        queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
1870                }
1871        }
1872}
1873
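/* Worked example of the 'i - first' bit indexing above: when servicing
 * the second set of AF VFs the caller passes first = 64 together with an
 * 'intr' word read from the INTX(1) register, so the bit for device i is
 * i - 64, e.g. device 70 -> intr bit 6.
 */
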
1874static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
1875{
1876        struct rvu *rvu = (struct rvu *)rvu_irq;
1877        int vfs = rvu->vfs;
1878        u64 intr;
1879
1880        intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
1881        /* Clear interrupts */
1882        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
1883
1884        /* Sync with mbox memory region */
1885        rmb();
1886
1887        rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
1888
1889        /* Handle VF interrupts */
1890        if (vfs > 64) {
1891                intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
1892                rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
1893
1894                rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
1895                vfs = 64; /* INTX(0) covers the first 64 VFs */
1896        }
1897
1898        intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
1899        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
1900
1901        rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
1902
1903        return IRQ_HANDLED;
1904}
1905
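/* VF mbox interrupts span two 64-bit registers: INTX(0) covers VFs 0-63
 * and INTX(1) covers VFs 64-127. With e.g. 96 VFs the handler above
 * first services INTX(1) (VFs 64-95, intr bits 0-31) and then INTX(0)
 * for VFs 0-63.
 */
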
1906static void rvu_enable_mbox_intr(struct rvu *rvu)
1907{
1908        struct rvu_hwinfo *hw = rvu->hw;
1909
1910        /* Clear spurious irqs, if any */
1911        rvu_write64(rvu, BLKADDR_RVUM,
1912                    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
1913
1914        /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
1915        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
1916                    INTR_MASK(hw->total_pfs) & ~1ULL);
1917}
1918
1919static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
1920{
1921        struct rvu_block *block;
1922        int slot, lf, num_lfs;
1923        int err;
1924
1925        block = &rvu->hw->block[blkaddr];
1926        num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
1927                                        block->type);
1928        if (!num_lfs)
1929                return;
1930        for (slot = 0; slot < num_lfs; slot++) {
1931                lf = rvu_get_lf(rvu, block, pcifunc, slot);
1932                if (lf < 0)
1933                        continue;
1934
1935                /* Cleanup LF and reset it */
1936                if (block->addr == BLKADDR_NIX0)
1937                        rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
1938                else if (block->addr == BLKADDR_NPA)
1939                        rvu_npa_lf_teardown(rvu, pcifunc, lf);
1940
1941                err = rvu_lf_reset(rvu, block, lf);
1942                if (err) {
1943                        dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
1944                                block->addr, lf);
1945                }
1946        }
1947}
1948
1949static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
1950{
1951        mutex_lock(&rvu->flr_lock);
1952        /* Reset order should reflect inter-block dependencies:
1953         * 1. Reset any packet/work sources (NIX, CPT, TIM)
1954         * 2. Flush and reset SSO/SSOW
1955         * 3. Cleanup pools (NPA)
1956         */
1957        rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
1958        rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
1959        rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
1960        rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
1961        rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
1962        rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
1963        rvu_detach_rsrcs(rvu, NULL, pcifunc);
1964        mutex_unlock(&rvu->flr_lock);
1965}
1966
1967static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
1968{
1969        int reg = 0;
1970
1971        /* pcifunc = 0(PF0) | (vf + 1) */
1972        __rvu_flr_handler(rvu, vf + 1);
1973
1974        if (vf >= 64) {
1975                reg = 1;
1976                vf = vf - 64;
1977        }
1978
1979        /* Signal FLR finish and enable IRQ */
1980        rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
1981        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
1982}
1983
1984static void rvu_flr_handler(struct work_struct *work)
1985{
1986        struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
1987        struct rvu *rvu = flrwork->rvu;
1988        u16 pcifunc, numvfs, vf;
1989        u64 cfg;
1990        int pf;
1991
1992        pf = flrwork - rvu->flr_wrk;
1993        if (pf >= rvu->hw->total_pfs) {
1994                rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
1995                return;
1996        }
1997
1998        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1999        numvfs = (cfg >> 12) & 0xFF;
2000        pcifunc = pf << RVU_PFVF_PF_SHIFT;
2001
2002        for (vf = 0; vf < numvfs; vf++)
2003                __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2004
2005        __rvu_flr_handler(rvu, pcifunc);
2006
2007        /* Signal FLR finish */
2008        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2009
2010        /* Enable interrupt */
2011        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
2012}
2013
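/* flr_wrk indexing as used above: entries 0..total_pfs-1 belong to the
 * PFs and entries from total_pfs onwards to the AF's own VFs. E.g. with
 * 16 PFs:
 *
 *	pf = flrwork - rvu->flr_wrk;		PF5's work  -> index 5
 *	vf = pf - rvu->hw->total_pfs;		VF2's work  -> index 18
 */
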
2014static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2015{
2016        int dev, vf, reg = 0;
2017        u64 intr;
2018
2019        if (start_vf >= 64)
2020                reg = 1;
2021
2022        intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2023        if (!intr)
2024                return;
2025
2026        for (vf = 0; vf < numvfs; vf++) {
2027                if (!(intr & BIT_ULL(vf)))
2028                        continue;
2029                dev = vf + start_vf + rvu->hw->total_pfs;
2030                queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2031                /* Clear and disable the interrupt */
2032                rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2033                rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2034        }
2035}
2036
2037static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
2038{
2039        struct rvu *rvu = (struct rvu *)rvu_irq;
2040        u64 intr;
2041        u8  pf;
2042
2043        intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
2044        if (!intr)
2045                goto afvf_flr;
2046
2047        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2048                if (intr & (1ULL << pf)) {
2049                        /* PF is already dead, do only AF related operations */
2050                        queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
2051                        /* clear interrupt */
2052                        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
2053                                    BIT_ULL(pf));
2054                        /* Disable the interrupt */
2055                        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2056                                    BIT_ULL(pf));
2057                }
2058        }
2059
2060afvf_flr:
2061        rvu_afvf_queue_flr_work(rvu, 0, 64);
2062        if (rvu->vfs > 64)
2063                rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
2064
2065        return IRQ_HANDLED;
2066}
2067
2068static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2069{
2070        int vf;
2071
2072        /* Nothing to be done here other than clearing the
2073         * TRPEND bit.
2074         */
2075        for (vf = 0; vf < 64; vf++) {
2076                if (intr & (1ULL << vf)) {
2077                        /* clear the trpend due to ME (master enable) */
2078                        rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2079                        /* clear interrupt */
2080                        rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2081                }
2082        }
2083}
2084
2085/* Handles ME interrupts from VFs of AF */
2086static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
2087{
2088        struct rvu *rvu = (struct rvu *)rvu_irq;
2089        int vfset;
2090        u64 intr;
2091
2094        for (vfset = 0; vfset <= 1; vfset++) {
2095                intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
2096                if (intr)
2097                        rvu_me_handle_vfset(rvu, vfset, intr);
2098        }
2099
2100        return IRQ_HANDLED;
2101}
2102
2103/* Handles ME interrupts from PFs */
2104static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
2105{
2106        struct rvu *rvu = (struct rvu *)rvu_irq;
2107        u64 intr;
2108        u8  pf;
2109
2110        intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2111
2112        /* Nothing to be done here other than clearing the
2113         * TRPEND bit.
2114         */
2115        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2116                if (intr & (1ULL << pf)) {
2117                        /* clear the trpend due to ME (master enable) */
2118                        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2119                                    BIT_ULL(pf));
2120                        /* clear interrupt */
2121                        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2122                                    BIT_ULL(pf));
2123                }
2124        }
2125
2126        return IRQ_HANDLED;
2127}
2128
2129static void rvu_unregister_interrupts(struct rvu *rvu)
2130{
2131        int irq;
2132
2133        /* Disable the Mbox interrupt */
2134        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2135                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2136
2137        /* Disable the PF FLR interrupt */
2138        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2139                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2140
2141        /* Disable the PF ME interrupt */
2142        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2143                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2144
2145        for (irq = 0; irq < rvu->num_vec; irq++) {
2146                if (rvu->irq_allocated[irq])
2147                        free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2148        }
2149
2150        pci_free_irq_vectors(rvu->pdev);
2151        rvu->num_vec = 0;
2152}
2153
2154static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2155{
2156        struct rvu_pfvf *pfvf = &rvu->pf[0];
2157        int offset;
2158
2160        offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2161
2162        /* Make sure there are enough MSIX vectors configured so that
2163         * VF interrupts can be handled. An offset equal to zero means
2164         * that PF vectors are not configured and overlap the AF vectors.
2165         */
2166        return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2167               offset;
2168}
2169
2170static int rvu_register_interrupts(struct rvu *rvu)
2171{
2172        int ret, offset, pf_vec_start;
2173
2174        rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2175
2176        rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2177                                           NAME_SIZE, GFP_KERNEL);
2178        if (!rvu->irq_name)
2179                return -ENOMEM;
2180
2181        rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2182                                          sizeof(bool), GFP_KERNEL);
2183        if (!rvu->irq_allocated)
2184                return -ENOMEM;
2185
2186        /* Enable MSI-X */
2187        ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2188                                    rvu->num_vec, PCI_IRQ_MSIX);
2189        if (ret < 0) {
2190                dev_err(rvu->dev,
2191                        "RVUAF: Request for %d msix vectors failed, ret %d\n",
2192                        rvu->num_vec, ret);
2193                return ret;
2194        }
2195
2196        /* Register mailbox interrupt handler */
2197        sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2198        ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2199                          rvu_mbox_intr_handler, 0,
2200                          &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2201        if (ret) {
2202                dev_err(rvu->dev,
2203                        "RVUAF: IRQ registration failed for mbox irq\n");
2204                goto fail;
2205        }
2206
2207        rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2208
2209        /* Enable mailbox interrupts from all PFs */
2210        rvu_enable_mbox_intr(rvu);
2211
2212        /* Register FLR interrupt handler */
2213        sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2214                "RVUAF FLR");
2215        ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2216                          rvu_flr_intr_handler, 0,
2217                          &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2218                          rvu);
2219        if (ret) {
2220                dev_err(rvu->dev,
2221                        "RVUAF: IRQ registration failed for FLR\n");
2222                goto fail;
2223        }
2224        rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2225
2226        /* Clear any pending FLR interrupts and enable them for all PFs */
2227        rvu_write64(rvu, BLKADDR_RVUM,
2228                    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2229
2230        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2231                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2232
2233        /* Register ME interrupt handler */
2234        sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2235                "RVUAF ME");
2236        ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2237                          rvu_me_pf_intr_handler, 0,
2238                          &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2239                          rvu);
2240        if (ret) {
2241                dev_err(rvu->dev, "RVUAF: IRQ registration failed for ME\n");
2242                goto fail;
2243        }
2244        rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2245
2246        /* Clear TRPEND bit for all PF */
2247        rvu_write64(rvu, BLKADDR_RVUM,
2248                    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
2249        /* Clear any pending ME interrupts and enable them for all PFs */
2250        rvu_write64(rvu, BLKADDR_RVUM,
2251                    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2252
2253        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2254                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2255
2256        if (!rvu_afvf_msix_vectors_num_ok(rvu))
2257                return 0;
2258
2259        /* Get the PF's MSIX vector offset. */
2260        pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2261                                  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2262
2263        /* Register MBOX0 interrupt. */
2264        offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2265        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2266        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2267                          rvu_mbox_intr_handler, 0,
2268                          &rvu->irq_name[offset * NAME_SIZE],
2269                          rvu);
2270        if (ret) {
2271                dev_err(rvu->dev, "RVUAF: IRQ registration failed for Mbox0\n");
2272                goto fail;
2273        }
2274        rvu->irq_allocated[offset] = true;
2275
2276        /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
2277         * simply increment current offset by 1.
2278         */
2279        offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2280        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2281        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2282                          rvu_mbox_intr_handler, 0,
2283                          &rvu->irq_name[offset * NAME_SIZE],
2284                          rvu);
2285        if (ret) {
2286                dev_err(rvu->dev, "RVUAF: IRQ registration failed for Mbox1\n");
2287                goto fail;
2288        }
2289        rvu->irq_allocated[offset] = true;
2290
2291        /* Register FLR interrupt handler for AF's VFs */
2292        offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
2293        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
2294        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2295                          rvu_flr_intr_handler, 0,
2296                          &rvu->irq_name[offset * NAME_SIZE], rvu);
2297        if (ret) {
2298                dev_err(rvu->dev,
2299                        "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
2300                goto fail;
2301        }
2302        rvu->irq_allocated[offset] = true;
2303
2304        offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
2305        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
2306        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2307                          rvu_flr_intr_handler, 0,
2308                          &rvu->irq_name[offset * NAME_SIZE], rvu);
2309        if (ret) {
2310                dev_err(rvu->dev,
2311                        "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
2312                goto fail;
2313        }
2314        rvu->irq_allocated[offset] = true;
2315
2316        /* Register ME interrupt handler for AF's VFs */
2317        offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
2318        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
2319        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2320                          rvu_me_vf_intr_handler, 0,
2321                          &rvu->irq_name[offset * NAME_SIZE], rvu);
2322        if (ret) {
2323                dev_err(rvu->dev,
2324                        "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
2325                goto fail;
2326        }
2327        rvu->irq_allocated[offset] = true;
2328
2329        offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
2330        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
2331        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2332                          rvu_me_vf_intr_handler, 0,
2333                          &rvu->irq_name[offset * NAME_SIZE], rvu);
2334        if (ret) {
2335                dev_err(rvu->dev,
2336                        "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
2337                goto fail;
2338        }
2339        rvu->irq_allocated[offset] = true;
2340        return 0;
2341
2342fail:
2343        rvu_unregister_interrupts(rvu);
2344        return ret;
2345}
2346
2347static void rvu_flr_wq_destroy(struct rvu *rvu)
2348{
2349        if (rvu->flr_wq) {
2350                flush_workqueue(rvu->flr_wq);
2351                destroy_workqueue(rvu->flr_wq);
2352                rvu->flr_wq = NULL;
2353        }
2354}
2355
2356static int rvu_flr_init(struct rvu *rvu)
2357{
2358        int dev, num_devs;
2359        u64 cfg;
2360        int pf;
2361
2362        /* Enable FLR for all PFs */
2363        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2364                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2365                rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
2366                            cfg | BIT_ULL(22));
2367        }
2368
2369        rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
2370                                      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2371                                       1);
2372        if (!rvu->flr_wq)
2373                return -ENOMEM;
2374
2375        num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
2376        rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
2377                                    sizeof(struct rvu_work), GFP_KERNEL);
2378        if (!rvu->flr_wrk) {
2379                destroy_workqueue(rvu->flr_wq);
2380                return -ENOMEM;
2381        }
2382
2383        for (dev = 0; dev < num_devs; dev++) {
2384                rvu->flr_wrk[dev].rvu = rvu;
2385                INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
2386        }
2387
2388        mutex_init(&rvu->flr_lock);
2389
2390        return 0;
2391}
2392
2393static void rvu_disable_afvf_intr(struct rvu *rvu)
2394{
2395        int vfs = rvu->vfs;
2396
2397        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
2398        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
2399        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
2400        if (vfs <= 64)
2401                return;
2402
2403        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
2404                      INTR_MASK(vfs - 64));
2405        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2406        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
2407}
2408
2409static void rvu_enable_afvf_intr(struct rvu *rvu)
2410{
2411        int vfs = rvu->vfs;
2412
2413        /* Clear any pending interrupts and enable AF VF interrupts for
2414         * the first 64 VFs.
2415         */
2416        /* Mbox */
2417        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
2418        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
2419
2420        /* FLR */
2421        rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
2422        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
2423        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
2424
2425        /* Same for remaining VFs, if any. */
2426        if (vfs <= 64)
2427                return;
2428
2429        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
2430        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
2431                      INTR_MASK(vfs - 64));
2432
2433        rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
2434        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2435        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
2436}
2437
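/* INTR_MASK(n) is assumed here to produce a mask with the low n bits
 * set (all-ones once n reaches 64). With e.g. 96 VFs the INTX(0)/ENA(0)
 * writes above use a full 64-bit mask while the INTX(1)/ENA(1) writes
 * use INTR_MASK(32) = 0x00000000ffffffff.
 */
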
2438#define PCI_DEVID_OCTEONTX2_LBK 0xA061
2439
2440static int lbk_get_num_chans(void)
2441{
2442        struct pci_dev *pdev;
2443        void __iomem *base;
2444        int ret = -EIO;
2445
2446        pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
2447                              NULL);
2448        if (!pdev)
2449                goto err;
2450
2451        base = pci_ioremap_bar(pdev, 0);
2452        if (!base)
2453                goto err_put;
2454
2455        /* Read number of available LBK channels from LBK(0)_CONST register. */
2456        ret = (readq(base + 0x10) >> 32) & 0xffff;
2457        iounmap(base);
2458err_put:
2459        pci_dev_put(pdev);
2460err:
2461        return ret;
2462}
2463
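/* LBK(0)_CONST decode used above: the channel count sits in bits 47:32
 * of the register at offset 0x10, so a raw value of 0x0000004000000000
 * yields (val >> 32) & 0xffff = 0x40 = 64 channels.
 */
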
2464static int rvu_enable_sriov(struct rvu *rvu)
2465{
2466        struct pci_dev *pdev = rvu->pdev;
2467        int err, chans, vfs;
2468
2469        if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
2470                dev_warn(&pdev->dev,
2471                         "Skipping SRIOV enablement since not enough IRQs are available\n");
2472                return 0;
2473        }
2474
2475        chans = lbk_get_num_chans();
2476        if (chans < 0)
2477                return chans;
2478
2479        vfs = pci_sriov_get_totalvfs(pdev);
2480
2481        /* Limit VFs in case we have more VFs than LBK channels available. */
2482        if (vfs > chans)
2483                vfs = chans;
2484
2485        if (!vfs)
2486                return 0;
2487
2488        /* Save the number of VFs for reference in the VF interrupt
2489         * handlers. Interrupts might start arriving during SRIOV
2490         * enablement, so the ordinary API can't be used to get the count.
2491         */
2492        rvu->vfs = vfs;
2493
2494        err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
2495                            rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
2496        if (err)
2497                return err;
2498
2499        rvu_enable_afvf_intr(rvu);
2500        /* Make sure IRQs are enabled before SRIOV. */
2501        mb();
2502
2503        err = pci_enable_sriov(pdev, vfs);
2504        if (err) {
2505                rvu_disable_afvf_intr(rvu);
2506                rvu_mbox_destroy(&rvu->afvf_wq_info);
2507                return err;
2508        }
2509
2510        return 0;
2511}
2512
2513static void rvu_disable_sriov(struct rvu *rvu)
2514{
2515        rvu_disable_afvf_intr(rvu);
2516        rvu_mbox_destroy(&rvu->afvf_wq_info);
2517        pci_disable_sriov(rvu->pdev);
2518}
2519
2520static void rvu_update_module_params(struct rvu *rvu)
2521{
2522        const char *default_pfl_name = "default";
2523
2524        strscpy(rvu->mkex_pfl_name,
2525                mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
2526}
2527
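/* Usage example: the MKEX profile can be chosen at module load time,
 * e.g. 'modprobe octeontx2-af mkex_profile=my_profile' (a hypothetical
 * profile name); without the parameter the "default" name is used.
 */
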
2528static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2529{
2530        struct device *dev = &pdev->dev;
2531        struct rvu *rvu;
2532        int    err;
2533
2534        rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
2535        if (!rvu)
2536                return -ENOMEM;
2537
2538        rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
2539        if (!rvu->hw) {
2540                devm_kfree(dev, rvu);
2541                return -ENOMEM;
2542        }
2543
2544        pci_set_drvdata(pdev, rvu);
2545        rvu->pdev = pdev;
2546        rvu->dev = &pdev->dev;
2547
2548        err = pci_enable_device(pdev);
2549        if (err) {
2550                dev_err(dev, "Failed to enable PCI device\n");
2551                goto err_freemem;
2552        }
2553
2554        err = pci_request_regions(pdev, DRV_NAME);
2555        if (err) {
2556                dev_err(dev, "PCI request regions failed 0x%x\n", err);
2557                goto err_disable_device;
2558        }
2559
2560        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
2561        if (err) {
2562                dev_err(dev, "DMA mask config failed, abort\n");
2563                goto err_release_regions;
2564        }
2565
2566        pci_set_master(pdev);
2567
2568        /* Map Admin function CSRs */
2569        rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
2570        rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
2571        if (!rvu->afreg_base || !rvu->pfreg_base) {
2572                dev_err(dev, "Unable to map admin function CSRs, aborting\n");
2573                err = -ENOMEM;
2574                goto err_release_regions;
2575        }
2576
2577        /* Store module params in rvu structure */
2578        rvu_update_module_params(rvu);
2579
2580        /* Check which blocks the HW supports */
2581        rvu_check_block_implemented(rvu);
2582
2583        rvu_reset_all_blocks(rvu);
2584
2585        rvu_setup_hw_capabilities(rvu);
2586
2587        err = rvu_setup_hw_resources(rvu);
2588        if (err)
2589                goto err_release_regions;
2590
2591        /* Init mailbox btw AF and PFs */
2592        err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
2593                            rvu->hw->total_pfs, rvu_afpf_mbox_handler,
2594                            rvu_afpf_mbox_up_handler);
2595        if (err)
2596                goto err_hwsetup;
2597
2598        err = rvu_flr_init(rvu);
2599        if (err)
2600                goto err_mbox;
2601
2602        err = rvu_register_interrupts(rvu);
2603        if (err)
2604                goto err_flr;
2605
2606        rvu_setup_rvum_blk_revid(rvu);
2607
2608        /* Enable AF's VFs (if any) */
2609        err = rvu_enable_sriov(rvu);
2610        if (err)
2611                goto err_irq;
2612
2613        /* Initialize debugfs */
2614        rvu_dbg_init(rvu);
2615
2616        return 0;
2617err_irq:
2618        rvu_unregister_interrupts(rvu);
2619err_flr:
2620        rvu_flr_wq_destroy(rvu);
2621err_mbox:
2622        rvu_mbox_destroy(&rvu->afpf_wq_info);
2623err_hwsetup:
2624        rvu_cgx_exit(rvu);
2625        rvu_fwdata_exit(rvu);
2626        rvu_reset_all_blocks(rvu);
2627        rvu_free_hw_resources(rvu);
2628        rvu_clear_rvum_blk_revid(rvu);
2629err_release_regions:
2630        pci_release_regions(pdev);
2631err_disable_device:
2632        pci_disable_device(pdev);
2633err_freemem:
2634        pci_set_drvdata(pdev, NULL);
2635        devm_kfree(&pdev->dev, rvu->hw);
2636        devm_kfree(dev, rvu);
2637        return err;
2638}
2639
2640static void rvu_remove(struct pci_dev *pdev)
2641{
2642        struct rvu *rvu = pci_get_drvdata(pdev);
2643
2644        rvu_dbg_exit(rvu);
2645        rvu_unregister_interrupts(rvu);
2646        rvu_flr_wq_destroy(rvu);
2647        rvu_cgx_exit(rvu);
2648        rvu_fwdata_exit(rvu);
2649        rvu_mbox_destroy(&rvu->afpf_wq_info);
2650        rvu_disable_sriov(rvu);
2651        rvu_reset_all_blocks(rvu);
2652        rvu_free_hw_resources(rvu);
2653        rvu_clear_rvum_blk_revid(rvu);
2654        pci_release_regions(pdev);
2655        pci_disable_device(pdev);
2656        pci_set_drvdata(pdev, NULL);
2657
2658        devm_kfree(&pdev->dev, rvu->hw);
2659        devm_kfree(&pdev->dev, rvu);
2660}
2661
2662static struct pci_driver rvu_driver = {
2663        .name = DRV_NAME,
2664        .id_table = rvu_id_table,
2665        .probe = rvu_probe,
2666        .remove = rvu_remove,
2667};
2668
2669static int __init rvu_init_module(void)
2670{
2671        int err;
2672
2673        pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
2674
2675        err = pci_register_driver(&cgx_driver);
2676        if (err < 0)
2677                return err;
2678
2679        err = pci_register_driver(&rvu_driver);
2680        if (err < 0)
2681                pci_unregister_driver(&cgx_driver);
2682
2683        return err;
2684}
2685
2686static void __exit rvu_cleanup_module(void)
2687{
2688        pci_unregister_driver(&rvu_driver);
2689        pci_unregister_driver(&cgx_driver);
2690}
2691
2692module_init(rvu_init_module);
2693module_exit(rvu_cleanup_module);
2694