// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"

static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);

enum mc_tbl_sz {
	MC_TBL_SZ_256,
	MC_TBL_SZ_512,
	MC_TBL_SZ_1K,
	MC_TBL_SZ_2K,
	MC_TBL_SZ_4K,
	MC_TBL_SZ_8K,
	MC_TBL_SZ_16K,
	MC_TBL_SZ_32K,
	MC_TBL_SZ_64K,
};

enum mc_buf_cnt {
	MC_BUF_CNT_8,
	MC_BUF_CNT_16,
	MC_BUF_CNT_32,
	MC_BUF_CNT_64,
	MC_BUF_CNT_128,
	MC_BUF_CNT_256,
	MC_BUF_CNT_512,
	MC_BUF_CNT_1024,
	MC_BUF_CNT_2048,
};

enum nix_mark_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};

/* For now, consider only the MC resources needed for broadcast
 * pkt replication, i.e. 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128

struct mce {
	struct hlist_node	node;
	u16			pcifunc;
};

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	struct rvu_block *block;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];
	return block->lf.max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

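/* Simple bump allocator for MCE entries: indices are handed out
 * sequentially from next_free_mce and are never returned, so
 * MC_TBL_SIZE must be sized up front for the worst case (see the
 * MC_TBL_SIZE comment above).
 */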
static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	if (!mcast)
		return 0;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;
	return idx;
}

static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
		return hw->nix0;

	return NULL;
}

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in-flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "NIX RX software sync failed\n");
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u8 cgx_id, lmac_id;
	int pkind, pf, vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		/* By default we enable pause frames */
		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
			cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
					       lmac_id, true, true);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore, if an odd number of AF VFs
		 * are enabled, the last VF is left without a pair.
		 */
		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
		pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
						NIX_CHAN_LBK_CHX(0, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base, false);
		break;
	}

	/* Install a UCAST forwarding rule in MCAM matching this
	 * NIXLF-attached RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}

	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);
	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;
	pfvf->rxvlan = false;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
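		/* Bit 16 of NIX_AF_RX_CHANX_CFG is the backpressure-enable
		 * bit set by the bp_enable handler below; clear it to turn
		 * backpressure off on this channel.
		 */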
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}

static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 cgx_bpid_cnt, lbk_bpid_cnt;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 15)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;

	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}

int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;

	/* Enable backpressure only for CGX mapped PFs and LBK interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Failed to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & 0xFF) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel to the bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}

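/* Each LSO format is a table of up to 8 field entries. A field entry
 * tells the hardware how to patch one header field in every segment:
 * which layer it lives in, the byte offset and size of the field, and
 * the algorithm to apply (e.g. add payload length, add segment number).
 * The helpers below fill such entries for the TCP/IP headers.
 */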
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In IPv4 the length field is at offset 2 bytes; for IPv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e. 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask)
{
	int err, grp, num_indices;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
		    BIT_ULL(36) | BIT_ULL(4) |
		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
		    way_mask << 20);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

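/* Submit one instruction to the NIX admin queue (AQ) and busy-wait for
 * completion: copy the instruction into the slot at the current head
 * pointer, make it visible to hardware (wmb), ring the doorbell, then
 * poll the result's completion code for roughly a millisecond
 * (1000 x 1us) before giving up.
 */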
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get the current head pointer, where this instruction is appended */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

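/* Common AQ enqueue path for both mbox requests and AF-internal users.
 * A NULL 'rsp' marks an AF-internal call (e.g. broadcast MCE entry
 * init), which skips the NIXLF ownership check; PF/VF-originated MCE
 * requests are rejected instead.
 */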
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry init */
	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
		/* Check if index exceeds MCE list length */
		if (!hw->nix0->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	/* Check if the SMQ pointed to by the SQ belongs to this PF/VF */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Enqueuing multiple instructions is not yet supported,
	 * so always use the first entry in the result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses the same aq->res->base to post the result of the
	 * previous instruction, hence serialize here until it is consumed.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		/* Fall through */
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
				(test_bit(req->qidx, pfvf->rq_bmap) &
				~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
				(test_bit(req->qidx, pfvf->sq_bmap) &
				~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
				(test_bit(req->qidx, pfvf->cq_bmap) &
				~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}

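/* Disable every enabled RQ/SQ/CQ context of this LF via masked AQ
 * WRITEs; the pfvf bitmaps updated at INIT/WRITE time indicate which
 * queue contexts are currently enabled.
 */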
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}

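/* When dynamic NDC caching is disabled, explicitly lock each freshly
 * initialized queue context into the NDC with an AQ LOCK op so that it
 * stays resident in the cache; MCE and DYNO contexts are skipped.
 */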
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

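/* NIXLF allocation: validate the requested NPA/SSO mappings and RSS
 * geometry, reset the LF, carve out HW context memory for RQs, SQs,
 * CQs, RSS tables and the QINT/CQINT interrupt contexts, program their
 * base IOVAs and sizes, then report the LF's channel and capability
 * info back in the response.
 */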
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be a power of two; otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group, or the
	 * entire table can't be used.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask);
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPIDs.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	cfg = 0;
	if (req->npa_func)
		cfg = req->npa_func;
	if (req->sso_func)
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable/disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	return rc;
}

int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)\n",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}

/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	u64  cir_reg = 0, pir_reg = 0;
	u64  cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}

static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	if (lvl != NIX_TXSCH_LVL_TL2)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}

static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_afvf(pcifunc)) { /* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}

static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}

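/* Validate a TX scheduler queue alloc request against HW capabilities.
 * On silicon with a fixed txschq mapping, each PF_FUNC can only use the
 * single queue at 'start + (pcifunc & RVU_PFVF_FUNC_MASK)' within its
 * link's range; otherwise queues come from a shared free bitmap.
 */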
static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}

static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue allocation is based
	 * on the transmit link to which the PF_FUNC is mapped.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Requesting both contig and non-contig queues makes no
		 * sense here; the contiguous request takes precedence.
		 */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

1439        /* Allocate the contiguous queue indices requested first */
1440        if (rsp->schq_contig[lvl]) {
1441                schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1442                                                  txsch->schq.max, start,
1443                                                  rsp->schq_contig[lvl], 0);
1444                if (schq >= end)
1445                        rsp->schq_contig[lvl] = 0;
1446                for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1447                        set_bit(schq, txsch->schq.bmap);
1448                        rsp->schq_contig_list[lvl][idx] = schq;
1449                        schq++;
1450                }
1451        }
1452
1453        /* Allocate non-contiguous queue indices */
1454        if (rsp->schq[lvl]) {
1455                idx = 0;
1456                for (schq = start; schq < end; schq++) {
1457                        if (!test_bit(schq, txsch->schq.bmap)) {
1458                                set_bit(schq, txsch->schq.bmap);
1459                                rsp->schq_list[lvl][idx++] = schq;
1460                        }
1461                        if (idx == rsp->schq[lvl])
1462                                break;
1463                }
1464                /* Update how many were allocated */
1465                rsp->schq[lvl] = idx;
1466        }
1467}
1468
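    /* Mbox handler for NIX_TXSCH_ALLOC: under rsrc_lock, validate the
     * per-level request against HW capabilities, allocate the queues, and
     * reset link config and shaping on every queue handed out so that no
     * stale state from a previous owner leaks through.
     */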
1469int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1470                                     struct nix_txsch_alloc_req *req,
1471                                     struct nix_txsch_alloc_rsp *rsp)
1472{
1473        struct rvu_hwinfo *hw = rvu->hw;
1474        u16 pcifunc = req->hdr.pcifunc;
1475        int link, blkaddr, rc = 0;
1476        int lvl, idx, start, end;
1477        struct nix_txsch *txsch;
1478        struct rvu_pfvf *pfvf;
1479        struct nix_hw *nix_hw;
1480        u32 *pfvf_map;
1481        u16 schq;
1482
1483        pfvf = rvu_get_pfvf(rvu, pcifunc);
1484        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1485        if (!pfvf->nixlf || blkaddr < 0)
1486                return NIX_AF_ERR_AF_LF_INVALID;
1487
1488        nix_hw = get_nix_hw(rvu->hw, blkaddr);
1489        if (!nix_hw)
1490                return -EINVAL;
1491
1492        mutex_lock(&rvu->rsrc_lock);
1493
1494        /* Check if request is valid as per HW capabilities
1495         * and can be accommodated.
1496         */
1497        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1498                rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1499                if (rc)
1500                        goto err;
1501        }
1502
1503        /* Allocate requested Tx scheduler queues */
1504        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1505                txsch = &nix_hw->txsch[lvl];
1506                pfvf_map = txsch->pfvf_map;
1507
1508                if (!req->schq[lvl] && !req->schq_contig[lvl])
1509                        continue;
1510
1511                rsp->schq[lvl] = req->schq[lvl];
1512                rsp->schq_contig[lvl] = req->schq_contig[lvl];
1513
1514                link = nix_get_tx_link(rvu, pcifunc);
1515
1516                if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1517                        start = link;
1518                        end = link;
1519                } else if (hw->cap.nix_fixed_txschq_mapping) {
1520                        nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1521                } else {
1522                        start = 0;
1523                        end = txsch->schq.max;
1524                }
1525
1526                nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1527
1528                /* Reset queue config */
1529                for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1530                        schq = rsp->schq_contig_list[lvl][idx];
1531                        if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1532                            NIX_TXSCHQ_CFG_DONE))
1533                                pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1534                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1535                        nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1536                }
1537
1538                for (idx = 0; idx < req->schq[lvl]; idx++) {
1539                        schq = rsp->schq_list[lvl][idx];
1540                        if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1541                            NIX_TXSCHQ_CFG_DONE))
1542                                pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1543                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1544                        nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1545                }
1546        }
1547
1548        rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1549        rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1550        rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1551                                       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1552                                       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1553        goto exit;
1554err:
1555        rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1556exit:
1557        mutex_unlock(&rvu->rsrc_lock);
1558        return rc;
1559}
1560
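    /* Flush an SMQ so that no packets are left in flight before it is
     * freed or reconfigured. CGX transmit is temporarily enabled (and
     * restored afterwards) and link backpressure is disabled, since a
     * stalled link could prevent the flush from ever completing.
     */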
1561static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1562                          int smq, u16 pcifunc, int nixlf)
1563{
1564        int pf = rvu_get_pf(pcifunc);
1565        u8 cgx_id = 0, lmac_id = 0;
1566        int err, restore_tx_en = 0;
1567        u64 cfg;
1568
1569        /* enable cgx tx if disabled */
1570        if (is_pf_cgxmapped(rvu, pf)) {
1571                rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1572                restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1573                                                    lmac_id, true);
1574        }
1575
1576        cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1577        /* Do SMQ flush and set enqueue xoff */
1578        cfg |= BIT_ULL(50) | BIT_ULL(49);
1579        rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1580
1581        /* Disable backpressure from physical link,
1582         * otherwise SMQ flush may stall.
1583         */
1584        rvu_cgx_enadis_rx_bp(rvu, pf, false);
1585
1586        /* Wait for flush to complete */
1587        err = rvu_poll_reg(rvu, blkaddr,
1588                           NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1589        if (err)
1590                dev_err(rvu->dev,
1591                        "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1592
1593        rvu_cgx_enadis_rx_bp(rvu, pf, true);
1594        /* restore cgx tx state */
1595        if (restore_tx_en)
1596                cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1597}
1598
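    /* Free all Tx scheduler queues owned by this PF_FUNC. Teardown order
     * matters here: queue links are disabled first, every SMQ mapped to
     * this function is then flushed, and only afterwards are the queues
     * returned to the free pool and the NDC-TX cache synced for this LF.
     */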
1599static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1600{
1601        int blkaddr, nixlf, lvl, schq, err;
1602        struct rvu_hwinfo *hw = rvu->hw;
1603        struct nix_txsch *txsch;
1604        struct nix_hw *nix_hw;
1605
1606        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1607        if (blkaddr < 0)
1608                return NIX_AF_ERR_AF_LF_INVALID;
1609
1610        nix_hw = get_nix_hw(rvu->hw, blkaddr);
1611        if (!nix_hw)
1612                return -EINVAL;
1613
1614        nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1615        if (nixlf < 0)
1616                return NIX_AF_ERR_AF_LF_INVALID;
1617
1618        /* Disable TL2/3 queue links before SMQ flush */
1619        mutex_lock(&rvu->rsrc_lock);
1620        for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1621                if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1622                        continue;
1623
1624                txsch = &nix_hw->txsch[lvl];
1625                for (schq = 0; schq < txsch->schq.max; schq++) {
1626                        if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1627                                continue;
1628                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1629                }
1630        }
1631
1632        /* Flush SMQs */
1633        txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1634        for (schq = 0; schq < txsch->schq.max; schq++) {
1635                if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1636                        continue;
1637                nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1638        }
1639
1640        /* Now free scheduler queues to free pool */
1641        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1642                /* TLs above the aggregation level are shared across a PF
1643                 * and all its VFs, hence skip freeing them.
1644                 */
1645                if (lvl >= hw->cap.nix_tx_aggr_lvl)
1646                        continue;
1647
1648                txsch = &nix_hw->txsch[lvl];
1649                for (schq = 0; schq < txsch->schq.max; schq++) {
1650                        if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1651                                continue;
1652                        rvu_free_rsrc(&txsch->schq, schq);
1653                        txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1654                }
1655        }
1656        mutex_unlock(&rvu->rsrc_lock);
1657
1658        /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1659        rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1660        err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1661        if (err)
1662                dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1663
1664        return 0;
1665}
1666
1667static int nix_txschq_free_one(struct rvu *rvu,
1668                               struct nix_txsch_free_req *req)
1669{
1670        struct rvu_hwinfo *hw = rvu->hw;
1671        u16 pcifunc = req->hdr.pcifunc;
1672        int lvl, schq, nixlf, blkaddr;
1673        struct nix_txsch *txsch;
1674        struct nix_hw *nix_hw;
1675        u32 *pfvf_map;
1676
1677        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1678        if (blkaddr < 0)
1679                return NIX_AF_ERR_AF_LF_INVALID;
1680
1681        nix_hw = get_nix_hw(rvu->hw, blkaddr);
1682        if (!nix_hw)
1683                return -EINVAL;
1684
1685        nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1686        if (nixlf < 0)
1687                return NIX_AF_ERR_AF_LF_INVALID;
1688
1689        lvl = req->schq_lvl;
1690        schq = req->schq;
1691        txsch = &nix_hw->txsch[lvl];
1692
1693        if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1694                return 0;
1695
1696        pfvf_map = txsch->pfvf_map;
1697        mutex_lock(&rvu->rsrc_lock);
1698
1699        if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1700                mutex_unlock(&rvu->rsrc_lock);
1701                goto err;
1702        }
1703
1704        /* Flush if it is an SMQ. The onus of disabling
1705         * TL2/3 queue links before the SMQ flush is on the user.
1706         */
1707        if (lvl == NIX_TXSCH_LVL_SMQ)
1708                nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1709
1710        /* Free the resource */
1711        rvu_free_rsrc(&txsch->schq, schq);
1712        txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1713        mutex_unlock(&rvu->rsrc_lock);
1714        return 0;
1715err:
1716        return NIX_AF_ERR_TLX_INVALID;
1717}
1718
1719int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1720                                    struct nix_txsch_free_req *req,
1721                                    struct msg_rsp *rsp)
1722{
1723        if (req->flags & TXSCHQ_FREE_ALL)
1724                return nix_txschq_free(rvu, req->hdr.pcifunc);
1725        else
1726                return nix_txschq_free_one(rvu, req);
1727}
1728
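    /* Sanity check a TXSCHQ register write requested by a PF/VF: the low
     * 16 bits of the register offset identify the per-queue register being
     * written, the queue index embedded in the offset must belong to the
     * requester, and for *_PARENT registers the parent queue encoded in
     * bits <24:16> of the value must belong to the requester as well.
     */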
1729static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1730                                      int lvl, u64 reg, u64 regval)
1731{
1732        u64 regbase = reg & 0xFFFF;
1733        u16 schq, parent;
1734
1735        if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1736                return false;
1737
1738        schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1739        /* Check if this schq belongs to this PF/VF or not */
1740        if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1741                return false;
1742
1743        parent = (regval >> 16) & 0x1FF;
1744        /* Validate MDQ's TL4 parent */
1745        if (regbase == NIX_AF_MDQX_PARENT(0) &&
1746            !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1747                return false;
1748
1749        /* Validate TL4's TL3 parent */
1750        if (regbase == NIX_AF_TL4X_PARENT(0) &&
1751            !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1752                return false;
1753
1754        /* Validate TL3's TL2 parent */
1755        if (regbase == NIX_AF_TL3X_PARENT(0) &&
1756            !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1757                return false;
1758
1759        /* Validate TL2's TL1 parent */
1760        if (regbase == NIX_AF_TL2X_PARENT(0) &&
1761            !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1762                return false;
1763
1764        return true;
1765}
1766
1767static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1768{
1769        u64 regbase;
1770
1771        if (hw->cap.nix_shaping)
1772                return true;
1773
1774        /* If shaping and coloring are not supported, then
1775         * the *_CIR and *_PIR registers should not be configured.
1776         */
1777        regbase = reg & 0xFFFF;
1778
1779        switch (lvl) {
1780        case NIX_TXSCH_LVL_TL1:
1781                if (regbase == NIX_AF_TL1X_CIR(0))
1782                        return false;
1783                break;
1784        case NIX_TXSCH_LVL_TL2:
1785                if (regbase == NIX_AF_TL2X_CIR(0) ||
1786                    regbase == NIX_AF_TL2X_PIR(0))
1787                        return false;
1788                break;
1789        case NIX_TXSCH_LVL_TL3:
1790                if (regbase == NIX_AF_TL3X_CIR(0) ||
1791                    regbase == NIX_AF_TL3X_PIR(0))
1792                        return false;
1793                break;
1794        case NIX_TXSCH_LVL_TL4:
1795                if (regbase == NIX_AF_TL4X_CIR(0) ||
1796                    regbase == NIX_AF_TL4X_PIR(0))
1797                        return false;
1798                break;
1799        }
1800        return true;
1801}
1802
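    /* Install a default TL1 config (RR priority and quantum, CIR disabled)
     * on the queue backing this function's Tx link. Skipped if the PF has
     * already configured TL1 itself, which is tracked via the
     * NIX_TXSCHQ_CFG_DONE flag in pfvf_map.
     */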
1803static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1804                                u16 pcifunc, int blkaddr)
1805{
1806        u32 *pfvf_map;
1807        int schq;
1808
1809        schq = nix_get_tx_link(rvu, pcifunc);
1810        pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1811        /* Skip if PF has already done the config */
1812        if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1813                return;
1814        rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1815                    (TXSCH_TL1_DFLT_RR_PRIO << 1));
1816        rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1817                    TXSCH_TL1_DFLT_RR_QTM);
1818        rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1819        pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1820}
1821
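    /* Mbox handler for NIX_TXSCHQ_CFG: applies a batch of scheduler queue
     * register writes after validating ownership, hierarchy and shaping
     * support. SMQ writes get the NIXLF slot fixed up, and a requested SMQ
     * flush is carried out separately before the remaining bits are written.
     */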
1822int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1823                                    struct nix_txschq_config *req,
1824                                    struct msg_rsp *rsp)
1825{
1826        struct rvu_hwinfo *hw = rvu->hw;
1827        u16 pcifunc = req->hdr.pcifunc;
1828        u64 reg, regval, schq_regbase;
1829        struct nix_txsch *txsch;
1830        struct nix_hw *nix_hw;
1831        int blkaddr, idx, err;
1832        int nixlf, schq;
1833        u32 *pfvf_map;
1834
1835        if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1836            req->num_regs > MAX_REGS_PER_MBOX_MSG)
1837                return NIX_AF_INVAL_TXSCHQ_CFG;
1838
1839        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1840        if (err)
1841                return err;
1842
1843        nix_hw = get_nix_hw(rvu->hw, blkaddr);
1844        if (!nix_hw)
1845                return -EINVAL;
1846
1847        txsch = &nix_hw->txsch[req->lvl];
1848        pfvf_map = txsch->pfvf_map;
1849
1850        if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1851            pcifunc & RVU_PFVF_FUNC_MASK) {
1852                mutex_lock(&rvu->rsrc_lock);
1853                if (req->lvl == NIX_TXSCH_LVL_TL1)
1854                        nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1855                mutex_unlock(&rvu->rsrc_lock);
1856                return 0;
1857        }
1858
1859        for (idx = 0; idx < req->num_regs; idx++) {
1860                reg = req->reg[idx];
1861                regval = req->regval[idx];
1862                schq_regbase = reg & 0xFFFF;
1863
1864                if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1865                                               txsch->lvl, reg, regval))
1866                        return NIX_AF_INVAL_TXSCHQ_CFG;
1867
1868                /* Check if shaping and coloring are supported */
1869                if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1870                        continue;
1871
1872                /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1873                if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1874                        nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1875                                           pcifunc, 0);
1876                        regval &= ~(0x7FULL << 24);
1877                        regval |= ((u64)nixlf << 24);
1878                }
1879
1880                /* Clear 'BP_ENA' config, if it's not allowed */
1881                if (!hw->cap.nix_tx_link_bp) {
1882                        if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1883                            (schq_regbase & 0xFF00) ==
1884                            NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1885                                regval &= ~BIT_ULL(13);
1886                }
1887
1888                /* Mark config as done for TL1 by PF */
1889                if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1890                    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1891                        schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1892                        mutex_lock(&rvu->rsrc_lock);
1893                        pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
1894                                                        NIX_TXSCHQ_CFG_DONE);
1895                        mutex_unlock(&rvu->rsrc_lock);
1896                }
1897
1898                /* SMQ flush is special, hence split the register write:
1899                 * trigger the flush first, write the rest of the bits later.
1900                 */
1901                if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1902                    (regval & BIT_ULL(49))) {
1903                        schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1904                        nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1905                        regval &= ~BIT_ULL(49);
1906                }
1907                rvu_write64(rvu, blkaddr, reg, regval);
1908        }
1909
1910        return 0;
1911}
1912
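    /* Program one of the per-LF Rx VTAG type registers. Going by the bits
     * set below, the low bits hold the vtag size while bits 4 and 5 enable
     * stripping and capture of the matched tag respectively.
     */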
1913static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1914                           struct nix_vtag_config *req)
1915{
1916        u64 regval = req->vtag_size;
1917
1918        if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
1919                return -EINVAL;
1920
1921        if (req->rx.capture_vtag)
1922                regval |= BIT_ULL(5);
1923        if (req->rx.strip_vtag)
1924                regval |= BIT_ULL(4);
1925
1926        rvu_write64(rvu, blkaddr,
1927                    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1928        return 0;
1929}
1930
1931int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
1932                                  struct nix_vtag_config *req,
1933                                  struct msg_rsp *rsp)
1934{
1935        u16 pcifunc = req->hdr.pcifunc;
1936        int blkaddr, nixlf, err;
1937
1938        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1939        if (err)
1940                return err;
1941
1942        if (req->cfg_type) {
1943                err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1944                if (err)
1945                        return NIX_AF_ERR_PARAM;
1946        } else {
1947                /* TODO: handle tx vtag configuration */
1948                return 0;
1949        }
1950
1951        return 0;
1952}
1953
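    /* Write or init one multicast entry via the NIX AQ. For bcast
     * replication only the target PF_FUNC and the next-entry link matter;
     * pkts are steered to RQ0 of the target with RSS disabled.
     */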
1954static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1955                         u16 pcifunc, int next, bool eol)
1956{
1957        struct nix_aq_enq_req aq_req;
1958        int err;
1959
1960        aq_req.hdr.pcifunc = 0;
1961        aq_req.ctype = NIX_AQ_CTYPE_MCE;
1962        aq_req.op = op;
1963        aq_req.qidx = mce;
1964
1965        /* Forward bcast pkts to RQ0, RSS not needed */
1966        aq_req.mce.op = 0;
1967        aq_req.mce.index = 0;
1968        aq_req.mce.eol = eol;
1969        aq_req.mce.pf_func = pcifunc;
1970        aq_req.mce.next = next;
1971
1972        /* All fields valid */
1973        *(u64 *)(&aq_req.mce_mask) = ~0ULL;
1974
1975        err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1976        if (err) {
1977                dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1978                        rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1979                return err;
1980        }
1981        return 0;
1982}
1983
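    /* Add or delete a PF_FUNC in the software MCE list. The list mirrors
     * the HW multicast entry chain, so the caller re-writes the whole
     * chain to HW (see nix_update_bcast_mce_list()) after this returns.
     */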
1984static int nix_update_mce_list(struct nix_mce_list *mce_list,
1985                               u16 pcifunc, bool add)
1986{
1987        struct mce *mce, *tail = NULL;
1988        bool delete = false;
1989
1990        /* Scan through the current list */
1991        hlist_for_each_entry(mce, &mce_list->head, node) {
1992                /* If already exists, then delete */
1993                if (mce->pcifunc == pcifunc && !add) {
1994                        delete = true;
1995                        break;
1996                }
1997                tail = mce;
1998        }
1999
2000        if (delete) {
2001                hlist_del(&mce->node);
2002                kfree(mce);
2003                mce_list->count--;
2004                return 0;
2005        }
2006
2007        if (!add)
2008                return 0;
2009
2010        /* Add a new one to the list, at the tail */
2011        mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2012        if (!mce)
2013                return -ENOMEM;
2014        mce->pcifunc = pcifunc;
2015        if (!tail)
2016                hlist_add_head(&mce->node, &mce_list->head);
2017        else
2018                hlist_add_behind(&mce->node, &tail->node);
2019        mce_list->count++;
2020        return 0;
2021}
2022
2023static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
2024{
2025        int err = 0, idx, next_idx, last_idx;
2026        struct nix_mce_list *mce_list;
2027        struct nix_mcast *mcast;
2028        struct nix_hw *nix_hw;
2029        struct rvu_pfvf *pfvf;
2030        struct mce *mce;
2031        int blkaddr;
2032
2033        /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
2034        if (is_afvf(pcifunc))
2035                return 0;
2036
2037        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2038        if (blkaddr < 0)
2039                return 0;
2040
2041        nix_hw = get_nix_hw(rvu->hw, blkaddr);
2042        if (!nix_hw)
2043                return 0;
2044
2045        mcast = &nix_hw->mcast;
2046
2047        /* Get this PF/VF func's MCE index */
2048        pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2049        idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2050
2051        mce_list = &pfvf->bcast_mce_list;
2052        if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
2053                dev_err(rvu->dev,
2054                        "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2055                        __func__, idx, mce_list->max,
2056                        pcifunc >> RVU_PFVF_PF_SHIFT);
2057                return -EINVAL;
2058        }
2059
2060        mutex_lock(&mcast->mce_lock);
2061
2062        err = nix_update_mce_list(mce_list, pcifunc, add);
2063        if (err)
2064                goto end;
2065
2066        /* Disable MCAM entry in NPC */
2067        if (!mce_list->count) {
2068                rvu_npc_disable_bcast_entry(rvu, pcifunc);
2069                goto end;
2070        }
2071
2072        /* Dump the updated list to HW */
2073        idx = pfvf->bcast_mce_idx;
2074        last_idx = idx + mce_list->count - 1;
2075        hlist_for_each_entry(mce, &mce_list->head, node) {
2076                if (idx > last_idx)
2077                        break;
2078
2079                next_idx = idx + 1;
2080                /* EOL should be set in the last MCE */
2081                err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
2082                                    mce->pcifunc, next_idx,
2083                                    next_idx > last_idx);
2084                if (err)
2085                        goto end;
2086                idx++;
2087        }
2088
2089end:
2090        mutex_unlock(&mcast->mce_lock);
2091        return err;
2092}
2093
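    /* Pre-create one bcast MCE chain per CGX mapped PF: entry 0 for the
     * PF itself followed by one entry per possible VF. Dummy entries are
     * installed with INIT now so that later updates can always use WRITE.
     */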
2094static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2095{
2096        struct nix_mcast *mcast = &nix_hw->mcast;
2097        int err, pf, numvfs, idx;
2098        struct rvu_pfvf *pfvf;
2099        u16 pcifunc;
2100        u64 cfg;
2101
2102        /* Skip PF0 (i.e. AF) */
2103        for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2104                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2105                /* If PF is not enabled, nothing to do */
2106                if (!((cfg >> 20) & 0x01))
2107                        continue;
2108                /* Get numVFs attached to this PF */
2109                numvfs = (cfg >> 12) & 0xFF;
2110
2111                pfvf = &rvu->pf[pf];
2112                /* Save the start MCE */
2113                pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2114
2115                nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2116
2117                for (idx = 0; idx < (numvfs + 1); idx++) {
2118                        /* idx-0 is for PF, followed by VFs */
2119                        pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2120                        pcifunc |= idx;
2121                        /* Add dummy entries now, so that we don't have to
2122                         * check whether AQ_OP should be INIT/WRITE later on.
2123                         * Will be updated when a NIXLF is attached to or
2124                         * detached from these PF/VFs.
2125                         */
2126                        err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
2127                                            NIX_AQ_INSTOP_INIT,
2128                                            pcifunc, 0, true);
2129                        if (err)
2130                                return err;
2131                }
2132        }
2133        return 0;
2134}
2135
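    /* One-time multicast setup: size the MCE context and replication
     * buffer queues from the AF constant registers, point the HW at their
     * IOVAs and reserve a pkind for internally replayed multicast/mirror
     * pkts.
     */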
2136static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2137{
2138        struct nix_mcast *mcast = &nix_hw->mcast;
2139        struct rvu_hwinfo *hw = rvu->hw;
2140        int err, size;
2141
2142        size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2143        size = (1ULL << size);
2144
2145        /* Alloc memory for multicast/mirror replication entries */
2146        err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2147                         (256UL << MC_TBL_SIZE), size);
2148        if (err)
2149                return -ENOMEM;
2150
2151        rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2152                    (u64)mcast->mce_ctx->iova);
2153
2154        /* Set max list length to the max number of VFs per PF + the PF itself */
2155        rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2156                    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2157
2158        /* Alloc memory for multicast replication buffers */
2159        size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2160        err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2161                         (8UL << MC_BUF_CNT), size);
2162        if (err)
2163                return -ENOMEM;
2164
2165        rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2166                    (u64)mcast->mcast_buf->iova);
2167
2168        /* Alloc pkind for NIX internal RX multicast/mirror replay */
2169        mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2170
2171        rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2172                    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2173                    BIT_ULL(20) | MC_BUF_CNT);
2174
2175        mutex_init(&mcast->mce_lock);
2176
2177        return nix_setup_bcast_tables(rvu, nix_hw);
2178}
2179
2180static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2181{
2182        struct nix_txsch *txsch;
2183        int err, lvl, schq;
2184        u64 cfg, reg;
2185
2186        /* Get the scheduler queue count of each type and allocate
2187         * a bitmap for each, used for alloc/free/attach operations.
2188         */
2189        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2190                txsch = &nix_hw->txsch[lvl];
2191                txsch->lvl = lvl;
2192                switch (lvl) {
2193                case NIX_TXSCH_LVL_SMQ:
2194                        reg = NIX_AF_MDQ_CONST;
2195                        break;
2196                case NIX_TXSCH_LVL_TL4:
2197                        reg = NIX_AF_TL4_CONST;
2198                        break;
2199                case NIX_TXSCH_LVL_TL3:
2200                        reg = NIX_AF_TL3_CONST;
2201                        break;
2202                case NIX_TXSCH_LVL_TL2:
2203                        reg = NIX_AF_TL2_CONST;
2204                        break;
2205                case NIX_TXSCH_LVL_TL1:
2206                        reg = NIX_AF_TL1_CONST;
2207                        break;
2208                }
2209                cfg = rvu_read64(rvu, blkaddr, reg);
2210                txsch->schq.max = cfg & 0xFFFF;
2211                err = rvu_alloc_bitmap(&txsch->schq);
2212                if (err)
2213                        return err;
2214
2215                /* Allocate memory for the scheduler queue to
2216                 * PF/VF pcifunc mapping info.
2217                 */
2218                txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2219                                               sizeof(u32), GFP_KERNEL);
2220                if (!txsch->pfvf_map)
2221                        return -ENOMEM;
2222                for (schq = 0; schq < txsch->schq.max; schq++)
2223                        txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2224        }
2225        return 0;
2226}
2227
2228int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2229                                int blkaddr, u32 cfg)
2230{
2231        int fmt_idx;
2232
2233        for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2234                if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2235                        return fmt_idx;
2236        }
2237        if (fmt_idx >= nix_hw->mark_format.total)
2238                return -ERANGE;
2239
2240        rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2241        nix_hw->mark_format.cfg[fmt_idx] = cfg;
2242        nix_hw->mark_format.in_use++;
2243        return fmt_idx;
2244}
2245
2246static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2247                                    int blkaddr)
2248{
2249        u64 cfgs[] = {
2250                [NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
2251                [NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
2252                [NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
2253                [NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
2254                [NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
2255                [NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
2256                [NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
2257                [NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
2258                [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2259        };
2260        int i, rc;
2261        u64 total;
2262
2263        total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2264        nix_hw->mark_format.total = (u8)total;
2265        nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2266                                               GFP_KERNEL);
2267        if (!nix_hw->mark_format.cfg)
2268                return -ENOMEM;
2269        for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2270                rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2271                if (rc < 0)
2272                        dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2273                                rc, i);
2274        }
2275
2276        return 0;
2277}
2278
2279int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2280                                   struct msg_rsp *rsp)
2281{
2282        u16 pcifunc = req->hdr.pcifunc;
2283        int i, nixlf, blkaddr, err;
2284        u64 stats;
2285
2286        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2287        if (err)
2288                return err;
2289
2290        /* Get stats count supported by HW */
2291        stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2292
2293        /* Reset tx stats */
2294        for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2295                rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2296
2297        /* Reset rx stats */
2298        for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2299                rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2300
2301        return 0;
2302}
2303
2304/* Returns the ALG index to be set into NPC_RX_ACTION */
2305static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2306{
2307        int i;
2308
2309        /* Scan over existing algo entries to find a match */
2310        for (i = 0; i < nix_hw->flowkey.in_use; i++)
2311                if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2312                        return i;
2313
2314        return -ERANGE;
2315}
2316
2317static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2318{
2319        int idx, nr_field, key_off, field_marker, keyoff_marker;
2320        int max_key_off, max_bit_pos, group_member;
2321        struct nix_rx_flowkey_alg *field;
2322        struct nix_rx_flowkey_alg tmp;
2323        u32 key_type, valid_key;
2324
2325        if (!alg)
2326                return -EINVAL;
2327
2328#define FIELDS_PER_ALG  5
2329#define MAX_KEY_OFF     40
2330        /* Clear all fields */
2331        memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2332
2333        /* Each of the 32 possible flow key algorithm definitions should
2334         * fall into the above incremental config (except ALG0). Otherwise a
2335         * single NPC MCAM entry is not sufficient for supporting RSS.
2336         *
2337         * If a different definition or combination is needed then the NPC
2338         * MCAM has to be programmed to filter such pkts and its action
2339         * should point to this definition to calculate the flowtag or hash.
2340         *
2341         * The 'for' loop goes over _all_ protocol fields and the following
2342         * variables depict the state machine's forward progress logic.
2343         *
2344         * keyoff_marker - Enabled when hash byte length needs to be accounted
2345         * in field->key_offset update.
2346         * field_marker - Enabled when a new field needs to be selected.
2347         * group_member - Enabled when protocol is part of a group.
2348         */
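
            /* Illustrative walk-through (not from the HW spec): with
             * flow_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_TCP the
             * loop places the IPv4 SIP+DIP field (8 bytes) at key_offset 0
             * and the TCP sport+dport field (4 bytes) at key_offset 8,
             * yielding a 12 byte hash key, well within MAX_KEY_OFF.
             */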
2349
2350        keyoff_marker = 0; max_key_off = 0; group_member = 0;
2351        nr_field = 0; key_off = 0; field_marker = 1;
2352        field = &tmp; max_bit_pos = fls(flow_cfg);
2353        for (idx = 0;
2354             idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2355             key_off < MAX_KEY_OFF; idx++) {
2356                key_type = BIT(idx);
2357                valid_key = flow_cfg & key_type;
2358                /* Found a field marker, reset the field values */
2359                if (field_marker)
2360                        memset(&tmp, 0, sizeof(tmp));
2361
2362                field_marker = true;
2363                keyoff_marker = true;
2364                switch (key_type) {
2365                case NIX_FLOW_KEY_TYPE_PORT:
2366                        field->sel_chan = true;
2367                        /* This should be set to 1, when SEL_CHAN is set */
2368                        field->bytesm1 = 1;
2369                        break;
2370                case NIX_FLOW_KEY_TYPE_IPV4:
2371                case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2372                        field->lid = NPC_LID_LC;
2373                        field->ltype_match = NPC_LT_LC_IP;
2374                        if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2375                                field->lid = NPC_LID_LG;
2376                                field->ltype_match = NPC_LT_LG_TU_IP;
2377                        }
2378                        field->hdr_offset = 12; /* SIP offset */
2379                        field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2380                        field->ltype_mask = 0xF; /* Match only IPv4 */
2381                        keyoff_marker = false;
2382                        break;
2383                case NIX_FLOW_KEY_TYPE_IPV6:
2384                case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2385                        field->lid = NPC_LID_LC;
2386                        field->ltype_match = NPC_LT_LC_IP6;
2387                        if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2388                                field->lid = NPC_LID_LG;
2389                                field->ltype_match = NPC_LT_LG_TU_IP6;
2390                        }
2391                        field->hdr_offset = 8; /* SIP offset */
2392                        field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2393                        field->ltype_mask = 0xF; /* Match only IPv6 */
2394                        break;
2395                case NIX_FLOW_KEY_TYPE_TCP:
2396                case NIX_FLOW_KEY_TYPE_UDP:
2397                case NIX_FLOW_KEY_TYPE_SCTP:
2398                case NIX_FLOW_KEY_TYPE_INNR_TCP:
2399                case NIX_FLOW_KEY_TYPE_INNR_UDP:
2400                case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2401                        field->lid = NPC_LID_LD;
2402                        if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2403                            key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2404                            key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2405                                field->lid = NPC_LID_LH;
2406                        field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2407
2408                        /* Ltype values for NPC_LID_LD and NPC_LID_LH are
2409                         * the same, so no need to change ltype_match, just
2410                         * change the lid for inner protocols.
2411                         */
2412                        BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2413                                     (int)NPC_LT_LH_TU_TCP);
2414                        BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2415                                     (int)NPC_LT_LH_TU_UDP);
2416                        BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2417                                     (int)NPC_LT_LH_TU_SCTP);
2418
2419                        if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2420                             key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2421                            valid_key) {
2422                                field->ltype_match |= NPC_LT_LD_TCP;
2423                                group_member = true;
2424                        } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2425                                    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2426                                   valid_key) {
2427                                field->ltype_match |= NPC_LT_LD_UDP;
2428                                group_member = true;
2429                        } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2430                                    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2431                                   valid_key) {
2432                                field->ltype_match |= NPC_LT_LD_SCTP;
2433                                group_member = true;
2434                        }
2435                        field->ltype_mask = ~field->ltype_match;
2436                        if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2437                            key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2438                                /* Handle the case where any item in the
2439                                 * group is enabled but not the final one.
2440                                 */
2441                                if (group_member) {
2442                                        valid_key = true;
2443                                        group_member = false;
2444                                }
2445                        } else {
2446                                field_marker = false;
2447                                keyoff_marker = false;
2448                        }
2449                        break;
2450                case NIX_FLOW_KEY_TYPE_NVGRE:
2451                        field->lid = NPC_LID_LD;
2452                        field->hdr_offset = 4; /* VSID offset */
2453                        field->bytesm1 = 2;
2454                        field->ltype_match = NPC_LT_LD_NVGRE;
2455                        field->ltype_mask = 0xF;
2456                        break;
2457                case NIX_FLOW_KEY_TYPE_VXLAN:
2458                case NIX_FLOW_KEY_TYPE_GENEVE:
2459                        field->lid = NPC_LID_LE;
2460                        field->bytesm1 = 2;
2461                        field->hdr_offset = 4;
2462                        field->ltype_mask = 0xF;
2463                        field_marker = false;
2464                        keyoff_marker = false;
2465
2466                        if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2467                                field->ltype_match |= NPC_LT_LE_VXLAN;
2468                                group_member = true;
2469                        }
2470
2471                        if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2472                                field->ltype_match |= NPC_LT_LE_GENEVE;
2473                                group_member = true;
2474                        }
2475
2476                        if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2477                                if (group_member) {
2478                                        field->ltype_mask = ~field->ltype_match;
2479                                        field_marker = true;
2480                                        keyoff_marker = true;
2481                                        valid_key = true;
2482                                        group_member = false;
2483                                }
2484                        }
2485                        break;
2486                case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2487                case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2488                        field->lid = NPC_LID_LA;
2489                        field->ltype_match = NPC_LT_LA_ETHER;
2490                        if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2491                                field->lid = NPC_LID_LF;
2492                                field->ltype_match = NPC_LT_LF_TU_ETHER;
2493                        }
2494                        field->hdr_offset = 0;
2495                        field->bytesm1 = 5; /* DMAC 6 Byte */
2496                        field->ltype_mask = 0xF;
2497                        break;
2498                case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2499                        field->lid = NPC_LID_LC;
2500                        field->hdr_offset = 40; /* IPV6 hdr */
2501                        field->bytesm1 = 0; /* 1 Byte ext hdr */
2502                        field->ltype_match = NPC_LT_LC_IP6_EXT;
2503                        field->ltype_mask = 0xF;
2504                        break;
2505                case NIX_FLOW_KEY_TYPE_GTPU:
2506                        field->lid = NPC_LID_LE;
2507                        field->hdr_offset = 4;
2508                        field->bytesm1 = 3; /* 4 bytes TID */
2509                        field->ltype_match = NPC_LT_LE_GTPU;
2510                        field->ltype_mask = 0xF;
2511                        break;
2512                }
2513                field->ena = 1;
2514
2515                /* Found a valid flow key type */
2516                if (valid_key) {
2517                        field->key_offset = key_off;
2518                        memcpy(&alg[nr_field], field, sizeof(*field));
2519                        max_key_off = max(max_key_off, field->bytesm1 + 1);
2520
2521                        /* Found a field marker, get the next field */
2522                        if (field_marker)
2523                                nr_field++;
2524                }
2525
2526                /* Found a keyoff marker, update the new key_off */
2527                if (keyoff_marker) {
2528                        key_off += max_key_off;
2529                        max_key_off = 0;
2530                }
2531        }
2532        /* Processed all the flow key types */
2533        if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2534                return 0;
2535        else
2536                return NIX_AF_ERR_RSS_NOSPC_FIELD;
2537}
2538
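    /* Reserve the next free RSS flow key algorithm index and program its
     * NIX_AF_RX_FLOW_KEY_ALGX_FIELDX() registers for the given flow_cfg.
     * The cfg is cached in SW so that identical requests can later be
     * mapped back to the same ALG index instead of consuming another one.
     */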
2539static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2540{
2541        u64 field[FIELDS_PER_ALG];
2542        struct nix_hw *hw;
2543        int fid, rc;
2544
2545        hw = get_nix_hw(rvu->hw, blkaddr);
2546        if (!hw)
2547                return -EINVAL;
2548
2549        /* No room to add a new flow hash algorithm */
2550        if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2551                return NIX_AF_ERR_RSS_NOSPC_ALGO;
2552
2553        /* Generate algo fields for the given flow_cfg */
2554        rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2555        if (rc)
2556                return rc;
2557
2558        /* Update ALGX_FIELDX register with generated fields */
2559        for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2560                rvu_write64(rvu, blkaddr,
2561                            NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2562                                                           fid), field[fid]);
2563
2564        /* Store the flow_cfg for further lookups */
2565        rc = hw->flowkey.in_use;
2566        hw->flowkey.flowkey[rc] = flow_cfg;
2567        hw->flowkey.in_use++;
2568
2569        return rc;
2570}
2571
2572int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2573                                         struct nix_rss_flowkey_cfg *req,
2574                                         struct nix_rss_flowkey_cfg_rsp *rsp)
2575{
2576        u16 pcifunc = req->hdr.pcifunc;
2577        int alg_idx, nixlf, blkaddr;
2578        struct nix_hw *nix_hw;
2579        int err;
2580
2581        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2582        if (err)
2583                return err;
2584
2585        nix_hw = get_nix_hw(rvu->hw, blkaddr);
2586        if (!nix_hw)
2587                return -EINVAL;
2588
2589        alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
2590        /* Failed to get algo index from the existing list, reserve a new one */
2591        if (alg_idx < 0) {
2592                alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2593                                                  req->flowkey_cfg);
2594                if (alg_idx < 0)
2595                        return alg_idx;
2596        }
2597        rsp->alg_idx = alg_idx;
2598        rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2599                                       alg_idx, req->mcam_index);
2600        return 0;
2601}
2602
2603static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2604{
2605        u32 flowkey_cfg, minkey_cfg;
2606        int alg, fid, rc;
2607
2608        /* Disable all flow key ALGX_FIELDX registers */
2609        for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2610                for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2611                        rvu_write64(rvu, blkaddr,
2612                                    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2613                                    0);
2614        }
2615
2616        /* IPv4/IPv6 SIP/DIPs */
2617        flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2618        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2619        if (rc < 0)
2620                return rc;
2621
2622        /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2623        minkey_cfg = flowkey_cfg;
2624        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2625        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2626        if (rc < 0)
2627                return rc;
2628
2629        /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2630        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2631        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2632        if (rc < 0)
2633                return rc;
2634
2635        /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2636        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2637        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2638        if (rc < 0)
2639                return rc;
2640
2641        /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2642        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2643                        NIX_FLOW_KEY_TYPE_UDP;
2644        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2645        if (rc < 0)
2646                return rc;
2647
2648        /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2649        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2650                        NIX_FLOW_KEY_TYPE_SCTP;
2651        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2652        if (rc < 0)
2653                return rc;
2654
2655        /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2656        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
2657                        NIX_FLOW_KEY_TYPE_SCTP;
2658        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2659        if (rc < 0)
2660                return rc;
2661
2662        /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2663        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2664                      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
2665        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2666        if (rc < 0)
2667                return rc;
2668
2669        return 0;
2670}
2671
2672int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
2673                                      struct nix_set_mac_addr *req,
2674                                      struct msg_rsp *rsp)
2675{
2676        u16 pcifunc = req->hdr.pcifunc;
2677        int blkaddr, nixlf, err;
2678        struct rvu_pfvf *pfvf;
2679
2680        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2681        if (err)
2682                return err;
2683
2684        pfvf = rvu_get_pfvf(rvu, pcifunc);
2685
2686        ether_addr_copy(pfvf->mac_addr, req->mac_addr);
2687
2688        rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
2689                                    pfvf->rx_chan_base, req->mac_addr);
2690
2691        rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2692
2693        return 0;
2694}
2695
2696int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
2697                                      struct msg_req *req,
2698                                      struct nix_get_mac_addr_rsp *rsp)
2699{
2700        u16 pcifunc = req->hdr.pcifunc;
2701        struct rvu_pfvf *pfvf;
2702
2703        if (!is_nixlf_attached(rvu, pcifunc))
2704                return NIX_AF_ERR_AF_LF_INVALID;
2705
2706        pfvf = rvu_get_pfvf(rvu, pcifunc);
2707
2708        ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
2709
2710        return 0;
2711}
2712
2713int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
2714                                     struct msg_rsp *rsp)
2715{
2716        bool allmulti = false, disable_promisc = false;
2717        u16 pcifunc = req->hdr.pcifunc;
2718        int blkaddr, nixlf, err;
2719        struct rvu_pfvf *pfvf;
2720
2721        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2722        if (err)
2723                return err;
2724
2725        pfvf = rvu_get_pfvf(rvu, pcifunc);
2726
2727        if (req->mode & NIX_RX_MODE_PROMISC)
2728                allmulti = false;
2729        else if (req->mode & NIX_RX_MODE_ALLMULTI)
2730                allmulti = true;
2731        else
2732                disable_promisc = true;
2733
2734        if (disable_promisc)
2735                rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
2736        else
2737                rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
2738                                              pfvf->rx_chan_base, allmulti);
2739
2740        rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2741
2742        return 0;
2743}
2744
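    /* An RX link's min/max frame size config is shared by the PF and all
     * its VFs, so widen the requested range to also cover the lengths
     * already requested by the sibling functions on the same link.
     */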
2745static void nix_find_link_frs(struct rvu *rvu,
2746                              struct nix_frs_cfg *req, u16 pcifunc)
2747{
2748        int pf = rvu_get_pf(pcifunc);
2749        struct rvu_pfvf *pfvf;
2750        int maxlen, minlen;
2751        int numvfs, hwvf;
2752        int vf;
2753
2754        /* Update with requester's min/max lengths */
2755        pfvf = rvu_get_pfvf(rvu, pcifunc);
2756        pfvf->maxlen = req->maxlen;
2757        if (req->update_minlen)
2758                pfvf->minlen = req->minlen;
2759
2760        maxlen = req->maxlen;
2761        minlen = req->update_minlen ? req->minlen : 0;
2762
2763        /* Get this PF's numVFs and starting hwvf */
2764        rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
2765
2766        /* For each VF, compare requested max/minlen */
2767        for (vf = 0; vf < numvfs; vf++) {
2768                pfvf =  &rvu->hwvf[hwvf + vf];
2769                if (pfvf->maxlen > maxlen)
2770                        maxlen = pfvf->maxlen;
2771                if (req->update_minlen &&
2772                    pfvf->minlen && pfvf->minlen < minlen)
2773                        minlen = pfvf->minlen;
2774        }
2775
2776        /* Compare requested max/minlen with PF's max/minlen */
2777        pfvf = &rvu->pf[pf];
2778        if (pfvf->maxlen > maxlen)
2779                maxlen = pfvf->maxlen;
2780        if (req->update_minlen &&
2781            pfvf->minlen && pfvf->minlen < minlen)
2782                minlen = pfvf->minlen;
2783
2784        /* Update the request with the max/min across the PF and its VFs */
2785        req->maxlen = maxlen;
2786        if (req->update_minlen)
2787                req->minlen = minlen;
2788}
2789
2790int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
2791                                    struct msg_rsp *rsp)
2792{
2793        struct rvu_hwinfo *hw = rvu->hw;
2794        u16 pcifunc = req->hdr.pcifunc;
2795        int pf = rvu_get_pf(pcifunc);
2796        int blkaddr, schq, link = -1;
2797        struct nix_txsch *txsch;
2798        u64 cfg, lmac_fifo_len;
2799        struct nix_hw *nix_hw;
2800        u8 cgx = 0, lmac = 0;
2801
2802        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2803        if (blkaddr < 0)
2804                return NIX_AF_ERR_AF_LF_INVALID;
2805
2806        nix_hw = get_nix_hw(rvu->hw, blkaddr);
2807        if (!nix_hw)
2808                return -EINVAL;
2809
2810        if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
2811                return NIX_AF_ERR_FRS_INVALID;
2812
2813        if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
2814                return NIX_AF_ERR_FRS_INVALID;
2815
2816        /* Check if requester wants to update SMQs */
2817        if (!req->update_smq)
2818                goto rx_frscfg;
2819
2820        /* Update min/maxlen in each of the SMQs attached to this PF/VF */
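        /* Per the masks used below, NIX_AF_SMQX_CFG presumably carries
         * MAXLEN in bits [23:8] and MINLEN in bits [6:0].
         */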
2821        txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2822        mutex_lock(&rvu->rsrc_lock);
2823        for (schq = 0; schq < txsch->schq.max; schq++) {
2824                if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2825                        continue;
2826                cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
2827                cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
2828                if (req->update_minlen)
2829                        cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
2830                rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
2831        }
2832        mutex_unlock(&rvu->rsrc_lock);
2833
2834rx_frscfg:
2835        /* Check if config is for SDP link */
2836        if (req->sdp_link) {
2837                if (!hw->sdp_links)
2838                        return NIX_AF_ERR_RX_LINK_INVALID;
2839                link = hw->cgx_links + hw->lbk_links;
2840                goto linkcfg;
2841        }
2842
2843        /* Check if the request is from CGX mapped RVU PF */
2844        if (is_pf_cgxmapped(rvu, pf)) {
2845                /* Get CGX and LMAC to which this PF is mapped and find link */
2846                rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
2847                link = (cgx * hw->lmac_per_cgx) + lmac;
2848        } else if (pf == 0) {
2849                /* For VFs of PF0, ingress is via the LBK port, so config the LBK link */
2850                link = hw->cgx_links;
2851        }
2852
2853        if (link < 0)
2854                return NIX_AF_ERR_RX_LINK_INVALID;
2855
2856        nix_find_link_frs(rvu, req, pcifunc);
2857
2858linkcfg:
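        /* Judging by the masks below, NIX_AF_RX_LINKX_CFG holds MAXLEN in
         * bits [31:16] and MINLEN in bits [15:0].
         */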
2859        cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
2860        cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
2861        if (req->update_minlen)
2862                cfg = (cfg & ~0xFFFFULL) | req->minlen;
2863        rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
2864
2865        if (req->sdp_link || pf == 0)
2866                return 0;
2867
2868        /* Update transmit credits for CGX links */
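        /* Credits are expressed in 16-byte units of the LMAC FIFO space
         * left after one max-sized frame. Illustrative arithmetic, assuming
         * a 64KB CGX FIFO shared by 4 LMACs: a 1518-byte maxlen leaves
         * (65536 / 4 - 1518) / 16 = 929 credits (integer division).
         */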
2869        lmac_fifo_len =
2870                CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2871        cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
2872        cfg &= ~(0xFFFFFULL << 12);
2873        cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
2874        rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
2875        return 0;
2876}
2877
2878int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
2879                                      struct msg_rsp *rsp)
2880{
2881        struct npc_mcam_alloc_entry_req alloc_req = { };
2882        struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
2883        struct npc_mcam_free_entry_req free_req = { };
2884        u16 pcifunc = req->hdr.pcifunc;
2885        int blkaddr, nixlf, err;
2886        struct rvu_pfvf *pfvf;
2887
2888        /* LBK VFs do not have a separate MCAM UCAST entry, hence
2889         * skip allocating a rxvlan entry for them.
2890         */
2891        if (is_afvf(pcifunc))
2892                return 0;
2893
2894        pfvf = rvu_get_pfvf(rvu, pcifunc);
2895        if (pfvf->rxvlan)
2896                return 0;
2897
2898        /* Alloc a new MCAM entry */
2899        alloc_req.hdr.pcifunc = pcifunc;
2900        alloc_req.count = 1;
2901
2902        err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
2903                                                    &alloc_rsp);
2904        if (err)
2905                return err;
2906
2907        /* update entry to enable rxvlan offload */
2908        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2909        if (blkaddr < 0) {
2910                err = NIX_AF_ERR_AF_LF_INVALID;
2911                goto free_entry;
2912        }
2913
2914        nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
2915        if (nixlf < 0) {
2916                err = NIX_AF_ERR_AF_LF_INVALID;
2917                goto free_entry;
2918        }
2919
2920        pfvf->rxvlan_index = alloc_rsp.entry_list[0];
2921        /* This flag only indicates that rxvlan_index is valid */
2922        pfvf->rxvlan = true;
2923
2924        err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2925        if (err)
2926                goto free_entry;
2927
2928        return 0;
2929free_entry:
2930        free_req.hdr.pcifunc = pcifunc;
2931        free_req.entry = alloc_rsp.entry_list[0];
2932        rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
2933        pfvf->rxvlan = false;
2934        return err;
2935}
2936
2937int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
2938                                    struct msg_rsp *rsp)
2939{
2940        int nixlf, blkaddr, err;
2941        u64 cfg;
2942
2943        err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
2944        if (err)
2945                return err;
2946
2947        cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
2948        /* Set the interface configuration */
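        /* The bits below map to the request flags from mbox.h:
         * bit 41 <- len_verify BIT(0) (NIX_RX_OL3_VERIFY, outer L3 length),
         * bit 40 <- len_verify BIT(1) (NIX_RX_OL4_VERIFY, outer L4 length),
         * bit 37 <- csum_verify BIT(0) (NIX_RX_CSUM_OL4_VERIFY, outer L4 csum).
         */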
2949        if (req->len_verify & BIT(0))
2950                cfg |= BIT_ULL(41);
2951        else
2952                cfg &= ~BIT_ULL(41);
2953
2954        if (req->len_verify & BIT(1))
2955                cfg |= BIT_ULL(40);
2956        else
2957                cfg &= ~BIT_ULL(40);
2958
2959        if (req->csum_verify & BIT(0))
2960                cfg |= BIT_ULL(37);
2961        else
2962                cfg &= ~BIT_ULL(37);
2963
2964        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
2965
2966        return 0;
2967}
2968
2969static void nix_link_config(struct rvu *rvu, int blkaddr)
2970{
2971        struct rvu_hwinfo *hw = rvu->hw;
2972        int cgx, lmac_cnt, slink, link;
2973        u64 tx_credits;
2974
2975        /* Set default min/max packet lengths allowed on NIX Rx links.
2976         *
2977         * With the HW reset minlen value of 60 bytes, HW would treat ARP
2978         * pkts as undersized and report them to SW as error pkts; hence
2979         * set minlen to 40 bytes.
2980         */
2981        for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
2982                rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2983                            NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2984        }
2985
2986        if (hw->sdp_links) {
2987                link = hw->cgx_links + hw->lbk_links;
2988                rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2989                            SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2990        }
2991
2992        /* Set credits for Tx links assuming max packet length allowed.
2993         * This will be reconfigured based on MTU set for PF/VF.
2994         */
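        /* From the encoding below, NIX_AF_TX_LINKX_NORM_CREDIT appears to
         * hold the 16-byte-unit credit count in bits [31:12], a credit
         * packet count in bits [10:2] (set to the max, 0x1FF) and a credit
         * enable in bit 1.
         */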
2995        for (cgx = 0; cgx < hw->cgx; cgx++) {
2996                lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2997                tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
2998                /* Enable credits and set credit pkt count to max allowed */
2999                tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3000                slink = cgx * hw->lmac_per_cgx;
3001                for (link = slink; link < (slink + lmac_cnt); link++) {
3002                        rvu_write64(rvu, blkaddr,
3003                                    NIX_AF_TX_LINKX_NORM_CREDIT(link),
3004                                    tx_credits);
3005                }
3006        }
3007
3008        /* Set Tx credits for LBK link */
3009        slink = hw->cgx_links;
3010        for (link = slink; link < (slink + hw->lbk_links); link++) {
3011                tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
3012                /* Enable credits and set credit pkt count to max allowed */
3013                tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3014                rvu_write64(rvu, blkaddr,
3015                            NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3016        }
3017}
3018
3019static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3020{
3021        int idx, err;
3022        u64 status;
3023
3024        /* Start X2P bus calibration */
3025        rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3026                    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3027        /* Wait for calibration to complete */
3028        err = rvu_poll_reg(rvu, blkaddr,
3029                           NIX_AF_STATUS, BIT_ULL(10), false);
3030        if (err) {
3031                dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3032                return err;
3033        }
3034
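        /* From the checks below: NIX_AF_STATUS bits [9:0] presumably carry
         * calibration error flags, bits 16..(16 + cgx_cnt_max - 1) per-CGX
         * readiness and bit 19 LBK readiness.
         */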
3035        status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3036        /* Check if CGX devices are ready */
3037        for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3038                /* Skip when cgx port is not available */
3039                if (!rvu_cgx_pdata(idx, rvu) ||
3040                    (status & (BIT_ULL(16 + idx))))
3041                        continue;
3042                dev_err(rvu->dev,
3043                        "CGX%d didn't respond to NIX X2P calibration\n", idx);
3044                err = -EBUSY;
3045        }
3046
3047        /* Check if LBK is ready */
3048        if (!(status & BIT_ULL(19))) {
3049                dev_err(rvu->dev,
3050                        "LBK didn't respond to NIX X2P calibration\n");
3051                err = -EBUSY;
3052        }
3053
3054        /* Clear 'calibrate_x2p' bit */
3055        rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3056                    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3057        if (err || (status & 0x3FFULL))
3058                dev_err(rvu->dev,
3059                        "NIX X2P calibration failed, status 0x%llx\n", status);
3060        if (err)
3061                return err;
3062        return 0;
3063}
3064
3065static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3066{
3067        u64 cfg;
3068        int err;
3069
3070        /* Set admin queue endianness */
3071        cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3072#ifdef __BIG_ENDIAN
3073        cfg |= BIT_ULL(8);
3074        rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3075#else
3076        cfg &= ~BIT_ULL(8);
3077        rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3078#endif
3079
3080        /* Do not bypass NDC cache */
3081        cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3082        cfg &= ~0x3FFEULL;
3083#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3084        /* Disable caching of SQB aka SQEs */
3085        cfg |= 0x04ULL;
3086#endif
3087        rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3088
3089        /* Result structure can be followed by RQ/SQ/CQ context at
3090         * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3091         * operation type. Alloc sufficient result memory for all operations.
3092         */
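        /* Per-result memory layout implied by the sizing below:
         *   RES + 0:   struct nix_aq_res_s (padded to a 128B boundary)
         *   RES + 128: RQ/SQ/CQ context (up to 128B)
         *   RES + 256: write mask (up to 128B)
         */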
3093        err = rvu_aq_alloc(rvu, &block->aq,
3094                           Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3095                           ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3096        if (err)
3097                return err;
3098
3099        rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3100        rvu_write64(rvu, block->addr,
3101                    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3102        return 0;
3103}
3104
3105int rvu_nix_init(struct rvu *rvu)
3106{
3107        struct rvu_hwinfo *hw = rvu->hw;
3108        struct rvu_block *block;
3109        int blkaddr, err;
3110        u64 cfg;
3111
3112        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3113        if (blkaddr < 0)
3114                return 0;
3115        block = &hw->block[blkaddr];
3116
3117        if (is_rvu_96xx_B0(rvu)) {
3118                /* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
3119                 * internal state when conditional clocks are turned off.
3120                 * Hence enable them.
3121                 */
3122                rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3123                            rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3124
3125                /* Set chan/link to backpressure TL3 instead of TL2 */
3126                rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3127
3128                /* Disable SQ manager's sticky mode operation (set TM6 = 0).
3129                 * This sticky mode is known to cause SQ stalls when multiple
3130                 * SQs mapped to the same SMQ transmit pkts at the same time.
3131                 */
3132                cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3133                cfg &= ~BIT_ULL(15);
3134                rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3135        }
3136
3137        /* Calibrate X2P bus to check if CGX/LBK links are fine */
3138        err = nix_calibrate_x2p(rvu, blkaddr);
3139        if (err)
3140                return err;
3141
3142        /* Set num of links of each type */
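        /* Per the shifts below, NIX_AF_CONST reports the number of CGX
         * blocks in bits [15:12] and LMACs per CGX in bits [11:8].
         */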
3143        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
3144        hw->cgx = (cfg >> 12) & 0xF;
3145        hw->lmac_per_cgx = (cfg >> 8) & 0xF;
3146        hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
3147        hw->lbk_links = 1;
3148        hw->sdp_links = 1;
3149
3150        /* Initialize admin queue */
3151        err = nix_aq_init(rvu, block);
3152        if (err)
3153                return err;
3154
3155        /* Restore CINT timer delay to HW reset values */
3156        rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3157
3158        if (blkaddr == BLKADDR_NIX0) {
3159                hw->nix0 = devm_kzalloc(rvu->dev,
3160                                        sizeof(struct nix_hw), GFP_KERNEL);
3161                if (!hw->nix0)
3162                        return -ENOMEM;
3163
3164                err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
3165                if (err)
3166                        return err;
3167
3168                err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
3169                if (err)
3170                        return err;
3171
3172                err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
3173                if (err)
3174                        return err;
3175
3176                /* Configure segmentation offload formats */
3177                nix_setup_lso(rvu, hw->nix0, blkaddr);
3178
3179                /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3180                 * This helps HW protocol checker to identify headers
3181                 * and validate length and checksums.
3182                 */
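                /* Each NIX_AF_RX_DEF_* value below encodes the NPC layer
                 * id in bits [15:8], the layer type in bits [7:4] and,
                 * presumably, a 4-bit layer-type match mask (0x0F) in
                 * bits [3:0].
                 */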
3183                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3184                            (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
3185                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3186                            (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
3187                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3188                            (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
3189                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3190                            (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
3191                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3192                            (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
3193                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3194                            (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
3195                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3196                            (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
3197                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3198                            (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
3199                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3200                            (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
3201                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3202                            (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
3203                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3204                            (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
3205                            0x0F);
3206
3207                err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3208                if (err)
3209                        return err;
3210
3211                /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3212                nix_link_config(rvu, blkaddr);
3213
3214                /* Enable Channel backpressure */
3215                rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3216        }
3217        return 0;
3218}
3219
3220void rvu_nix_freemem(struct rvu *rvu)
3221{
3222        struct rvu_hwinfo *hw = rvu->hw;
3223        struct rvu_block *block;
3224        struct nix_txsch *txsch;
3225        struct nix_mcast *mcast;
3226        struct nix_hw *nix_hw;
3227        int blkaddr, lvl;
3228
3229        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3230        if (blkaddr < 0)
3231                return;
3232
3233        block = &hw->block[blkaddr];
3234        rvu_aq_free(rvu, block->aq);
3235
3236        if (blkaddr == BLKADDR_NIX0) {
3237                nix_hw = get_nix_hw(rvu->hw, blkaddr);
3238                if (!nix_hw)
3239                        return;
3240
3241                for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3242                        txsch = &nix_hw->txsch[lvl];
3243                        kfree(txsch->schq.bmap);
3244                }
3245
3246                mcast = &nix_hw->mcast;
3247                qmem_free(rvu->dev, mcast->mce_ctx);
3248                qmem_free(rvu->dev, mcast->mcast_buf);
3249                mutex_destroy(&mcast->mce_lock);
3250        }
3251}
3252
3253int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3254                                     struct msg_rsp *rsp)
3255{
3256        u16 pcifunc = req->hdr.pcifunc;
3257        int nixlf, err;
3258
3259        err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3260        if (err)
3261                return err;
3262
3263        rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3264
3265        return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3266}
3267
3268int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3269                                    struct msg_rsp *rsp)
3270{
3271        u16 pcifunc = req->hdr.pcifunc;
3272        int nixlf, err;
3273
3274        err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3275        if (err)
3276                return err;
3277
3278        rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
3279
3280        return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3281}
3282
3283void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3284{
3285        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3286        struct hwctx_disable_req ctx_req;
3287        int err;
3288
3289        ctx_req.hdr.pcifunc = pcifunc;
3290
3291        /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3292        nix_interface_deinit(rvu, pcifunc, nixlf);
3293        nix_rx_sync(rvu, blkaddr);
3294        nix_txschq_free(rvu, pcifunc);
3295
3296        rvu_cgx_start_stop_io(rvu, pcifunc, false);
3297
3298        if (pfvf->sq_ctx) {
3299                ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3300                err = nix_lf_hwctx_disable(rvu, &ctx_req);
3301                if (err)
3302                        dev_err(rvu->dev, "SQ ctx disable failed\n");
3303        }
3304
3305        if (pfvf->rq_ctx) {
3306                ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3307                err = nix_lf_hwctx_disable(rvu, &ctx_req);
3308                if (err)
3309                        dev_err(rvu->dev, "RQ ctx disable failed\n");
3310        }
3311
3312        if (pfvf->cq_ctx) {
3313                ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3314                err = nix_lf_hwctx_disable(rvu, &ctx_req);
3315                if (err)
3316                        dev_err(rvu->dev, "CQ ctx disable failed\n");
3317        }
3318
3319        nix_ctx_free(rvu, pfvf);
3320}
3321
3322int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3323                                        struct nix_lso_format_cfg *req,
3324                                        struct nix_lso_format_cfg_rsp *rsp)
3325{
3326        u16 pcifunc = req->hdr.pcifunc;
3327        struct nix_hw *nix_hw;
3328        struct rvu_pfvf *pfvf;
3329        int blkaddr, idx, f;
3330        u64 reg;
3331
3332        pfvf = rvu_get_pfvf(rvu, pcifunc);
3333        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3334        if (!pfvf->nixlf || blkaddr < 0)
3335                return NIX_AF_ERR_AF_LF_INVALID;
3336
3337        nix_hw = get_nix_hw(rvu->hw, blkaddr);
3338        if (!nix_hw)
3339                return -EINVAL;
3340
3341        /* Find existing matching LSO format, if any */
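        /* A format matches when every one of the NIX_LSO_FIELD_MAX fields,
         * masked by req->field_mask, equals the value already programmed;
         * the inner loop only runs to completion (f == NIX_LSO_FIELD_MAX)
         * on a full match, in which case the existing index is reused.
         */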
3342        for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3343                for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3344                        reg = rvu_read64(rvu, blkaddr,
3345                                         NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3346                        if (req->fields[f] != (reg & req->field_mask))
3347                                break;
3348                }
3349
3350                if (f == NIX_LSO_FIELD_MAX)
3351                        break;
3352        }
3353
3354        if (idx < nix_hw->lso.in_use) {
3355                /* Match found */
3356                rsp->lso_format_idx = idx;
3357                return 0;
3358        }
3359
3360        if (nix_hw->lso.in_use == nix_hw->lso.total)
3361                return NIX_AF_ERR_LSO_CFG_FAIL;
3362
3363        rsp->lso_format_idx = nix_hw->lso.in_use++;
3364
3365        for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3366                rvu_write64(rvu, blkaddr,
3367                            NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
3368                            req->fields[f]);
3369
3370        return 0;
3371}
3372