linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/* Marvell RVU Admin Function driver
   3 *
   4 * Copyright (C) 2018 Marvell.
   5 *
   6 */
   7
   8#include <linux/module.h>
   9#include <linux/pci.h>
  10
  11#include "rvu_struct.h"
  12#include "rvu_reg.h"
  13#include "rvu.h"
  14#include "npc.h"
  15#include "cgx.h"
  16#include "lmac_common.h"
  17
  18static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
  19static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
  20                            int type, int chan_id);
  21static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
  22                               int type, bool add);
  23static int nix_setup_ipolicers(struct rvu *rvu,
  24                               struct nix_hw *nix_hw, int blkaddr);
  25static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
  26static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
  27                               struct nix_hw *nix_hw, u16 pcifunc);
  28static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
  29static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
  30                                     u32 leaf_prof);
  31
  32enum mc_tbl_sz {
  33        MC_TBL_SZ_256,
  34        MC_TBL_SZ_512,
  35        MC_TBL_SZ_1K,
  36        MC_TBL_SZ_2K,
  37        MC_TBL_SZ_4K,
  38        MC_TBL_SZ_8K,
  39        MC_TBL_SZ_16K,
  40        MC_TBL_SZ_32K,
  41        MC_TBL_SZ_64K,
  42};
  43
  44enum mc_buf_cnt {
  45        MC_BUF_CNT_8,
  46        MC_BUF_CNT_16,
  47        MC_BUF_CNT_32,
  48        MC_BUF_CNT_64,
  49        MC_BUF_CNT_128,
  50        MC_BUF_CNT_256,
  51        MC_BUF_CNT_512,
  52        MC_BUF_CNT_1024,
  53        MC_BUF_CNT_2048,
  54};
  55
  56enum nix_makr_fmt_indexes {
  57        NIX_MARK_CFG_IP_DSCP_RED,
  58        NIX_MARK_CFG_IP_DSCP_YELLOW,
  59        NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
  60        NIX_MARK_CFG_IP_ECN_RED,
  61        NIX_MARK_CFG_IP_ECN_YELLOW,
  62        NIX_MARK_CFG_IP_ECN_YELLOW_RED,
  63        NIX_MARK_CFG_VLAN_DEI_RED,
  64        NIX_MARK_CFG_VLAN_DEI_YELLOW,
  65        NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
  66        NIX_MARK_CFG_MAX,
  67};
  68
  69/* For now considering MC resources needed for broadcast
  70 * pkt replication only. i.e 256 HWVFs + 12 PFs.
  71 */
  72#define MC_TBL_SIZE     MC_TBL_SZ_512
  73#define MC_BUF_CNT      MC_BUF_CNT_128
  74
  75struct mce {
  76        struct hlist_node       node;
  77        u16                     pcifunc;
  78};
  79
  80int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
  81{
  82        int i = 0;
  83
  84        /*If blkaddr is 0, return the first nix block address*/
  85        if (blkaddr == 0)
  86                return rvu->nix_blkaddr[blkaddr];
  87
  88        while (i + 1 < MAX_NIX_BLKS) {
  89                if (rvu->nix_blkaddr[i] == blkaddr)
  90                        return rvu->nix_blkaddr[i + 1];
  91                i++;
  92        }
  93
  94        return 0;
  95}
  96
  97bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
  98{
  99        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
 100        int blkaddr;
 101
 102        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 103        if (!pfvf->nixlf || blkaddr < 0)
 104                return false;
 105        return true;
 106}
 107
 108int rvu_get_nixlf_count(struct rvu *rvu)
 109{
 110        int blkaddr = 0, max = 0;
 111        struct rvu_block *block;
 112
 113        blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
 114        while (blkaddr) {
 115                block = &rvu->hw->block[blkaddr];
 116                max += block->lf.max;
 117                blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
 118        }
 119        return max;
 120}
 121
 122int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
 123{
 124        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
 125        struct rvu_hwinfo *hw = rvu->hw;
 126        int blkaddr;
 127
 128        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 129        if (!pfvf->nixlf || blkaddr < 0)
 130                return NIX_AF_ERR_AF_LF_INVALID;
 131
 132        *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
 133        if (*nixlf < 0)
 134                return NIX_AF_ERR_AF_LF_INVALID;
 135
 136        if (nix_blkaddr)
 137                *nix_blkaddr = blkaddr;
 138
 139        return 0;
 140}
 141
 142int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
 143                        struct nix_hw **nix_hw, int *blkaddr)
 144{
 145        struct rvu_pfvf *pfvf;
 146
 147        pfvf = rvu_get_pfvf(rvu, pcifunc);
 148        *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 149        if (!pfvf->nixlf || *blkaddr < 0)
 150                return NIX_AF_ERR_AF_LF_INVALID;
 151
 152        *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
 153        if (!*nix_hw)
 154                return NIX_AF_ERR_INVALID_NIXBLK;
 155        return 0;
 156}
 157
 158static void nix_mce_list_init(struct nix_mce_list *list, int max)
 159{
 160        INIT_HLIST_HEAD(&list->head);
 161        list->count = 0;
 162        list->max = max;
 163}
 164
 165static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
 166{
 167        int idx;
 168
 169        if (!mcast)
 170                return 0;
 171
 172        idx = mcast->next_free_mce;
 173        mcast->next_free_mce += count;
 174        return idx;
 175}
 176
 177struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
 178{
 179        int nix_blkaddr = 0, i = 0;
 180        struct rvu *rvu = hw->rvu;
 181
 182        nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
 183        while (nix_blkaddr) {
 184                if (blkaddr == nix_blkaddr && hw->nix)
 185                        return &hw->nix[i];
 186                nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
 187                i++;
 188        }
 189        return NULL;
 190}
 191
 192u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
 193{
 194        dwrr_mtu &= 0x1FULL;
 195
 196        /* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
 197         * Value of 4 is reserved for MTU value of 9728 bytes.
 198         * Value of 5 is reserved for MTU value of 10240 bytes.
 199         */
 200        switch (dwrr_mtu) {
 201        case 4:
 202                return 9728;
 203        case 5:
 204                return 10240;
 205        default:
 206                return BIT_ULL(dwrr_mtu);
 207        }
 208
 209        return 0;
 210}
 211
 212u32 convert_bytes_to_dwrr_mtu(u32 bytes)
 213{
 214        /* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
 215         * Value of 4 is reserved for MTU value of 9728 bytes.
 216         * Value of 5 is reserved for MTU value of 10240 bytes.
 217         */
 218        if (bytes > BIT_ULL(16))
 219                return 0;
 220
 221        switch (bytes) {
 222        case 9728:
 223                return 4;
 224        case 10240:
 225                return 5;
 226        default:
 227                return ilog2(bytes);
 228        }
 229
 230        return 0;
 231}
 232
 233static void nix_rx_sync(struct rvu *rvu, int blkaddr)
 234{
 235        int err;
 236
 237        /* Sync all in flight RX packets to LLC/DRAM */
 238        rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
 239        err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
 240        if (err)
 241                dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
 242
 243        /* SW_SYNC ensures all existing transactions are finished and pkts
 244         * are written to LLC/DRAM, queues should be teared down after
 245         * successful SW_SYNC. Due to a HW errata, in some rare scenarios
 246         * an existing transaction might end after SW_SYNC operation. To
 247         * ensure operation is fully done, do the SW_SYNC twice.
 248         */
 249        rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
 250        err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
 251        if (err)
 252                dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
 253}
 254
 255static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
 256                            int lvl, u16 pcifunc, u16 schq)
 257{
 258        struct rvu_hwinfo *hw = rvu->hw;
 259        struct nix_txsch *txsch;
 260        struct nix_hw *nix_hw;
 261        u16 map_func;
 262
 263        nix_hw = get_nix_hw(rvu->hw, blkaddr);
 264        if (!nix_hw)
 265                return false;
 266
 267        txsch = &nix_hw->txsch[lvl];
 268        /* Check out of bounds */
 269        if (schq >= txsch->schq.max)
 270                return false;
 271
 272        mutex_lock(&rvu->rsrc_lock);
 273        map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
 274        mutex_unlock(&rvu->rsrc_lock);
 275
 276        /* TLs aggegating traffic are shared across PF and VFs */
 277        if (lvl >= hw->cap.nix_tx_aggr_lvl) {
 278                if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
 279                        return false;
 280                else
 281                        return true;
 282        }
 283
 284        if (map_func != pcifunc)
 285                return false;
 286
 287        return true;
 288}
 289
 290static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
 291                              struct nix_lf_alloc_rsp *rsp, bool loop)
 292{
 293        struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
 294        u16 req_chan_base, req_chan_end, req_chan_cnt;
 295        struct rvu_hwinfo *hw = rvu->hw;
 296        struct sdp_node_info *sdp_info;
 297        int pkind, pf, vf, lbkid, vfid;
 298        struct mac_ops *mac_ops;
 299        u8 cgx_id, lmac_id;
 300        bool from_vf;
 301        int err;
 302
 303        pf = rvu_get_pf(pcifunc);
 304        if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
 305            type != NIX_INTF_TYPE_SDP)
 306                return 0;
 307
 308        switch (type) {
 309        case NIX_INTF_TYPE_CGX:
 310                pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
 311                rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
 312
 313                pkind = rvu_npc_get_pkind(rvu, pf);
 314                if (pkind < 0) {
 315                        dev_err(rvu->dev,
 316                                "PF_Func 0x%x: Invalid pkind\n", pcifunc);
 317                        return -EINVAL;
 318                }
 319                pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
 320                pfvf->tx_chan_base = pfvf->rx_chan_base;
 321                pfvf->rx_chan_cnt = 1;
 322                pfvf->tx_chan_cnt = 1;
 323                rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
 324
 325                cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
 326                rvu_npc_set_pkind(rvu, pkind, pfvf);
 327
 328                mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
 329
 330                /* By default we enable pause frames */
 331                if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
 332                        mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
 333                                                                    rvu),
 334                                                      lmac_id, true, true);
 335                break;
 336        case NIX_INTF_TYPE_LBK:
 337                vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
 338
 339                /* If NIX1 block is present on the silicon then NIXes are
 340                 * assigned alternatively for lbk interfaces. NIX0 should
 341                 * send packets on lbk link 1 channels and NIX1 should send
 342                 * on lbk link 0 channels for the communication between
 343                 * NIX0 and NIX1.
 344                 */
 345                lbkid = 0;
 346                if (rvu->hw->lbk_links > 1)
 347                        lbkid = vf & 0x1 ? 0 : 1;
 348
 349                /* By default NIX0 is configured to send packet on lbk link 1
 350                 * (which corresponds to LBK1), same packet will receive on
 351                 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
 352                 * (which corresponds to LBK2) packet will receive on NIX0 lbk
 353                 * link 1.
 354                 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
 355                 * transmits and receives on lbk link 0, whick corresponds
 356                 * to LBK1 block, back to back connectivity between NIX and
 357                 * LBK can be achieved (which is similar to 96xx)
 358                 *
 359                 *                      RX              TX
 360                 * NIX0 lbk link        1 (LBK2)        1 (LBK1)
 361                 * NIX0 lbk link        0 (LBK0)        0 (LBK0)
 362                 * NIX1 lbk link        0 (LBK1)        0 (LBK2)
 363                 * NIX1 lbk link        1 (LBK3)        1 (LBK3)
 364                 */
 365                if (loop)
 366                        lbkid = !lbkid;
 367
 368                /* Note that AF's VFs work in pairs and talk over consecutive
 369                 * loopback channels.Therefore if odd number of AF VFs are
 370                 * enabled then the last VF remains with no pair.
 371                 */
 372                pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
 373                pfvf->tx_chan_base = vf & 0x1 ?
 374                                        rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
 375                                        rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
 376                pfvf->rx_chan_cnt = 1;
 377                pfvf->tx_chan_cnt = 1;
 378                rsp->tx_link = hw->cgx_links + lbkid;
 379                pfvf->lbkid = lbkid;
 380                rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
 381                rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
 382                                              pfvf->rx_chan_base,
 383                                              pfvf->rx_chan_cnt);
 384
 385                break;
 386        case NIX_INTF_TYPE_SDP:
 387                from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
 388                parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
 389                sdp_info = parent_pf->sdp_info;
 390                if (!sdp_info) {
 391                        dev_err(rvu->dev, "Invalid sdp_info pointer\n");
 392                        return -EINVAL;
 393                }
 394                if (from_vf) {
 395                        req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
 396                                sdp_info->num_pf_rings;
 397                        vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
 398                        for (vfid = 0; vfid < vf; vfid++)
 399                                req_chan_base += sdp_info->vf_rings[vfid];
 400                        req_chan_cnt = sdp_info->vf_rings[vf];
 401                        req_chan_end = req_chan_base + req_chan_cnt - 1;
 402                        if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
 403                            req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
 404                                dev_err(rvu->dev,
 405                                        "PF_Func 0x%x: Invalid channel base and count\n",
 406                                        pcifunc);
 407                                return -EINVAL;
 408                        }
 409                } else {
 410                        req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
 411                        req_chan_cnt = sdp_info->num_pf_rings;
 412                }
 413
 414                pfvf->rx_chan_base = req_chan_base;
 415                pfvf->rx_chan_cnt = req_chan_cnt;
 416                pfvf->tx_chan_base = pfvf->rx_chan_base;
 417                pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
 418
 419                rsp->tx_link = hw->cgx_links + hw->lbk_links;
 420                rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
 421                                              pfvf->rx_chan_base,
 422                                              pfvf->rx_chan_cnt);
 423                break;
 424        }
 425
 426        /* Add a UCAST forwarding rule in MCAM with this NIXLF attached
 427         * RVU PF/VF's MAC address.
 428         */
 429        rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
 430                                    pfvf->rx_chan_base, pfvf->mac_addr);
 431
 432        /* Add this PF_FUNC to bcast pkt replication list */
 433        err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
 434        if (err) {
 435                dev_err(rvu->dev,
 436                        "Bcast list, failed to enable PF_FUNC 0x%x\n",
 437                        pcifunc);
 438                return err;
 439        }
 440        /* Install MCAM rule matching Ethernet broadcast mac address */
 441        rvu_npc_install_bcast_match_entry(rvu, pcifunc,
 442                                          nixlf, pfvf->rx_chan_base);
 443
 444        pfvf->maxlen = NIC_HW_MIN_FRS;
 445        pfvf->minlen = NIC_HW_MIN_FRS;
 446
 447        return 0;
 448}
 449
 450static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
 451{
 452        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
 453        int err;
 454
 455        pfvf->maxlen = 0;
 456        pfvf->minlen = 0;
 457
 458        /* Remove this PF_FUNC from bcast pkt replication list */
 459        err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
 460        if (err) {
 461                dev_err(rvu->dev,
 462                        "Bcast list, failed to disable PF_FUNC 0x%x\n",
 463                        pcifunc);
 464        }
 465
 466        /* Free and disable any MCAM entries used by this NIX LF */
 467        rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
 468
 469        /* Disable DMAC filters used */
 470        rvu_cgx_disable_dmac_entries(rvu, pcifunc);
 471}
 472
 473int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
 474                                    struct nix_bp_cfg_req *req,
 475                                    struct msg_rsp *rsp)
 476{
 477        u16 pcifunc = req->hdr.pcifunc;
 478        struct rvu_pfvf *pfvf;
 479        int blkaddr, pf, type;
 480        u16 chan_base, chan;
 481        u64 cfg;
 482
 483        pf = rvu_get_pf(pcifunc);
 484        type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
 485        if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
 486                return 0;
 487
 488        pfvf = rvu_get_pfvf(rvu, pcifunc);
 489        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 490
 491        chan_base = pfvf->rx_chan_base + req->chan_base;
 492        for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
 493                cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
 494                rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
 495                            cfg & ~BIT_ULL(16));
 496        }
 497        return 0;
 498}
 499
 500static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
 501                            int type, int chan_id)
 502{
 503        int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
 504        u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
 505        struct rvu_hwinfo *hw = rvu->hw;
 506        struct rvu_pfvf *pfvf;
 507        u8 cgx_id, lmac_id;
 508        u64 cfg;
 509
 510        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
 511        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
 512        lmac_chan_cnt = cfg & 0xFF;
 513
 514        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
 515        sdp_chan_cnt = cfg & 0xFFF;
 516
 517        cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
 518        lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
 519        sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
 520
 521        pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
 522
 523        /* Backpressure IDs range division
 524         * CGX channles are mapped to (0 - 191) BPIDs
 525         * LBK channles are mapped to (192 - 255) BPIDs
 526         * SDP channles are mapped to (256 - 511) BPIDs
 527         *
 528         * Lmac channles and bpids mapped as follows
 529         * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
 530         * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
 531         * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
 532         */
 533        switch (type) {
 534        case NIX_INTF_TYPE_CGX:
 535                if ((req->chan_base + req->chan_cnt) > 15)
 536                        return -EINVAL;
 537                rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
 538                /* Assign bpid based on cgx, lmac and chan id */
 539                bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
 540                        (lmac_id * lmac_chan_cnt) + req->chan_base;
 541
 542                if (req->bpid_per_chan)
 543                        bpid += chan_id;
 544                if (bpid > cgx_bpid_cnt)
 545                        return -EINVAL;
 546                break;
 547
 548        case NIX_INTF_TYPE_LBK:
 549                if ((req->chan_base + req->chan_cnt) > 63)
 550                        return -EINVAL;
 551                bpid = cgx_bpid_cnt + req->chan_base;
 552                if (req->bpid_per_chan)
 553                        bpid += chan_id;
 554                if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
 555                        return -EINVAL;
 556                break;
 557        case NIX_INTF_TYPE_SDP:
 558                if ((req->chan_base + req->chan_cnt) > 255)
 559                        return -EINVAL;
 560
 561                bpid = sdp_bpid_cnt + req->chan_base;
 562                if (req->bpid_per_chan)
 563                        bpid += chan_id;
 564
 565                if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
 566                        return -EINVAL;
 567                break;
 568        default:
 569                return -EINVAL;
 570        }
 571        return bpid;
 572}
 573
 574int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
 575                                   struct nix_bp_cfg_req *req,
 576                                   struct nix_bp_cfg_rsp *rsp)
 577{
 578        int blkaddr, pf, type, chan_id = 0;
 579        u16 pcifunc = req->hdr.pcifunc;
 580        struct rvu_pfvf *pfvf;
 581        u16 chan_base, chan;
 582        s16 bpid, bpid_base;
 583        u64 cfg;
 584
 585        pf = rvu_get_pf(pcifunc);
 586        type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
 587        if (is_sdp_pfvf(pcifunc))
 588                type = NIX_INTF_TYPE_SDP;
 589
 590        /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
 591        if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
 592            type != NIX_INTF_TYPE_SDP)
 593                return 0;
 594
 595        pfvf = rvu_get_pfvf(rvu, pcifunc);
 596        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
 597
 598        bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
 599        chan_base = pfvf->rx_chan_base + req->chan_base;
 600        bpid = bpid_base;
 601
 602        for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
 603                if (bpid < 0) {
 604                        dev_warn(rvu->dev, "Fail to enable backpressure\n");
 605                        return -EINVAL;
 606                }
 607
 608                cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
 609                cfg &= ~GENMASK_ULL(8, 0);
 610                rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
 611                            cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
 612                chan_id++;
 613                bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
 614        }
 615
 616        for (chan = 0; chan < req->chan_cnt; chan++) {
 617                /* Map channel and bpid assign to it */
 618                rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
 619                                        (bpid_base & 0x3FF);
 620                if (req->bpid_per_chan)
 621                        bpid_base++;
 622        }
 623        rsp->chan_cnt = req->chan_cnt;
 624
 625        return 0;
 626}
 627
 628static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
 629                                 u64 format, bool v4, u64 *fidx)
 630{
 631        struct nix_lso_format field = {0};
 632
 633        /* IP's Length field */
 634        field.layer = NIX_TXLAYER_OL3;
 635        /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
 636        field.offset = v4 ? 2 : 4;
 637        field.sizem1 = 1; /* i.e 2 bytes */
 638        field.alg = NIX_LSOALG_ADD_PAYLEN;
 639        rvu_write64(rvu, blkaddr,
 640                    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
 641                    *(u64 *)&field);
 642
 643        /* No ID field in IPv6 header */
 644        if (!v4)
 645                return;
 646
 647        /* IP's ID field */
 648        field.layer = NIX_TXLAYER_OL3;
 649        field.offset = 4;
 650        field.sizem1 = 1; /* i.e 2 bytes */
 651        field.alg = NIX_LSOALG_ADD_SEGNUM;
 652        rvu_write64(rvu, blkaddr,
 653                    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
 654                    *(u64 *)&field);
 655}
 656
 657static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
 658                                 u64 format, u64 *fidx)
 659{
 660        struct nix_lso_format field = {0};
 661
 662        /* TCP's sequence number field */
 663        field.layer = NIX_TXLAYER_OL4;
 664        field.offset = 4;
 665        field.sizem1 = 3; /* i.e 4 bytes */
 666        field.alg = NIX_LSOALG_ADD_OFFSET;
 667        rvu_write64(rvu, blkaddr,
 668                    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
 669                    *(u64 *)&field);
 670
 671        /* TCP's flags field */
 672        field.layer = NIX_TXLAYER_OL4;
 673        field.offset = 12;
 674        field.sizem1 = 1; /* 2 bytes */
 675        field.alg = NIX_LSOALG_TCP_FLAGS;
 676        rvu_write64(rvu, blkaddr,
 677                    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
 678                    *(u64 *)&field);
 679}
 680
 681static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 682{
 683        u64 cfg, idx, fidx = 0;
 684
 685        /* Get max HW supported format indices */
 686        cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
 687        nix_hw->lso.total = cfg;
 688
 689        /* Enable LSO */
 690        cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
 691        /* For TSO, set first and middle segment flags to
 692         * mask out PSH, RST & FIN flags in TCP packet
 693         */
 694        cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
 695        cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
 696        rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
 697
 698        /* Setup default static LSO formats
 699         *
 700         * Configure format fields for TCPv4 segmentation offload
 701         */
 702        idx = NIX_LSO_FORMAT_IDX_TSOV4;
 703        nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
 704        nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
 705
 706        /* Set rest of the fields to NOP */
 707        for (; fidx < 8; fidx++) {
 708                rvu_write64(rvu, blkaddr,
 709                            NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
 710        }
 711        nix_hw->lso.in_use++;
 712
 713        /* Configure format fields for TCPv6 segmentation offload */
 714        idx = NIX_LSO_FORMAT_IDX_TSOV6;
 715        fidx = 0;
 716        nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
 717        nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
 718
 719        /* Set rest of the fields to NOP */
 720        for (; fidx < 8; fidx++) {
 721                rvu_write64(rvu, blkaddr,
 722                            NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
 723        }
 724        nix_hw->lso.in_use++;
 725}
 726
 727static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
 728{
 729        kfree(pfvf->rq_bmap);
 730        kfree(pfvf->sq_bmap);
 731        kfree(pfvf->cq_bmap);
 732        if (pfvf->rq_ctx)
 733                qmem_free(rvu->dev, pfvf->rq_ctx);
 734        if (pfvf->sq_ctx)
 735                qmem_free(rvu->dev, pfvf->sq_ctx);
 736        if (pfvf->cq_ctx)
 737                qmem_free(rvu->dev, pfvf->cq_ctx);
 738        if (pfvf->rss_ctx)
 739                qmem_free(rvu->dev, pfvf->rss_ctx);
 740        if (pfvf->nix_qints_ctx)
 741                qmem_free(rvu->dev, pfvf->nix_qints_ctx);
 742        if (pfvf->cq_ints_ctx)
 743                qmem_free(rvu->dev, pfvf->cq_ints_ctx);
 744
 745        pfvf->rq_bmap = NULL;
 746        pfvf->cq_bmap = NULL;
 747        pfvf->sq_bmap = NULL;
 748        pfvf->rq_ctx = NULL;
 749        pfvf->sq_ctx = NULL;
 750        pfvf->cq_ctx = NULL;
 751        pfvf->rss_ctx = NULL;
 752        pfvf->nix_qints_ctx = NULL;
 753        pfvf->cq_ints_ctx = NULL;
 754}
 755
 756static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
 757                              struct rvu_pfvf *pfvf, int nixlf,
 758                              int rss_sz, int rss_grps, int hwctx_size,
 759                              u64 way_mask, bool tag_lsb_as_adder)
 760{
 761        int err, grp, num_indices;
 762        u64 val;
 763
 764        /* RSS is not requested for this NIXLF */
 765        if (!rss_sz)
 766                return 0;
 767        num_indices = rss_sz * rss_grps;
 768
 769        /* Alloc NIX RSS HW context memory and config the base */
 770        err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
 771        if (err)
 772                return err;
 773
 774        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
 775                    (u64)pfvf->rss_ctx->iova);
 776
 777        /* Config full RSS table size, enable RSS and caching */
 778        val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
 779                        ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
 780
 781        if (tag_lsb_as_adder)
 782                val |= BIT_ULL(5);
 783
 784        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
 785        /* Config RSS group offset and sizes */
 786        for (grp = 0; grp < rss_grps; grp++)
 787                rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
 788                            ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
 789        return 0;
 790}
 791
 792static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
 793                               struct nix_aq_inst_s *inst)
 794{
 795        struct admin_queue *aq = block->aq;
 796        struct nix_aq_res_s *result;
 797        int timeout = 1000;
 798        u64 reg, head;
 799
 800        result = (struct nix_aq_res_s *)aq->res->base;
 801
 802        /* Get current head pointer where to append this instruction */
 803        reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
 804        head = (reg >> 4) & AQ_PTR_MASK;
 805
 806        memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
 807               (void *)inst, aq->inst->entry_sz);
 808        memset(result, 0, sizeof(*result));
 809        /* sync into memory */
 810        wmb();
 811
 812        /* Ring the doorbell and wait for result */
 813        rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
 814        while (result->compcode == NIX_AQ_COMP_NOTDONE) {
 815                cpu_relax();
 816                udelay(1);
 817                timeout--;
 818                if (!timeout)
 819                        return -EBUSY;
 820        }
 821
 822        if (result->compcode != NIX_AQ_COMP_GOOD)
 823                /* TODO: Replace this with some error code */
 824                return -EBUSY;
 825
 826        return 0;
 827}
 828
 829static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
 830                                   struct nix_aq_enq_req *req,
 831                                   struct nix_aq_enq_rsp *rsp)
 832{
 833        struct rvu_hwinfo *hw = rvu->hw;
 834        u16 pcifunc = req->hdr.pcifunc;
 835        int nixlf, blkaddr, rc = 0;
 836        struct nix_aq_inst_s inst;
 837        struct rvu_block *block;
 838        struct admin_queue *aq;
 839        struct rvu_pfvf *pfvf;
 840        void *ctx, *mask;
 841        bool ena;
 842        u64 cfg;
 843
 844        blkaddr = nix_hw->blkaddr;
 845        block = &hw->block[blkaddr];
 846        aq = block->aq;
 847        if (!aq) {
 848                dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
 849                return NIX_AF_ERR_AQ_ENQUEUE;
 850        }
 851
 852        pfvf = rvu_get_pfvf(rvu, pcifunc);
 853        nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
 854
 855        /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
 856         * operations done by AF itself.
 857         */
 858        if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
 859              (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
 860                if (!pfvf->nixlf || nixlf < 0)
 861                        return NIX_AF_ERR_AF_LF_INVALID;
 862        }
 863
 864        switch (req->ctype) {
 865        case NIX_AQ_CTYPE_RQ:
 866                /* Check if index exceeds max no of queues */
 867                if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
 868                        rc = NIX_AF_ERR_AQ_ENQUEUE;
 869                break;
 870        case NIX_AQ_CTYPE_SQ:
 871                if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
 872                        rc = NIX_AF_ERR_AQ_ENQUEUE;
 873                break;
 874        case NIX_AQ_CTYPE_CQ:
 875                if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
 876                        rc = NIX_AF_ERR_AQ_ENQUEUE;
 877                break;
 878        case NIX_AQ_CTYPE_RSS:
 879                /* Check if RSS is enabled and qidx is within range */
 880                cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
 881                if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
 882                    (req->qidx >= (256UL << (cfg & 0xF))))
 883                        rc = NIX_AF_ERR_AQ_ENQUEUE;
 884                break;
 885        case NIX_AQ_CTYPE_MCE:
 886                cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
 887
 888                /* Check if index exceeds MCE list length */
 889                if (!nix_hw->mcast.mce_ctx ||
 890                    (req->qidx >= (256UL << (cfg & 0xF))))
 891                        rc = NIX_AF_ERR_AQ_ENQUEUE;
 892
 893                /* Adding multicast lists for requests from PF/VFs is not
 894                 * yet supported, so ignore this.
 895                 */
 896                if (rsp)
 897                        rc = NIX_AF_ERR_AQ_ENQUEUE;
 898                break;
 899        case NIX_AQ_CTYPE_BANDPROF:
 900                if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
 901                                        nix_hw, pcifunc))
 902                        rc = NIX_AF_ERR_INVALID_BANDPROF;
 903                break;
 904        default:
 905                rc = NIX_AF_ERR_AQ_ENQUEUE;
 906        }
 907
 908        if (rc)
 909                return rc;
 910
 911        /* Check if SQ pointed SMQ belongs to this PF/VF or not */
 912        if (req->ctype == NIX_AQ_CTYPE_SQ &&
 913            ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
 914             (req->op == NIX_AQ_INSTOP_WRITE &&
 915              req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
 916                if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
 917                                     pcifunc, req->sq.smq))
 918                        return NIX_AF_ERR_AQ_ENQUEUE;
 919        }
 920
 921        memset(&inst, 0, sizeof(struct nix_aq_inst_s));
 922        inst.lf = nixlf;
 923        inst.cindex = req->qidx;
 924        inst.ctype = req->ctype;
 925        inst.op = req->op;
 926        /* Currently we are not supporting enqueuing multiple instructions,
 927         * so always choose first entry in result memory.
 928         */
 929        inst.res_addr = (u64)aq->res->iova;
 930
 931        /* Hardware uses same aq->res->base for updating result of
 932         * previous instruction hence wait here till it is done.
 933         */
 934        spin_lock(&aq->lock);
 935
 936        /* Clean result + context memory */
 937        memset(aq->res->base, 0, aq->res->entry_sz);
 938        /* Context needs to be written at RES_ADDR + 128 */
 939        ctx = aq->res->base + 128;
 940        /* Mask needs to be written at RES_ADDR + 256 */
 941        mask = aq->res->base + 256;
 942
 943        switch (req->op) {
 944        case NIX_AQ_INSTOP_WRITE:
 945                if (req->ctype == NIX_AQ_CTYPE_RQ)
 946                        memcpy(mask, &req->rq_mask,
 947                               sizeof(struct nix_rq_ctx_s));
 948                else if (req->ctype == NIX_AQ_CTYPE_SQ)
 949                        memcpy(mask, &req->sq_mask,
 950                               sizeof(struct nix_sq_ctx_s));
 951                else if (req->ctype == NIX_AQ_CTYPE_CQ)
 952                        memcpy(mask, &req->cq_mask,
 953                               sizeof(struct nix_cq_ctx_s));
 954                else if (req->ctype == NIX_AQ_CTYPE_RSS)
 955                        memcpy(mask, &req->rss_mask,
 956                               sizeof(struct nix_rsse_s));
 957                else if (req->ctype == NIX_AQ_CTYPE_MCE)
 958                        memcpy(mask, &req->mce_mask,
 959                               sizeof(struct nix_rx_mce_s));
 960                else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
 961                        memcpy(mask, &req->prof_mask,
 962                               sizeof(struct nix_bandprof_s));
 963                fallthrough;
 964        case NIX_AQ_INSTOP_INIT:
 965                if (req->ctype == NIX_AQ_CTYPE_RQ)
 966                        memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
 967                else if (req->ctype == NIX_AQ_CTYPE_SQ)
 968                        memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
 969                else if (req->ctype == NIX_AQ_CTYPE_CQ)
 970                        memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
 971                else if (req->ctype == NIX_AQ_CTYPE_RSS)
 972                        memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
 973                else if (req->ctype == NIX_AQ_CTYPE_MCE)
 974                        memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
 975                else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
 976                        memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
 977                break;
 978        case NIX_AQ_INSTOP_NOP:
 979        case NIX_AQ_INSTOP_READ:
 980        case NIX_AQ_INSTOP_LOCK:
 981        case NIX_AQ_INSTOP_UNLOCK:
 982                break;
 983        default:
 984                rc = NIX_AF_ERR_AQ_ENQUEUE;
 985                spin_unlock(&aq->lock);
 986                return rc;
 987        }
 988
 989        /* Submit the instruction to AQ */
 990        rc = nix_aq_enqueue_wait(rvu, block, &inst);
 991        if (rc) {
 992                spin_unlock(&aq->lock);
 993                return rc;
 994        }
 995
 996        /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
 997        if (req->op == NIX_AQ_INSTOP_INIT) {
 998                if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
 999                        __set_bit(req->qidx, pfvf->rq_bmap);
1000                if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
1001                        __set_bit(req->qidx, pfvf->sq_bmap);
1002                if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
1003                        __set_bit(req->qidx, pfvf->cq_bmap);
1004        }
1005
1006        if (req->op == NIX_AQ_INSTOP_WRITE) {
1007                if (req->ctype == NIX_AQ_CTYPE_RQ) {
1008                        ena = (req->rq.ena & req->rq_mask.ena) |
1009                                (test_bit(req->qidx, pfvf->rq_bmap) &
1010                                ~req->rq_mask.ena);
1011                        if (ena)
1012                                __set_bit(req->qidx, pfvf->rq_bmap);
1013                        else
1014                                __clear_bit(req->qidx, pfvf->rq_bmap);
1015                }
1016                if (req->ctype == NIX_AQ_CTYPE_SQ) {
1017                        ena = (req->rq.ena & req->sq_mask.ena) |
1018                                (test_bit(req->qidx, pfvf->sq_bmap) &
1019                                ~req->sq_mask.ena);
1020                        if (ena)
1021                                __set_bit(req->qidx, pfvf->sq_bmap);
1022                        else
1023                                __clear_bit(req->qidx, pfvf->sq_bmap);
1024                }
1025                if (req->ctype == NIX_AQ_CTYPE_CQ) {
1026                        ena = (req->rq.ena & req->cq_mask.ena) |
1027                                (test_bit(req->qidx, pfvf->cq_bmap) &
1028                                ~req->cq_mask.ena);
1029                        if (ena)
1030                                __set_bit(req->qidx, pfvf->cq_bmap);
1031                        else
1032                                __clear_bit(req->qidx, pfvf->cq_bmap);
1033                }
1034        }
1035
1036        if (rsp) {
1037                /* Copy read context into mailbox */
1038                if (req->op == NIX_AQ_INSTOP_READ) {
1039                        if (req->ctype == NIX_AQ_CTYPE_RQ)
1040                                memcpy(&rsp->rq, ctx,
1041                                       sizeof(struct nix_rq_ctx_s));
1042                        else if (req->ctype == NIX_AQ_CTYPE_SQ)
1043                                memcpy(&rsp->sq, ctx,
1044                                       sizeof(struct nix_sq_ctx_s));
1045                        else if (req->ctype == NIX_AQ_CTYPE_CQ)
1046                                memcpy(&rsp->cq, ctx,
1047                                       sizeof(struct nix_cq_ctx_s));
1048                        else if (req->ctype == NIX_AQ_CTYPE_RSS)
1049                                memcpy(&rsp->rss, ctx,
1050                                       sizeof(struct nix_rsse_s));
1051                        else if (req->ctype == NIX_AQ_CTYPE_MCE)
1052                                memcpy(&rsp->mce, ctx,
1053                                       sizeof(struct nix_rx_mce_s));
1054                        else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1055                                memcpy(&rsp->prof, ctx,
1056                                       sizeof(struct nix_bandprof_s));
1057                }
1058        }
1059
1060        spin_unlock(&aq->lock);
1061        return 0;
1062}
1063
1064static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
1065                               struct nix_aq_enq_rsp *rsp)
1066{
1067        struct nix_hw *nix_hw;
1068        int blkaddr;
1069
1070        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
1071        if (blkaddr < 0)
1072                return NIX_AF_ERR_AF_LF_INVALID;
1073
1074        nix_hw =  get_nix_hw(rvu->hw, blkaddr);
1075        if (!nix_hw)
1076                return NIX_AF_ERR_INVALID_NIXBLK;
1077
1078        return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
1079}
1080
1081static const char *nix_get_ctx_name(int ctype)
1082{
1083        switch (ctype) {
1084        case NIX_AQ_CTYPE_CQ:
1085                return "CQ";
1086        case NIX_AQ_CTYPE_SQ:
1087                return "SQ";
1088        case NIX_AQ_CTYPE_RQ:
1089                return "RQ";
1090        case NIX_AQ_CTYPE_RSS:
1091                return "RSS";
1092        }
1093        return "";
1094}
1095
1096static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
1097{
1098        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1099        struct nix_aq_enq_req aq_req;
1100        unsigned long *bmap;
1101        int qidx, q_cnt = 0;
1102        int err = 0, rc;
1103
1104        if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
1105                return NIX_AF_ERR_AQ_ENQUEUE;
1106
1107        memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1108        aq_req.hdr.pcifunc = req->hdr.pcifunc;
1109
1110        if (req->ctype == NIX_AQ_CTYPE_CQ) {
1111                aq_req.cq.ena = 0;
1112                aq_req.cq_mask.ena = 1;
1113                aq_req.cq.bp_ena = 0;
1114                aq_req.cq_mask.bp_ena = 1;
1115                q_cnt = pfvf->cq_ctx->qsize;
1116                bmap = pfvf->cq_bmap;
1117        }
1118        if (req->ctype == NIX_AQ_CTYPE_SQ) {
1119                aq_req.sq.ena = 0;
1120                aq_req.sq_mask.ena = 1;
1121                q_cnt = pfvf->sq_ctx->qsize;
1122                bmap = pfvf->sq_bmap;
1123        }
1124        if (req->ctype == NIX_AQ_CTYPE_RQ) {
1125                aq_req.rq.ena = 0;
1126                aq_req.rq_mask.ena = 1;
1127                q_cnt = pfvf->rq_ctx->qsize;
1128                bmap = pfvf->rq_bmap;
1129        }
1130
1131        aq_req.ctype = req->ctype;
1132        aq_req.op = NIX_AQ_INSTOP_WRITE;
1133
1134        for (qidx = 0; qidx < q_cnt; qidx++) {
1135                if (!test_bit(qidx, bmap))
1136                        continue;
1137                aq_req.qidx = qidx;
1138                rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1139                if (rc) {
1140                        err = rc;
1141                        dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1142                                nix_get_ctx_name(req->ctype), qidx);
1143                }
1144        }
1145
1146        return err;
1147}
1148
1149#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1150static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1151{
1152        struct nix_aq_enq_req lock_ctx_req;
1153        int err;
1154
1155        if (req->op != NIX_AQ_INSTOP_INIT)
1156                return 0;
1157
1158        if (req->ctype == NIX_AQ_CTYPE_MCE ||
1159            req->ctype == NIX_AQ_CTYPE_DYNO)
1160                return 0;
1161
1162        memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1163        lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1164        lock_ctx_req.ctype = req->ctype;
1165        lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1166        lock_ctx_req.qidx = req->qidx;
1167        err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1168        if (err)
1169                dev_err(rvu->dev,
1170                        "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1171                        req->hdr.pcifunc,
1172                        nix_get_ctx_name(req->ctype), req->qidx);
1173        return err;
1174}
1175
1176int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1177                                struct nix_aq_enq_req *req,
1178                                struct nix_aq_enq_rsp *rsp)
1179{
1180        int err;
1181
1182        err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1183        if (!err)
1184                err = nix_lf_hwctx_lockdown(rvu, req);
1185        return err;
1186}
1187#else
1188
1189int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1190                                struct nix_aq_enq_req *req,
1191                                struct nix_aq_enq_rsp *rsp)
1192{
1193        return rvu_nix_aq_enq_inst(rvu, req, rsp);
1194}
1195#endif
1196/* CN10K mbox handler */
1197int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1198                                      struct nix_cn10k_aq_enq_req *req,
1199                                      struct nix_cn10k_aq_enq_rsp *rsp)
1200{
1201        return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1202                                  (struct nix_aq_enq_rsp *)rsp);
1203}
1204
1205int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1206                                       struct hwctx_disable_req *req,
1207                                       struct msg_rsp *rsp)
1208{
1209        return nix_lf_hwctx_disable(rvu, req);
1210}
1211
1212int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1213                                  struct nix_lf_alloc_req *req,
1214                                  struct nix_lf_alloc_rsp *rsp)
1215{
1216        int nixlf, qints, hwctx_size, intf, err, rc = 0;
1217        struct rvu_hwinfo *hw = rvu->hw;
1218        u16 pcifunc = req->hdr.pcifunc;
1219        struct rvu_block *block;
1220        struct rvu_pfvf *pfvf;
1221        u64 cfg, ctx_cfg;
1222        int blkaddr;
1223
1224        if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1225                return NIX_AF_ERR_PARAM;
1226
1227        if (req->way_mask)
1228                req->way_mask &= 0xFFFF;
1229
1230        pfvf = rvu_get_pfvf(rvu, pcifunc);
1231        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1232        if (!pfvf->nixlf || blkaddr < 0)
1233                return NIX_AF_ERR_AF_LF_INVALID;
1234
1235        block = &hw->block[blkaddr];
1236        nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1237        if (nixlf < 0)
1238                return NIX_AF_ERR_AF_LF_INVALID;
1239
1240        /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1241        if (req->npa_func) {
1242                /* If default, use 'this' NIXLF's PFFUNC */
1243                if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1244                        req->npa_func = pcifunc;
1245                if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1246                        return NIX_AF_INVAL_NPA_PF_FUNC;
1247        }
1248
1249        /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1250        if (req->sso_func) {
1251                /* If default, use 'this' NIXLF's PFFUNC */
1252                if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1253                        req->sso_func = pcifunc;
1254                if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1255                        return NIX_AF_INVAL_SSO_PF_FUNC;
1256        }
1257
1258        /* If RSS is being enabled, check if requested config is valid.
1259         * RSS table size should be power of two, otherwise
1260         * RSS_GRP::OFFSET + adder might go beyond that group or
1261         * won't be able to use entire table.
1262         */
1263        if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1264                            !is_power_of_2(req->rss_sz)))
1265                return NIX_AF_ERR_RSS_SIZE_INVALID;
1266
1267        if (req->rss_sz &&
1268            (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1269                return NIX_AF_ERR_RSS_GRPS_INVALID;
1270
1271        /* Reset this NIX LF */
1272        err = rvu_lf_reset(rvu, block, nixlf);
1273        if (err) {
1274                dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1275                        block->addr - BLKADDR_NIX0, nixlf);
1276                return NIX_AF_ERR_LF_RESET;
1277        }
1278
1279        ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1280
1281        /* Alloc NIX RQ HW context memory and config the base */
1282        hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1283        err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1284        if (err)
1285                goto free_mem;
1286
1287        pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1288        if (!pfvf->rq_bmap)
1289                goto free_mem;
1290
1291        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1292                    (u64)pfvf->rq_ctx->iova);
1293
1294        /* Set caching and queue count in HW */
1295        cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1296        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1297
1298        /* Alloc NIX SQ HW context memory and config the base */
1299        hwctx_size = 1UL << (ctx_cfg & 0xF);
1300        err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1301        if (err)
1302                goto free_mem;
1303
1304        pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1305        if (!pfvf->sq_bmap)
1306                goto free_mem;
1307
1308        rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1309                    (u64)pfvf->sq_ctx->iova);
1310
1311        cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1312        rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1313
1314        /* Alloc NIX CQ HW context memory and config the base */
1315        hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1316        err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1317        if (err)
1318                goto free_mem;
1319
1320        pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1321        if (!pfvf->cq_bmap)
1322                goto free_mem;
1323
1324        rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1325                    (u64)pfvf->cq_ctx->iova);
1326
1327        cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1328        rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1329
1330        /* Initialize receive side scaling (RSS) */
1331        hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1332        err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1333                                 req->rss_grps, hwctx_size, req->way_mask,
1334                                 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
1335        if (err)
1336                goto free_mem;
1337
1338        /* Alloc memory for CQINT's HW contexts */
1339        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1340        qints = (cfg >> 24) & 0xFFF;
1341        hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1342        err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1343        if (err)
1344                goto free_mem;
1345
1346        rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1347                    (u64)pfvf->cq_ints_ctx->iova);
1348
1349        rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1350                    BIT_ULL(36) | req->way_mask << 20);
1351
1352        /* Alloc memory for QINT's HW contexts */
1353        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1354        qints = (cfg >> 12) & 0xFFF;
1355        hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1356        err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1357        if (err)
1358                goto free_mem;
1359
1360        rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1361                    (u64)pfvf->nix_qints_ctx->iova);
1362        rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1363                    BIT_ULL(36) | req->way_mask << 20);
1364
1365        /* Setup VLANX TPID's.
1366         * Use VLAN1 for 802.1Q
1367         * and VLAN0 for 802.1AD.
1368         */
1369        cfg = (0x8100ULL << 16) | 0x88A8ULL;
1370        rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1371
1372        /* Enable LMTST for this NIX LF */
1373        rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1374
1375        /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1376        if (req->npa_func)
1377                cfg = req->npa_func;
1378        if (req->sso_func)
1379                cfg |= (u64)req->sso_func << 16;
1380
1381        cfg |= (u64)req->xqe_sz << 33;
1382        rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1383
1384        /* Config Rx pkt length, csum checks and apad  enable / disable */
1385        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1386
1387        /* Configure pkind for TX parse config */
1388        cfg = NPC_TX_DEF_PKIND;
1389        rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1390
1391        intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1392        if (is_sdp_pfvf(pcifunc))
1393                intf = NIX_INTF_TYPE_SDP;
1394
1395        err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
1396                                 !!(req->flags & NIX_LF_LBK_BLK_SEL));
1397        if (err)
1398                goto free_mem;
1399
1400        /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1401        rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1402
1403        /* Configure RX VTAG Type 7 (strip) for vf vlan */
1404        rvu_write64(rvu, blkaddr,
1405                    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1406                    VTAGSIZE_T4 | VTAG_STRIP);
1407
1408        goto exit;
1409
1410free_mem:
1411        nix_ctx_free(rvu, pfvf);
1412        rc = -ENOMEM;
1413
1414exit:
1415        /* Set macaddr of this PF/VF */
1416        ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1417
1418        /* set SQB size info */
1419        cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1420        rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1421        rsp->rx_chan_base = pfvf->rx_chan_base;
1422        rsp->tx_chan_base = pfvf->tx_chan_base;
1423        rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1424        rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1425        rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1426        rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1427        /* Get HW supported stat count */
1428        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1429        rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1430        rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1431        /* Get count of CQ IRQs and error IRQs supported per LF */
1432        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1433        rsp->qints = ((cfg >> 12) & 0xFFF);
1434        rsp->cints = ((cfg >> 24) & 0xFFF);
1435        rsp->cgx_links = hw->cgx_links;
1436        rsp->lbk_links = hw->lbk_links;
1437        rsp->sdp_links = hw->sdp_links;
1438
1439        return rc;
1440}
1441
1442int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1443                                 struct msg_rsp *rsp)
1444{
1445        struct rvu_hwinfo *hw = rvu->hw;
1446        u16 pcifunc = req->hdr.pcifunc;
1447        struct rvu_block *block;
1448        int blkaddr, nixlf, err;
1449        struct rvu_pfvf *pfvf;
1450
1451        pfvf = rvu_get_pfvf(rvu, pcifunc);
1452        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1453        if (!pfvf->nixlf || blkaddr < 0)
1454                return NIX_AF_ERR_AF_LF_INVALID;
1455
1456        block = &hw->block[blkaddr];
1457        nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1458        if (nixlf < 0)
1459                return NIX_AF_ERR_AF_LF_INVALID;
1460
1461        if (req->flags & NIX_LF_DISABLE_FLOWS)
1462                rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1463        else
1464                rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1465
1466        /* Free any tx vtag def entries used by this NIX LF */
1467        if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1468                nix_free_tx_vtag_entries(rvu, pcifunc);
1469
1470        nix_interface_deinit(rvu, pcifunc, nixlf);
1471
1472        /* Reset this NIX LF */
1473        err = rvu_lf_reset(rvu, block, nixlf);
1474        if (err) {
1475                dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1476                        block->addr - BLKADDR_NIX0, nixlf);
1477                return NIX_AF_ERR_LF_RESET;
1478        }
1479
1480        nix_ctx_free(rvu, pfvf);
1481
1482        return 0;
1483}
1484
1485int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1486                                         struct nix_mark_format_cfg  *req,
1487                                         struct nix_mark_format_cfg_rsp *rsp)
1488{
1489        u16 pcifunc = req->hdr.pcifunc;
1490        struct nix_hw *nix_hw;
1491        struct rvu_pfvf *pfvf;
1492        int blkaddr, rc;
1493        u32 cfg;
1494
1495        pfvf = rvu_get_pfvf(rvu, pcifunc);
1496        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1497        if (!pfvf->nixlf || blkaddr < 0)
1498                return NIX_AF_ERR_AF_LF_INVALID;
1499
1500        nix_hw = get_nix_hw(rvu->hw, blkaddr);
1501        if (!nix_hw)
1502                return NIX_AF_ERR_INVALID_NIXBLK;
1503
1504        cfg = (((u32)req->offset & 0x7) << 16) |
1505              (((u32)req->y_mask & 0xF) << 12) |
1506              (((u32)req->y_val & 0xF) << 8) |
1507              (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1508
1509        rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1510        if (rc < 0) {
1511                dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1512                        rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1513                return NIX_AF_ERR_MARK_CFG_FAIL;
1514        }
1515
1516        rsp->mark_format_idx = rc;
1517        return 0;
1518}
1519
1520/* Handle shaper update specially for few revisions */
1521static bool
1522handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
1523                            int lvl, u64 reg, u64 regval)
1524{
1525        u64 regbase, oldval, sw_xoff = 0;
1526        u64 dbgval, md_debug0 = 0;
1527        unsigned long poll_tmo;
1528        bool rate_reg = 0;
1529        u32 schq;
1530
1531        regbase = reg & 0xFFFF;
1532        schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1533
1534        /* Check for rate register */
1535        switch (lvl) {
1536        case NIX_TXSCH_LVL_TL1:
1537                md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
1538                sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
1539
1540                rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
1541                break;
1542        case NIX_TXSCH_LVL_TL2:
1543                md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
1544                sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
1545
1546                rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
1547                            regbase == NIX_AF_TL2X_PIR(0));
1548                break;
1549        case NIX_TXSCH_LVL_TL3:
1550                md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
1551                sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
1552
1553                rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
1554                            regbase == NIX_AF_TL3X_PIR(0));
1555                break;
1556        case NIX_TXSCH_LVL_TL4:
1557                md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
1558                sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
1559
1560                rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
1561                            regbase == NIX_AF_TL4X_PIR(0));
1562                break;
1563        case NIX_TXSCH_LVL_MDQ:
1564                sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
1565                rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
1566                            regbase == NIX_AF_MDQX_PIR(0));
1567                break;
1568        }
1569
1570        if (!rate_reg)
1571                return false;
1572
1573        /* Nothing special to do when state is not toggled */
1574        oldval = rvu_read64(rvu, blkaddr, reg);
1575        if ((oldval & 0x1) == (regval & 0x1)) {
1576                rvu_write64(rvu, blkaddr, reg, regval);
1577                return true;
1578        }
1579
1580        /* PIR/CIR disable */
1581        if (!(regval & 0x1)) {
1582                rvu_write64(rvu, blkaddr, sw_xoff, 1);
1583                rvu_write64(rvu, blkaddr, reg, 0);
1584                udelay(4);
1585                rvu_write64(rvu, blkaddr, sw_xoff, 0);
1586                return true;
1587        }
1588
1589        /* PIR/CIR enable */
1590        rvu_write64(rvu, blkaddr, sw_xoff, 1);
1591        if (md_debug0) {
1592                poll_tmo = jiffies + usecs_to_jiffies(10000);
1593                /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
1594                do {
1595                        if (time_after(jiffies, poll_tmo)) {
1596                                dev_err(rvu->dev,
1597                                        "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
1598                                        nixlf, schq, lvl);
1599                                goto exit;
1600                        }
1601                        usleep_range(1, 5);
1602                        dbgval = rvu_read64(rvu, blkaddr, md_debug0);
1603                } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
1604        }
1605        rvu_write64(rvu, blkaddr, reg, regval);
1606exit:
1607        rvu_write64(rvu, blkaddr, sw_xoff, 0);
1608        return true;
1609}
1610
1611/* Disable shaping of pkts by a scheduler queue
1612 * at a given scheduler level.
1613 */
1614static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1615                                 int nixlf, int lvl, int schq)
1616{
1617        struct rvu_hwinfo *hw = rvu->hw;
1618        u64  cir_reg = 0, pir_reg = 0;
1619        u64  cfg;
1620
1621        switch (lvl) {
1622        case NIX_TXSCH_LVL_TL1:
1623                cir_reg = NIX_AF_TL1X_CIR(schq);
1624                pir_reg = 0; /* PIR not available at TL1 */
1625                break;
1626        case NIX_TXSCH_LVL_TL2:
1627                cir_reg = NIX_AF_TL2X_CIR(schq);
1628                pir_reg = NIX_AF_TL2X_PIR(schq);
1629                break;
1630        case NIX_TXSCH_LVL_TL3:
1631                cir_reg = NIX_AF_TL3X_CIR(schq);
1632                pir_reg = NIX_AF_TL3X_PIR(schq);
1633                break;
1634        case NIX_TXSCH_LVL_TL4:
1635                cir_reg = NIX_AF_TL4X_CIR(schq);
1636                pir_reg = NIX_AF_TL4X_PIR(schq);
1637                break;
1638        case NIX_TXSCH_LVL_MDQ:
1639                cir_reg = NIX_AF_MDQX_CIR(schq);
1640                pir_reg = NIX_AF_MDQX_PIR(schq);
1641                break;
1642        }
1643
1644        /* Shaper state toggle needs wait/poll */
1645        if (hw->cap.nix_shaper_toggle_wait) {
1646                if (cir_reg)
1647                        handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1648                                                    lvl, cir_reg, 0);
1649                if (pir_reg)
1650                        handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1651                                                    lvl, pir_reg, 0);
1652                return;
1653        }
1654
1655        if (!cir_reg)
1656                return;
1657        cfg = rvu_read64(rvu, blkaddr, cir_reg);
1658        rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1659
1660        if (!pir_reg)
1661                return;
1662        cfg = rvu_read64(rvu, blkaddr, pir_reg);
1663        rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1664}
1665
1666static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1667                                 int lvl, int schq)
1668{
1669        struct rvu_hwinfo *hw = rvu->hw;
1670        int link_level;
1671        int link;
1672
1673        if (lvl >= hw->cap.nix_tx_aggr_lvl)
1674                return;
1675
1676        /* Reset TL4's SDP link config */
1677        if (lvl == NIX_TXSCH_LVL_TL4)
1678                rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1679
1680        link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1681                        NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1682        if (lvl != link_level)
1683                return;
1684
1685        /* Reset TL2's CGX or LBK link config */
1686        for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1687                rvu_write64(rvu, blkaddr,
1688                            NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1689}
1690
1691static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
1692                              int lvl, int schq)
1693{
1694        struct rvu_hwinfo *hw = rvu->hw;
1695        u64 reg;
1696
1697        /* Skip this if shaping is not supported */
1698        if (!hw->cap.nix_shaping)
1699                return;
1700
1701        /* Clear level specific SW_XOFF */
1702        switch (lvl) {
1703        case NIX_TXSCH_LVL_TL1:
1704                reg = NIX_AF_TL1X_SW_XOFF(schq);
1705                break;
1706        case NIX_TXSCH_LVL_TL2:
1707                reg = NIX_AF_TL2X_SW_XOFF(schq);
1708                break;
1709        case NIX_TXSCH_LVL_TL3:
1710                reg = NIX_AF_TL3X_SW_XOFF(schq);
1711                break;
1712        case NIX_TXSCH_LVL_TL4:
1713                reg = NIX_AF_TL4X_SW_XOFF(schq);
1714                break;
1715        case NIX_TXSCH_LVL_MDQ:
1716                reg = NIX_AF_MDQX_SW_XOFF(schq);
1717                break;
1718        default:
1719                return;
1720        }
1721
1722        rvu_write64(rvu, blkaddr, reg, 0x0);
1723}
1724
1725static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1726{
1727        struct rvu_hwinfo *hw = rvu->hw;
1728        int pf = rvu_get_pf(pcifunc);
1729        u8 cgx_id = 0, lmac_id = 0;
1730
1731        if (is_afvf(pcifunc)) {/* LBK links */
1732                return hw->cgx_links;
1733        } else if (is_pf_cgxmapped(rvu, pf)) {
1734                rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1735                return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1736        }
1737
1738        /* SDP link */
1739        return hw->cgx_links + hw->lbk_links;
1740}
1741
1742static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1743                                 int link, int *start, int *end)
1744{
1745        struct rvu_hwinfo *hw = rvu->hw;
1746        int pf = rvu_get_pf(pcifunc);
1747
1748        if (is_afvf(pcifunc)) { /* LBK links */
1749                *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1750                *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1751        } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1752                *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1753                *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1754        } else { /* SDP link */
1755                *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1756                        (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1757                *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1758        }
1759}
1760
1761static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1762                                      struct nix_hw *nix_hw,
1763                                      struct nix_txsch_alloc_req *req)
1764{
1765        struct rvu_hwinfo *hw = rvu->hw;
1766        int schq, req_schq, free_cnt;
1767        struct nix_txsch *txsch;
1768        int link, start, end;
1769
1770        txsch = &nix_hw->txsch[lvl];
1771        req_schq = req->schq_contig[lvl] + req->schq[lvl];
1772
1773        if (!req_schq)
1774                return 0;
1775
1776        link = nix_get_tx_link(rvu, pcifunc);
1777
1778        /* For traffic aggregating scheduler level, one queue is enough */
1779        if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1780                if (req_schq != 1)
1781                        return NIX_AF_ERR_TLX_ALLOC_FAIL;
1782                return 0;
1783        }
1784
1785        /* Get free SCHQ count and check if request can be accomodated */
1786        if (hw->cap.nix_fixed_txschq_mapping) {
1787                nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1788                schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1789                if (end <= txsch->schq.max && schq < end &&
1790                    !test_bit(schq, txsch->schq.bmap))
1791                        free_cnt = 1;
1792                else
1793                        free_cnt = 0;
1794        } else {
1795                free_cnt = rvu_rsrc_free_count(&txsch->schq);
1796        }
1797
1798        if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1799                return NIX_AF_ERR_TLX_ALLOC_FAIL;
1800
1801        /* If contiguous queues are needed, check for availability */
1802        if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1803            !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1804                return NIX_AF_ERR_TLX_ALLOC_FAIL;
1805
1806        return 0;
1807}
1808
1809static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1810                            struct nix_txsch_alloc_rsp *rsp,
1811                            int lvl, int start, int end)
1812{
1813        struct rvu_hwinfo *hw = rvu->hw;
1814        u16 pcifunc = rsp->hdr.pcifunc;
1815        int idx, schq;
1816
1817        /* For traffic aggregating levels, queue alloc is based
1818         * on transmit link to which PF_FUNC is mapped to.
1819         */
1820        if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1821                /* A single TL queue is allocated */
1822                if (rsp->schq_contig[lvl]) {
1823                        rsp->schq_contig[lvl] = 1;
1824                        rsp->schq_contig_list[lvl][0] = start;
1825                }
1826
1827                /* Both contig and non-contig reqs doesn't make sense here */
1828                if (rsp->schq_contig[lvl])
1829                        rsp->schq[lvl] = 0;
1830
1831                if (rsp->schq[lvl]) {
1832                        rsp->schq[lvl] = 1;
1833                        rsp->schq_list[lvl][0] = start;
1834                }
1835                return;
1836        }
1837
1838        /* Adjust the queue request count if HW supports
1839         * only one queue per level configuration.
1840         */
1841        if (hw->cap.nix_fixed_txschq_mapping) {
1842                idx = pcifunc & RVU_PFVF_FUNC_MASK;
1843                schq = start + idx;
1844                if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1845                        rsp->schq_contig[lvl] = 0;
1846                        rsp->schq[lvl] = 0;
1847                        return;
1848                }
1849
1850                if (rsp->schq_contig[lvl]) {
1851                        rsp->schq_contig[lvl] = 1;
1852                        set_bit(schq, txsch->schq.bmap);
1853                        rsp->schq_contig_list[lvl][0] = schq;
1854                        rsp->schq[lvl] = 0;
1855                } else if (rsp->schq[lvl]) {
1856                        rsp->schq[lvl] = 1;
1857                        set_bit(schq, txsch->schq.bmap);
1858                        rsp->schq_list[lvl][0] = schq;
1859                }
1860                return;
1861        }
1862
1863        /* Allocate contiguous queue indices requesty first */
1864        if (rsp->schq_contig[lvl]) {
1865                schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1866                                                  txsch->schq.max, start,
1867                                                  rsp->schq_contig[lvl], 0);
1868                if (schq >= end)
1869                        rsp->schq_contig[lvl] = 0;
1870                for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1871                        set_bit(schq, txsch->schq.bmap);
1872                        rsp->schq_contig_list[lvl][idx] = schq;
1873                        schq++;
1874                }
1875        }
1876
1877        /* Allocate non-contiguous queue indices */
1878        if (rsp->schq[lvl]) {
1879                idx = 0;
1880                for (schq = start; schq < end; schq++) {
1881                        if (!test_bit(schq, txsch->schq.bmap)) {
1882                                set_bit(schq, txsch->schq.bmap);
1883                                rsp->schq_list[lvl][idx++] = schq;
1884                        }
1885                        if (idx == rsp->schq[lvl])
1886                                break;
1887                }
1888                /* Update how many were allocated */
1889                rsp->schq[lvl] = idx;
1890        }
1891}
1892
1893int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1894                                     struct nix_txsch_alloc_req *req,
1895                                     struct nix_txsch_alloc_rsp *rsp)
1896{
1897        struct rvu_hwinfo *hw = rvu->hw;
1898        u16 pcifunc = req->hdr.pcifunc;
1899        int link, blkaddr, rc = 0;
1900        int lvl, idx, start, end;
1901        struct nix_txsch *txsch;
1902        struct nix_hw *nix_hw;
1903        u32 *pfvf_map;
1904        int nixlf;
1905        u16 schq;
1906
1907        rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1908        if (rc)
1909                return rc;
1910
1911        nix_hw = get_nix_hw(rvu->hw, blkaddr);
1912        if (!nix_hw)
1913                return NIX_AF_ERR_INVALID_NIXBLK;
1914
1915        mutex_lock(&rvu->rsrc_lock);
1916
1917        /* Check if request is valid as per HW capabilities
1918         * and can be accomodated.
1919         */
1920        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1921                rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1922                if (rc)
1923                        goto err;
1924        }
1925
1926        /* Allocate requested Tx scheduler queues */
1927        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1928                txsch = &nix_hw->txsch[lvl];
1929                pfvf_map = txsch->pfvf_map;
1930
1931                if (!req->schq[lvl] && !req->schq_contig[lvl])
1932                        continue;
1933
1934                rsp->schq[lvl] = req->schq[lvl];
1935                rsp->schq_contig[lvl] = req->schq_contig[lvl];
1936
1937                link = nix_get_tx_link(rvu, pcifunc);
1938
1939                if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1940                        start = link;
1941                        end = link;
1942                } else if (hw->cap.nix_fixed_txschq_mapping) {
1943                        nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1944                } else {
1945                        start = 0;
1946                        end = txsch->schq.max;
1947                }
1948
1949                nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1950
1951                /* Reset queue config */
1952                for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1953                        schq = rsp->schq_contig_list[lvl][idx];
1954                        if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1955                            NIX_TXSCHQ_CFG_DONE))
1956                                pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1957                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1958                        nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
1959                }
1960
1961                for (idx = 0; idx < req->schq[lvl]; idx++) {
1962                        schq = rsp->schq_list[lvl][idx];
1963                        if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1964                            NIX_TXSCHQ_CFG_DONE))
1965                                pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1966                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1967                        nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
1968                }
1969        }
1970
1971        rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1972        rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1973        rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1974                                       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1975                                       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1976        goto exit;
1977err:
1978        rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1979exit:
1980        mutex_unlock(&rvu->rsrc_lock);
1981        return rc;
1982}
1983
1984static int nix_smq_flush(struct rvu *rvu, int blkaddr,
1985                         int smq, u16 pcifunc, int nixlf)
1986{
1987        int pf = rvu_get_pf(pcifunc);
1988        u8 cgx_id = 0, lmac_id = 0;
1989        int err, restore_tx_en = 0;
1990        u64 cfg;
1991
1992        /* enable cgx tx if disabled */
1993        if (is_pf_cgxmapped(rvu, pf)) {
1994                rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1995                restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1996                                                    lmac_id, true);
1997        }
1998
1999        cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2000        /* Do SMQ flush and set enqueue xoff */
2001        cfg |= BIT_ULL(50) | BIT_ULL(49);
2002        rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2003
2004        /* Disable backpressure from physical link,
2005         * otherwise SMQ flush may stall.
2006         */
2007        rvu_cgx_enadis_rx_bp(rvu, pf, false);
2008
2009        /* Wait for flush to complete */
2010        err = rvu_poll_reg(rvu, blkaddr,
2011                           NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2012        if (err)
2013                dev_err(rvu->dev,
2014                        "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
2015
2016        rvu_cgx_enadis_rx_bp(rvu, pf, true);
2017        /* restore cgx tx state */
2018        if (restore_tx_en)
2019                cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2020        return err;
2021}
2022
2023static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2024{
2025        int blkaddr, nixlf, lvl, schq, err;
2026        struct rvu_hwinfo *hw = rvu->hw;
2027        struct nix_txsch *txsch;
2028        struct nix_hw *nix_hw;
2029        u16 map_func;
2030
2031        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2032        if (blkaddr < 0)
2033                return NIX_AF_ERR_AF_LF_INVALID;
2034
2035        nix_hw = get_nix_hw(rvu->hw, blkaddr);
2036        if (!nix_hw)
2037                return NIX_AF_ERR_INVALID_NIXBLK;
2038
2039        nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2040        if (nixlf < 0)
2041                return NIX_AF_ERR_AF_LF_INVALID;
2042
2043        /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/
2044        mutex_lock(&rvu->rsrc_lock);
2045        for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2046                txsch = &nix_hw->txsch[lvl];
2047
2048                if (lvl >= hw->cap.nix_tx_aggr_lvl)
2049                        continue;
2050
2051                for (schq = 0; schq < txsch->schq.max; schq++) {
2052                        if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2053                                continue;
2054                        nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2055                        nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2056                }
2057        }
2058        nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2059                          nix_get_tx_link(rvu, pcifunc));
2060
2061        /* On PF cleanup, clear cfg done flag as
2062         * PF would have changed default config.
2063         */
2064        if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2065                txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2066                schq = nix_get_tx_link(rvu, pcifunc);
2067                /* Do not clear pcifunc in txsch->pfvf_map[schq] because
2068                 * VF might be using this TL1 queue
2069                 */
2070                map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2071                txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2072        }
2073
2074        /* Flush SMQs */
2075        txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2076        for (schq = 0; schq < txsch->schq.max; schq++) {
2077                if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2078                        continue;
2079                nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2080        }
2081
2082        /* Now free scheduler queues to free pool */
2083        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2084                 /* TLs above aggregation level are shared across all PF
2085                  * and it's VFs, hence skip freeing them.
2086                  */
2087                if (lvl >= hw->cap.nix_tx_aggr_lvl)
2088                        continue;
2089
2090                txsch = &nix_hw->txsch[lvl];
2091                for (schq = 0; schq < txsch->schq.max; schq++) {
2092                        if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2093                                continue;
2094                        rvu_free_rsrc(&txsch->schq, schq);
2095                        txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2096                }
2097        }
2098        mutex_unlock(&rvu->rsrc_lock);
2099
2100        /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
2101        rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
2102        err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
2103        if (err)
2104                dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2105
2106        return 0;
2107}
2108
2109static int nix_txschq_free_one(struct rvu *rvu,
2110                               struct nix_txsch_free_req *req)
2111{
2112        struct rvu_hwinfo *hw = rvu->hw;
2113        u16 pcifunc = req->hdr.pcifunc;
2114        int lvl, schq, nixlf, blkaddr;
2115        struct nix_txsch *txsch;
2116        struct nix_hw *nix_hw;
2117        u32 *pfvf_map;
2118        int rc;
2119
2120        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2121        if (blkaddr < 0)
2122                return NIX_AF_ERR_AF_LF_INVALID;
2123
2124        nix_hw = get_nix_hw(rvu->hw, blkaddr);
2125        if (!nix_hw)
2126                return NIX_AF_ERR_INVALID_NIXBLK;
2127
2128        nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2129        if (nixlf < 0)
2130                return NIX_AF_ERR_AF_LF_INVALID;
2131
2132        lvl = req->schq_lvl;
2133        schq = req->schq;
2134        txsch = &nix_hw->txsch[lvl];
2135
2136        if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2137                return 0;
2138
2139        pfvf_map = txsch->pfvf_map;
2140        mutex_lock(&rvu->rsrc_lock);
2141
2142        if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2143                rc = NIX_AF_ERR_TLX_INVALID;
2144                goto err;
2145        }
2146
2147        /* Clear SW_XOFF of this resource only.
2148         * For SMQ level, all path XOFF's
2149         * need to be made clear by user
2150         */
2151        nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2152
2153        /* Flush if it is a SMQ. Onus of disabling
2154         * TL2/3 queue links before SMQ flush is on user
2155         */
2156        if (lvl == NIX_TXSCH_LVL_SMQ &&
2157            nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2158                rc = NIX_AF_SMQ_FLUSH_FAILED;
2159                goto err;
2160        }
2161
2162        /* Free the resource */
2163        rvu_free_rsrc(&txsch->schq, schq);
2164        txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2165        mutex_unlock(&rvu->rsrc_lock);
2166        return 0;
2167err:
2168        mutex_unlock(&rvu->rsrc_lock);
2169        return rc;
2170}
2171
2172int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2173                                    struct nix_txsch_free_req *req,
2174                                    struct msg_rsp *rsp)
2175{
2176        if (req->flags & TXSCHQ_FREE_ALL)
2177                return nix_txschq_free(rvu, req->hdr.pcifunc);
2178        else
2179                return nix_txschq_free_one(rvu, req);
2180}
2181
2182static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2183                                      int lvl, u64 reg, u64 regval)
2184{
2185        u64 regbase = reg & 0xFFFF;
2186        u16 schq, parent;
2187
2188        if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2189                return false;
2190
2191        schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2192        /* Check if this schq belongs to this PF/VF or not */
2193        if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2194                return false;
2195
2196        parent = (regval >> 16) & 0x1FF;
2197        /* Validate MDQ's TL4 parent */
2198        if (regbase == NIX_AF_MDQX_PARENT(0) &&
2199            !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2200                return false;
2201
2202        /* Validate TL4's TL3 parent */
2203        if (regbase == NIX_AF_TL4X_PARENT(0) &&
2204            !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2205                return false;
2206
2207        /* Validate TL3's TL2 parent */
2208        if (regbase == NIX_AF_TL3X_PARENT(0) &&
2209            !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2210                return false;
2211
2212        /* Validate TL2's TL1 parent */
2213        if (regbase == NIX_AF_TL2X_PARENT(0) &&
2214            !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2215                return false;
2216
2217        return true;
2218}
2219
2220static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2221{
2222        u64 regbase;
2223
2224        if (hw->cap.nix_shaping)
2225                return true;
2226
2227        /* If shaping and coloring is not supported, then
2228         * *_CIR and *_PIR registers should not be configured.
2229         */
2230        regbase = reg & 0xFFFF;
2231
2232        switch (lvl) {
2233        case NIX_TXSCH_LVL_TL1:
2234                if (regbase == NIX_AF_TL1X_CIR(0))
2235                        return false;
2236                break;
2237        case NIX_TXSCH_LVL_TL2:
2238                if (regbase == NIX_AF_TL2X_CIR(0) ||
2239                    regbase == NIX_AF_TL2X_PIR(0))
2240                        return false;
2241                break;
2242        case NIX_TXSCH_LVL_TL3:
2243                if (regbase == NIX_AF_TL3X_CIR(0) ||
2244                    regbase == NIX_AF_TL3X_PIR(0))
2245                        return false;
2246                break;
2247        case NIX_TXSCH_LVL_TL4:
2248                if (regbase == NIX_AF_TL4X_CIR(0) ||
2249                    regbase == NIX_AF_TL4X_PIR(0))
2250                        return false;
2251                break;
2252        case NIX_TXSCH_LVL_MDQ:
2253                if (regbase == NIX_AF_MDQX_CIR(0) ||
2254                    regbase == NIX_AF_MDQX_PIR(0))
2255                        return false;
2256                break;
2257        }
2258        return true;
2259}
2260
2261static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2262                                u16 pcifunc, int blkaddr)
2263{
2264        u32 *pfvf_map;
2265        int schq;
2266
2267        schq = nix_get_tx_link(rvu, pcifunc);
2268        pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2269        /* Skip if PF has already done the config */
2270        if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2271                return;
2272        rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2273                    (TXSCH_TL1_DFLT_RR_PRIO << 1));
2274
2275        /* On OcteonTx2 the config was in bytes and newer silcons
2276         * it's changed to weight.
2277         */
2278        if (!rvu->hw->cap.nix_common_dwrr_mtu)
2279                rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2280                            TXSCH_TL1_DFLT_RR_QTM);
2281        else
2282                rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2283                            CN10K_MAX_DWRR_WEIGHT);
2284
2285        rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2286        pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
2287}
2288
2289/* Register offset - [15:0]
2290 * Scheduler Queue number - [25:16]
2291 */
2292#define NIX_TX_SCHQ_MASK        GENMASK_ULL(25, 0)
2293
2294static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2295                               int blkaddr, struct nix_txschq_config *req,
2296                               struct nix_txschq_config *rsp)
2297{
2298        u16 pcifunc = req->hdr.pcifunc;
2299        int idx, schq;
2300        u64 reg;
2301
2302        for (idx = 0; idx < req->num_regs; idx++) {
2303                reg = req->reg[idx];
2304                reg &= NIX_TX_SCHQ_MASK;
2305                schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2306                if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2307                    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2308                        return NIX_AF_INVAL_TXSCHQ_CFG;
2309                rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2310        }
2311        rsp->lvl = req->lvl;
2312        rsp->num_regs = req->num_regs;
2313        return 0;
2314}
2315
2316static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
2317                               u16 pcifunc, struct nix_txsch *txsch)
2318{
2319        struct rvu_hwinfo *hw = rvu->hw;
2320        int lbk_link_start, lbk_links;
2321        u8 pf = rvu_get_pf(pcifunc);
2322        int schq;
2323
2324        if (!is_pf_cgxmapped(rvu, pf))
2325                return;
2326
2327        lbk_link_start = hw->cgx_links;
2328
2329        for (schq = 0; schq < txsch->schq.max; schq++) {
2330                if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2331                        continue;
2332                /* Enable all LBK links with channel 63 by default so that
2333                 * packets can be sent to LBK with a NPC TX MCAM rule
2334                 */
2335                lbk_links = hw->lbk_links;
2336                while (lbk_links--)
2337                        rvu_write64(rvu, blkaddr,
2338                                    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2339                                                              lbk_link_start +
2340                                                              lbk_links),
2341                                    BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
2342        }
2343}
2344
2345int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2346                                    struct nix_txschq_config *req,
2347                                    struct nix_txschq_config *rsp)
2348{
2349        u64 reg, val, regval, schq_regbase, val_mask;
2350        struct rvu_hwinfo *hw = rvu->hw;
2351        u16 pcifunc = req->hdr.pcifunc;
2352        struct nix_txsch *txsch;
2353        struct nix_hw *nix_hw;
2354        int blkaddr, idx, err;
2355        int nixlf, schq;
2356        u32 *pfvf_map;
2357
2358        if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2359            req->num_regs > MAX_REGS_PER_MBOX_MSG)
2360                return NIX_AF_INVAL_TXSCHQ_CFG;
2361
2362        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2363        if (err)
2364                return err;
2365
2366        nix_hw = get_nix_hw(rvu->hw, blkaddr);
2367        if (!nix_hw)
2368                return NIX_AF_ERR_INVALID_NIXBLK;
2369
2370        if (req->read)
2371                return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2372
2373        txsch = &nix_hw->txsch[req->lvl];
2374        pfvf_map = txsch->pfvf_map;
2375
2376        if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2377            pcifunc & RVU_PFVF_FUNC_MASK) {
2378                mutex_lock(&rvu->rsrc_lock);
2379                if (req->lvl == NIX_TXSCH_LVL_TL1)
2380                        nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2381                mutex_unlock(&rvu->rsrc_lock);
2382                return 0;
2383        }
2384
2385        for (idx = 0; idx < req->num_regs; idx++) {
2386                reg = req->reg[idx];
2387                reg &= NIX_TX_SCHQ_MASK;
2388                regval = req->regval[idx];
2389                schq_regbase = reg & 0xFFFF;
2390                val_mask = req->regval_mask[idx];
2391
2392                if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2393                                               txsch->lvl, reg, regval))
2394                        return NIX_AF_INVAL_TXSCHQ_CFG;
2395
2396                /* Check if shaping and coloring is supported */
2397                if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2398                        continue;
2399
2400                val = rvu_read64(rvu, blkaddr, reg);
2401                regval = (val & val_mask) | (regval & ~val_mask);
2402
2403                /* Handle shaping state toggle specially */
2404                if (hw->cap.nix_shaper_toggle_wait &&
2405                    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2406                                                req->lvl, reg, regval))
2407                        continue;
2408
2409                /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2410                if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2411                        nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2412                                           pcifunc, 0);
2413                        regval &= ~(0x7FULL << 24);
2414                        regval |= ((u64)nixlf << 24);
2415                }
2416
2417                /* Clear 'BP_ENA' config, if it's not allowed */
2418                if (!hw->cap.nix_tx_link_bp) {
2419                        if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2420                            (schq_regbase & 0xFF00) ==
2421                            NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2422                                regval &= ~BIT_ULL(13);
2423                }
2424
2425                /* Mark config as done for TL1 by PF */
2426                if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2427                    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2428                        schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2429                        mutex_lock(&rvu->rsrc_lock);
2430                        pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2431                                                        NIX_TXSCHQ_CFG_DONE);
2432                        mutex_unlock(&rvu->rsrc_lock);
2433                }
2434
2435                /* SMQ flush is special hence split register writes such
2436                 * that flush first and write rest of the bits later.
2437                 */
2438                if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2439                    (regval & BIT_ULL(49))) {
2440                        schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2441                        nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2442                        regval &= ~BIT_ULL(49);
2443                }
2444                rvu_write64(rvu, blkaddr, reg, regval);
2445        }
2446
2447        rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
2448                           &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
2449        return 0;
2450}
2451
2452static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2453                           struct nix_vtag_config *req)
2454{
2455        u64 regval = req->vtag_size;
2456
2457        if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2458            req->vtag_size > VTAGSIZE_T8)
2459                return -EINVAL;
2460
2461        /* RX VTAG Type 7 reserved for vf vlan */
2462        if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2463                return NIX_AF_ERR_RX_VTAG_INUSE;
2464
2465        if (req->rx.capture_vtag)
2466                regval |= BIT_ULL(5);
2467        if (req->rx.strip_vtag)
2468                regval |= BIT_ULL(4);
2469
2470        rvu_write64(rvu, blkaddr,
2471                    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2472        return 0;
2473}
2474
2475static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2476                            u16 pcifunc, int index)
2477{
2478        struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2479        struct nix_txvlan *vlan;
2480
2481        if (!nix_hw)
2482                return NIX_AF_ERR_INVALID_NIXBLK;
2483
2484        vlan = &nix_hw->txvlan;
2485        if (vlan->entry2pfvf_map[index] != pcifunc)
2486                return NIX_AF_ERR_PARAM;
2487
2488        rvu_write64(rvu, blkaddr,
2489                    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2490        rvu_write64(rvu, blkaddr,
2491                    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2492
2493        vlan->entry2pfvf_map[index] = 0;
2494        rvu_free_rsrc(&vlan->rsrc, index);
2495
2496        return 0;
2497}
2498
2499static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2500{
2501        struct nix_txvlan *vlan;
2502        struct nix_hw *nix_hw;
2503        int index, blkaddr;
2504
2505        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2506        if (blkaddr < 0)
2507                return;
2508
2509        nix_hw = get_nix_hw(rvu->hw, blkaddr);
2510        if (!nix_hw)
2511                return;
2512
2513        vlan = &nix_hw->txvlan;
2514
2515        mutex_lock(&vlan->rsrc_lock);
2516        /* Scan all the entries and free the ones mapped to 'pcifunc' */
2517        for (index = 0; index < vlan->rsrc.max; index++) {
2518                if (vlan->entry2pfvf_map[index] == pcifunc)
2519                        nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2520        }
2521        mutex_unlock(&vlan->rsrc_lock);
2522}
2523
2524static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2525                             u64 vtag, u8 size)
2526{
2527        struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2528        struct nix_txvlan *vlan;
2529        u64 regval;
2530        int index;
2531
2532        if (!nix_hw)
2533                return NIX_AF_ERR_INVALID_NIXBLK;
2534
2535        vlan = &nix_hw->txvlan;
2536
2537        mutex_lock(&vlan->rsrc_lock);
2538
2539        index = rvu_alloc_rsrc(&vlan->rsrc);
2540        if (index < 0) {
2541                mutex_unlock(&vlan->rsrc_lock);
2542                return index;
2543        }
2544
2545        mutex_unlock(&vlan->rsrc_lock);
2546
2547        regval = size ? vtag : vtag << 32;
2548
2549        rvu_write64(rvu, blkaddr,
2550                    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2551        rvu_write64(rvu, blkaddr,
2552                    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2553
2554        return index;
2555}
2556
2557static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2558                             struct nix_vtag_config *req)
2559{
2560        struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2561        u16 pcifunc = req->hdr.pcifunc;
2562        int idx0 = req->tx.vtag0_idx;
2563        int idx1 = req->tx.vtag1_idx;
2564        struct nix_txvlan *vlan;
2565        int err = 0;
2566
2567        if (!nix_hw)
2568                return NIX_AF_ERR_INVALID_NIXBLK;
2569
2570        vlan = &nix_hw->txvlan;
2571        if (req->tx.free_vtag0 && req->tx.free_vtag1)
2572                if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2573                    vlan->entry2pfvf_map[idx1] != pcifunc)
2574                        return NIX_AF_ERR_PARAM;
2575
2576        mutex_lock(&vlan->rsrc_lock);
2577
2578        if (req->tx.free_vtag0) {
2579                err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2580                if (err)
2581                        goto exit;
2582        }
2583
2584        if (req->tx.free_vtag1)
2585                err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2586
2587exit:
2588        mutex_unlock(&vlan->rsrc_lock);
2589        return err;
2590}
2591
2592static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2593                           struct nix_vtag_config *req,
2594                           struct nix_vtag_config_rsp *rsp)
2595{
2596        struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2597        struct nix_txvlan *vlan;
2598        u16 pcifunc = req->hdr.pcifunc;
2599
2600        if (!nix_hw)
2601                return NIX_AF_ERR_INVALID_NIXBLK;
2602
2603        vlan = &nix_hw->txvlan;
2604        if (req->tx.cfg_vtag0) {
2605                rsp->vtag0_idx =
2606                        nix_tx_vtag_alloc(rvu, blkaddr,
2607                                          req->tx.vtag0, req->vtag_size);
2608
2609                if (rsp->vtag0_idx < 0)
2610                        return NIX_AF_ERR_TX_VTAG_NOSPC;
2611
2612                vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2613        }
2614
2615        if (req->tx.cfg_vtag1) {
2616                rsp->vtag1_idx =
2617                        nix_tx_vtag_alloc(rvu, blkaddr,
2618                                          req->tx.vtag1, req->vtag_size);
2619
2620                if (rsp->vtag1_idx < 0)
2621                        goto err_free;
2622
2623                vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2624        }
2625
2626        return 0;
2627
2628err_free:
2629        if (req->tx.cfg_vtag0)
2630                nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2631
2632        return NIX_AF_ERR_TX_VTAG_NOSPC;
2633}
2634
2635int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2636                                  struct nix_vtag_config *req,
2637                                  struct nix_vtag_config_rsp *rsp)
2638{
2639        u16 pcifunc = req->hdr.pcifunc;
2640        int blkaddr, nixlf, err;
2641
2642        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2643        if (err)
2644                return err;
2645
2646        if (req->cfg_type) {
2647                /* rx vtag configuration */
2648                err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2649                if (err)
2650                        return NIX_AF_ERR_PARAM;
2651        } else {
2652                /* tx vtag configuration */
2653                if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2654                    (req->tx.free_vtag0 || req->tx.free_vtag1))
2655                        return NIX_AF_ERR_PARAM;
2656
2657                if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2658                        return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2659
2660                if (req->tx.free_vtag0 || req->tx.free_vtag1)
2661                        return nix_tx_vtag_decfg(rvu, blkaddr, req);
2662        }
2663
2664        return 0;
2665}
2666
2667static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2668                             int mce, u8 op, u16 pcifunc, int next, bool eol)
2669{
2670        struct nix_aq_enq_req aq_req;
2671        int err;
2672
2673        aq_req.hdr.pcifunc = 0;
2674        aq_req.ctype = NIX_AQ_CTYPE_MCE;
2675        aq_req.op = op;
2676        aq_req.qidx = mce;
2677
2678        /* Use RSS with RSS index 0 */
2679        aq_req.mce.op = 1;
2680        aq_req.mce.index = 0;
2681        aq_req.mce.eol = eol;
2682        aq_req.mce.pf_func = pcifunc;
2683        aq_req.mce.next = next;
2684
2685        /* All fields valid */
2686        *(u64 *)(&aq_req.mce_mask) = ~0ULL;
2687
2688        err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2689        if (err) {
2690                dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2691                        rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2692                return err;
2693        }
2694        return 0;
2695}
2696
2697static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2698                                     u16 pcifunc, bool add)
2699{
2700        struct mce *mce, *tail = NULL;
2701        bool delete = false;
2702
2703        /* Scan through the current list */
2704        hlist_for_each_entry(mce, &mce_list->head, node) {
2705                /* If already exists, then delete */
2706                if (mce->pcifunc == pcifunc && !add) {
2707                        delete = true;
2708                        break;
2709                } else if (mce->pcifunc == pcifunc && add) {
2710                        /* entry already exists */
2711                        return 0;
2712                }
2713                tail = mce;
2714        }
2715
2716        if (delete) {
2717                hlist_del(&mce->node);
2718                kfree(mce);
2719                mce_list->count--;
2720                return 0;
2721        }
2722
2723        if (!add)
2724                return 0;
2725
2726        /* Add a new one to the list, at the tail */
2727        mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2728        if (!mce)
2729                return -ENOMEM;
2730        mce->pcifunc = pcifunc;
2731        if (!tail)
2732                hlist_add_head(&mce->node, &mce_list->head);
2733        else
2734                hlist_add_behind(&mce->node, &tail->node);
2735        mce_list->count++;
2736        return 0;
2737}
2738
2739int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2740                        struct nix_mce_list *mce_list,
2741                        int mce_idx, int mcam_index, bool add)
2742{
2743        int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2744        struct npc_mcam *mcam = &rvu->hw->mcam;
2745        struct nix_mcast *mcast;
2746        struct nix_hw *nix_hw;
2747        struct mce *mce;
2748
2749        if (!mce_list)
2750                return -EINVAL;
2751
2752        /* Get this PF/VF func's MCE index */
2753        idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2754
2755        if (idx > (mce_idx + mce_list->max)) {
2756                dev_err(rvu->dev,
2757                        "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2758                        __func__, idx, mce_list->max,
2759                        pcifunc >> RVU_PFVF_PF_SHIFT);
2760                return -EINVAL;
2761        }
2762
2763        err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2764        if (err)
2765                return err;
2766
2767        mcast = &nix_hw->mcast;
2768        mutex_lock(&mcast->mce_lock);
2769
2770        err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2771        if (err)
2772                goto end;
2773
2774        /* Disable MCAM entry in NPC */
2775        if (!mce_list->count) {
2776                npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2777                npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2778                goto end;
2779        }
2780
2781        /* Dump the updated list to HW */
2782        idx = mce_idx;
2783        last_idx = idx + mce_list->count - 1;
2784        hlist_for_each_entry(mce, &mce_list->head, node) {
2785                if (idx > last_idx)
2786                        break;
2787
2788                next_idx = idx + 1;
2789                /* EOL should be set in last MCE */
2790                err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2791                                        mce->pcifunc, next_idx,
2792                                        (next_idx > last_idx) ? true : false);
2793                if (err)
2794                        goto end;
2795                idx++;
2796        }
2797
2798end:
2799        mutex_unlock(&mcast->mce_lock);
2800        return err;
2801}
2802
2803void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2804                      struct nix_mce_list **mce_list, int *mce_idx)
2805{
2806        struct rvu_hwinfo *hw = rvu->hw;
2807        struct rvu_pfvf *pfvf;
2808
2809        if (!hw->cap.nix_rx_multicast ||
2810            !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2811                *mce_list = NULL;
2812                *mce_idx = 0;
2813                return;
2814        }
2815
2816        /* Get this PF/VF func's MCE index */
2817        pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2818
2819        if (type == NIXLF_BCAST_ENTRY) {
2820                *mce_list = &pfvf->bcast_mce_list;
2821                *mce_idx = pfvf->bcast_mce_idx;
2822        } else if (type == NIXLF_ALLMULTI_ENTRY) {
2823                *mce_list = &pfvf->mcast_mce_list;
2824                *mce_idx = pfvf->mcast_mce_idx;
2825        } else if (type == NIXLF_PROMISC_ENTRY) {
2826                *mce_list = &pfvf->promisc_mce_list;
2827                *mce_idx = pfvf->promisc_mce_idx;
2828        }  else {
2829                *mce_list = NULL;
2830                *mce_idx = 0;
2831        }
2832}
2833
2834static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2835                               int type, bool add)
2836{
2837        int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2838        struct npc_mcam *mcam = &rvu->hw->mcam;
2839        struct rvu_hwinfo *hw = rvu->hw;
2840        struct nix_mce_list *mce_list;
2841        int pf;
2842
2843        /* skip multicast pkt replication for AF's VFs & SDP links */
2844        if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
2845                return 0;
2846
2847        if (!hw->cap.nix_rx_multicast)
2848                return 0;
2849
2850        pf = rvu_get_pf(pcifunc);
2851        if (!is_pf_cgxmapped(rvu, pf))
2852                return 0;
2853
2854        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2855        if (blkaddr < 0)
2856                return -EINVAL;
2857
2858        nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2859        if (nixlf < 0)
2860                return -EINVAL;
2861
2862        nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2863
2864        mcam_index = npc_get_nixlf_mcam_index(mcam,
2865                                              pcifunc & ~RVU_PFVF_FUNC_MASK,
2866                                              nixlf, type);
2867        err = nix_update_mce_list(rvu, pcifunc, mce_list,
2868                                  mce_idx, mcam_index, add);
2869        return err;
2870}
2871
2872static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2873{
2874        struct nix_mcast *mcast = &nix_hw->mcast;
2875        int err, pf, numvfs, idx;
2876        struct rvu_pfvf *pfvf;
2877        u16 pcifunc;
2878        u64 cfg;
2879
2880        /* Skip PF0 (i.e AF) */
2881        for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2882                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2883                /* If PF is not enabled, nothing to do */
2884                if (!((cfg >> 20) & 0x01))
2885                        continue;
2886                /* Get numVFs attached to this PF */
2887                numvfs = (cfg >> 12) & 0xFF;
2888
2889                pfvf = &rvu->pf[pf];
2890
2891                /* This NIX0/1 block mapped to PF ? */
2892                if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2893                        continue;
2894
2895                /* save start idx of broadcast mce list */
2896                pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2897                nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2898
2899                /* save start idx of multicast mce list */
2900                pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2901                nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2902
2903                /* save the start idx of promisc mce list */
2904                pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2905                nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
2906
2907                for (idx = 0; idx < (numvfs + 1); idx++) {
2908                        /* idx-0 is for PF, followed by VFs */
2909                        pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2910                        pcifunc |= idx;
2911                        /* Add dummy entries now, so that we don't have to check
2912                         * for whether AQ_OP should be INIT/WRITE later on.
2913                         * Will be updated when a NIXLF is attached/detached to
2914                         * these PF/VFs.
2915                         */
2916                        err = nix_blk_setup_mce(rvu, nix_hw,
2917                                                pfvf->bcast_mce_idx + idx,
2918                                                NIX_AQ_INSTOP_INIT,
2919                                                pcifunc, 0, true);
2920                        if (err)
2921                                return err;
2922
2923                        /* add dummy entries to multicast mce list */
2924                        err = nix_blk_setup_mce(rvu, nix_hw,
2925                                                pfvf->mcast_mce_idx + idx,
2926                                                NIX_AQ_INSTOP_INIT,
2927                                                pcifunc, 0, true);
2928                        if (err)
2929                                return err;
2930
2931                        /* add dummy entries to promisc mce list */
2932                        err = nix_blk_setup_mce(rvu, nix_hw,
2933                                                pfvf->promisc_mce_idx + idx,
2934                                                NIX_AQ_INSTOP_INIT,
2935                                                pcifunc, 0, true);
2936                        if (err)
2937                                return err;
2938                }
2939        }
2940        return 0;
2941}
2942
2943static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2944{
2945        struct nix_mcast *mcast = &nix_hw->mcast;
2946        struct rvu_hwinfo *hw = rvu->hw;
2947        int err, size;
2948
2949        size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2950        size = (1ULL << size);
2951
2952        /* Alloc memory for multicast/mirror replication entries */
2953        err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2954                         (256UL << MC_TBL_SIZE), size);
2955        if (err)
2956                return -ENOMEM;
2957
2958        rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2959                    (u64)mcast->mce_ctx->iova);
2960
2961        /* Set max list length equal to max no of VFs per PF  + PF itself */
2962        rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2963                    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2964
2965        /* Alloc memory for multicast replication buffers */
2966        size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2967        err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2968                         (8UL << MC_BUF_CNT), size);
2969        if (err)
2970                return -ENOMEM;
2971
2972        rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2973                    (u64)mcast->mcast_buf->iova);
2974
2975        /* Alloc pkind for NIX internal RX multicast/mirror replay */
2976        mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2977
2978        rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2979                    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2980                    BIT_ULL(20) | MC_BUF_CNT);
2981
2982        mutex_init(&mcast->mce_lock);
2983
2984        return nix_setup_mce_tables(rvu, nix_hw);
2985}
2986
2987static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2988{
2989        struct nix_txvlan *vlan = &nix_hw->txvlan;
2990        int err;
2991
2992        /* Allocate resource bimap for tx vtag def registers*/
2993        vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2994        err = rvu_alloc_bitmap(&vlan->rsrc);
2995        if (err)
2996                return -ENOMEM;
2997
2998        /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2999        vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3000                                            sizeof(u16), GFP_KERNEL);
3001        if (!vlan->entry2pfvf_map)
3002                goto free_mem;
3003
3004        mutex_init(&vlan->rsrc_lock);
3005        return 0;
3006
3007free_mem:
3008        kfree(vlan->rsrc.bmap);
3009        return -ENOMEM;
3010}
3011
3012static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3013{
3014        struct nix_txsch *txsch;
3015        int err, lvl, schq;
3016        u64 cfg, reg;
3017
3018        /* Get scheduler queue count of each type and alloc
3019         * bitmap for each for alloc/free/attach operations.
3020         */
3021        for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3022                txsch = &nix_hw->txsch[lvl];
3023                txsch->lvl = lvl;
3024                switch (lvl) {
3025                case NIX_TXSCH_LVL_SMQ:
3026                        reg = NIX_AF_MDQ_CONST;
3027                        break;
3028                case NIX_TXSCH_LVL_TL4:
3029                        reg = NIX_AF_TL4_CONST;
3030                        break;
3031                case NIX_TXSCH_LVL_TL3:
3032                        reg = NIX_AF_TL3_CONST;
3033                        break;
3034                case NIX_TXSCH_LVL_TL2:
3035                        reg = NIX_AF_TL2_CONST;
3036                        break;
3037                case NIX_TXSCH_LVL_TL1:
3038                        reg = NIX_AF_TL1_CONST;
3039                        break;
3040                }
3041                cfg = rvu_read64(rvu, blkaddr, reg);
3042                txsch->schq.max = cfg & 0xFFFF;
3043                err = rvu_alloc_bitmap(&txsch->schq);
3044                if (err)
3045                        return err;
3046
3047                /* Allocate memory for scheduler queues to
3048                 * PF/VF pcifunc mapping info.
3049                 */
3050                txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3051                                               sizeof(u32), GFP_KERNEL);
3052                if (!txsch->pfvf_map)
3053                        return -ENOMEM;
3054                for (schq = 0; schq < txsch->schq.max; schq++)
3055                        txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
3056        }
3057
3058        /* Setup a default value of 8192 as DWRR MTU */
3059        if (rvu->hw->cap.nix_common_dwrr_mtu) {
3060                rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
3061                            convert_bytes_to_dwrr_mtu(8192));
3062                rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
3063                            convert_bytes_to_dwrr_mtu(8192));
3064        }
3065
3066        return 0;
3067}
3068
3069int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3070                                int blkaddr, u32 cfg)
3071{
3072        int fmt_idx;
3073
3074        for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3075                if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3076                        return fmt_idx;
3077        }
3078        if (fmt_idx >= nix_hw->mark_format.total)
3079                return -ERANGE;
3080
3081        rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3082        nix_hw->mark_format.cfg[fmt_idx] = cfg;
3083        nix_hw->mark_format.in_use++;
3084        return fmt_idx;
3085}
3086
3087static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3088                                    int blkaddr)
3089{
3090        u64 cfgs[] = {
3091                [NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
3092                [NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
3093                [NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
3094                [NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
3095                [NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
3096                [NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
3097                [NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
3098                [NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
3099                [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3100        };
3101        int i, rc;
3102        u64 total;
3103
3104        total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3105        nix_hw->mark_format.total = (u8)total;
3106        nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3107                                               GFP_KERNEL);
3108        if (!nix_hw->mark_format.cfg)
3109                return -ENOMEM;
3110        for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3111                rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3112                if (rc < 0)
3113                        dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3114                                i, rc);
3115        }
3116
3117        return 0;
3118}
3119
3120static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
3121{
3122        /* CN10K supports LBK FIFO size 72 KB */
3123        if (rvu->hw->lbk_bufsize == 0x12000)
3124                *max_mtu = CN10K_LBK_LINK_MAX_FRS;
3125        else
3126                *max_mtu = NIC_HW_MAX_FRS;
3127}
3128
3129static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3130{
3131        /* RPM supports FIFO len 128 KB */
3132        if (rvu_cgx_get_fifolen(rvu) == 0x20000)
3133                *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3134        else
3135                *max_mtu = NIC_HW_MAX_FRS;
3136}
3137
3138int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3139                                     struct nix_hw_info *rsp)
3140{
3141        u16 pcifunc = req->hdr.pcifunc;
3142        u64 dwrr_mtu;
3143        int blkaddr;
3144
3145        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3146        if (blkaddr < 0)
3147                return NIX_AF_ERR_AF_LF_INVALID;
3148
3149        if (is_afvf(pcifunc))
3150                rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3151        else
3152                rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3153
3154        rsp->min_mtu = NIC_HW_MIN_FRS;
3155
3156        if (!rvu->hw->cap.nix_common_dwrr_mtu) {
3157                /* Return '1' on OTx2 */
3158                rsp->rpm_dwrr_mtu = 1;
3159                rsp->sdp_dwrr_mtu = 1;
3160                return 0;
3161        }
3162
3163        dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
3164        rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3165
3166        dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
3167        rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3168
3169        return 0;
3170}
3171
3172int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3173                                   struct msg_rsp *rsp)
3174{
3175        u16 pcifunc = req->hdr.pcifunc;
3176        int i, nixlf, blkaddr, err;
3177        u64 stats;
3178
3179        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3180        if (err)
3181                return err;
3182
3183        /* Get stats count supported by HW */
3184        stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3185
3186        /* Reset tx stats */
3187        for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3188                rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3189
3190        /* Reset rx stats */
3191        for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3192                rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3193
3194        return 0;
3195}
3196
3197/* Returns the ALG index to be set into NPC_RX_ACTION */
3198static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
3199{
3200        int i;
3201
3202        /* Scan over exiting algo entries to find a match */
3203        for (i = 0; i < nix_hw->flowkey.in_use; i++)
3204                if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3205                        return i;
3206
3207        return -ERANGE;
3208}
3209
3210static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3211{
3212        int idx, nr_field, key_off, field_marker, keyoff_marker;
3213        int max_key_off, max_bit_pos, group_member;
3214        struct nix_rx_flowkey_alg *field;
3215        struct nix_rx_flowkey_alg tmp;
3216        u32 key_type, valid_key;
3217        int l4_key_offset = 0;
3218
3219        if (!alg)
3220                return -EINVAL;
3221
3222#define FIELDS_PER_ALG  5
3223#define MAX_KEY_OFF     40
3224        /* Clear all fields */
3225        memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
3226
3227        /* Each of the 32 possible flow key algorithm definitions should
3228         * fall into above incremental config (except ALG0). Otherwise a
3229         * single NPC MCAM entry is not sufficient for supporting RSS.
3230         *
3231         * If a different definition or combination needed then NPC MCAM
3232         * has to be programmed to filter such pkts and it's action should
3233         * point to this definition to calculate flowtag or hash.
3234         *
3235         * The `for loop` goes over _all_ protocol field and the following
3236         * variables depicts the state machine forward progress logic.
3237         *
3238         * keyoff_marker - Enabled when hash byte length needs to be accounted
3239         * in field->key_offset update.
3240         * field_marker - Enabled when a new field needs to be selected.
3241         * group_member - Enabled when protocol is part of a group.
3242         */
3243
3244        keyoff_marker = 0; max_key_off = 0; group_member = 0;
3245        nr_field = 0; key_off = 0; field_marker = 1;
3246        field = &tmp; max_bit_pos = fls(flow_cfg);
3247        for (idx = 0;
3248             idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
3249             key_off < MAX_KEY_OFF; idx++) {
3250                key_type = BIT(idx);
3251                valid_key = flow_cfg & key_type;
3252                /* Found a field marker, reset the field values */
3253                if (field_marker)
3254                        memset(&tmp, 0, sizeof(tmp));
3255
3256                field_marker = true;
3257                keyoff_marker = true;
3258                switch (key_type) {
3259                case NIX_FLOW_KEY_TYPE_PORT:
3260                        field->sel_chan = true;
3261                        /* This should be set to 1, when SEL_CHAN is set */
3262                        field->bytesm1 = 1;
3263                        break;
3264                case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
3265                        field->lid = NPC_LID_LC;
3266                        field->hdr_offset = 9; /* offset */
3267                        field->bytesm1 = 0; /* 1 byte */
3268                        field->ltype_match = NPC_LT_LC_IP;
3269                        field->ltype_mask = 0xF;
3270                        break;
3271                case NIX_FLOW_KEY_TYPE_IPV4:
3272                case NIX_FLOW_KEY_TYPE_INNR_IPV4:
3273                        field->lid = NPC_LID_LC;
3274                        field->ltype_match = NPC_LT_LC_IP;
3275                        if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
3276                                field->lid = NPC_LID_LG;
3277                                field->ltype_match = NPC_LT_LG_TU_IP;
3278                        }
3279                        field->hdr_offset = 12; /* SIP offset */
3280                        field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
3281                        field->ltype_mask = 0xF; /* Match only IPv4 */
3282                        keyoff_marker = false;
3283                        break;
3284                case NIX_FLOW_KEY_TYPE_IPV6:
3285                case NIX_FLOW_KEY_TYPE_INNR_IPV6:
3286                        field->lid = NPC_LID_LC;
3287                        field->ltype_match = NPC_LT_LC_IP6;
3288                        if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
3289                                field->lid = NPC_LID_LG;
3290                                field->ltype_match = NPC_LT_LG_TU_IP6;
3291                        }
3292                        field->hdr_offset = 8; /* SIP offset */
3293                        field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
3294                        field->ltype_mask = 0xF; /* Match only IPv6 */
3295                        break;
3296                case NIX_FLOW_KEY_TYPE_TCP:
3297                case NIX_FLOW_KEY_TYPE_UDP:
3298                case NIX_FLOW_KEY_TYPE_SCTP:
3299                case NIX_FLOW_KEY_TYPE_INNR_TCP:
3300                case NIX_FLOW_KEY_TYPE_INNR_UDP:
3301                case NIX_FLOW_KEY_TYPE_INNR_SCTP:
3302                        field->lid = NPC_LID_LD;
3303                        if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
3304                            key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
3305                            key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
3306                                field->lid = NPC_LID_LH;
3307                        field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
3308
3309                        /* Enum values for NPC_LID_LD and NPC_LID_LG are same,
3310                         * so no need to change the ltype_match, just change
3311                         * the lid for inner protocols
3312                         */
3313                        BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
3314                                     (int)NPC_LT_LH_TU_TCP);
3315                        BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
3316                                     (int)NPC_LT_LH_TU_UDP);
3317                        BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
3318                                     (int)NPC_LT_LH_TU_SCTP);
3319
3320                        if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
3321                             key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
3322                            valid_key) {
3323                                field->ltype_match |= NPC_LT_LD_TCP;
3324                                group_member = true;
3325                        } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
3326                                    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
3327                                   valid_key) {
3328                                field->ltype_match |= NPC_LT_LD_UDP;
3329                                group_member = true;
3330                        } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3331                                    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
3332                                   valid_key) {
3333                                field->ltype_match |= NPC_LT_LD_SCTP;
3334                                group_member = true;
3335                        }
3336                        field->ltype_mask = ~field->ltype_match;
3337                        if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3338                            key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
3339                                /* Handle the case where any of the group item
3340                                 * is enabled in the group but not the final one
3341                                 */
3342                                if (group_member) {
3343                                        valid_key = true;
3344                                        group_member = false;
3345                                }
3346                        } else {
3347                                field_marker = false;
3348                                keyoff_marker = false;
3349                        }
3350
3351                        /* TCP/UDP/SCTP and ESP/AH falls at same offset so
3352                         * remember the TCP key offset of 40 byte hash key.
3353                         */
3354                        if (key_type == NIX_FLOW_KEY_TYPE_TCP)
3355                                l4_key_offset = key_off;
3356                        break;
3357                case NIX_FLOW_KEY_TYPE_NVGRE:
3358                        field->lid = NPC_LID_LD;
3359                        field->hdr_offset = 4; /* VSID offset */
3360                        field->bytesm1 = 2;
3361                        field->ltype_match = NPC_LT_LD_NVGRE;
3362                        field->ltype_mask = 0xF;
3363                        break;
3364                case NIX_FLOW_KEY_TYPE_VXLAN:
3365                case NIX_FLOW_KEY_TYPE_GENEVE:
3366                        field->lid = NPC_LID_LE;
3367                        field->bytesm1 = 2;
3368                        field->hdr_offset = 4;
3369                        field->ltype_mask = 0xF;
3370                        field_marker = false;
3371                        keyoff_marker = false;
3372
3373                        if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
3374                                field->ltype_match |= NPC_LT_LE_VXLAN;
3375                                group_member = true;
3376                        }
3377
3378                        if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
3379                                field->ltype_match |= NPC_LT_LE_GENEVE;
3380                                group_member = true;
3381                        }
3382
3383                        if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
3384                                if (group_member) {
3385                                        field->ltype_mask = ~field->ltype_match;
3386                                        field_marker = true;
3387                                        keyoff_marker = true;
3388                                        valid_key = true;
3389                                        group_member = false;
3390                                }
3391                        }
3392                        break;
3393                case NIX_FLOW_KEY_TYPE_ETH_DMAC:
3394                case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
3395                        field->lid = NPC_LID_LA;
3396                        field->ltype_match = NPC_LT_LA_ETHER;
3397                        if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
3398                                field->lid = NPC_LID_LF;
3399                                field->ltype_match = NPC_LT_LF_TU_ETHER;
3400                        }
3401                        field->hdr_offset = 0;
3402                        field->bytesm1 = 5; /* DMAC 6 Byte */
3403                        field->ltype_mask = 0xF;
3404                        break;
3405                case NIX_FLOW_KEY_TYPE_IPV6_EXT:
3406                        field->lid = NPC_LID_LC;
3407                        field->hdr_offset = 40; /* IPV6 hdr */
3408                        field->bytesm1 = 0; /* 1 Byte ext hdr*/
3409                        field->ltype_match = NPC_LT_LC_IP6_EXT;
3410                        field->ltype_mask = 0xF;
3411                        break;
3412                case NIX_FLOW_KEY_TYPE_GTPU:
3413                        field->lid = NPC_LID_LE;
3414                        field->hdr_offset = 4;
3415                        field->bytesm1 = 3; /* 4 bytes TID*/
3416                        field->ltype_match = NPC_LT_LE_GTPU;
3417                        field->ltype_mask = 0xF;
3418                        break;
3419                case NIX_FLOW_KEY_TYPE_VLAN:
3420                        field->lid = NPC_LID_LB;
3421                        field->hdr_offset = 2; /* Skip TPID (2-bytes) */
3422                        field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
3423                        field->ltype_match = NPC_LT_LB_CTAG;
3424                        field->ltype_mask = 0xF;
3425                        field->fn_mask = 1; /* Mask out the first nibble */
3426                        break;
3427                case NIX_FLOW_KEY_TYPE_AH:
3428                case NIX_FLOW_KEY_TYPE_ESP:
3429                        field->hdr_offset = 0;
3430                        field->bytesm1 = 7; /* SPI + sequence number */
3431                        field->ltype_mask = 0xF;
3432                        field->lid = NPC_LID_LE;
3433                        field->ltype_match = NPC_LT_LE_ESP;
3434                        if (key_type == NIX_FLOW_KEY_TYPE_AH) {
3435                                field->lid = NPC_LID_LD;
3436                                field->ltype_match = NPC_LT_LD_AH;
3437                                field->hdr_offset = 4;
3438                                keyoff_marker = false;
3439                        }
3440                        break;
3441                }
3442                field->ena = 1;
3443
3444                /* Found a valid flow key type */
3445                if (valid_key) {
3446                        /* Use the key offset of TCP/UDP/SCTP fields
3447                         * for ESP/AH fields.
3448                         */
3449                        if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
3450                            key_type == NIX_FLOW_KEY_TYPE_AH)
3451                                key_off = l4_key_offset;
3452                        field->key_offset = key_off;
3453                        memcpy(&alg[nr_field], field, sizeof(*field));
3454                        max_key_off = max(max_key_off, field->bytesm1 + 1);
3455
3456                        /* Found a field marker, get the next field */
3457                        if (field_marker)
3458                                nr_field++;
3459                }
3460
3461                /* Found a keyoff marker, update the new key_off */
3462                if (keyoff_marker) {
3463                        key_off += max_key_off;
3464                        max_key_off = 0;
3465                }
3466        }
3467        /* Processed all the flow key types */
3468        if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3469                return 0;
3470        else
3471                return NIX_AF_ERR_RSS_NOSPC_FIELD;
3472}
3473
3474static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3475{
3476        u64 field[FIELDS_PER_ALG];
3477        struct nix_hw *hw;
3478        int fid, rc;
3479
3480        hw = get_nix_hw(rvu->hw, blkaddr);
3481        if (!hw)
3482                return NIX_AF_ERR_INVALID_NIXBLK;
3483
3484        /* No room to add new flow hash algoritham */
3485        if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3486                return NIX_AF_ERR_RSS_NOSPC_ALGO;
3487
3488        /* Generate algo fields for the given flow_cfg */
3489        rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3490        if (rc)
3491                return rc;
3492
3493        /* Update ALGX_FIELDX register with generated fields */
3494        for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3495                rvu_write64(rvu, blkaddr,
3496                            NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3497                                                           fid), field[fid]);
3498
3499        /* Store the flow_cfg for futher lookup */
3500        rc = hw->flowkey.in_use;
3501        hw->flowkey.flowkey[rc] = flow_cfg;
3502        hw->flowkey.in_use++;
3503
3504        return rc;
3505}
3506
3507int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3508                                         struct nix_rss_flowkey_cfg *req,
3509                                         struct nix_rss_flowkey_cfg_rsp *rsp)
3510{
3511        u16 pcifunc = req->hdr.pcifunc;
3512        int alg_idx, nixlf, blkaddr;
3513        struct nix_hw *nix_hw;
3514        int err;
3515
3516        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3517        if (err)
3518                return err;
3519
3520        nix_hw = get_nix_hw(rvu->hw, blkaddr);
3521        if (!nix_hw)
3522                return NIX_AF_ERR_INVALID_NIXBLK;
3523
3524        alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3525        /* Failed to get algo index from the exiting list, reserve new  */
3526        if (alg_idx < 0) {
3527                alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3528                                                  req->flowkey_cfg);
3529                if (alg_idx < 0)
3530                        return alg_idx;
3531        }
3532        rsp->alg_idx = alg_idx;
3533        rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3534                                       alg_idx, req->mcam_index);
3535        return 0;
3536}
3537
3538static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3539{
3540        u32 flowkey_cfg, minkey_cfg;
3541        int alg, fid, rc;
3542
3543        /* Disable all flow key algx fieldx */
3544        for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3545                for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3546                        rvu_write64(rvu, blkaddr,
3547                                    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3548                                    0);
3549        }
3550
3551        /* IPv4/IPv6 SIP/DIPs */
3552        flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3553        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3554        if (rc < 0)
3555                return rc;
3556
3557        /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3558        minkey_cfg = flowkey_cfg;
3559        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3560        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3561        if (rc < 0)
3562                return rc;
3563
3564        /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3565        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3566        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3567        if (rc < 0)
3568                return rc;
3569
3570        /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3571        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3572        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3573        if (rc < 0)
3574                return rc;
3575
3576        /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3577        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3578                        NIX_FLOW_KEY_TYPE_UDP;
3579        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3580        if (rc < 0)
3581                return rc;
3582
3583        /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3584        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3585                        NIX_FLOW_KEY_TYPE_SCTP;
3586        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3587        if (rc < 0)
3588                return rc;
3589
3590        /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3591        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3592                        NIX_FLOW_KEY_TYPE_SCTP;
3593        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3594        if (rc < 0)
3595                return rc;
3596
3597        /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3598        flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3599                      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3600        rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3601        if (rc < 0)
3602                return rc;
3603
3604        return 0;
3605}
3606
3607int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3608                                      struct nix_set_mac_addr *req,
3609                                      struct msg_rsp *rsp)
3610{
3611        bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3612        u16 pcifunc = req->hdr.pcifunc;
3613        int blkaddr, nixlf, err;
3614        struct rvu_pfvf *pfvf;
3615
3616        err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3617        if (err)
3618                return err;
3619
3620        pfvf = rvu_get_pfvf(rvu, pcifunc);
3621
3622        /* untrusted VF can't overwrite admin(PF) changes */
3623        if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3624            (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3625                dev_warn(rvu->dev,
3626                         "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3627                return -EPERM;
3628        }
3629
3630        ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3631
3632        rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3633                                    pfvf->rx_chan_base, req->mac_addr);
3634
3635        if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3636                ether_addr_copy(pfvf->default_mac, req->mac_addr);
3637
3638        rvu_switch_update_rules(rvu, pcifunc);
3639
3640        return 0;
3641}
3642
3643int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3644                                      struct msg_req *req,
3645                                      struct nix_get_mac_addr_rsp *rsp)
3646{
3647        u16 pcifunc = req->hdr.pcifunc;
3648        struct rvu_pfvf *pfvf;
3649
3650        if (!is_nixlf_attached(rvu, pcifunc))
3651                return NIX_AF_ERR_AF_LF_INVALID;
3652
3653        pfvf = rvu_get_pfvf(rvu, pcifunc);
3654
3655        ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3656
3657        return 0;
3658}
3659
3660int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3661                                     struct msg_rsp *rsp)
3662{
3663        bool allmulti, promisc, nix_rx_multicast;
3664        u16 pcifunc = req->hdr.pcifunc;
3665        struct rvu_pfvf *pfvf;
3666        int nixlf, err;
3667
3668        pfvf = rvu_get_pfvf(rvu, pcifunc);
3669        promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3670        allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3671        pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3672
3673        nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3674
3675        if (is_vf(pcifunc) && !nix_rx_multicast &&
3676            (promisc || allmulti)) {
3677                dev_warn_ratelimited(rvu->dev,
3678                                     "VF promisc/multicast not supported\n");
3679                return 0;
3680        }
3681
3682        /* untrusted VF can't configure promisc/allmulti */
3683        if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3684            (promisc || allmulti))
3685                return 0;
3686
3687        err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3688        if (err)
3689                return err;
3690
3691        if (nix_rx_multicast) {
3692                /* add/del this PF_FUNC to/from mcast pkt replication list */
3693                err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3694                                          allmulti);
3695                if (err) {
3696                        dev_err(rvu->dev,
3697                                "Failed to update pcifunc 0x%x to multicast list\n",
3698                                pcifunc);
3699                        return err;
3700                }
3701
3702                /* add/del this PF_FUNC to/from promisc pkt replication list */
3703                err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3704                                          promisc);
3705                if (err) {
3706                        dev_err(rvu->dev,
3707                                "Failed to update pcifunc 0x%x to promisc list\n",
3708                                pcifunc);
3709                        return err;
3710                }
3711        }
3712
3713        /* install/uninstall allmulti entry */
3714        if (allmulti) {
3715                rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3716                                               pfvf->rx_chan_base);
3717        } else {
3718                if (!nix_rx_multicast)
3719                        rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3720        }
3721
3722        /* install/uninstall promisc entry */
3723        if (promisc) {
3724                rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3725                                              pfvf->rx_chan_base,
3726                                              pfvf->rx_chan_cnt);
3727        } else {
3728                if (!nix_rx_multicast)
3729                        rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
3730        }
3731
3732        return 0;
3733}
3734
3735static void nix_find_link_frs(struct rvu *rvu,
3736                              struct nix_frs_cfg *req, u16 pcifunc)
3737{
3738        int pf = rvu_get_pf(pcifunc);
3739        struct rvu_pfvf *pfvf;
3740        int maxlen, minlen;
3741        int numvfs, hwvf;
3742        int vf;
3743
3744        /* Update with requester's min/max lengths */
3745        pfvf = rvu_get_pfvf(rvu, pcifunc);
3746        pfvf->maxlen = req->maxlen;
3747        if (req->update_minlen)
3748                pfvf->minlen = req->minlen;
3749
3750        maxlen = req->maxlen;
3751        minlen = req->update_minlen ? req->minlen : 0;
3752
3753        /* Get this PF's numVFs and starting hwvf */
3754        rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3755
3756        /* For each VF, compare requested max/minlen */
3757        for (vf = 0; vf < numvfs; vf++) {
3758                pfvf =  &rvu->hwvf[hwvf + vf];
3759                if (pfvf->maxlen > maxlen)
3760                        maxlen = pfvf->maxlen;
3761                if (req->update_minlen &&
3762                    pfvf->minlen && pfvf->minlen < minlen)
3763                        minlen = pfvf->minlen;
3764        }
3765
3766        /* Compare requested max/minlen with PF's max/minlen */
3767        pfvf = &rvu->pf[pf];
3768        if (pfvf->maxlen > maxlen)
3769                maxlen = pfvf->maxlen;
3770        if (req->update_minlen &&
3771            pfvf->minlen && pfvf->minlen < minlen)
3772                minlen = pfvf->minlen;
3773
3774        /* Update the request with max/min PF's and it's VF's max/min */
3775        req->maxlen = maxlen;
3776        if (req->update_minlen)
3777                req->minlen = minlen;
3778}
3779
3780static int
3781nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
3782                        u16 pcifunc, u64 tx_credits)
3783{
3784        struct rvu_hwinfo *hw = rvu->hw;
3785        int pf = rvu_get_pf(pcifunc);
3786        u8 cgx_id = 0, lmac_id = 0;
3787        unsigned long poll_tmo;
3788        bool restore_tx_en = 0;
3789        struct nix_hw *nix_hw;
3790        u64 cfg, sw_xoff = 0;
3791        u32 schq = 0;
3792        u32 credits;
3793        int rc;
3794
3795        nix_hw = get_nix_hw(rvu->hw, blkaddr);
3796        if (!nix_hw)
3797                return NIX_AF_ERR_INVALID_NIXBLK;
3798
3799        if (tx_credits == nix_hw->tx_credits[link])
3800                return 0;
3801
3802        /* Enable cgx tx if disabled for credits to be back */
3803        if (is_pf_cgxmapped(rvu, pf)) {
3804                rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
3805                restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
3806                                                    lmac_id, true);
3807        }
3808
3809        mutex_lock(&rvu->rsrc_lock);
3810        /* Disable new traffic to link */
3811        if (hw->cap.nix_shaping) {
3812                schq = nix_get_tx_link(rvu, pcifunc);
3813                sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
3814                rvu_write64(rvu, blkaddr,
3815                            NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
3816        }
3817
3818        rc = -EBUSY;
3819        poll_tmo = jiffies + usecs_to_jiffies(10000);
3820        /* Wait for credits to return */
3821        do {
3822                if (time_after(jiffies, poll_tmo))
3823                        goto exit;
3824                usleep_range(100, 200);
3825
3826                cfg = rvu_read64(rvu, blkaddr,
3827                                 NIX_AF_TX_LINKX_NORM_CREDIT(link));
3828                credits = (cfg >> 12) & 0xFFFFFULL;
3829        } while (credits != nix_hw->tx_credits[link]);
3830
3831        cfg &= ~(0xFFFFFULL << 12);
3832        cfg |= (tx_credits << 12);
3833        rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3834        rc = 0;
3835
3836        nix_hw->tx_credits[link] = tx_credits;
3837
3838exit:
3839        /* Enable traffic back */
3840        if (hw->cap.nix_shaping && !sw_xoff)
3841                rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
3842
3843        /* Restore state of cgx tx */
3844        if (restore_tx_en)
3845                cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
3846
3847        mutex_unlock(&rvu->rsrc_lock);
3848        return rc;
3849}
3850
3851int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3852                                    struct msg_rsp *rsp)
3853{
3854        struct rvu_hwinfo *hw = rvu->hw;
3855        u16 pcifunc = req->hdr.pcifunc;
3856        int pf = rvu_get_pf(pcifunc);
3857        int blkaddr, schq, link = -1;
3858        struct nix_txsch *txsch;
3859        u64 cfg, lmac_fifo_len;
3860        struct nix_hw *nix_hw;
3861        struct rvu_pfvf *pfvf;
3862        u8 cgx = 0, lmac = 0;
3863        u16 max_mtu;
3864
3865        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3866        if (blkaddr < 0)
3867                return NIX_AF_ERR_AF_LF_INVALID;
3868
3869        nix_hw = get_nix_hw(rvu->hw, blkaddr);
3870        if (!nix_hw)
3871                return NIX_AF_ERR_INVALID_NIXBLK;
3872
3873        if (is_afvf(pcifunc))
3874                rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3875        else
3876                rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3877
3878        if (!req->sdp_link && req->maxlen > max_mtu)
3879                return NIX_AF_ERR_FRS_INVALID;
3880
3881        if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3882                return NIX_AF_ERR_FRS_INVALID;
3883
3884        /* Check if requester wants to update SMQ's */
3885        if (!req->update_smq)
3886                goto rx_frscfg;
3887
3888        /* Update min/maxlen in each of the SMQ attached to this PF/VF */
3889        txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3890        mutex_lock(&rvu->rsrc_lock);
3891        for (schq = 0; schq < txsch->schq.max; schq++) {
3892                if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3893                        continue;
3894                cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3895                cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3896                if (req->update_minlen)
3897                        cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3898                rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3899        }
3900        mutex_unlock(&rvu->rsrc_lock);
3901
3902rx_frscfg:
3903        /* Check if config is for SDP link */
3904        if (req->sdp_link) {
3905                if (!hw->sdp_links)
3906                        return NIX_AF_ERR_RX_LINK_INVALID;
3907                link = hw->cgx_links + hw->lbk_links;
3908                goto linkcfg;
3909        }
3910
3911        /* Check if the request is from CGX mapped RVU PF */
3912        if (is_pf_cgxmapped(rvu, pf)) {
3913                /* Get CGX and LMAC to which this PF is mapped and find link */
3914                rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3915                link = (cgx * hw->lmac_per_cgx) + lmac;
3916        } else if (pf == 0) {
3917                /* For VFs of PF0 ingress is LBK port, so config LBK link */
3918                pfvf = rvu_get_pfvf(rvu, pcifunc);
3919                link = hw->cgx_links + pfvf->lbkid;
3920        }
3921
3922        if (link < 0)
3923                return NIX_AF_ERR_RX_LINK_INVALID;
3924
3925        nix_find_link_frs(rvu, req, pcifunc);
3926
3927linkcfg:
3928        cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3929        cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3930        if (req->update_minlen)
3931                cfg = (cfg & ~0xFFFFULL) | req->minlen;
3932        rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3933
3934        if (req->sdp_link || pf == 0)
3935                return 0;
3936
3937        /* Update transmit credits for CGX links */
3938        lmac_fifo_len =
3939                rvu_cgx_get_fifolen(rvu) /
3940                cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3941        return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
3942                                       (lmac_fifo_len - req->maxlen) / 16);
3943}
3944
3945int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3946                                    struct msg_rsp *rsp)
3947{
3948        int nixlf, blkaddr, err;
3949        u64 cfg;
3950
3951        err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3952        if (err)
3953                return err;
3954
3955        cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3956        /* Set the interface configuration */
3957        if (req->len_verify & BIT(0))
3958                cfg |= BIT_ULL(41);
3959        else
3960                cfg &= ~BIT_ULL(41);
3961
3962        if (req->len_verify & BIT(1))
3963                cfg |= BIT_ULL(40);
3964        else
3965                cfg &= ~BIT_ULL(40);
3966
3967        if (req->csum_verify & BIT(0))
3968                cfg |= BIT_ULL(37);
3969        else
3970                cfg &= ~BIT_ULL(37);
3971
3972        rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3973
3974        return 0;
3975}
3976
3977static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3978{
3979        /* CN10k supports 72KB FIFO size and max packet size of 64k */
3980        if (rvu->hw->lbk_bufsize == 0x12000)
3981                return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3982
3983        return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3984}
3985
3986static void nix_link_config(struct rvu *rvu, int blkaddr,
3987                            struct nix_hw *nix_hw)
3988{
3989        struct rvu_hwinfo *hw = rvu->hw;
3990        int cgx, lmac_cnt, slink, link;
3991        u16 lbk_max_frs, lmac_max_frs;
3992        u64 tx_credits, cfg;
3993
3994        rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3995        rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3996
3997        /* Set default min/max packet lengths allowed on NIX Rx links.
3998         *
3999         * With HW reset minlen value of 60byte, HW will treat ARP pkts
4000         * as undersize and report them to SW as error pkts, hence
4001         * setting it to 40 bytes.
4002         */
4003        for (link = 0; link < hw->cgx_links; link++) {
4004                rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4005                                ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
4006        }
4007
4008        for (link = hw->cgx_links; link < hw->lbk_links; link++) {
4009                rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4010                            ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4011        }
4012        if (hw->sdp_links) {
4013                link = hw->cgx_links + hw->lbk_links;
4014                rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4015                            SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
4016        }
4017
4018        /* Set credits for Tx links assuming max packet length allowed.
4019         * This will be reconfigured based on MTU set for PF/VF.
4020         */
4021        for (cgx = 0; cgx < hw->cgx; cgx++) {
4022                lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4023                /* Skip when cgx is not available or lmac cnt is zero */
4024                if (lmac_cnt <= 0)
4025                        continue;
4026                tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
4027                               lmac_max_frs) / 16;
4028                /* Enable credits and set credit pkt count to max allowed */
4029                cfg =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4030                slink = cgx * hw->lmac_per_cgx;
4031                for (link = slink; link < (slink + lmac_cnt); link++) {
4032                        nix_hw->tx_credits[link] = tx_credits;
4033                        rvu_write64(rvu, blkaddr,
4034                                    NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4035                }
4036        }
4037
4038        /* Set Tx credits for LBK link */
4039        slink = hw->cgx_links;
4040        for (link = slink; link < (slink + hw->lbk_links); link++) {
4041                tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4042                nix_hw->tx_credits[link] = tx_credits;
4043                /* Enable credits and set credit pkt count to max allowed */
4044                tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4045                rvu_write64(rvu, blkaddr,
4046                            NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
4047        }
4048}
4049
4050static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
4051{
4052        int idx, err;
4053        u64 status;
4054
4055        /* Start X2P bus calibration */
4056        rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4057                    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
4058        /* Wait for calibration to complete */
4059        err = rvu_poll_reg(rvu, blkaddr,
4060                           NIX_AF_STATUS, BIT_ULL(10), false);
4061        if (err) {
4062                dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
4063                return err;
4064        }
4065
4066        status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
4067        /* Check if CGX devices are ready */
4068        for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
4069                /* Skip when cgx port is not available */
4070                if (!rvu_cgx_pdata(idx, rvu) ||
4071                    (status & (BIT_ULL(16 + idx))))
4072                        continue;
4073                dev_err(rvu->dev,
4074                        "CGX%d didn't respond to NIX X2P calibration\n", idx);
4075                err = -EBUSY;
4076        }
4077
4078        /* Check if LBK is ready */
4079        if (!(status & BIT_ULL(19))) {
4080                dev_err(rvu->dev,
4081                        "LBK didn't respond to NIX X2P calibration\n");
4082                err = -EBUSY;
4083        }
4084
4085        /* Clear 'calibrate_x2p' bit */
4086        rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4087                    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
4088        if (err || (status & 0x3FFULL))
4089                dev_err(rvu->dev,
4090                        "NIX X2P calibration failed, status 0x%llx\n", status);
4091        if (err)
4092                return err;
4093        return 0;
4094}
4095
4096static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
4097{
4098        u64 cfg;
4099        int err;
4100
4101        /* Set admin queue endianness */
4102        cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
4103#ifdef __BIG_ENDIAN
4104        cfg |= BIT_ULL(8);
4105        rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4106#else
4107        cfg &= ~BIT_ULL(8);
4108        rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4109#endif
4110
4111        /* Do not bypass NDC cache */
4112        cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
4113        cfg &= ~0x3FFEULL;
4114#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4115        /* Disable caching of SQB aka SQEs */
4116        cfg |= 0x04ULL;
4117#endif
4118        rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
4119
4120        /* Result structure can be followed by RQ/SQ/CQ context at
4121         * RES + 128bytes and a write mask at RES + 256 bytes, depending on
4122         * operation type. Alloc sufficient result memory for all operations.
4123         */
4124        err = rvu_aq_alloc(rvu, &block->aq,
4125                           Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
4126                           ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
4127        if (err)
4128                return err;
4129
4130        rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
4131        rvu_write64(rvu, block->addr,
4132                    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
4133        return 0;
4134}
4135
4136static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
4137{
4138        struct rvu_hwinfo *hw = rvu->hw;
4139        u64 hw_const;
4140
4141        hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4142
4143        /* On OcteonTx2 DWRR quantum is directly configured into each of
4144         * the transmit scheduler queues. And PF/VF drivers were free to
4145         * config any value upto 2^24.
4146         * On CN10K, HW is modified, the quantum configuration at scheduler
4147         * queues is in terms of weight. And SW needs to setup a base DWRR MTU
4148         * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
4149         * 'DWRR MTU * weight' to get the quantum.
4150         *
4151         * Check if HW uses a common MTU for all DWRR quantum configs.
4152         * On OcteonTx2 this register field is '0'.
4153         */
4154        if (((hw_const >> 56) & 0x10) == 0x10)
4155                hw->cap.nix_common_dwrr_mtu = true;
4156}
4157
4158static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4159{
4160        const struct npc_lt_def_cfg *ltdefs;
4161        struct rvu_hwinfo *hw = rvu->hw;
4162        int blkaddr = nix_hw->blkaddr;
4163        struct rvu_block *block;
4164        int err;
4165        u64 cfg;
4166
4167        block = &hw->block[blkaddr];
4168
4169        if (is_rvu_96xx_B0(rvu)) {
4170                /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
4171                 * internal state when conditional clocks are turned off.
4172                 * Hence enable them.
4173                 */
4174                rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4175                            rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4176
4177                /* Set chan/link to backpressure TL3 instead of TL2 */
4178                rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
4179
4180                /* Disable SQ manager's sticky mode operation (set TM6 = 0)
4181                 * This sticky mode is known to cause SQ stalls when multiple
4182                 * SQs are mapped to same SMQ and transmitting pkts at a time.
4183                 */
4184                cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
4185                cfg &= ~BIT_ULL(15);
4186                rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4187        }
4188
4189        ltdefs = rvu->kpu.lt_def;
4190        /* Calibrate X2P bus to check if CGX/LBK links are fine */
4191        err = nix_calibrate_x2p(rvu, blkaddr);
4192        if (err)
4193                return err;
4194
4195        /* Setup capabilities of the NIX block */
4196        rvu_nix_setup_capabilities(rvu, blkaddr);
4197
4198        /* Initialize admin queue */
4199        err = nix_aq_init(rvu, block);
4200        if (err)
4201                return err;
4202
4203        /* Restore CINT timer delay to HW reset values */
4204        rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4205
4206        /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
4207        rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL);
4208
4209        if (is_block_implemented(hw, blkaddr)) {
4210                err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4211                if (err)
4212                        return err;
4213
4214                err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4215                if (err)
4216                        return err;
4217
4218                err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4219                if (err)
4220                        return err;
4221
4222                err = nix_setup_mcast(rvu, nix_hw, blkaddr);
4223                if (err)
4224                        return err;
4225
4226                err = nix_setup_txvlan(rvu, nix_hw);
4227                if (err)
4228                        return err;
4229
4230                /* Configure segmentation offload formats */
4231                nix_setup_lso(rvu, nix_hw, blkaddr);
4232
4233                /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
4234                 * This helps HW protocol checker to identify headers
4235                 * and validate length and checksums.
4236                 */
4237                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
4238                            (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
4239                            ltdefs->rx_ol2.ltype_mask);
4240                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
4241                            (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
4242                            ltdefs->rx_oip4.ltype_mask);
4243                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
4244                            (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
4245                            ltdefs->rx_iip4.ltype_mask);
4246                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
4247                            (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
4248                            ltdefs->rx_oip6.ltype_mask);
4249                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
4250                            (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
4251                            ltdefs->rx_iip6.ltype_mask);
4252                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
4253                            (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
4254                            ltdefs->rx_otcp.ltype_mask);
4255                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
4256                            (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
4257                            ltdefs->rx_itcp.ltype_mask);
4258                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
4259                            (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
4260                            ltdefs->rx_oudp.ltype_mask);
4261                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
4262                            (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
4263                            ltdefs->rx_iudp.ltype_mask);
4264                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
4265                            (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
4266                            ltdefs->rx_osctp.ltype_mask);
4267                rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
4268                            (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
4269                            ltdefs->rx_isctp.ltype_mask);
4270
4271                if (!is_rvu_otx2(rvu)) {
4272                        /* Enable APAD calculation for other protocols
4273                         * matching APAD0 and APAD1 lt def registers.
4274                         */
4275                        rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
4276                                    (ltdefs->rx_apad0.valid << 11) |
4277                                    (ltdefs->rx_apad0.lid << 8) |
4278                                    (ltdefs->rx_apad0.ltype_match << 4) |
4279                                    ltdefs->rx_apad0.ltype_mask);
4280                        rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
4281                                    (ltdefs->rx_apad1.valid << 11) |
4282                                    (ltdefs->rx_apad1.lid << 8) |
4283                                    (ltdefs->rx_apad1.ltype_match << 4) |
4284                                    ltdefs->rx_apad1.ltype_mask);
4285
4286                        /* Receive ethertype defination register defines layer
4287                         * information in NPC_RESULT_S to identify the Ethertype
4288                         * location in L2 header. Used for Ethertype overwriting
4289                         * in inline IPsec flow.
4290                         */
4291                        rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
4292                                    (ltdefs->rx_et[0].offset << 12) |
4293                                    (ltdefs->rx_et[0].valid << 11) |
4294                                    (ltdefs->rx_et[0].lid << 8) |
4295                                    (ltdefs->rx_et[0].ltype_match << 4) |
4296                                    ltdefs->rx_et[0].ltype_mask);
4297                        rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
4298                                    (ltdefs->rx_et[1].offset << 12) |
4299                                    (ltdefs->rx_et[1].valid << 11) |
4300                                    (ltdefs->rx_et[1].lid << 8) |
4301                                    (ltdefs->rx_et[1].ltype_match << 4) |
4302                                    ltdefs->rx_et[1].ltype_mask);
4303                }
4304
4305                err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
4306                if (err)
4307                        return err;
4308
4309                nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
4310                                             sizeof(u64), GFP_KERNEL);
4311                if (!nix_hw->tx_credits)
4312                        return -ENOMEM;
4313
4314                /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
4315                nix_link_config(rvu, blkaddr, nix_hw);
4316
4317                /* Enable Channel backpressure */
4318                rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
4319        }
4320        return 0;
4321}
4322
4323int rvu_nix_init(struct rvu *rvu)
4324{
4325        struct rvu_hwinfo *hw = rvu->hw;
4326        struct nix_hw *nix_hw;
4327        int blkaddr = 0, err;
4328        int i = 0;
4329
4330        hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
4331                               GFP_KERNEL);
4332        if (!hw->nix)
4333                return -ENOMEM;
4334
4335        blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4336        while (blkaddr) {
4337                nix_hw = &hw->nix[i];
4338                nix_hw->rvu = rvu;
4339                nix_hw->blkaddr = blkaddr;
4340                err = rvu_nix_block_init(rvu, nix_hw);
4341                if (err)
4342                        return err;
4343                blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4344                i++;
4345        }
4346
4347        return 0;
4348}
4349
4350static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
4351                                  struct rvu_block *block)
4352{
4353        struct nix_txsch *txsch;
4354        struct nix_mcast *mcast;
4355        struct nix_txvlan *vlan;
4356        struct nix_hw *nix_hw;
4357        int lvl;
4358
4359        rvu_aq_free(rvu, block->aq);
4360
4361        if (is_block_implemented(rvu->hw, blkaddr)) {
4362                nix_hw = get_nix_hw(rvu->hw, blkaddr);
4363                if (!nix_hw)
4364                        return;
4365
4366                for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
4367                        txsch = &nix_hw->txsch[lvl];
4368                        kfree(txsch->schq.bmap);
4369                }
4370
4371                kfree(nix_hw->tx_credits);
4372
4373                nix_ipolicer_freemem(rvu, nix_hw);
4374
4375                vlan = &nix_hw->txvlan;
4376                kfree(vlan->rsrc.bmap);
4377                mutex_destroy(&vlan->rsrc_lock);
4378
4379                mcast = &nix_hw->mcast;
4380                qmem_free(rvu->dev, mcast->mce_ctx);
4381                qmem_free(rvu->dev, mcast->mcast_buf);
4382                mutex_destroy(&mcast->mce_lock);
4383        }
4384}
4385
4386void rvu_nix_freemem(struct rvu *rvu)
4387{
4388        struct rvu_hwinfo *hw = rvu->hw;
4389        struct rvu_block *block;
4390        int blkaddr = 0;
4391
4392        blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4393        while (blkaddr) {
4394                block = &hw->block[blkaddr];
4395                rvu_nix_block_freemem(rvu, blkaddr, block);
4396                blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4397        }
4398}
4399
4400int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
4401                                     struct msg_rsp *rsp)
4402{
4403        u16 pcifunc = req->hdr.pcifunc;
4404        struct rvu_pfvf *pfvf;
4405        int nixlf, err;
4406
4407        err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4408        if (err)
4409                return err;
4410
4411        rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
4412
4413        npc_mcam_enable_flows(rvu, pcifunc);
4414
4415        pfvf = rvu_get_pfvf(rvu, pcifunc);
4416        set_bit(NIXLF_INITIALIZED, &pfvf->flags);
4417
4418        rvu_switch_update_rules(rvu, pcifunc);
4419
4420        return rvu_cgx_start_stop_io(rvu, pcifunc, true);
4421}
4422
4423int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
4424                                    struct msg_rsp *rsp)
4425{
4426        u16 pcifunc = req->hdr.pcifunc;
4427        struct rvu_pfvf *pfvf;
4428        int nixlf, err;
4429
4430        err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4431        if (err)
4432                return err;
4433
4434        rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
4435
4436        pfvf = rvu_get_pfvf(rvu, pcifunc);
4437        clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4438
4439        return rvu_cgx_start_stop_io(rvu, pcifunc, false);
4440}
4441
4442void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
4443{
4444        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
4445        struct hwctx_disable_req ctx_req;
4446        int err;
4447
4448        ctx_req.hdr.pcifunc = pcifunc;
4449
4450        /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
4451        rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
4452        rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
4453        nix_interface_deinit(rvu, pcifunc, nixlf);
4454        nix_rx_sync(rvu, blkaddr);
4455        nix_txschq_free(rvu, pcifunc);
4456
4457        clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4458
4459        rvu_cgx_start_stop_io(rvu, pcifunc, false);
4460
4461        if (pfvf->sq_ctx) {
4462                ctx_req.ctype = NIX_AQ_CTYPE_SQ;
4463                err = nix_lf_hwctx_disable(rvu, &ctx_req);
4464                if (err)
4465                        dev_err(rvu->dev, "SQ ctx disable failed\n");
4466        }
4467
4468        if (pfvf->rq_ctx) {
4469                ctx_req.ctype = NIX_AQ_CTYPE_RQ;
4470                err = nix_lf_hwctx_disable(rvu, &ctx_req);
4471                if (err)
4472                        dev_err(rvu->dev, "RQ ctx disable failed\n");
4473        }
4474
4475        if (pfvf->cq_ctx) {
4476                ctx_req.ctype = NIX_AQ_CTYPE_CQ;
4477                err = nix_lf_hwctx_disable(rvu, &ctx_req);
4478                if (err)
4479                        dev_err(rvu->dev, "CQ ctx disable failed\n");
4480        }
4481
4482        nix_ctx_free(rvu, pfvf);
4483
4484        nix_free_all_bandprof(rvu, pcifunc);
4485}
4486
4487#define NIX_AF_LFX_TX_CFG_PTP_EN        BIT_ULL(32)
4488
4489static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
4490{
4491        struct rvu_hwinfo *hw = rvu->hw;
4492        struct rvu_block *block;
4493        int blkaddr, pf;
4494        int nixlf;
4495        u64 cfg;
4496
4497        pf = rvu_get_pf(pcifunc);
4498        if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
4499                return 0;
4500
4501        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4502        if (blkaddr < 0)
4503                return NIX_AF_ERR_AF_LF_INVALID;
4504
4505        block = &hw->block[blkaddr];
4506        nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
4507        if (nixlf < 0)
4508                return NIX_AF_ERR_AF_LF_INVALID;
4509
4510        cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
4511
4512        if (enable)
4513                cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
4514        else
4515                cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
4516
4517        rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
4518
4519        return 0;
4520}
4521
4522int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
4523                                          struct msg_rsp *rsp)
4524{
4525        return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
4526}
4527
4528int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
4529                                           struct msg_rsp *rsp)
4530{
4531        return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
4532}
4533
4534int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
4535                                        struct nix_lso_format_cfg *req,
4536                                        struct nix_lso_format_cfg_rsp *rsp)
4537{
4538        u16 pcifunc = req->hdr.pcifunc;
4539        struct nix_hw *nix_hw;
4540        struct rvu_pfvf *pfvf;
4541        int blkaddr, idx, f;
4542        u64 reg;
4543
4544        pfvf = rvu_get_pfvf(rvu, pcifunc);
4545        blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4546        if (!pfvf->nixlf || blkaddr < 0)
4547                return NIX_AF_ERR_AF_LF_INVALID;
4548
4549        nix_hw = get_nix_hw(rvu->hw, blkaddr);
4550        if (!nix_hw)
4551                return NIX_AF_ERR_INVALID_NIXBLK;
4552
4553        /* Find existing matching LSO format, if any */
4554        for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
4555                for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
4556                        reg = rvu_read64(rvu, blkaddr,
4557                                         NIX_AF_LSO_FORMATX_FIELDX(idx, f));
4558                        if (req->fields[f] != (reg & req->field_mask))
4559                                break;
4560                }
4561
4562                if (f == NIX_LSO_FIELD_MAX)
4563                        break;
4564        }
4565
4566        if (idx < nix_hw->lso.in_use) {
4567                /* Match found */
4568                rsp->lso_format_idx = idx;
4569                return 0;
4570        }
4571
4572        if (nix_hw->lso.in_use == nix_hw->lso.total)
4573                return NIX_AF_ERR_LSO_CFG_FAIL;
4574
4575        rsp->lso_format_idx = nix_hw->lso.in_use++;
4576
4577        for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4578                rvu_write64(rvu, blkaddr,
4579                            NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4580                            req->fields[f]);
4581
4582        return 0;
4583}
4584
4585void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
4586{
4587        bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
4588
4589        /* overwrite vf mac address with default_mac */
4590        if (from_vf)
4591                ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
4592}
4593
4594/* NIX ingress policers or bandwidth profiles APIs */
4595static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
4596{
4597        struct npc_lt_def_cfg defs, *ltdefs;
4598
4599        ltdefs = &defs;
4600        memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
4601
4602        /* Extract PCP and DEI fields from outer VLAN from byte offset
4603         * 2 from the start of LB_PTR (ie TAG).
4604         * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
4605         * fields are considered when 'Tunnel enable' is set in profile.
4606         */
4607        rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
4608                    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
4609                    (ltdefs->ovlan.ltype_match << 4) |
4610                    ltdefs->ovlan.ltype_mask);
4611        rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
4612                    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
4613                    (ltdefs->ivlan.ltype_match << 4) |
4614                    ltdefs->ivlan.ltype_mask);
4615
4616        /* DSCP field in outer and tunneled IPv4 packets */
4617        rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
4618                    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
4619                    (ltdefs->rx_oip4.ltype_match << 4) |
4620                    ltdefs->rx_oip4.ltype_mask);
4621        rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
4622                    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
4623                    (ltdefs->rx_iip4.ltype_match << 4) |
4624                    ltdefs->rx_iip4.ltype_mask);
4625
4626        /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
4627        rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
4628                    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
4629                    (ltdefs->rx_oip6.ltype_match << 4) |
4630                    ltdefs->rx_oip6.ltype_mask);
4631        rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
4632                    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
4633                    (ltdefs->rx_iip6.ltype_match << 4) |
4634                    ltdefs->rx_iip6.ltype_mask);
4635}
4636
4637static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
4638                                    int layer, int prof_idx)
4639{
4640        struct nix_cn10k_aq_enq_req aq_req;
4641        int rc;
4642
4643        memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4644
4645        aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
4646        aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4647        aq_req.op = NIX_AQ_INSTOP_INIT;
4648
4649        /* Context is all zeros, submit to AQ */
4650        rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4651                                     (struct nix_aq_enq_req *)&aq_req, NULL);
4652        if (rc)
4653                dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
4654                        layer, prof_idx);
4655        return rc;
4656}
4657
4658static int nix_setup_ipolicers(struct rvu *rvu,
4659                               struct nix_hw *nix_hw, int blkaddr)
4660{
4661        struct rvu_hwinfo *hw = rvu->hw;
4662        struct nix_ipolicer *ipolicer;
4663        int err, layer, prof_idx;
4664        u64 cfg;
4665
4666        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4667        if (!(cfg & BIT_ULL(61))) {
4668                hw->cap.ipolicer = false;
4669                return 0;
4670        }
4671
4672        hw->cap.ipolicer = true;
4673        nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
4674                                        sizeof(*ipolicer), GFP_KERNEL);
4675        if (!nix_hw->ipolicer)
4676                return -ENOMEM;
4677
4678        cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
4679
4680        for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4681                ipolicer = &nix_hw->ipolicer[layer];
4682                switch (layer) {
4683                case BAND_PROF_LEAF_LAYER:
4684                        ipolicer->band_prof.max = cfg & 0XFFFF;
4685                        break;
4686                case BAND_PROF_MID_LAYER:
4687                        ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
4688                        break;
4689                case BAND_PROF_TOP_LAYER:
4690                        ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
4691                        break;
4692                }
4693
4694                if (!ipolicer->band_prof.max)
4695                        continue;
4696
4697                err = rvu_alloc_bitmap(&ipolicer->band_prof);
4698                if (err)
4699                        return err;
4700
4701                ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
4702                                                  ipolicer->band_prof.max,
4703                                                  sizeof(u16), GFP_KERNEL);
4704                if (!ipolicer->pfvf_map)
4705                        return -ENOMEM;
4706
4707                ipolicer->match_id = devm_kcalloc(rvu->dev,
4708                                                  ipolicer->band_prof.max,
4709                                                  sizeof(u16), GFP_KERNEL);
4710                if (!ipolicer->match_id)
4711                        return -ENOMEM;
4712
4713                for (prof_idx = 0;
4714                     prof_idx < ipolicer->band_prof.max; prof_idx++) {
4715                        /* Set AF as current owner for INIT ops to succeed */
4716                        ipolicer->pfvf_map[prof_idx] = 0x00;
4717
4718                        /* There is no enable bit in the profile context,
4719                         * so no context disable. So let's INIT them here
4720                         * so that PF/VF later on have to just do WRITE to
4721                         * setup policer rates and config.
4722                         */
4723                        err = nix_init_policer_context(rvu, nix_hw,
4724                                                       layer, prof_idx);
4725                        if (err)
4726                                return err;
4727                }
4728
4729                /* Allocate memory for maintaining ref_counts for MID level
4730                 * profiles, this will be needed for leaf layer profiles'
4731                 * aggregation.
4732                 */
4733                if (layer != BAND_PROF_MID_LAYER)
4734                        continue;
4735
4736                ipolicer->ref_count = devm_kcalloc(rvu->dev,
4737                                                   ipolicer->band_prof.max,
4738                                                   sizeof(u16), GFP_KERNEL);
4739        }
4740
4741        /* Set policer timeunit to 2us ie  (19 + 1) * 100 nsec = 2us */
4742        rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
4743
4744        nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
4745
4746        return 0;
4747}
4748
4749static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
4750{
4751        struct nix_ipolicer *ipolicer;
4752        int layer;
4753
4754        if (!rvu->hw->cap.ipolicer)
4755                return;
4756
4757        for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4758                ipolicer = &nix_hw->ipolicer[layer];
4759
4760                if (!ipolicer->band_prof.max)
4761                        continue;
4762
4763                kfree(ipolicer->band_prof.bmap);
4764        }
4765}
4766
4767static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
4768                               struct nix_hw *nix_hw, u16 pcifunc)
4769{
4770        struct nix_ipolicer *ipolicer;
4771        int layer, hi_layer, prof_idx;
4772
4773        /* Bits [15:14] in profile index represent layer */
4774        layer = (req->qidx >> 14) & 0x03;
4775        prof_idx = req->qidx & 0x3FFF;
4776
4777        ipolicer = &nix_hw->ipolicer[layer];
4778        if (prof_idx >= ipolicer->band_prof.max)
4779                return -EINVAL;
4780
4781        /* Check if the profile is allocated to the requesting PCIFUNC or not
4782         * with the exception of AF. AF is allowed to read and update contexts.
4783         */
4784        if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
4785                return -EINVAL;
4786
4787        /* If this profile is linked to higher layer profile then check
4788         * if that profile is also allocated to the requesting PCIFUNC
4789         * or not.
4790         */
4791        if (!req->prof.hl_en)
4792                return 0;
4793
4794        /* Leaf layer profile can link only to mid layer and
4795         * mid layer to top layer.
4796         */
4797        if (layer == BAND_PROF_LEAF_LAYER)
4798                hi_layer = BAND_PROF_MID_LAYER;
4799        else if (layer == BAND_PROF_MID_LAYER)
4800                hi_layer = BAND_PROF_TOP_LAYER;
4801        else
4802                return -EINVAL;
4803
4804        ipolicer = &nix_hw->ipolicer[hi_layer];
4805        prof_idx = req->prof.band_prof_id;
4806        if (prof_idx >= ipolicer->band_prof.max ||
4807            ipolicer->pfvf_map[prof_idx] != pcifunc)
4808                return -EINVAL;
4809
4810        return 0;
4811}
4812
4813int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
4814                                        struct nix_bandprof_alloc_req *req,
4815                                        struct nix_bandprof_alloc_rsp *rsp)
4816{
4817        int blkaddr, layer, prof, idx, err;
4818        u16 pcifunc = req->hdr.pcifunc;
4819        struct nix_ipolicer *ipolicer;
4820        struct nix_hw *nix_hw;
4821
4822        if (!rvu->hw->cap.ipolicer)
4823                return NIX_AF_ERR_IPOLICER_NOTSUPP;
4824
4825        err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4826        if (err)
4827                return err;
4828
4829        mutex_lock(&rvu->rsrc_lock);
4830        for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4831                if (layer == BAND_PROF_INVAL_LAYER)
4832                        continue;
4833                if (!req->prof_count[layer])
4834                        continue;
4835
4836                ipolicer = &nix_hw->ipolicer[layer];
4837                for (idx = 0; idx < req->prof_count[layer]; idx++) {
4838                        /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
4839                        if (idx == MAX_BANDPROF_PER_PFFUNC)
4840                                break;
4841
4842                        prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4843                        if (prof < 0)
4844                                break;
4845                        rsp->prof_count[layer]++;
4846                        rsp->prof_idx[layer][idx] = prof;
4847                        ipolicer->pfvf_map[prof] = pcifunc;
4848                }
4849        }
4850        mutex_unlock(&rvu->rsrc_lock);
4851        return 0;
4852}
4853
4854static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
4855{
4856        int blkaddr, layer, prof_idx, err;
4857        struct nix_ipolicer *ipolicer;
4858        struct nix_hw *nix_hw;
4859
4860        if (!rvu->hw->cap.ipolicer)
4861                return NIX_AF_ERR_IPOLICER_NOTSUPP;
4862
4863        err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4864        if (err)
4865                return err;
4866
4867        mutex_lock(&rvu->rsrc_lock);
4868        /* Free all the profiles allocated to the PCIFUNC */
4869        for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4870                if (layer == BAND_PROF_INVAL_LAYER)
4871                        continue;
4872                ipolicer = &nix_hw->ipolicer[layer];
4873
4874                for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
4875                        if (ipolicer->pfvf_map[prof_idx] != pcifunc)
4876                                continue;
4877
4878                        /* Clear ratelimit aggregation, if any */
4879                        if (layer == BAND_PROF_LEAF_LAYER &&
4880                            ipolicer->match_id[prof_idx])
4881                                nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4882
4883                        ipolicer->pfvf_map[prof_idx] = 0x00;
4884                        ipolicer->match_id[prof_idx] = 0;
4885                        rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4886                }
4887        }
4888        mutex_unlock(&rvu->rsrc_lock);
4889        return 0;
4890}
4891
4892int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
4893                                       struct nix_bandprof_free_req *req,
4894                                       struct msg_rsp *rsp)
4895{
4896        int blkaddr, layer, prof_idx, idx, err;
4897        u16 pcifunc = req->hdr.pcifunc;
4898        struct nix_ipolicer *ipolicer;
4899        struct nix_hw *nix_hw;
4900
4901        if (req->free_all)
4902                return nix_free_all_bandprof(rvu, pcifunc);
4903
4904        if (!rvu->hw->cap.ipolicer)
4905                return NIX_AF_ERR_IPOLICER_NOTSUPP;
4906
4907        err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4908        if (err)
4909                return err;
4910
4911        mutex_lock(&rvu->rsrc_lock);
4912        /* Free the requested profile indices */
4913        for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4914                if (layer == BAND_PROF_INVAL_LAYER)
4915                        continue;
4916                if (!req->prof_count[layer])
4917                        continue;
4918
4919                ipolicer = &nix_hw->ipolicer[layer];
4920                for (idx = 0; idx < req->prof_count[layer]; idx++) {
4921                        prof_idx = req->prof_idx[layer][idx];
4922                        if (prof_idx >= ipolicer->band_prof.max ||
4923                            ipolicer->pfvf_map[prof_idx] != pcifunc)
4924                                continue;
4925
4926                        /* Clear ratelimit aggregation, if any */
4927                        if (layer == BAND_PROF_LEAF_LAYER &&
4928                            ipolicer->match_id[prof_idx])
4929                                nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4930
4931                        ipolicer->pfvf_map[prof_idx] = 0x00;
4932                        ipolicer->match_id[prof_idx] = 0;
4933                        rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4934                        if (idx == MAX_BANDPROF_PER_PFFUNC)
4935                                break;
4936                }
4937        }
4938        mutex_unlock(&rvu->rsrc_lock);
4939        return 0;
4940}
4941
4942int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
4943                        struct nix_cn10k_aq_enq_req *aq_req,
4944                        struct nix_cn10k_aq_enq_rsp *aq_rsp,
4945                        u16 pcifunc, u8 ctype, u32 qidx)
4946{
4947        memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4948        aq_req->hdr.pcifunc = pcifunc;
4949        aq_req->ctype = ctype;
4950        aq_req->op = NIX_AQ_INSTOP_READ;
4951        aq_req->qidx = qidx;
4952
4953        return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4954                                       (struct nix_aq_enq_req *)aq_req,
4955                                       (struct nix_aq_enq_rsp *)aq_rsp);
4956}
4957
4958static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
4959                                          struct nix_hw *nix_hw,
4960                                          struct nix_cn10k_aq_enq_req *aq_req,
4961                                          struct nix_cn10k_aq_enq_rsp *aq_rsp,
4962                                          u32 leaf_prof, u16 mid_prof)
4963{
4964        memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4965        aq_req->hdr.pcifunc = 0x00;
4966        aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
4967        aq_req->op = NIX_AQ_INSTOP_WRITE;
4968        aq_req->qidx = leaf_prof;
4969
4970        aq_req->prof.band_prof_id = mid_prof;
4971        aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
4972        aq_req->prof.hl_en = 1;
4973        aq_req->prof_mask.hl_en = 1;
4974
4975        return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4976                                       (struct nix_aq_enq_req *)aq_req,
4977                                       (struct nix_aq_enq_rsp *)aq_rsp);
4978}
4979
4980int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
4981                                 u16 rq_idx, u16 match_id)
4982{
4983        int leaf_prof, mid_prof, leaf_match;
4984        struct nix_cn10k_aq_enq_req aq_req;
4985        struct nix_cn10k_aq_enq_rsp aq_rsp;
4986        struct nix_ipolicer *ipolicer;
4987        struct nix_hw *nix_hw;
4988        int blkaddr, idx, rc;
4989
4990        if (!rvu->hw->cap.ipolicer)
4991                return 0;
4992
4993        rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4994        if (rc)
4995                return rc;
4996
4997        /* Fetch the RQ's context to see if policing is enabled */
4998        rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
4999                                 NIX_AQ_CTYPE_RQ, rq_idx);
5000        if (rc) {
5001                dev_err(rvu->dev,
5002                        "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
5003                        __func__, rq_idx, pcifunc);
5004                return rc;
5005        }
5006
5007        if (!aq_rsp.rq.policer_ena)
5008                return 0;
5009
5010        /* Get the bandwidth profile ID mapped to this RQ */
5011        leaf_prof = aq_rsp.rq.band_prof_id;
5012
5013        ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
5014        ipolicer->match_id[leaf_prof] = match_id;
5015
5016        /* Check if any other leaf profile is marked with same match_id */
5017        for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
5018                if (idx == leaf_prof)
5019                        continue;
5020                if (ipolicer->match_id[idx] != match_id)
5021                        continue;
5022
5023                leaf_match = idx;
5024                break;
5025        }
5026
5027        if (idx == ipolicer->band_prof.max)
5028                return 0;
5029
5030        /* Fetch the matching profile's context to check if it's already
5031         * mapped to a mid level profile.
5032         */
5033        rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5034                                 NIX_AQ_CTYPE_BANDPROF, leaf_match);
5035        if (rc) {
5036                dev_err(rvu->dev,
5037                        "%s: Failed to fetch context of leaf profile %d\n",
5038                        __func__, leaf_match);
5039                return rc;
5040        }
5041
5042        ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
5043        if (aq_rsp.prof.hl_en) {
5044                /* Get Mid layer prof index and map leaf_prof index
5045                 * also such that flows that are being steered
5046                 * to different RQs and marked with same match_id
5047                 * are rate limited in a aggregate fashion
5048                 */
5049                mid_prof = aq_rsp.prof.band_prof_id;
5050                rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5051                                                    &aq_req, &aq_rsp,
5052                                                    leaf_prof, mid_prof);
5053                if (rc) {
5054                        dev_err(rvu->dev,
5055                                "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5056                                __func__, leaf_prof, mid_prof);
5057                        goto exit;
5058                }
5059
5060                mutex_lock(&rvu->rsrc_lock);
5061                ipolicer->ref_count[mid_prof]++;
5062                mutex_unlock(&rvu->rsrc_lock);
5063                goto exit;
5064        }
5065
5066        /* Allocate a mid layer profile and
5067         * map both 'leaf_prof' and 'leaf_match' profiles to it.
5068         */
5069        mutex_lock(&rvu->rsrc_lock);
5070        mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5071        if (mid_prof < 0) {
5072                dev_err(rvu->dev,
5073                        "%s: Unable to allocate mid layer profile\n", __func__);
5074                mutex_unlock(&rvu->rsrc_lock);
5075                goto exit;
5076        }
5077        mutex_unlock(&rvu->rsrc_lock);
5078        ipolicer->pfvf_map[mid_prof] = 0x00;
5079        ipolicer->ref_count[mid_prof] = 0;
5080
5081        /* Initialize mid layer profile same as 'leaf_prof' */
5082        rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5083                                 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
5084        if (rc) {
5085                dev_err(rvu->dev,
5086                        "%s: Failed to fetch context of leaf profile %d\n",
5087                        __func__, leaf_prof);
5088                goto exit;
5089        }
5090
5091        memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5092        aq_req.hdr.pcifunc = 0x00;
5093        aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
5094        aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5095        aq_req.op = NIX_AQ_INSTOP_WRITE;
5096        memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
5097        /* Clear higher layer enable bit in the mid profile, just in case */
5098        aq_req.prof.hl_en = 0;
5099        aq_req.prof_mask.hl_en = 1;
5100
5101        rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5102                                     (struct nix_aq_enq_req *)&aq_req, NULL);
5103        if (rc) {
5104                dev_err(rvu->dev,
5105                        "%s: Failed to INIT context of mid layer profile %d\n",
5106                        __func__, mid_prof);
5107                goto exit;
5108        }
5109
5110        /* Map both leaf profiles to this mid layer profile */
5111        rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5112                                            &aq_req, &aq_rsp,
5113                                            leaf_prof, mid_prof);
5114        if (rc) {
5115                dev_err(rvu->dev,
5116                        "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5117                        __func__, leaf_prof, mid_prof);
5118                goto exit;
5119        }
5120
5121        mutex_lock(&rvu->rsrc_lock);
5122        ipolicer->ref_count[mid_prof]++;
5123        mutex_unlock(&rvu->rsrc_lock);
5124
5125        rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5126                                            &aq_req, &aq_rsp,
5127                                            leaf_match, mid_prof);
5128        if (rc) {
5129                dev_err(rvu->dev,
5130                        "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5131                        __func__, leaf_match, mid_prof);
5132                ipolicer->ref_count[mid_prof]--;
5133                goto exit;
5134        }
5135
5136        mutex_lock(&rvu->rsrc_lock);
5137        ipolicer->ref_count[mid_prof]++;
5138        mutex_unlock(&rvu->rsrc_lock);
5139
5140exit:
5141        return rc;
5142}
5143
5144/* Called with mutex rsrc_lock */
5145static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
5146                                     u32 leaf_prof)
5147{
5148        struct nix_cn10k_aq_enq_req aq_req;
5149        struct nix_cn10k_aq_enq_rsp aq_rsp;
5150        struct nix_ipolicer *ipolicer;
5151        u16 mid_prof;
5152        int rc;
5153
5154        mutex_unlock(&rvu->rsrc_lock);
5155
5156        rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5157                                 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
5158
5159        mutex_lock(&rvu->rsrc_lock);
5160        if (rc) {
5161                dev_err(rvu->dev,
5162                        "%s: Failed to fetch context of leaf profile %d\n",
5163                        __func__, leaf_prof);
5164                return;
5165        }
5166
5167        if (!aq_rsp.prof.hl_en)
5168                return;
5169
5170        mid_prof = aq_rsp.prof.band_prof_id;
5171        ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
5172        ipolicer->ref_count[mid_prof]--;
5173        /* If ref_count is zero, free mid layer profile */
5174        if (!ipolicer->ref_count[mid_prof]) {
5175                ipolicer->pfvf_map[mid_prof] = 0x00;
5176                rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
5177        }
5178}
5179
5180int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
5181                                             struct nix_bandprof_get_hwinfo_rsp *rsp)
5182{
5183        struct nix_ipolicer *ipolicer;
5184        int blkaddr, layer, err;
5185        struct nix_hw *nix_hw;
5186        u64 tu;
5187
5188        if (!rvu->hw->cap.ipolicer)
5189                return NIX_AF_ERR_IPOLICER_NOTSUPP;
5190
5191        err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
5192        if (err)
5193                return err;
5194
5195        /* Return number of bandwidth profiles free at each layer */
5196        mutex_lock(&rvu->rsrc_lock);
5197        for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5198                if (layer == BAND_PROF_INVAL_LAYER)
5199                        continue;
5200
5201                ipolicer = &nix_hw->ipolicer[layer];
5202                rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
5203        }
5204        mutex_unlock(&rvu->rsrc_lock);
5205
5206        /* Set the policer timeunit in nanosec */
5207        tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
5208        rsp->policer_timeunit = (tu + 1) * 100;
5209
5210        return 0;
5211}
5212