linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
/* bnx2x_sriov.c: QLogic Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 * Copyright 2014 QLogic Corporation
 * All rights reserved
 *
 * Unless you and QLogic execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other QLogic software provided under a
 * license other than the GPL, without QLogic's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *             Ariel Elior <ariel.elior@qlogic.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
			    struct bnx2x_virtf **vf,
			    struct pf_vf_bulletin_content **bulletin,
			    bool test_queue);

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

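/* Ack a VF status block on its behalf. The PF cannot reach a VF's IGU
 * through its own BAR, so the producer update is encoded into the IGU
 * command registers over GRC: the data word carries the SB index, segment,
 * interrupt op and update flag, while the control word carries the target
 * address and the VF's function id. As an illustration (hypothetical
 * values), acking SB 5 of abs_vfid 3 builds roughly:
 *
 *	ctl = (IGU_CMD_E2_PROD_UPD_BASE + 5) << IGU_CTRL_REG_ADDRESS_SHIFT |
 *	      3 << IGU_CTRL_REG_FID_SHIFT |
 *	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
 *
 * The data register is written first and the control register second; the
 * barrier() between the two keeps that ordering.
 */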
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	barrier();
}

static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      bool print_err)
{
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		if (print_err)
			BNX2X_ERR("Slowpath objects not yet initialized!\n");
		else
			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
		return false;
	}
	return true;
}

/* VFOP operations states */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

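/* Queue construction is a two-phase state change: INIT (host coalescing
 * enable, FW SB ids, HW context) followed by SETUP (general, rx and tx
 * parameters). This helper only fills the parameter structures; the actual
 * ramrods are sent later from bnx2x_vf_queue_create(). A minimal sketch of
 * the intended calling order (caller-owned 'qctor' assumed):
 *
 *	bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
 *	rc = bnx2x_vf_queue_setup(bp, vf, qid, &qctor);
 */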
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vf_queue_construct_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
	setup_p->gen_params.fp_hsi = vf->fp_hsi;

	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	if (vf->spoofchk)
		__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
	else
		__clear_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

static int bnx2x_vf_queue_create(struct bnx2x *bp,
				 struct bnx2x_virtf *vf, int qid,
				 struct bnx2x_vf_queue_construct_params *qctor)
{
	struct bnx2x_queue_state_params *q_params;
	int rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Prepare ramrod information */
	q_params = &qctor->qstate;
	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'construction' ramrods */
	q_params->cmd = BNX2X_Q_CMD_INIT;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
	       sizeof(struct bnx2x_queue_setup_params));
	q_params->cmd = BNX2X_Q_CMD_SETUP;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	/* enable interrupts */
	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
out:
	return rc;
}

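/* Tear a queue down by driving it through the closing commands in order:
 * BNX2X_Q_CMD_HALT, BNX2X_Q_CMD_TERMINATE and BNX2X_Q_CMD_CFC_DEL. Each is
 * a ramrod completed synchronously (RAMROD_COMP_WAIT), and a failure at any
 * step aborts the rest. The CDU context is scrubbed afterwards so a stale
 * state machine is not seen if the queue is later re-created.
 */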
static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  int qid)
{
	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
				       BNX2X_Q_CMD_TERMINATE,
				       BNX2X_Q_CMD_CFC_DEL};
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare ramrod information */
	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'destruction' ramrods */
	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
		q_params.cmd = cmds[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
			return rc;
		}
	}
out:
	/* Clean Context */
	if (bnx2x_vfq(vf, qid, cxt)) {
		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
	}

	return 0;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   int qid, bool drv_only, int type)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
	   (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
	   (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (type == BNX2X_VF_FILTER_VLAN_MAC) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
	} else if (type == BNX2X_VF_FILTER_MAC) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
	} else {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
	}
	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Start deleting */
	rc = ramrod.vlan_mac_obj->delete_all(bp,
					     ramrod.vlan_mac_obj,
					     &ramrod.user_req.vlan_mac_flags,
					     &ramrod.ramrod_flags);
	if (rc) {
		BNX2X_ERR("Failed to delete all %s\n",
			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
		return rc;
	}

	return 0;
}

static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
				    struct bnx2x_virtf *vf, int qid,
				    struct bnx2x_vf_mac_vlan_filter *filter,
				    bool drv_only)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
	   (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
	   (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
	} else if (filter->type == BNX2X_VF_FILTER_VLAN) {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
	} else {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
	}
	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
					    BNX2X_VLAN_MAC_DEL;

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Add/Remove the filter */
	rc = bnx2x_config_vlan_mac(bp, &ramrod);
	if (rc == -EEXIST)
		return 0;
	if (rc) {
		BNX2X_ERR("Failed to %s %s\n",
			  filter->add ? "add" : "delete",
			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
				"VLAN-MAC" :
			  (filter->type == BNX2X_VF_FILTER_MAC) ?
				"MAC" : "VLAN");
		return rc;
	}

	filter->applied = true;

	return 0;
}

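/* Apply a list of MAC/VLAN filters to a VF queue with all-or-nothing
 * semantics: if configuring filter i fails, every filter that was actually
 * applied (filter->applied) is replayed with the opposite add/del sense,
 * rolling the queue back to its previous state. The filters array is freed
 * here on both the success and the failure paths.
 */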
int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mac_vlan_filters *filters,
				  int qid, bool drv_only)
{
	int rc = 0, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* Prepare ramrod params */
	for (i = 0; i < filters->count; i++) {
		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
					      &filters->filters[i], drv_only);
		if (rc)
			break;
	}

	/* Rollback if needed */
	if (i != filters->count) {
		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
			  i, filters->count);
		while (--i >= 0) {
			if (!filters->filters[i].applied)
				continue;
			filters->filters[i].add = !filters->filters[i].add;
			bnx2x_vf_mac_vlan_config(bp, vf, qid,
						 &filters->filters[i],
						 drv_only);
		}
	}

	/* It's our responsibility to free the filters */
	kfree(filters);

	return rc;
}

int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
			 struct bnx2x_vf_queue_construct_params *qctor)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
	if (rc)
		goto op_err;

	/* Schedule the configuration of any pending vlan filters */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			       BNX2X_MSG_IOV);
	return 0;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* If needed, clean the filtering data base */
	if ((qid == LEADING_IDX) &&
	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_VLAN_MAC);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_VLAN);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_MAC);
		if (rc)
			goto op_err;
	}

	/* Terminate queue */
	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
		struct bnx2x_queue_state_params qstate;

		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
		qstate.cmd = BNX2X_Q_CMD_TERMINATE;
		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc)
			goto op_err;
	}

	return 0;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

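/* Replace a VF's multicast list. A non-empty 'mcasts' array is linked into
 * a bnx2x_mcast_list_elem list and applied with BNX2X_MCAST_CMD_SET, making
 * the new list authoritative; an empty list maps to BNX2X_MCAST_CMD_DEL,
 * which clears whatever was configured before.
 */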
int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
	struct bnx2x_mcast_list_elem *mc = NULL;
	struct bnx2x_mcast_ramrod_params mcast;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare Multicast command */
	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
	mcast.mcast_obj = &vf->mcast_obj;
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
	if (mc_num) {
		mc = kcalloc(mc_num, sizeof(struct bnx2x_mcast_list_elem),
			     GFP_KERNEL);
		if (!mc) {
			BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
			return -ENOMEM;
		}
	}

	if (mc_num) {
		INIT_LIST_HEAD(&mcast.mcast_list);
		for (i = 0; i < mc_num; i++) {
			mc[i].mac = mcasts[i];
			list_add_tail(&mc[i].link,
				      &mcast.mcast_list);
		}

		/* add new mcasts */
		mcast.mcast_list_len = mc_num;
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
		if (rc)
			BNX2X_ERR("Failed to set multicasts\n");
	} else {
		/* clear existing mcasts */
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
		if (rc)
			BNX2X_ERR("Failed to remove multicasts\n");
	}

	kfree(mc);

	return rc;
}

static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}

int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
		    int qid, unsigned long accept_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
	return bnx2x_config_rx_mode(bp, &ramrod);
}

int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Remove all classification configuration for leading queue */
	if (qid == LEADING_IDX) {
		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
		if (rc)
			goto op_err;

		/* Remove filtering if feasible */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_VLAN_MAC);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_VLAN);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_MAC);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
			if (rc)
				goto op_err;
		}
	}

	/* Destroy queue */
	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
	if (rc)
		goto op_err;
	return rc;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, rc);
	return rc;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

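/* Each PGLUE "was_error" register covers 32 VFs, so the register is picked
 * by (2 * path + abs_vfid) >> 5 and the VF's bit inside it is
 * (abs_vfid & 0x1f). For example (illustrative), path 0 and abs_vfid 40
 * give group (40 >> 5) = 1, i.e. PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR,
 * bit (40 & 0x1f) = 8.
 */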
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
	   vf->abs_vfid, val);

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	resc->num_mac_filters = VF_MAC_CREDIT_CNT;
	resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

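/* Per-VF FLR flow: FLR each rx queue, drop the multicast configuration,
 * dispatch the final HW cleanup (DQ usage counter, FW final cleanup
 * command, TX flush), return the VF's resources to the free pool and
 * finally re-open its mailbox so the VF can be acquired again.
 */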
static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* the cleanup operations are valid if and only if the VF
	 * was first acquired.
	 */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_flr(bp, vf, i);
		if (rc)
			goto out;
	}

	/* remove multicasts */
	bnx2x_vf_mcast(bp, vf, NULL, 0, true);

	/* dispatch final cleanup and wait for HW queues to flush */
	bnx2x_vf_flr_clnup_hw(bp, vf);

	/* release VF resources */
	bnx2x_vf_free_resc(bp, vf);

	vf->malicious = false;

	/* re-open the mailbox */
	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	return;
out:
	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
		  vf->abs_vfid, i, rc);
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
	struct bnx2x_virtf *vf;
	int i;

	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
		/* VF should be RESET & in FLR cleanup states */
		if (bnx2x_vf(bp, i, state) != VF_RESET ||
		    !bnx2x_vf(bp, i, flr_clnup_stage))
			continue;

		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
		   i, BNX2X_NR_VIRTFN(bp));

		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		bnx2x_vf_flr(bp, vf);

		/* mark the VF to be ACKED and continue */
		vf->flr_clnup_stage = false;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since otherwise the MCP will interrupt us
	 * immediately again if we only ack some of the bits, resulting in an
	 * endless loop. This can happen for example in KVM, where an
	 * 'all ones' FLR request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write-to-clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = true;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp);
}

/* IOV global initialization routines */
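/* A VF doorbell is translated to a CID roughly as
 * cid = VF_NORM_CID_BASE + vf_num * (1 << VF_NORM_CID_WND_SIZE) + q_idx,
 * i.e. each VF owns a power-of-two window of CIDs above the PF's own L2
 * range (a rough sketch inferred from the register programming below).
 * E.g. with a window of BNX2X_VF_CID_WND bits, VF 2 queue 1 would doorbell
 * CID BNX2X_FIRST_VF_CID + 2 * (1 << BNX2X_VF_CID_WND) + 1.
 */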
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflects the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set the VFs' starting CID. If it's > 0, the preceding CIDs belong
	 * to the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size, although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold. This threshold represents the amount
	 * of doorbells allowed in the main DORQ fifo for a specific VF.
	 */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_domain(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;

	return pci_domain_nr(dev->bus);
}

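/* Standard SR-IOV routing-ID arithmetic: the i-th VF's routing ID is
 * PF routing ID + VF offset + VF stride * i, per the PCIe SR-IOV "First VF
 * Offset"/"VF Stride" fields read in bnx2x_sriov_pci_cfg_info(). The bus is
 * the high byte of that sum, the devfn the low byte. E.g. (illustrative) a
 * PF at 02:00.0 with offset 0x80 and stride 1 places VF 3 at bus 2,
 * devfn 0x83.
 */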
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

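/* An SR-IOV BAR holds the same resource for all VFs back to back, so one
 * VF's slice is pci_resource_len() / total VFs, located abs_vfid slices
 * into the region. E.g. (illustrative) a 4MB VF BAR shared by 64 VFs gives
 * each VF a 64KB window.
 */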
static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

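/* Scan the IGU CAM in normal mode. Each valid entry maps one status block
 * to a function id; an entry encoding a PF updates the "current PF"
 * context, and the VF entries that follow are attributed to that PF. This
 * is how the pool of VF status blocks (vf_sbs_pool) and each VF's
 * igu_base_id/sb_count are discovered before any VF is acquired.
 */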
static int
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
	return BP_VFDB(bp)->vf_sbs_pool;
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify this is a PF */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	/* verify ARI is enabled */
	if (!pci_ari_enabled(bp->pdev->bus)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV cannot be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU is not in normal mode, SRIOV cannot be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0) {
		err = 0;
		goto failed;
	}

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kcalloc(BNX2X_NR_VIRTFN(bp),
				sizeof(struct bnx2x_virtf),
				GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
		/* enable spoofchk by default */
		bnx2x_vf(bp, i, spoofchk) = 1;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	if (!bnx2x_get_vf_igu_cam_info(bp)) {
		BNX2X_ERR("No entries in IGU CAM for vfs\n");
		err = -EINVAL;
		goto failed;
	}

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kcalloc(BNX2X_MAX_NUM_VF_QUEUES,
				 sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Prepare the VFs event synchronization mechanism */
	mutex_init(&bp->vfdb->event_mutex);

	mutex_init(&bp->vfdb->bulletin_mutex);

	if (SHMEM2_HAS(bp, sriov_switch_mode))
		SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	bnx2x_disable_sriov(bp);

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];

		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

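/* DMA memory layout for SR-IOV, allocated below in one pass: the VFs' HW
 * (CDU) contexts split into ILT-page-sized chunks, one slowpath ramrod area
 * per VF, one aligned mailbox per VF (MBX_MSG_ALIGNED_SIZE) and one
 * bulletin board per VF (BULLETIN_CONTENT_SIZE). All of it is released in
 * bnx2x_iov_free_mem() above.
 */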
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);

		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
			if (!cxt->addr)
				goto alloc_mem_err;
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
						   tot_size);
	if (!BP_VFDB(bp)->sp_dma.addr)
		goto alloc_mem_err;
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
						  tot_size);
	if (!BP_VF_MBX_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
						       tot_size);
	if (!BP_VF_BULLETIN_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	/* sp indication is set only when vlan/mac/etc. are initialized */
	q->sp_initialized = false;

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}

static int bnx2x_max_speed_cap(struct bnx2x *bp)
{
	u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];

	if (supported &
	    (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
		return 20000;

	return 10000; /* assume lowest supported speed is 10G */
}

1437int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
1438{
1439        struct bnx2x_link_report_data *state = &bp->last_reported_link;
1440        struct pf_vf_bulletin_content *bulletin;
1441        struct bnx2x_virtf *vf;
1442        bool update = true;
1443        int rc = 0;
1444
1445        /* sanity and init */
1446        rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
1447        if (rc)
1448                return rc;
1449
1450        mutex_lock(&bp->vfdb->bulletin_mutex);
1451
1452        if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
1453                bulletin->valid_bitmap |= 1 << LINK_VALID;
1454
1455                bulletin->link_speed = state->line_speed;
1456                bulletin->link_flags = 0;
1457                if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1458                             &state->link_report_flags))
1459                        bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1460                if (test_bit(BNX2X_LINK_REPORT_FD,
1461                             &state->link_report_flags))
1462                        bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
1463                if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1464                             &state->link_report_flags))
1465                        bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
1466                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1467                             &state->link_report_flags))
1468                        bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
1469        } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
1470                   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1471                bulletin->valid_bitmap |= 1 << LINK_VALID;
1472                bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1473        } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
1474                   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1475                bulletin->valid_bitmap |= 1 << LINK_VALID;
1476                bulletin->link_speed = bnx2x_max_speed_cap(bp);
1477                bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
1478        } else {
1479                update = false;
1480        }
1481
1482        if (update) {
1483                DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
1484                   "vf %d mode %u speed %d flags %x\n", idx,
1485                   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);
1486
1487                /* Post update on VF's bulletin board */
1488                rc = bnx2x_post_vf_bulletin(bp, idx);
1489                if (rc) {
1490                        BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
1491                        goto out;
1492                }
1493        }
1494
1495out:
1496        mutex_unlock(&bp->vfdb->bulletin_mutex);
1497        return rc;
1498}
1499
1500int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
1501{
1502        struct bnx2x *bp = netdev_priv(dev);
1503        struct bnx2x_virtf *vf = BP_VF(bp, idx);
1504
1505        if (!vf)
1506                return -EINVAL;
1507
1508        if (vf->link_cfg == link_state)
1509                return 0; /* nothing to do */
1510
1511        vf->link_cfg = link_state;
1512
1513        return bnx2x_iov_link_update_vf(bp, idx);
1514}
1515
1516void bnx2x_iov_link_update(struct bnx2x *bp)
1517{
1518        int vfid;
1519
1520        if (!IS_SRIOV(bp))
1521                return;
1522
1523        for_each_vf(bp, vfid)
1524                bnx2x_iov_link_update_vf(bp, vfid);
1525}
1526
1527/* called by bnx2x_nic_load */
1528int bnx2x_iov_nic_init(struct bnx2x *bp)
1529{
1530        int vfid;
1531
1532        if (!IS_SRIOV(bp)) {
1533                DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1534                return 0;
1535        }
1536
1537        DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
1538
1539        /* let FLR complete ... */
1540        msleep(100);
1541
1542        /* initialize vf database */
1543        for_each_vf(bp, vfid) {
1544                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1545
1546                int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1547                        BNX2X_CIDS_PER_VF;
1548
1549                union cdu_context *base_cxt = (union cdu_context *)
1550                        BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1551                        (base_vf_cid & (ILT_PAGE_CIDS-1));
1552
1553                DP(BNX2X_MSG_IOV,
1554                   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1555                   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1556                   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1557
1558                /* init statically provisioned resources */
1559                bnx2x_iov_static_resc(bp, vf);
1560
1561                /* queues are initialized during VF-ACQUIRE */
1562                vf->filter_state = 0;
1563                vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1564
1565                bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
1566                                       vf_vlan_rules_cnt(vf));
1567                bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
1568                                       vf_mac_rules_cnt(vf));
1569
1570                /*  init mcast object - This object will be re-initialized
1571                 *  during VF-ACQUIRE with the proper cl_id and cid.
1572                 *  It needs to be initialized here so that it can be safely
1573                 *  handled by a subsequent FLR flow.
1574                 */
1575                bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1576                                     0xFF, 0xFF, 0xFF,
1577                                     bnx2x_vf_sp(bp, vf, mcast_rdata),
1578                                     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1579                                     BNX2X_FILTER_MCAST_PENDING,
1580                                     &vf->filter_state,
1581                                     BNX2X_OBJ_TYPE_RX_TX);
1582
1583                /* set the mailbox message addresses */
1584                BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1585                        (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1586                        MBX_MSG_ALIGNED_SIZE);
1587
1588                BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1589                        vfid * MBX_MSG_ALIGNED_SIZE;
1590
1591                /* Enable vf mailbox */
1592                bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1593        }
1594
1595        /* Final VF init */
1596        for_each_vf(bp, vfid) {
1597                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1598
1599                /* fill in the BDF and bars */
1600                vf->domain = bnx2x_vf_domain(bp, vfid);
1601                vf->bus = bnx2x_vf_bus(bp, vfid);
1602                vf->devfn = bnx2x_vf_devfn(bp, vfid);
1603                bnx2x_vf_set_bars(bp, vf);
1604
1605                DP(BNX2X_MSG_IOV,
1606                   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1607                   vf->abs_vfid, vf->bus, vf->devfn,
1608                   (unsigned)vf->bars[0].bar, vf->bars[0].size,
1609                   (unsigned)vf->bars[1].bar, vf->bars[1].size,
1610                   (unsigned)vf->bars[2].bar, vf->bars[2].size);
1611        }
1612
1613        return 0;
1614}
1615
1616/* called by bnx2x_chip_cleanup */
1617int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
1618{
1619        int i;
1620
1621        if (!IS_SRIOV(bp))
1622                return 0;
1623
1624        /* release all the VFs */
1625        for_each_vf(bp, i)
1626                bnx2x_vf_release(bp, BP_VF(bp, i));
1627
1628        return 0;
1629}
1630
1631/* called by bnx2x_init_hw_func, returns the next ilt line */
1632int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1633{
1634        int i;
1635        struct bnx2x_ilt *ilt = BP_ILT(bp);
1636
1637        if (!IS_SRIOV(bp))
1638                return line;
1639
1640        /* set vfs ilt lines */
1641        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1642                struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1643
1644                ilt->lines[line+i].page = hw_cxt->addr;
1645                ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1646                ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1647        }
1648        return line + i;
1649}
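
    /* Worked example (constants assumed): were BNX2X_VF_CIDS 2048 and
     * ILT_PAGE_CIDS 1024, the loop above would claim two ILT lines, so a
     * caller passing line == 10 would get 12 back as the next free line.
     */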
1650
1651static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
1652{
1653        return ((cid >= BNX2X_FIRST_VF_CID) &&
1654                ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1655}
1656
1657static
1658void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1659                                        struct bnx2x_vf_queue *vfq,
1660                                        union event_ring_elem *elem)
1661{
1662        unsigned long ramrod_flags = 0;
1663        int rc = 0;
1664        u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
1665
1666        /* Always push next commands out, don't wait here */
1667        set_bit(RAMROD_CONT, &ramrod_flags);
1668
1669        switch (echo >> BNX2X_SWCID_SHIFT) {
1670        case BNX2X_FILTER_MAC_PENDING:
1671                rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1672                                           &ramrod_flags);
1673                break;
1674        case BNX2X_FILTER_VLAN_PENDING:
1675                rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1676                                            &ramrod_flags);
1677                break;
1678        default:
1679                BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
1680                return;
1681        }
1682        if (rc < 0)
1683                BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
1684        else if (rc > 0)
1685                DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
1686}
1687
1688static
1689void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
1690                               struct bnx2x_virtf *vf)
1691{
1692        struct bnx2x_mcast_ramrod_params rparam = {NULL};
1693        int rc;
1694
1695        rparam.mcast_obj = &vf->mcast_obj;
1696        vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
1697
1698        /* If there are pending mcast commands - send them */
1699        if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
1700                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1701                if (rc < 0)
1702                        BNX2X_ERR("Failed to send pending mcast commands: %d\n",
1703                                  rc);
1704        }
1705}
1706
1707static
1708void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
1709                                 struct bnx2x_virtf *vf)
1710{
1711        smp_mb__before_atomic();
1712        clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1713        smp_mb__after_atomic();
1714}
1715
1716static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
1717                                           struct bnx2x_virtf *vf)
1718{
1719        vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
1720}
1721
1722int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1723{
1724        struct bnx2x_virtf *vf;
1725        int qidx = 0, abs_vfid;
1726        u8 opcode;
1727        u16 cid = 0xffff;
1728
1729        if (!IS_SRIOV(bp))
1730                return 1;
1731
1732        /* first get the cid - the only events we handle here are cfc-delete
1733         * and set-mac completion
1734         */
1735        opcode = elem->message.opcode;
1736
1737        switch (opcode) {
1738        case EVENT_RING_OPCODE_CFC_DEL:
1739                cid = SW_CID(elem->message.data.cfc_del_event.cid);
1740                DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1741                break;
1742        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1743        case EVENT_RING_OPCODE_MULTICAST_RULES:
1744        case EVENT_RING_OPCODE_FILTERS_RULES:
1745        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1746                cid = SW_CID(elem->message.data.eth_event.echo);
1747                DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1748                break;
1749        case EVENT_RING_OPCODE_VF_FLR:
1750                abs_vfid = elem->message.data.vf_flr_event.vf_id;
1751                DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
1752                   abs_vfid);
1753                goto get_vf;
1754        case EVENT_RING_OPCODE_MALICIOUS_VF:
1755                abs_vfid = elem->message.data.malicious_vf_event.vf_id;
1756                BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
1757                          abs_vfid,
1758                          elem->message.data.malicious_vf_event.err_id);
1759                goto get_vf;
1760        default:
1761                return 1;
1762        }
1763
1764        /* check if the cid is in the VF range */
1765        if (!bnx2x_iov_is_vf_cid(bp, cid)) {
1766                DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
1767                return 1;
1768        }
1769
1770        /* extract vf and rxq index from vf_cid - relies on the following:
1771         * 1. vfid on cid reflects the true abs_vfid
1772         * 2. The max number of VFs (per path) is 64
1773         */
1774        qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
1775        abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
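            /* e.g. (illustrative, assuming BNX2X_VF_CID_WND == 4): cid 0x25
             * decodes to qidx = 0x25 & 0xf = 5, abs_vfid = 0x25 >> 4 = 2
             */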
1776get_vf:
1777        vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1778
1779        if (!vf) {
1780                BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
1781                          cid, abs_vfid);
1782                return 0;
1783        }
1784
1785        switch (opcode) {
1786        case EVENT_RING_OPCODE_CFC_DEL:
1787                DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
1788                   vf->abs_vfid, qidx);
1789                vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
1790                                                       &vfq_get(vf,
1791                                                                qidx)->sp_obj,
1792                                                       BNX2X_Q_CMD_CFC_DEL);
1793                break;
1794        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1795                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
1796                   vf->abs_vfid, qidx);
1797                bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
1798                break;
1799        case EVENT_RING_OPCODE_MULTICAST_RULES:
1800                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
1801                   vf->abs_vfid, qidx);
1802                bnx2x_vf_handle_mcast_eqe(bp, vf);
1803                break;
1804        case EVENT_RING_OPCODE_FILTERS_RULES:
1805                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
1806                   vf->abs_vfid, qidx);
1807                bnx2x_vf_handle_filters_eqe(bp, vf);
1808                break;
1809        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1810                DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
1811                   vf->abs_vfid, qidx);
1812                bnx2x_vf_handle_rss_update_eqe(bp, vf);
1813                fallthrough;
1814        case EVENT_RING_OPCODE_VF_FLR:
1815                /* Do nothing for now */
1816                return 0;
1817        case EVENT_RING_OPCODE_MALICIOUS_VF:
1818                vf->malicious = true;
1819                return 0;
1820        }
1821
1822        return 0;
1823}
1824
1825static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1826{
1827        /* extract the vf from vf_cid - relies on the following:
1828         * 1. vfid on cid reflects the true abs_vfid
1829         * 2. The max number of VFs (per path) is 64
1830         */
1831        int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1832        return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1833}
1834
1835void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1836                                struct bnx2x_queue_sp_obj **q_obj)
1837{
1838        struct bnx2x_virtf *vf;
1839
1840        if (!IS_SRIOV(bp))
1841                return;
1842
1843        vf = bnx2x_vf_by_cid(bp, vf_cid);
1844
1845        if (vf) {
1846                /* extract queue index from vf_cid - relies on the following:
1847                 * 1. vfid on cid reflects the true abs_vfid
1848                 * 2. The max number of VFs (per path) is 64
1849                 */
1850                int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1851                *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1852        } else {
1853                BNX2X_ERR("No vf matching cid %d\n", vf_cid);
1854        }
1855}
1856
1857void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1858{
1859        int i;
1860        int first_queue_query_index, num_queues_req;
1861        struct stats_query_entry *cur_query_entry;
1862        u8 stats_count = 0;
1863        bool is_fcoe = false;
1864
1865        if (!IS_SRIOV(bp))
1866                return;
1867
1868        if (!NO_FCOE(bp))
1869                is_fcoe = true;
1870
1871        /* fcoe adds one global request and one queue request */
1872        num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
1873        first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
1874                (is_fcoe ? 0 : 1);
1875
1876        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1877               "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
1878               BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
1879               first_queue_query_index + num_queues_req);
1880
1881        cur_query_entry = &bp->fw_stats_req->
1882                query[first_queue_query_index + num_queues_req];
1883
1884        for_each_vf(bp, i) {
1885                int j;
1886                struct bnx2x_virtf *vf = BP_VF(bp, i);
1887
1888                if (vf->state != VF_ENABLED) {
1889                        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1890                               "vf %d not enabled so no stats for it\n",
1891                               vf->abs_vfid);
1892                        continue;
1893                }
1894
1895                if (vf->malicious) {
1896                        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1897                               "vf %d malicious so no stats for it\n",
1898                               vf->abs_vfid);
1899                        continue;
1900                }
1901
1902                DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1903                       "add addresses for vf %d\n", vf->abs_vfid);
1904                for_each_vfq(vf, j) {
1905                        struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
1906
1907                        dma_addr_t q_stats_addr =
1908                                vf->fw_stat_map + j * vf->stats_stride;
1909
1910                        /* collect stats for active queues only */
1911                        if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
1912                            BNX2X_Q_LOGICAL_STATE_STOPPED)
1913                                continue;
1914
1915                        /* create stats query entry for this queue */
1916                        cur_query_entry->kind = STATS_TYPE_QUEUE;
1917                        cur_query_entry->index = vfq_stat_id(vf, rxq);
1918                        cur_query_entry->funcID =
1919                                cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
1920                        cur_query_entry->address.hi =
1921                                cpu_to_le32(U64_HI(q_stats_addr));
1922                        cur_query_entry->address.lo =
1923                                cpu_to_le32(U64_LO(q_stats_addr));
1924                        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1925                               "added address %x %x for vf %d queue %d client %d\n",
1926                               cur_query_entry->address.hi,
1927                               cur_query_entry->address.lo,
1928                               cur_query_entry->funcID,
1929                               j, cur_query_entry->index);
1930                        cur_query_entry++;
1931                        stats_count++;
1932
1933                        /* all stats are coalesced to the leading queue */
1934                        if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
1935                                break;
1936                }
1937        }
1938        bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1939}
1940
1941/* VF API helpers */
1942static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
1943                                u8 enable)
1944{
1945        u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
1946        u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
1947
1948        REG_WR(bp, reg, val);
1949}
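
    /* The queue-zone permission entry written above evidently carries the
     * owning VF's abs_vfid with bit 6 acting as a valid flag, so writing 0
     * (enable == false) revokes that VF's access to the queue zone.
     */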
1950
1951static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
1952{
1953        int i;
1954
1955        for_each_vfq(vf, i)
1956                bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
1957                                    vfq_qzone_id(vf, vfq_get(vf, i)), false);
1958}
1959
1960static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
1961{
1962        u32 val;
1963
1964        /* clear the VF configuration - pretend */
1965        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1966        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1967        val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
1968                 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
1969        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1970        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1971}
1972
1973u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
1974{
1975        return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
1976                     BNX2X_VF_MAX_QUEUES);
1977}
1978
1979static
1980int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
1981                            struct vf_pf_resc_request *req_resc)
1982{
1983        u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1984        u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1985
1986        return ((req_resc->num_rxqs <= rxq_cnt) &&
1987                (req_resc->num_txqs <= txq_cnt) &&
1988                (req_resc->num_sbs <= vf_sb_count(vf))   &&
1989                (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
1990                (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
1991}
1992
1993/* CORE VF API */
1994int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1995                     struct vf_pf_resc_request *resc)
1996{
1997        int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
1998                BNX2X_CIDS_PER_VF;
1999
2000        union cdu_context *base_cxt = (union cdu_context *)
2001                BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2002                (base_vf_cid & (ILT_PAGE_CIDS-1));
2003        int i;
2004
2005        /* if state is 'acquired' the VF was not released or FLR'd; in
2006         * this case the returned resources match the already acquired
2007         * resources. Verify that the requested numbers do not exceed
2008         * the already acquired numbers.
2009         */
2010        if (vf->state == VF_ACQUIRED) {
2011                DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2012                   vf->abs_vfid);
2013
2014                if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2015                        BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2016                                  vf->abs_vfid);
2017                        return -EINVAL;
2018                }
2019                return 0;
2020        }
2021
2022        /* Otherwise vf state must be 'free' or 'reset' */
2023        if (vf->state != VF_FREE && vf->state != VF_RESET) {
2024                BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2025                          vf->abs_vfid, vf->state);
2026                return -EINVAL;
2027        }
2028
2029        /* static allocation:
2030         * the global maximum numbers are fixed per VF. Fail the request if
2031         * the requested numbers exceed these globals.
2032         */
2033        if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2034                DP(BNX2X_MSG_IOV,
2035                   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2036                /* set the max resource in the vf */
2037                return -ENOMEM;
2038        }
2039
2040        /* Set resource counters - a request of 0 means max available */
2041        vf_sb_count(vf) = resc->num_sbs;
2042        vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2043        vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2044
2045        DP(BNX2X_MSG_IOV,
2046           "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2047           vf_sb_count(vf), vf_rxq_count(vf),
2048           vf_txq_count(vf), vf_mac_rules_cnt(vf),
2049           vf_vlan_rules_cnt(vf));
2050
2051        /* Initialize the queues */
2052        if (!vf->vfqs) {
2053                DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2054                return -EINVAL;
2055        }
2056
2057        for_each_vfq(vf, i) {
2058                struct bnx2x_vf_queue *q = vfq_get(vf, i);
2059
2060                if (!q) {
2061                        BNX2X_ERR("q number %d was not allocated\n", i);
2062                        return -EINVAL;
2063                }
2064
2065                q->index = i;
2066                q->cxt = &((base_cxt + i)->eth);
2067                q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2068
2069                DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2070                   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2071
2072                /* init SP objects */
2073                bnx2x_vfq_init(bp, vf, q);
2074        }
2075        vf->state = VF_ACQUIRED;
2076        return 0;
2077}
2078
2079int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2080{
2081        struct bnx2x_func_init_params func_init = {0};
2082        int i;
2083
2084        /* the sb resources are initialized at this point, do the
2085         * FW/HW initializations
2086         */
2087        for_each_vf_sb(vf, i)
2088                bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2089                              vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2090
2091        /* Sanity checks */
2092        if (vf->state != VF_ACQUIRED) {
2093                DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2094                   vf->abs_vfid, vf->state);
2095                return -EINVAL;
2096        }
2097
2098        /* let FLR complete ... */
2099        msleep(100);
2100
2101        /* FLR cleanup epilogue */
2102        if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2103                return -EBUSY;
2104
2105        /* reset IGU VF statistics: MSIX */
2106        REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2107
2108        /* function setup */
2109        func_init.pf_id = BP_FUNC(bp);
2110        func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2111        bnx2x_func_init(bp, &func_init);
2112
2113        /* Enable the vf */
2114        bnx2x_vf_enable_access(bp, vf->abs_vfid);
2115        bnx2x_vf_enable_traffic(bp, vf);
2116
2117        /* queue protection table */
2118        for_each_vfq(vf, i)
2119                bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2120                                    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2121
2122        vf->state = VF_ENABLED;
2123
2124        /* update vf bulletin board */
2125        bnx2x_post_vf_bulletin(bp, vf->index);
2126
2127        return 0;
2128}
2129
2130struct set_vf_state_cookie {
2131        struct bnx2x_virtf *vf;
2132        u8 state;
2133};
2134
2135static void bnx2x_set_vf_state(void *cookie)
2136{
2137        struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2138
2139        p->vf->state = p->state;
2140}
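
    /* This helper is run via bnx2x_stats_safe_exec() in bnx2x_vf_close()
     * below, so the state flip happens while the statistics machinery is
     * quiesced and no stats ramrod can still reference the VF's buffers.
     */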
2141
2142int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2143{
2144        int rc = 0, i;
2145
2146        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2147
2148        /* Close all queues */
2149        for (i = 0; i < vf_rxq_count(vf); i++) {
2150                rc = bnx2x_vf_queue_teardown(bp, vf, i);
2151                if (rc)
2152                        goto op_err;
2153        }
2154
2155        /* disable the interrupts */
2156        DP(BNX2X_MSG_IOV, "disabling igu\n");
2157        bnx2x_vf_igu_disable(bp, vf);
2158
2159        /* disable the VF */
2160        DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2161        bnx2x_vf_clr_qtbl(bp, vf);
2162
2163        /* need to make sure there are no outstanding stats ramrods which may
2164         * cause the device to access the VF's stats buffer which it will free
2165         * as soon as we return from the close flow.
2166         */
2167        {
2168                struct set_vf_state_cookie cookie;
2169
2170                cookie.vf = vf;
2171                cookie.state = VF_ACQUIRED;
2172                rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2173                if (rc)
2174                        goto op_err;
2175        }
2176
2177        DP(BNX2X_MSG_IOV, "set state to acquired\n");
2178
2179        return 0;
2180op_err:
2181        BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
2182        return rc;
2183}
2184
2185/* VF release can be called in two cases: 1. The VF was acquired but
2186 * not enabled. 2. The VF was enabled or in the process of being
2187 * enabled.
2188 */
2189int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
2190{
2191        int rc;
2192
2193        DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2194           vf->state == VF_FREE ? "Free" :
2195           vf->state == VF_ACQUIRED ? "Acquired" :
2196           vf->state == VF_ENABLED ? "Enabled" :
2197           vf->state == VF_RESET ? "Reset" :
2198           "Unknown");
2199
2200        switch (vf->state) {
2201        case VF_ENABLED:
2202                rc = bnx2x_vf_close(bp, vf);
2203                if (rc)
2204                        goto op_err;
2205                fallthrough;    /* to release resources */
2206        case VF_ACQUIRED:
2207                DP(BNX2X_MSG_IOV, "about to free resources\n");
2208                bnx2x_vf_free_resc(bp, vf);
2209                break;
2210
2211        case VF_FREE:
2212        case VF_RESET:
2213        default:
2214                break;
2215        }
2216        return 0;
2217op_err:
2218        BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
2219        return rc;
2220}
2221
2222int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2223                        struct bnx2x_config_rss_params *rss)
2224{
2225        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2226        set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
2227        return bnx2x_config_rss(bp, rss);
2228}
2229
2230int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2231                        struct vfpf_tpa_tlv *tlv,
2232                        struct bnx2x_queue_update_tpa_params *params)
2233{
2234        aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
2235        struct bnx2x_queue_state_params qstate;
2236        int qid, rc = 0;
2237
2238        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2239
2240        /* Set ramrod params */
2241        memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
2242        memcpy(&qstate.params.update_tpa, params,
2243               sizeof(struct bnx2x_queue_update_tpa_params));
2244        qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
2245        set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
2246
2247        for (qid = 0; qid < vf_rxq_count(vf); qid++) {
2248                qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
2249                qstate.params.update_tpa.sge_map = sge_addr[qid];
2250                DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
2251                   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
2252                   U64_LO(sge_addr[qid]));
2253                rc = bnx2x_queue_state_change(bp, &qstate);
2254                if (rc) {
2255                        BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
2256                                  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
2257                                  vf->abs_vfid, qid);
2258                        return rc;
2259                }
2260        }
2261
2262        return rc;
2263}
2264
2265/* VF release ~ VF close + VF release-resources
2266 * Release is the ultimate SW shutdown and is called whenever an
2267 * irrecoverable error is encountered.
2268 */
2269int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2270{
2271        int rc;
2272
2273        DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
2274        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2275
2276        rc = bnx2x_vf_free(bp, vf);
2277        if (rc)
2278                WARN(rc,
2279                     "VF[%d] Failed to free resources for release op - rc=%d\n",
2280                     vf->abs_vfid, rc);
2281        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2282        return rc;
2283}
2284
2285void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2286                              enum channel_tlvs tlv)
2287{
2288        /* we don't lock the channel for unsupported tlvs */
2289        if (!bnx2x_tlv_supported(tlv)) {
2290                BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
2291                return;
2292        }
2293
2294        /* lock the channel */
2295        mutex_lock(&vf->op_mutex);
2296
2297        /* record the locking op */
2298        vf->op_current = tlv;
2299
2300        /* log the lock */
2301        DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2302           vf->abs_vfid, tlv);
2303}
2304
2305void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2306                                enum channel_tlvs expected_tlv)
2307{
2308        enum channel_tlvs current_tlv;
2309
2310        if (!vf) {
2311                BNX2X_ERR("VF was %p\n", vf);
2312                return;
2313        }
2314
2315        current_tlv = vf->op_current;
2316
2317        /* we don't unlock the channel for unsupported tlvs */
2318        if (!bnx2x_tlv_supported(expected_tlv))
2319                return;
2320
2321        WARN(expected_tlv != vf->op_current,
2322             "lock mismatch: expected %d found %d", expected_tlv,
2323             vf->op_current);
2324
2325        /* clear the locking op */
2326        vf->op_current = CHANNEL_TLV_NONE;
2327
2328        /* unlock the channel */
2329        mutex_unlock(&vf->op_mutex);
2330
2331        /* log the unlock */
2332        DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2333           vf->abs_vfid, current_tlv);
2334}
2335
2336static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
2337{
2338        struct bnx2x_queue_state_params q_params;
2339        u32 prev_flags;
2340        int i, rc;
2341
2342        /* Verify changes are needed and record current Tx switching state */
2343        prev_flags = bp->flags;
2344        if (enable)
2345                bp->flags |= TX_SWITCHING;
2346        else
2347                bp->flags &= ~TX_SWITCHING;
2348        if (prev_flags == bp->flags)
2349                return 0;
2350
2351        /* Verify state enables the sending of queue ramrods */
2352        if ((bp->state != BNX2X_STATE_OPEN) ||
2353            (bnx2x_get_q_logical_state(bp,
2354                                      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
2355             BNX2X_Q_LOGICAL_STATE_ACTIVE))
2356                return 0;
2357
2358        /* send a queue update ramrod to configure Tx switching */
2359        memset(&q_params, 0, sizeof(q_params));
2360        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2361        q_params.cmd = BNX2X_Q_CMD_UPDATE;
2362        __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
2363                  &q_params.params.update.update_flags);
2364        if (enable)
2365                __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2366                          &q_params.params.update.update_flags);
2367        else
2368                __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2369                            &q_params.params.update.update_flags);
2370
2371        /* send the ramrod on all the queues of the PF */
2372        for_each_eth_queue(bp, i) {
2373                struct bnx2x_fastpath *fp = &bp->fp[i];
2374                int tx_idx;
2375
2376                /* Set the appropriate Queue object */
2377                q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
2378
2379                for (tx_idx = FIRST_TX_COS_INDEX;
2380                     tx_idx < fp->max_cos; tx_idx++) {
2381                        q_params.params.update.cid_index = tx_idx;
2382
2383                        /* Update the Queue state */
2384                        rc = bnx2x_queue_state_change(bp, &q_params);
2385                        if (rc) {
2386                                BNX2X_ERR("Failed to configure Tx switching\n");
2387                                return rc;
2388                        }
2389                }
2390        }
2391
2392        DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
2393        return 0;
2394}
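
    /* Tx switching, as toggled above, makes the device forward frames
     * destined for local VFs internally instead of putting them on the
     * wire; accordingly it is enabled together with SR-IOV and disabled
     * when the VF count drops to zero (see bnx2x_sriov_configure() below).
     */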
2395
2396int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
2397{
2398        struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
2399
2400        if (!IS_SRIOV(bp)) {
2401                BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
2402                return -EINVAL;
2403        }
2404
2405        DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
2406           num_vfs_param, BNX2X_NR_VIRTFN(bp));
2407
2408        /* HW channel is only operational when PF is up */
2409        if (bp->state != BNX2X_STATE_OPEN) {
2410                BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
2411                return -EINVAL;
2412        }
2413
2414        /* we are always bound by the total_vfs in the configuration space */
2415        if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
2416                BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
2417                          num_vfs_param, BNX2X_NR_VIRTFN(bp));
2418                num_vfs_param = BNX2X_NR_VIRTFN(bp);
2419        }
2420
2421        bp->requested_nr_virtfn = num_vfs_param;
2422        if (num_vfs_param == 0) {
2423                bnx2x_set_pf_tx_switching(bp, false);
2424                bnx2x_disable_sriov(bp);
2425                return 0;
2426        } else {
2427                return bnx2x_enable_sriov(bp);
2428        }
2429}
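
    /* Typically reached through the PCI core's sriov_configure hook; an
     * illustrative invocation (device address hypothetical):
     *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
     * while writing 0 tears SR-IOV down via the branch above.
     */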
2430
2431#define IGU_ENTRY_SIZE 4
2432
2433int bnx2x_enable_sriov(struct bnx2x *bp)
2434{
2435        int rc = 0, req_vfs = bp->requested_nr_virtfn;
2436        int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
2437        u32 igu_entry, address;
2438        u16 num_vf_queues;
2439
2440        if (req_vfs == 0)
2441                return 0;
2442
2443        first_vf = bp->vfdb->sriov.first_vf_in_pf;
2444
2445        /* statically distribute vf sb pool between VFs */
2446        num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
2447                              BP_VFDB(bp)->vf_sbs_pool / req_vfs);
2448
2449        /* zero previous values learned from igu cam */
2450        for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
2451                struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2452
2453                vf->sb_count = 0;
2454                vf_sb_count(BP_VF(bp, vf_idx)) = 0;
2455        }
2456        bp->vfdb->vf_sbs_pool = 0;
2457
2458        /* prepare IGU cam */
2459        sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
2460        address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
2461        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2462                for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
2463                        igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
2464                                vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
2465                                IGU_REG_MAPPING_MEMORY_VALID;
2466                        DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
2467                           sb_idx, vf_idx);
2468                        REG_WR(bp, address, igu_entry);
2469                        sb_idx++;
2470                        address += IGU_ENTRY_SIZE;
2471                }
2472        }
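            /* For example (per the shifts above): with first_vf == 8 and
             * num_vf_queues == 2, VF 8 gets CAM entries {fid 8, vector 0,
             * valid} and {fid 8, vector 1, valid} at consecutive addresses,
             * followed by VF 9, and so on.
             */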
2473
2474        /* Reinitialize vf database according to igu cam */
2475        bnx2x_get_vf_igu_cam_info(bp);
2476
2477        DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
2478           BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
2479
2480        qcount = 0;
2481        for_each_vf(bp, vf_idx) {
2482                struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2483
2484                /* set local queue arrays */
2485                vf->vfqs = &bp->vfdb->vfqs[qcount];
2486                qcount += vf_sb_count(vf);
2487                bnx2x_iov_static_resc(bp, vf);
2488        }
2489
2490        /* prepare msix vectors in VF configuration space - the value in the
2491         * PCI configuration space should be the index of the last entry,
2492         * namely one less than the actual size of the table
2493         */
2494        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2495                bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
2496                REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
2497                       num_vf_queues - 1);
2498                DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
2499                   vf_idx, num_vf_queues - 1);
2500        }
2501        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2502
2503        /* enable sriov. This will probe all the VFs, and consequently cause
2504         * the "acquire" messages to appear on the VF PF channel.
2505         */
2506        DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
2507        bnx2x_disable_sriov(bp);
2508
2509        rc = bnx2x_set_pf_tx_switching(bp, true);
2510        if (rc)
2511                return rc;
2512
2513        rc = pci_enable_sriov(bp->pdev, req_vfs);
2514        if (rc) {
2515                BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
2516                return rc;
2517        }
2518        DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
2519        return req_vfs;
2520}
2521
2522void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
2523{
2524        int vfidx;
2525        struct pf_vf_bulletin_content *bulletin;
2526
2527        DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
2528        for_each_vf(bp, vfidx) {
2529                bulletin = BP_VF_BULLETIN(bp, vfidx);
2530                if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2531                        bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
2532                                          htons(ETH_P_8021Q));
2533        }
2534}
2535
2536void bnx2x_disable_sriov(struct bnx2x *bp)
2537{
2538        if (pci_vfs_assigned(bp->pdev)) {
2539                DP(BNX2X_MSG_IOV,
2540                   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2541                return;
2542        }
2543
2544        pci_disable_sriov(bp->pdev);
2545}
2546
2547static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
2548                            struct bnx2x_virtf **vf,
2549                            struct pf_vf_bulletin_content **bulletin,
2550                            bool test_queue)
2551{
2552        if (bp->state != BNX2X_STATE_OPEN) {
2553                BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
2554                return -EINVAL;
2555        }
2556
2557        if (!IS_SRIOV(bp)) {
2558                BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
2559                return -EINVAL;
2560        }
2561
2562        if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
2563                BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
2564                          vfidx, BNX2X_NR_VIRTFN(bp));
2565                return -EINVAL;
2566        }
2567
2568        /* init members */
2569        *vf = BP_VF(bp, vfidx);
2570        *bulletin = BP_VF_BULLETIN(bp, vfidx);
2571
2572        if (!*vf) {
2573                BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
2574                return -EINVAL;
2575        }
2576
2577        if (test_queue && !(*vf)->vfqs) {
2578                BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
2579                          vfidx);
2580                return -EINVAL;
2581        }
2582
2583        if (!*bulletin) {
2584                BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
2585                          vfidx);
2586                return -EINVAL;
2587        }
2588
2589        return 0;
2590}
2591
2592int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2593                        struct ifla_vf_info *ivi)
2594{
2595        struct bnx2x *bp = netdev_priv(dev);
2596        struct bnx2x_virtf *vf = NULL;
2597        struct pf_vf_bulletin_content *bulletin = NULL;
2598        struct bnx2x_vlan_mac_obj *mac_obj;
2599        struct bnx2x_vlan_mac_obj *vlan_obj;
2600        int rc;
2601
2602        /* sanity and init */
2603        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2604        if (rc)
2605                return rc;
2606
2607        mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2608        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2609        if (!mac_obj || !vlan_obj) {
2610                BNX2X_ERR("VF partially initialized\n");
2611                return -EINVAL;
2612        }
2613
2614        ivi->vf = vfidx;
2615        ivi->qos = 0;
2616        ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2617        ivi->min_tx_rate = 0;
2618        ivi->spoofchk = vf->spoofchk ? 1 : 0;
2619        ivi->linkstate = vf->link_cfg;
2620        if (vf->state == VF_ENABLED) {
2621                /* mac and vlan are in vlan_mac objects */
2622                if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
2623                        mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
2624                                                0, ETH_ALEN);
2625                        vlan_obj->get_n_elements(bp, vlan_obj, 1,
2626                                                 (u8 *)&ivi->vlan, 0,
2627                                                 VLAN_HLEN);
2628                }
2629        } else {
2630                mutex_lock(&bp->vfdb->bulletin_mutex);
2631                /* mac */
2632                if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
2633                        /* mac configured by ndo so it's in the bulletin board */
2634                        memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
2635                else
2636                        /* function has not been loaded yet. Show mac as 0s */
2637                        eth_zero_addr(ivi->mac);
2638
2639                /* vlan */
2640                if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2641                        /* vlan configured by ndo so it's in the bulletin board */
2642                        memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
2643                else
2644                        /* function has not been loaded yet. Show vlans as 0s */
2645                        memset(&ivi->vlan, 0, VLAN_HLEN);
2646
2647                mutex_unlock(&bp->vfdb->bulletin_mutex);
2648        }
2649
2650        return 0;
2651}
2652
2653/* New mac for VF. Consider these cases:
2654 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
2655 *    supply at acquire.
2656 * 2. VF has already been acquired but has not yet initialized - store in local
2657 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
2658 *    will configure this mac when it is ready.
2659 * 3. VF has already initialized but has not yet set up a queue - post the new
2660 *    mac on VF's bulletin board right now. VF will configure this mac when it
2661 *    is ready.
2662 * 4. VF has already set a queue - delete any macs already configured for this
2663 *    queue and manually config the new mac.
2664 * In any event, once this function has been called, refuse any attempt by the
2665 * VF to configure any mac for itself except this mac. In case of a race
2666 * where the VF fails to see the new post on its bulletin board before sending a
2667 * mac configuration request, the PF will simply fail the request and the VF can
2668 * try again after consulting its bulletin board.
2669 */
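    /* An illustrative trigger (interface name hypothetical):
     *   ip link set dev eth0 vf 0 mac aa:bb:cc:dd:ee:f0
     * reaches this function through the ndo_set_vf_mac callback.
     */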
2670int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
2671{
2672        struct bnx2x *bp = netdev_priv(dev);
2673        int rc, q_logical_state;
2674        struct bnx2x_virtf *vf = NULL;
2675        struct pf_vf_bulletin_content *bulletin = NULL;
2676
2677        if (!is_valid_ether_addr(mac)) {
2678                BNX2X_ERR("mac address invalid\n");
2679                return -EINVAL;
2680        }
2681
2682        /* sanity and init */
2683        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2684        if (rc)
2685                return rc;
2686
2687        mutex_lock(&bp->vfdb->bulletin_mutex);
2688
2689        /* update PF's copy of the VF's bulletin. Will no longer accept mac
2690         * configuration requests from the vf unless they match this mac
2691         */
2692        bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
2693        memcpy(bulletin->mac, mac, ETH_ALEN);
2694
2695        /* Post update on VF's bulletin board */
2696        rc = bnx2x_post_vf_bulletin(bp, vfidx);
2697
2698        /* release lock before checking return code */
2699        mutex_unlock(&bp->vfdb->bulletin_mutex);
2700
2701        if (rc) {
2702                BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2703                return rc;
2704        }
2705
2706        q_logical_state =
2707                bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
2708        if (vf->state == VF_ENABLED &&
2709            q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
2710                /* configure the mac in device on this vf's queue */
2711                unsigned long ramrod_flags = 0;
2712                struct bnx2x_vlan_mac_obj *mac_obj;
2713
2714                /* User should be able to see failure reason in system logs */
2715                if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2716                        return -EINVAL;
2717
2718                /* must lock vfpf channel to protect against vf flows */
2719                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2720
2721                /* remove existing eth macs */
2722                mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2723                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
2724                if (rc) {
2725                        BNX2X_ERR("failed to delete eth macs\n");
2726                        rc = -EINVAL;
2727                        goto out;
2728                }
2729
2730                /* remove existing uc list macs */
2731                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
2732                if (rc) {
2733                        BNX2X_ERR("failed to delete uc_list macs\n");
2734                        rc = -EINVAL;
2735                        goto out;
2736                }
2737
2738                /* configure the new mac to device */
2739                __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2740                bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
2741                                  BNX2X_ETH_MAC, &ramrod_flags);
2742
2743out:
2744                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2745        }
2746
2747        return rc;
2748}
2749
2750static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
2751                                         struct bnx2x_virtf *vf, bool accept)
2752{
2753        struct bnx2x_rx_mode_ramrod_params rx_ramrod;
2754        unsigned long accept_flags;
2755
2756        /* need to remove/add the VF's accept_any_vlan bit */
2757        accept_flags = bnx2x_leading_vfq(vf, accept_flags);
2758        if (accept)
2759                set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2760        else
2761                clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2762
2763        bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
2764                              accept_flags);
2765        bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
2766        bnx2x_config_rx_mode(bp, &rx_ramrod);
2767}
2768
2769static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
2770                                    u16 vlan, bool add)
2771{
2772        struct bnx2x_vlan_mac_ramrod_params ramrod_param;
2773        unsigned long ramrod_flags = 0;
2774        int rc = 0;
2775
2776        /* configure the new vlan to device */
2777        memset(&ramrod_param, 0, sizeof(ramrod_param));
2778        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2779        ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2780        ramrod_param.ramrod_flags = ramrod_flags;
2781        ramrod_param.user_req.u.vlan.vlan = vlan;
2782        ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
2783                                        : BNX2X_VLAN_MAC_DEL;
2784        rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
2785        if (rc) {
2786                BNX2X_ERR("failed to configure vlan\n");
2787                return -EINVAL;
2788        }
2789
2790        return 0;
2791}
2792
2793int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
2794                      __be16 vlan_proto)
2795{
2796        struct pf_vf_bulletin_content *bulletin = NULL;
2797        struct bnx2x *bp = netdev_priv(dev);
2798        struct bnx2x_vlan_mac_obj *vlan_obj;
2799        unsigned long vlan_mac_flags = 0;
2800        unsigned long ramrod_flags = 0;
2801        struct bnx2x_virtf *vf = NULL;
2802        int i, rc;
2803
2804        if (vlan > 4095) {
2805                BNX2X_ERR("illegal vlan value %d\n", vlan);
2806                return -EINVAL;
2807        }
2808
2809        if (vlan_proto != htons(ETH_P_8021Q))
2810                return -EPROTONOSUPPORT;
2811
2812        DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
2813           vfidx, vlan, 0);
2814
2815        /* sanity and init */
2816        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2817        if (rc)
2818                return rc;
2819
2820        /* update PF's copy of the VF's bulletin. No point in posting the vlan
2821         * to the VF since it doesn't have anything to do with it. But it is
2822         * useful to store it here in case the VF is not up yet, so we can
2823         * configure the vlan later when it comes up. Treat vlan id 0 as a
2824         * request to remove the host tag.
2825         */
	mutex_lock(&bp->vfdb->bulletin_mutex);

	if (vlan > 0)
		bulletin->valid_bitmap |= 1 << VLAN_VALID;
	else
		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
	bulletin->vlan = vlan;

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc)
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
	mutex_unlock(&bp->vfdb->bulletin_mutex);

	/* is vf initialized and queue set up? */
	if (vf->state != VF_ENABLED ||
	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
		return rc;

	/* User should be able to see error in system logs */
	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* must lock vfpf channel to protect against vf flows */
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	/* remove existing vlans */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
				  &ramrod_flags);
	if (rc) {
		BNX2X_ERR("failed to delete vlans\n");
		rc = -EINVAL;
		goto out;
	}

	/* when the hypervisor forces a vlan, clear accept_any_vlan;
	 * otherwise set it according to the VF's vlan-filtering capability
	 */
	if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
		bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);

	rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
	if (rc)
		goto out;

	/* send queue update ramrods to configure default vlan and
	 * silent vlan removal
	 */
	for_each_vfq(vf, i) {
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);

		/* validate the Q is UP */
		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
			continue;

		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			  &update_params->update_flags);
		if (vlan == 0) {
			/* if vlan is 0 then we want to leave the VF traffic
			 * untagged, and leave the incoming traffic untouched
			 * (i.e. do not remove any vlan tags).
			 */
			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				    &update_params->update_flags);
			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				    &update_params->update_flags);
		} else {
			/* configure default vlan to vf queue and set silent
			 * vlan removal (the vf remains unaware of this vlan).
			 */
			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				  &update_params->update_flags);
			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				  &update_params->update_flags);
			update_params->def_vlan = vlan;
			update_params->silent_removal_value =
				vlan & VLAN_VID_MASK;
			update_params->silent_removal_mask = VLAN_VID_MASK;
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure default VLAN on queue %d\n",
				  i);
			goto out;
		}
	}
out:
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	if (!rc)
		DP(BNX2X_MSG_IOV,
		   "updated VF[%d] vlan configuration (vlan = %d)\n",
		   vfidx, vlan);

	return rc;
}

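/* Implements the .ndo_set_vf_spoofchk callback: record the requested
 * setting and update anti-spoofing on all of the VF's active queues.
 */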
int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf;
	int i, rc = 0;

	vf = BP_VF(bp, idx);
	if (!vf)
		return -EINVAL;

	/* nothing to do */
	if (vf->spoofchk == val)
		return 0;

	vf->spoofchk = val ? 1 : 0;

	DP(BNX2X_MSG_IOV, "%s spoofchk for VF %d\n",
	   val ? "enabling" : "disabling", idx);

	/* is vf initialized and queue set up? */
	if (vf->state != VF_ENABLED ||
	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
		return rc;

	/* User should be able to see error in system logs */
	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* send queue update ramrods to configure spoofchk */
	for_each_vfq(vf, i) {
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);

		/* validate the Q is UP */
		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
			continue;

		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
			  &update_params->update_flags);
		if (val) {
			__set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
				  &update_params->update_flags);
		} else {
			__clear_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
				    &update_params->update_flags);
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to %s spoofchk on VF %d - vfq %d\n",
				  val ? "enable" : "disable", idx, i);
			goto out;
		}
	}
out:
	if (!rc)
		DP(BNX2X_MSG_IOV,
		   "%s spoofchk for VF[%d]\n", val ? "Enabled" : "Disabled",
		   idx);

	return rc;
}

/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field,
 * as the bulletin board may have been posted by a PF whose version differs
 * from that of the VF sampling it; the length is therefore computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}

/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content *bulletin;
	int attempts;

	/* sampling the structure while it is being posted may yield
	 * corrupted data; validate the crc to ensure coherency.
	 */
	for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
		u32 crc;

		/* sample the bulletin board */
		memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
		       sizeof(union pf_vf_bulletin));

		crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);

		if (bp->shadow_bulletin.content.crc == crc)
			break;

		BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
			  bp->shadow_bulletin.content.crc, crc);
	}

	if (attempts >= BULLETIN_ATTEMPTS) {
		BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
			  attempts);
		return PFVF_BULLETIN_CRC_ERR;
	}
	bulletin = &bp->shadow_bulletin.content;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin->version)
		return PFVF_BULLETIN_UNCHANGED;

	/* the mac address in bulletin board is valid and is new */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
	    !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
	}

	if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
		DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
		   bulletin->link_speed, bulletin->link_flags);

		bp->vf_link_vars.line_speed = bulletin->link_speed;
		bp->vf_link_vars.link_report_flags = 0;
		/* Link is down */
		if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &bp->vf_link_vars.link_report_flags);
		/* Full DUPLEX */
		if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &bp->vf_link_vars.link_report_flags);
		/* Rx Flow Control is ON */
		if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &bp->vf_link_vars.link_report_flags);
		/* Tx Flow Control is ON */
		if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &bp->vf_link_vars.link_report_flags);
		__bnx2x_link_report(bp);
	}

	/* copy new bulletin board to bp */
	memcpy(&bp->old_bulletin, bulletin,
	       sizeof(struct pf_vf_bulletin_content));

	return PFVF_BULLETIN_UPDATED;
}

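/* Called periodically on the VF side: sample the bulletin board and, if
 * the PF has marked the vf-pf channel down, schedule self-destruction.
 */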
void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if the channel is down we need to self-destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
				       BNX2X_MSG_IOV);
}

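/* Return the base address of this VF's doorbell area */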
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}

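/* Free the DMA-coherent vf2pf mailbox and pf2vf bulletin board buffers */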
void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
}

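/* Allocate the DMA-coherent buffers used by the vf-pf channel: the vf2pf
 * mailbox and the pf2vf bulletin board.
 */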
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
					 sizeof(struct bnx2x_vf_mbx_msg));
	if (!bp->vf2pf_mbox)
		goto alloc_mem_err;

	/* allocate pf 2 vf bulletin board */
	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
					     sizeof(union pf_vf_bulletin));
	if (!bp->pf2vf_bulletin)
		goto alloc_mem_err;

	bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);

	return 0;

alloc_mem_err:
	bnx2x_vf_pci_dealloc(bp);
	return -ENOMEM;
}

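/* PF side: mark the vf-pf channel as down by setting the CHANNEL_DOWN bit
 * in every VF's bulletin board and reposting it.
 */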
void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel
		 * down bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}

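/* IOV delayed work: handle any VF FLR events and vf-pf channel messages
 * flagged in iov_task_state.
 */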
void bnx2x_iov_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);

	if (!netif_running(bp->dev))
		return;

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
			       &bp->iov_task_state))
		bnx2x_vf_handle_flr_event(bp);

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
			       &bp->iov_task_state))
		bnx2x_vf_mbx(bp);
}

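/* Flag an IOV event and kick the IOV workqueue; the barriers ensure the
 * flag is visible before the queued work can run.
 */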
void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->iov_task_state);
	smp_mb__after_atomic();
	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
}
