linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
   1/* bnx2x_sriov.c: Broadcom Everest network driver.
   2 *
   3 * Copyright 2009-2013 Broadcom Corporation
   4 *
   5 * Unless you and Broadcom execute a separate written software license
   6 * agreement governing use of this software, this software is licensed to you
   7 * under the terms of the GNU General Public License version 2, available
   8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
   9 *
  10 * Notwithstanding the above, under no circumstances may you combine this
  11 * software in any way with any other Broadcom software provided under a
  12 * license other than the GPL, without Broadcom's express prior written
  13 * consent.
  14 *
  15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  16 * Written by: Shmulik Ravid
  17 *             Ariel Elior <ariel.elior@qlogic.com>
  18 *
  19 */
  20#include "bnx2x.h"
  21#include "bnx2x_init.h"
  22#include "bnx2x_cmn.h"
  23#include "bnx2x_sp.h"
  24#include <linux/crc32.h>
  25#include <linux/if_vlan.h>
  26
  27static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
  28                            struct bnx2x_virtf **vf,
  29                            struct pf_vf_bulletin_content **bulletin,
  30                            bool test_queue);
  31
  32/* General service functions */
  33static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
  34                                         u16 pf_id)
  35{
  36        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
  37                pf_id);
  38        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
  39                pf_id);
  40        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
  41                pf_id);
  42        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
  43                pf_id);
  44}
  45
  46static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
  47                                        u8 enable)
  48{
  49        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
  50                enable);
  51        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
  52                enable);
  53        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
  54                enable);
  55        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
  56                enable);
  57}
  58
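/* Return the index of the VF whose absolute FID matches abs_vfid, or
 * BNX2X_NR_VIRTFN(bp) if no such VF exists.
 */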
  59int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
  60{
  61        int idx;
  62
  63        for_each_vf(bp, idx)
  64                if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
  65                        break;
  66        return idx;
  67}
  68
  69static
  70struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
  71{
   72        u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
  73        return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
  74}
  75
  76static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
  77                                u8 igu_sb_id, u8 segment, u16 index, u8 op,
  78                                u8 update)
  79{
  80        /* acking a VF sb through the PF - use the GRC */
  81        u32 ctl;
  82        u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
  83        u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
  84        u32 func_encode = vf->abs_vfid;
  85        u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
  86        struct igu_regular cmd_data = {0};
  87
  88        cmd_data.sb_id_and_flags =
  89                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
  90                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
  91                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
  92                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));
  93
  94        ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
  95              func_encode << IGU_CTRL_REG_FID_SHIFT             |
  96              IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
  97
  98        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
  99           cmd_data.sb_id_and_flags, igu_addr_data);
 100        REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
 101        mmiowb();
 102        barrier();
 103
 104        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 105           ctl, igu_addr_ctl);
 106        REG_WR(bp, igu_addr_ctl, ctl);
 107        mmiowb();
 108        barrier();
 109}
 110
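/* Check that the VF's leading-queue slowpath objects were initialized before
 * they are used; log an error or a debug message per the caller's request.
 */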
 111static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
 112                                       struct bnx2x_virtf *vf,
 113                                       bool print_err)
 114{
 115        if (!bnx2x_leading_vfq(vf, sp_initialized)) {
 116                if (print_err)
 117                        BNX2X_ERR("Slowpath objects not yet initialized!\n");
 118                else
 119                        DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
 120                return false;
 121        }
 122        return true;
 123}
 124
 125/* VFOP operations states */
 126void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 127                              struct bnx2x_queue_init_params *init_params,
 128                              struct bnx2x_queue_setup_params *setup_params,
 129                              u16 q_idx, u16 sb_idx)
 130{
 131        DP(BNX2X_MSG_IOV,
  132           "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
 133           vf->abs_vfid,
 134           q_idx,
 135           sb_idx,
 136           init_params->tx.sb_cq_index,
 137           init_params->tx.hc_rate,
 138           setup_params->flags,
 139           setup_params->txq_params.traffic_type);
 140}
 141
 142void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 143                            struct bnx2x_queue_init_params *init_params,
 144                            struct bnx2x_queue_setup_params *setup_params,
 145                            u16 q_idx, u16 sb_idx)
 146{
 147        struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
 148
 149        DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
 150           "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
 151           vf->abs_vfid,
 152           q_idx,
 153           sb_idx,
 154           init_params->rx.sb_cq_index,
 155           init_params->rx.hc_rate,
 156           setup_params->gen_params.mtu,
 157           rxq_params->buf_sz,
 158           rxq_params->sge_buf_sz,
 159           rxq_params->max_sges_pkt,
 160           rxq_params->tpa_agg_sz,
 161           setup_params->flags,
 162           rxq_params->drop_flags,
 163           rxq_params->cache_line_log);
 164}
 165
 166void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
 167                           struct bnx2x_virtf *vf,
 168                           struct bnx2x_vf_queue *q,
 169                           struct bnx2x_vf_queue_construct_params *p,
 170                           unsigned long q_type)
 171{
 172        struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
 173        struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
 174
 175        /* INIT */
 176
 177        /* Enable host coalescing in the transition to INIT state */
 178        if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
 179                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
 180
 181        if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
 182                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
 183
 184        /* FW SB ID */
 185        init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 186        init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 187
 188        /* context */
 189        init_p->cxts[0] = q->cxt;
 190
 191        /* SETUP */
 192
 193        /* Setup-op general parameters */
 194        setup_p->gen_params.spcl_id = vf->sp_cl_id;
 195        setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
 196        setup_p->gen_params.fp_hsi = vf->fp_hsi;
 197
 198        /* Setup-op pause params:
 199         * Nothing to do, the pause thresholds are set by default to 0 which
 200         * effectively turns off the feature for this queue. We don't want
  201         * one queue (VF) to interfere with another queue (another VF)
 202         */
 203        if (vf->cfg_flags & VF_CFG_FW_FC)
 204                BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
 205                          vf->abs_vfid);
 206        /* Setup-op flags:
 207         * collect statistics, zero statistics, local-switching, security,
 208         * OV for Flex10, RSS and MCAST for leading
 209         */
 210        if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
 211                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
 212
 213        /* for VFs, enable tx switching, bd coherency, and mac address
 214         * anti-spoofing
 215         */
 216        __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
 217        __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
 218        __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
 219
 220        /* Setup-op rx parameters */
 221        if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
 222                struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
 223
 224                rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
 225                rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 226                rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
 227
 228                if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
 229                        rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
 230        }
 231
 232        /* Setup-op tx parameters */
 233        if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
 234                setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
 235                setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 236        }
 237}
 238
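/* Send the INIT and SETUP ramrods for a VF queue (unless it is already
 * active) and enable its IGU interrupts.
 */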
 239static int bnx2x_vf_queue_create(struct bnx2x *bp,
 240                                 struct bnx2x_virtf *vf, int qid,
 241                                 struct bnx2x_vf_queue_construct_params *qctor)
 242{
 243        struct bnx2x_queue_state_params *q_params;
 244        int rc = 0;
 245
 246        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 247
 248        /* Prepare ramrod information */
 249        q_params = &qctor->qstate;
 250        q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 251        set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
 252
 253        if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
 254            BNX2X_Q_LOGICAL_STATE_ACTIVE) {
 255                DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
 256                goto out;
 257        }
 258
 259        /* Run Queue 'construction' ramrods */
 260        q_params->cmd = BNX2X_Q_CMD_INIT;
 261        rc = bnx2x_queue_state_change(bp, q_params);
 262        if (rc)
 263                goto out;
 264
 265        memcpy(&q_params->params.setup, &qctor->prep_qsetup,
 266               sizeof(struct bnx2x_queue_setup_params));
 267        q_params->cmd = BNX2X_Q_CMD_SETUP;
 268        rc = bnx2x_queue_state_change(bp, q_params);
 269        if (rc)
 270                goto out;
 271
 272        /* enable interrupts */
 273        bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
 274                            USTORM_ID, 0, IGU_INT_ENABLE, 0);
 275out:
 276        return rc;
 277}
 278
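/* Run the HALT, TERMINATE and CFC_DEL ramrods for a VF queue (if it is not
 * already stopped) and clear its CDU context.
 */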
 279static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
 280                                  int qid)
 281{
 282        enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
 283                                       BNX2X_Q_CMD_TERMINATE,
 284                                       BNX2X_Q_CMD_CFC_DEL};
 285        struct bnx2x_queue_state_params q_params;
 286        int rc, i;
 287
 288        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 289
 290        /* Prepare ramrod information */
 291        memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
 292        q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 293        set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 294
 295        if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
 296            BNX2X_Q_LOGICAL_STATE_STOPPED) {
 297                DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
 298                goto out;
 299        }
 300
 301        /* Run Queue 'destruction' ramrods */
 302        for (i = 0; i < ARRAY_SIZE(cmds); i++) {
 303                q_params.cmd = cmds[i];
 304                rc = bnx2x_queue_state_change(bp, &q_params);
 305                if (rc) {
 306                        BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
 307                        return rc;
 308                }
 309        }
 310out:
 311        /* Clean Context */
 312        if (bnx2x_vfq(vf, qid, cxt)) {
 313                bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
 314                bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
 315        }
 316
 317        return 0;
 318}
 319
 320static void
 321bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
 322{
 323        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 324        if (vf) {
 325                /* the first igu entry belonging to VFs of this PF */
 326                if (!BP_VFDB(bp)->first_vf_igu_entry)
 327                        BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
 328
 329                /* the first igu entry belonging to this VF */
 330                if (!vf_sb_count(vf))
 331                        vf->igu_base_id = igu_sb_id;
 332
 333                ++vf_sb_count(vf);
 334                ++vf->sb_count;
 335        }
 336        BP_VFDB(bp)->vf_sbs_pool++;
 337}
 338
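/* Count the classification entries currently configured on a vlan/mac object
 * and store the result in the supplied counter.
 */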
 339static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
 340                                        struct bnx2x_vlan_mac_obj *obj,
 341                                        atomic_t *counter)
 342{
 343        struct list_head *pos;
 344        int read_lock;
 345        int cnt = 0;
 346
 347        read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
 348        if (read_lock)
  349                DP(BNX2X_MSG_SP, "Failed to take vlan mac read lock; continuing anyway\n");
 350
 351        list_for_each(pos, &obj->head)
 352                cnt++;
 353
 354        if (!read_lock)
 355                bnx2x_vlan_mac_h_read_unlock(bp, obj);
 356
 357        atomic_set(counter, cnt);
 358}
 359
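/* Delete all MAC or all VLAN classification entries of a VF queue and, for
 * VLANs, reset the queue's vlan counter.
 */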
 360static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
 361                                   int qid, bool drv_only, bool mac)
 362{
 363        struct bnx2x_vlan_mac_ramrod_params ramrod;
 364        int rc;
 365
 366        DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
 367           mac ? "MACs" : "VLANs");
 368
 369        /* Prepare ramrod params */
 370        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
 371        if (mac) {
 372                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 373                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 374        } else {
 375                set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
 376                        &ramrod.user_req.vlan_mac_flags);
 377                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 378        }
 379        ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
 380
 381        set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 382        if (drv_only)
 383                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
 384        else
 385                set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 386
 387        /* Start deleting */
 388        rc = ramrod.vlan_mac_obj->delete_all(bp,
 389                                             ramrod.vlan_mac_obj,
 390                                             &ramrod.user_req.vlan_mac_flags,
 391                                             &ramrod.ramrod_flags);
 392        if (rc) {
 393                BNX2X_ERR("Failed to delete all %s\n",
 394                          mac ? "MACs" : "VLANs");
 395                return rc;
 396        }
 397
 398        /* Clear the vlan counters */
 399        if (!mac)
 400                atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
 401
 402        return 0;
 403}
 404
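/* Add or remove a single MAC/VLAN classification rule on a VF queue,
 * verifying vlan credits on add and refreshing the vlan counter afterwards.
 */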
 405static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 406                                    struct bnx2x_virtf *vf, int qid,
 407                                    struct bnx2x_vf_mac_vlan_filter *filter,
 408                                    bool drv_only)
 409{
 410        struct bnx2x_vlan_mac_ramrod_params ramrod;
 411        int rc;
 412
 413        DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
 414           vf->abs_vfid, filter->add ? "Adding" : "Deleting",
 415           filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
 416
 417        /* Prepare ramrod params */
 418        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
 419        if (filter->type == BNX2X_VF_FILTER_VLAN) {
 420                set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
 421                        &ramrod.user_req.vlan_mac_flags);
 422                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 423                ramrod.user_req.u.vlan.vlan = filter->vid;
 424        } else {
 425                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 426                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 427                memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
 428        }
 429        ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
 430                                            BNX2X_VLAN_MAC_DEL;
 431
 432        /* Verify there are available vlan credits */
 433        if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
 434            (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
 435             vf_vlan_rules_cnt(vf))) {
 436                BNX2X_ERR("No credits for vlan [%d >= %d]\n",
 437                          atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
 438                          vf_vlan_rules_cnt(vf));
 439                return -ENOMEM;
 440        }
 441
 442        set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 443        if (drv_only)
 444                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
 445        else
 446                set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 447
 448        /* Add/Remove the filter */
 449        rc = bnx2x_config_vlan_mac(bp, &ramrod);
 450        if (rc && rc != -EEXIST) {
 451                BNX2X_ERR("Failed to %s %s\n",
 452                          filter->add ? "add" : "delete",
 453                          filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
 454                                                                "VLAN");
 455                return rc;
 456        }
 457
 458        /* Update the vlan counters */
 459        if (filter->type == BNX2X_VF_FILTER_VLAN)
 460                bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
 461                                     &bnx2x_vfq(vf, qid, vlan_count));
 462
 463        return 0;
 464}
 465
 466int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
 467                                  struct bnx2x_vf_mac_vlan_filters *filters,
 468                                  int qid, bool drv_only)
 469{
 470        int rc = 0, i;
 471
 472        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 473
 474        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
 475                return -EINVAL;
 476
 477        /* Prepare ramrod params */
 478        for (i = 0; i < filters->count; i++) {
 479                rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
 480                                              &filters->filters[i], drv_only);
 481                if (rc)
 482                        break;
 483        }
 484
 485        /* Rollback if needed */
 486        if (i != filters->count) {
 487                BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
  488                          i, filters->count);
 489                while (--i >= 0) {
 490                        filters->filters[i].add = !filters->filters[i].add;
 491                        bnx2x_vf_mac_vlan_config(bp, vf, qid,
 492                                                 &filters->filters[i],
 493                                                 drv_only);
 494                }
 495        }
 496
 497        /* It's our responsibility to free the filters */
 498        kfree(filters);
 499
 500        return rc;
 501}
 502
 503int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
 504                         struct bnx2x_vf_queue_construct_params *qctor)
 505{
 506        int rc;
 507
 508        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 509
 510        rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
 511        if (rc)
 512                goto op_err;
 513
 514        /* Configure vlan0 for leading queue */
 515        if (!qid) {
 516                struct bnx2x_vf_mac_vlan_filter filter;
 517
 518                memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
 519                filter.type = BNX2X_VF_FILTER_VLAN;
 520                filter.add = true;
 521                filter.vid = 0;
 522                rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
 523                if (rc)
 524                        goto op_err;
 525        }
 526
 527        /* Schedule the configuration of any pending vlan filters */
 528        vf->cfg_flags |= VF_CFG_VLAN;
 529        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 530                               BNX2X_MSG_IOV);
 531        return 0;
 532op_err:
 533        BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
 534        return rc;
 535}
 536
 537static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
 538                               int qid)
 539{
 540        int rc;
 541
 542        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 543
  544        /* If needed, clean the filtering database */
 545        if ((qid == LEADING_IDX) &&
 546            bnx2x_validate_vf_sp_objs(bp, vf, false)) {
 547                rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
 548                if (rc)
 549                        goto op_err;
 550                rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
 551                if (rc)
 552                        goto op_err;
 553        }
 554
 555        /* Terminate queue */
 556        if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
 557                struct bnx2x_queue_state_params qstate;
 558
 559                memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
 560                qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 561                qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
 562                qstate.cmd = BNX2X_Q_CMD_TERMINATE;
 563                set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
 564                rc = bnx2x_queue_state_change(bp, &qstate);
 565                if (rc)
 566                        goto op_err;
 567        }
 568
 569        return 0;
 570op_err:
 571        BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
 572        return rc;
 573}
 574
 575int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
 576                   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
 577{
 578        struct bnx2x_mcast_list_elem *mc = NULL;
 579        struct bnx2x_mcast_ramrod_params mcast;
 580        int rc, i;
 581
 582        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 583
 584        /* Prepare Multicast command */
 585        memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
 586        mcast.mcast_obj = &vf->mcast_obj;
 587        if (drv_only)
 588                set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
 589        else
 590                set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
 591        if (mc_num) {
 592                mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
 593                             GFP_KERNEL);
 594                if (!mc) {
  595                        BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
 596                        return -ENOMEM;
 597                }
 598        }
 599
 600        /* clear existing mcasts */
 601        mcast.mcast_list_len = vf->mcast_list_len;
 602        vf->mcast_list_len = mc_num;
 603        rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
 604        if (rc) {
 605                BNX2X_ERR("Failed to remove multicasts\n");
 606                kfree(mc);
 607                return rc;
 608        }
 609
 610        /* update mcast list on the ramrod params */
 611        if (mc_num) {
 612                INIT_LIST_HEAD(&mcast.mcast_list);
 613                for (i = 0; i < mc_num; i++) {
 614                        mc[i].mac = mcasts[i];
 615                        list_add_tail(&mc[i].link,
 616                                      &mcast.mcast_list);
 617                }
 618
 619                /* add new mcasts */
 620                mcast.mcast_list_len = mc_num;
 621                rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
 622                if (rc)
  623                        BNX2X_ERR("Failed to add multicasts\n");
 624                kfree(mc);
 625        }
 626
 627        return rc;
 628}
 629
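/* Fill an rx-mode ramrod for the given VF queue, applying the same accept
 * flags to both the Rx and Tx paths.
 */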
 630static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
 631                                  struct bnx2x_rx_mode_ramrod_params *ramrod,
 632                                  struct bnx2x_virtf *vf,
 633                                  unsigned long accept_flags)
 634{
 635        struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
 636
 637        memset(ramrod, 0, sizeof(*ramrod));
 638        ramrod->cid = vfq->cid;
 639        ramrod->cl_id = vfq_cl_id(vf, vfq);
 640        ramrod->rx_mode_obj = &bp->rx_mode_obj;
 641        ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
 642        ramrod->rx_accept_flags = accept_flags;
 643        ramrod->tx_accept_flags = accept_flags;
 644        ramrod->pstate = &vf->filter_state;
 645        ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
 646
 647        set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
 648        set_bit(RAMROD_RX, &ramrod->ramrod_flags);
 649        set_bit(RAMROD_TX, &ramrod->ramrod_flags);
 650
 651        ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
 652        ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
 653}
 654
 655int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
 656                    int qid, unsigned long accept_flags)
 657{
 658        struct bnx2x_rx_mode_ramrod_params ramrod;
 659
 660        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 661
 662        bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
 663        set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 664        vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
 665        return bnx2x_config_rx_mode(bp, &ramrod);
 666}
 667
 668int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
 669{
 670        int rc;
 671
 672        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 673
 674        /* Remove all classification configuration for leading queue */
 675        if (qid == LEADING_IDX) {
 676                rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
 677                if (rc)
 678                        goto op_err;
 679
 680                /* Remove filtering if feasible */
 681                if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
 682                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
 683                                                     false, false);
 684                        if (rc)
 685                                goto op_err;
 686                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
 687                                                     false, true);
 688                        if (rc)
 689                                goto op_err;
 690                        rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
 691                        if (rc)
 692                                goto op_err;
 693                }
 694        }
 695
 696        /* Destroy queue */
 697        rc = bnx2x_vf_queue_destroy(bp, vf, qid);
 698        if (rc)
 699                goto op_err;
 700        return rc;
 701op_err:
 702        BNX2X_ERR("vf[%d:%d] error: rc %d\n",
 703                  vf->abs_vfid, qid, rc);
 704        return rc;
 705}
 706
 707/* VF enable primitives
  708 * When pretend is required, the caller is responsible
  709 * for calling pretend prior to calling these routines.
 710 */
 711
  712/* internal vf enable - until the vf is enabled internally, all transactions
 713 * are blocked. This routine should always be called last with pretend.
 714 */
 715static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
 716{
 717        REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
 718}
 719
 720/* clears vf error in all semi blocks */
 721static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
 722{
 723        REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
 724        REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
 725        REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
 726        REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
 727}
 728
 729static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
 730{
 731        u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
 732        u32 was_err_reg = 0;
 733
 734        switch (was_err_group) {
 735        case 0:
 736            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
 737            break;
 738        case 1:
 739            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
 740            break;
 741        case 2:
 742            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
 743            break;
 744        case 3:
 745            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
 746            break;
 747        }
 748        REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
 749}
 750
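/* Reset the VF in the IGU: clear its masks and PBA status, enable it in the
 * IGU configuration, and re-initialize all of its status blocks with
 * interrupts disabled.
 */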
 751static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
 752{
 753        int i;
 754        u32 val;
 755
 756        /* Set VF masks and configuration - pretend */
 757        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 758
 759        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
 760        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
 761        REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
 762        REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
 763        REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
 764        REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
 765
 766        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
 767        val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
 768        if (vf->cfg_flags & VF_CFG_INT_SIMD)
 769                val |= IGU_VF_CONF_SINGLE_ISR_EN;
 770        val &= ~IGU_VF_CONF_PARENT_MASK;
 771        val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
 772        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
 773
 774        DP(BNX2X_MSG_IOV,
 775           "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
 776           vf->abs_vfid, val);
 777
 778        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 779
 780        /* iterate over all queues, clear sb consumer */
 781        for (i = 0; i < vf_sb_count(vf); i++) {
 782                u8 igu_sb_id = vf_igu_sb(vf, i);
 783
 784                /* zero prod memory */
 785                REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
 786
 787                /* clear sb state machine */
 788                bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
 789                                       false /* VF */);
 790
 791                /* disable + update */
 792                bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
 793                                    IGU_INT_DISABLE, 1);
 794        }
 795}
 796
 797void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
 798{
 799        /* set the VF-PF association in the FW */
 800        storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
 801        storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
 802
  803        /* clear vf errors */
 804        bnx2x_vf_semi_clear_err(bp, abs_vfid);
 805        bnx2x_vf_pglue_clear_err(bp, abs_vfid);
 806
 807        /* internal vf-enable - pretend */
 808        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 809        DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
 810        bnx2x_vf_enable_internal(bp, true);
 811        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 812}
 813
 814static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
 815{
  816        /* Reset vf in IGU; interrupts are still disabled */
 817        bnx2x_vf_igu_reset(bp, vf);
 818
 819        /* pretend to enable the vf with the PBF */
 820        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 821        REG_WR(bp, PBF_REG_DISABLE_VF, 0);
 822        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 823}
 824
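/* Check whether the PCI function backing this VF still has pending
 * transactions.
 */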
 825static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
 826{
 827        struct pci_dev *dev;
 828        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 829
 830        if (!vf)
 831                return false;
 832
 833        dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
 834        if (dev)
 835                return bnx2x_is_pcie_pending(dev);
 836        return false;
 837}
 838
 839int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
 840{
 841        /* Verify no pending pci transactions */
 842        if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
 843                BNX2X_ERR("PCIE Transactions still pending\n");
 844
 845        return 0;
 846}
 847
 848static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
 849                                          struct bnx2x_virtf *vf,
 850                                          int new)
 851{
 852        int num = vf_vlan_rules_cnt(vf);
 853        int diff = new - num;
 854        bool rc = true;
 855
 856        DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
 857           vf->abs_vfid, new, num);
 858
 859        if (diff > 0)
 860                rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
 861        else if (diff < 0)
 862                rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
 863
 864        if (rc)
 865                vf_vlan_rules_cnt(vf) = new;
 866        else
 867                DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
 868                   vf->abs_vfid);
 869}
 870
 871/* must be called after the number of PF queues and the number of VFs are
 872 * both known
 873 */
 874static void
 875bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 876{
 877        struct vf_pf_resc_request *resc = &vf->alloc_resc;
 878        u16 vlan_count = 0;
 879
 880        /* will be set only during VF-ACQUIRE */
 881        resc->num_rxqs = 0;
 882        resc->num_txqs = 0;
 883
 884        /* no credit calculations for macs (just yet) */
 885        resc->num_mac_filters = 1;
 886
 887        /* divvy up vlan rules */
 888        bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
 889        vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
 890        vlan_count = 1 << ilog2(vlan_count);
 891        bnx2x_iov_re_set_vlan_filters(bp, vf,
 892                                      vlan_count / BNX2X_NR_VIRTFN(bp));
 893
 894        /* no real limitation */
 895        resc->num_mc_filters = 0;
 896
  897        /* num_sbs was already counted (vf->sb_count) during the IGU CAM scan */
 898        resc->num_sbs = vf->sb_count;
 899}
 900
 901/* FLR routines: */
 902static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 903{
 904        /* reset the state variables */
 905        bnx2x_iov_static_resc(bp, vf);
 906        vf->state = VF_FREE;
 907}
 908
 909static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
 910{
 911        u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
 912
 913        /* DQ usage counter */
 914        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 915        bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
 916                                        "DQ VF usage counter timed out",
 917                                        poll_cnt);
 918        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 919
 920        /* FW cleanup command - poll for the results */
 921        if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
 922                                   poll_cnt))
 923                BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
 924
 925        /* verify TX hw is flushed */
 926        bnx2x_tx_hw_flushed(bp, poll_cnt);
 927}
 928
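/* FLR cleanup for a single VF: flush its queues and multicasts, run the
 * final HW cleanup, free its resources and re-open its mailbox.
 */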
 929static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 930{
 931        int rc, i;
 932
 933        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 934
 935        /* the cleanup operations are valid if and only if the VF
 936         * was first acquired.
 937         */
 938        for (i = 0; i < vf_rxq_count(vf); i++) {
 939                rc = bnx2x_vf_queue_flr(bp, vf, i);
 940                if (rc)
 941                        goto out;
 942        }
 943
 944        /* remove multicasts */
 945        bnx2x_vf_mcast(bp, vf, NULL, 0, true);
 946
 947        /* dispatch final cleanup and wait for HW queues to flush */
 948        bnx2x_vf_flr_clnup_hw(bp, vf);
 949
 950        /* release VF resources */
 951        bnx2x_vf_free_resc(bp, vf);
 952
 953        /* re-open the mailbox */
 954        bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
 955        return;
 956out:
 957        BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
 958                  vf->abs_vfid, i, rc);
 959}
 960
 961static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
 962{
 963        struct bnx2x_virtf *vf;
 964        int i;
 965
 966        for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
 967                /* VF should be RESET & in FLR cleanup states */
 968                if (bnx2x_vf(bp, i, state) != VF_RESET ||
 969                    !bnx2x_vf(bp, i, flr_clnup_stage))
 970                        continue;
 971
 972                DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
 973                   i, BNX2X_NR_VIRTFN(bp));
 974
 975                vf = BP_VF(bp, i);
 976
 977                /* lock the vf pf channel */
 978                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
 979
 980                /* invoke the VF FLR SM */
 981                bnx2x_vf_flr(bp, vf);
 982
 983                /* mark the VF to be ACKED and continue */
 984                vf->flr_clnup_stage = false;
 985                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
 986        }
 987
 988        /* Acknowledge the handled VFs.
  989         * We acknowledge all the VFs for which an FLR was requested, even
  990         * those we never opened, since the MCP will otherwise interrupt us
  991         * immediately again if we only ack some of the bits, resulting in
  992         * an endless loop. This can happen for example in KVM, where an
  993         * 'all ones' FLR request is sometimes given by the hypervisor.
 994         */
 995        DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
 996           bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
 997        for (i = 0; i < FLRD_VFS_DWORDS; i++)
 998                SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
 999                          bp->vfdb->flrd_vfs[i]);
1000
1001        bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
1002
1003        /* clear the acked bits - better yet if the MCP implemented
1004         * write to clear semantics
1005         */
1006        for (i = 0; i < FLRD_VFS_DWORDS; i++)
1007                SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
1008}
1009
1010void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
1011{
1012        int i;
1013
1014        /* Read FLR'd VFs */
1015        for (i = 0; i < FLRD_VFS_DWORDS; i++)
1016                bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
1017
1018        DP(BNX2X_MSG_MCP,
1019           "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
1020           bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
1021
1022        for_each_vf(bp, i) {
1023                struct bnx2x_virtf *vf = BP_VF(bp, i);
1024                u32 reset = 0;
1025
1026                if (vf->abs_vfid < 32)
1027                        reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
1028                else
1029                        reset = bp->vfdb->flrd_vfs[1] &
1030                                (1 << (vf->abs_vfid - 32));
1031
1032                if (reset) {
1033                        /* set as reset and ready for cleanup */
1034                        vf->state = VF_RESET;
1035                        vf->flr_clnup_stage = true;
1036
1037                        DP(BNX2X_MSG_IOV,
1038                           "Initiating Final cleanup for VF %d\n",
1039                           vf->abs_vfid);
1040                }
1041        }
1042
 1043        /* do the FLR cleanup for all marked VFs */
1044        bnx2x_vf_flr_clnup(bp);
1045}
1046
1047/* IOV global initialization routines  */
1048void bnx2x_iov_init_dq(struct bnx2x *bp)
1049{
1050        if (!IS_SRIOV(bp))
1051                return;
1052
 1053        /* Set the DQ such that the CID reflects the abs_vfid */
1054        REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1055        REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1056
 1057        /* Set the VFs' starting CID. If it's > 0, the preceding CIDs belong to
1058         * the PF L2 queues
1059         */
1060        REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1061
1062        /* The VF window size is the log2 of the max number of CIDs per VF */
1063        REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1064
 1065        /* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
 1066         * the PF doorbell size, although the two are independent.
1067         */
1068        REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
1069
1070        /* No security checks for now -
1071         * configure single rule (out of 16) mask = 0x1, value = 0x0,
1072         * CID range 0 - 0x1ffff
1073         */
1074        REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1075        REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1076        REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1077        REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1078
 1079        /* set the VF doorbell threshold. This threshold represents the number
 1080         * of doorbells allowed in the main DORQ FIFO for a specific VF.
1081         */
1082        REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
1083}
1084
1085void bnx2x_iov_init_dmae(struct bnx2x *bp)
1086{
1087        if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
1088                REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1089}
1090
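/* Compute a VF's PCI bus number from the PF's devfn and the SR-IOV offset
 * and stride.
 */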
1091static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1092{
1093        struct pci_dev *dev = bp->pdev;
1094        struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1095
1096        return dev->bus->number + ((dev->devfn + iov->offset +
1097                                    iov->stride * vfid) >> 8);
1098}
1099
1100static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1101{
1102        struct pci_dev *dev = bp->pdev;
1103        struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1104
1105        return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1106}
1107
1108static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1109{
1110        int i, n;
1111        struct pci_dev *dev = bp->pdev;
1112        struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1113
1114        for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1115                u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1116                u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1117
1118                size /= iov->total;
1119                vf->bars[n].bar = start + size * vf->abs_vfid;
1120                vf->bars[n].size = size;
1121        }
1122}
1123
1124static int bnx2x_ari_enabled(struct pci_dev *dev)
1125{
1126        return dev->bus->self && dev->bus->self->ari_enabled;
1127}
1128
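/* Scan the IGU CAM and record the status blocks that belong to this PF's
 * VFs; returns the total number of SBs gathered into the VF pool.
 */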
1129static int
1130bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1131{
1132        int sb_id;
1133        u32 val;
1134        u8 fid, current_pf = 0;
1135
1136        /* IGU in normal mode - read CAM */
1137        for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1138                val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1139                if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1140                        continue;
1141                fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1142                if (fid & IGU_FID_ENCODE_IS_PF)
1143                        current_pf = fid & IGU_FID_PF_NUM_MASK;
1144                else if (current_pf == BP_FUNC(bp))
1145                        bnx2x_vf_set_igu_info(bp, sb_id,
1146                                              (fid & IGU_FID_VF_NUM_MASK));
1147                DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1148                   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1149                   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1150                   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1151                   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1152        }
1153        DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
1154        return BP_VFDB(bp)->vf_sbs_pool;
1155}
1156
1157static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1158{
1159        if (bp->vfdb) {
1160                kfree(bp->vfdb->vfqs);
1161                kfree(bp->vfdb->vfs);
1162                kfree(bp->vfdb);
1163        }
1164        bp->vfdb = NULL;
1165}
1166
1167static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1168{
1169        int pos;
1170        struct pci_dev *dev = bp->pdev;
1171
1172        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1173        if (!pos) {
1174                BNX2X_ERR("failed to find SRIOV capability in device\n");
1175                return -ENODEV;
1176        }
1177
1178        iov->pos = pos;
1179        DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1180        pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1181        pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1182        pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1183        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1184        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1185        pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1186        pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1187        pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1188
1189        return 0;
1190}
1191
1192static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1193{
1194        u32 val;
1195
1196        /* read the SRIOV capability structure
1197         * The fields can be read via configuration read or
1198         * directly from the device (starting at offset PCICFG_OFFSET)
1199         */
1200        if (bnx2x_sriov_pci_cfg_info(bp, iov))
1201                return -ENODEV;
1202
1203        /* get the number of SRIOV bars */
1204        iov->nres = 0;
1205
1206        /* read the first_vfid */
1207        val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1208        iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1209                               * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1210
1211        DP(BNX2X_MSG_IOV,
1212           "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1213           BP_FUNC(bp),
1214           iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1215           iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1216
1217        return 0;
1218}
1219
1220/* must be called after PF bars are mapped */
1221int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1222                       int num_vfs_param)
1223{
1224        int err, i;
1225        struct bnx2x_sriov *iov;
1226        struct pci_dev *dev = bp->pdev;
1227
1228        bp->vfdb = NULL;
1229
 1230        /* verify this is a PF */
1231        if (IS_VF(bp))
1232                return 0;
1233
1234        /* verify sriov capability is present in configuration space */
1235        if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1236                return 0;
1237
1238        /* verify chip revision */
1239        if (CHIP_IS_E1x(bp))
1240                return 0;
1241
1242        /* check if SRIOV support is turned off */
1243        if (!num_vfs_param)
1244                return 0;
1245
1246        /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1247        if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1248                BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1249                          BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1250                return 0;
1251        }
1252
1253        /* SRIOV can be enabled only with MSIX */
1254        if (int_mode_param == BNX2X_INT_MODE_MSI ||
1255            int_mode_param == BNX2X_INT_MODE_INTX) {
1256                BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1257                return 0;
1258        }
1259
1260        err = -EIO;
1261        /* verify ari is enabled */
1262        if (!bnx2x_ari_enabled(bp->pdev)) {
 1263                BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV cannot be enabled\n");
1264                return 0;
1265        }
1266
1267        /* verify igu is in normal mode */
1268        if (CHIP_INT_MODE_IS_BC(bp)) {
 1269                BNX2X_ERR("IGU is not in normal mode, SRIOV cannot be enabled\n");
1270                return 0;
1271        }
1272
1273        /* allocate the vfs database */
1274        bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1275        if (!bp->vfdb) {
1276                BNX2X_ERR("failed to allocate vf database\n");
1277                err = -ENOMEM;
1278                goto failed;
1279        }
1280
1281        /* get the sriov info - Linux already collected all the pertinent
1282         * information, however the sriov structure is for the private use
1283         * of the pci module. Also we want this information regardless
 1284         * of the hypervisor.
1285         */
1286        iov = &(bp->vfdb->sriov);
1287        err = bnx2x_sriov_info(bp, iov);
1288        if (err)
1289                goto failed;
1290
 1291        /* SR-IOV capability was enabled but there are no VFs */
1292        if (iov->total == 0)
1293                goto failed;
1294
1295        iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
1296
1297        DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
1298           num_vfs_param, iov->nr_virtfn);
1299
1300        /* allocate the vf array */
1301        bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
1302                                BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
1303        if (!bp->vfdb->vfs) {
1304                BNX2X_ERR("failed to allocate vf array\n");
1305                err = -ENOMEM;
1306                goto failed;
1307        }
1308
1309        /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
1310        for_each_vf(bp, i) {
1311                bnx2x_vf(bp, i, index) = i;
1312                bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
1313                bnx2x_vf(bp, i, state) = VF_FREE;
1314                mutex_init(&bnx2x_vf(bp, i, op_mutex));
1315                bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
1316        }
1317
1318        /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1319        if (!bnx2x_get_vf_igu_cam_info(bp)) {
1320                BNX2X_ERR("No entries in IGU CAM for vfs\n");
1321                err = -EINVAL;
1322                goto failed;
1323        }
1324
1325        /* allocate the queue arrays for all VFs */
1326        bp->vfdb->vfqs = kzalloc(
1327                BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
1328                GFP_KERNEL);
1329
1330        if (!bp->vfdb->vfqs) {
1331                BNX2X_ERR("failed to allocate vf queue array\n");
1332                err = -ENOMEM;
1333                goto failed;
1334        }
1335
1336        /* Prepare the VFs event synchronization mechanism */
1337        mutex_init(&bp->vfdb->event_mutex);
1338
1339        mutex_init(&bp->vfdb->bulletin_mutex);
1340
1341        return 0;
1342failed:
1343        DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
1344        __bnx2x_iov_free_vfdb(bp);
1345        return err;
1346}
1347
1348void bnx2x_iov_remove_one(struct bnx2x *bp)
1349{
1350        int vf_idx;
1351
1352        /* if SRIOV is not enabled there's nothing to do */
1353        if (!IS_SRIOV(bp))
1354                return;
1355
1356        bnx2x_disable_sriov(bp);
1357
1358        /* disable access to all VFs */
1359        for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
1360                bnx2x_pretend_func(bp,
1361                                   HW_VF_HANDLE(bp,
1362                                                bp->vfdb->sriov.first_vf_in_pf +
1363                                                vf_idx));
1364                DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
1365                   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
1366                bnx2x_vf_enable_internal(bp, 0);
1367                bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1368        }
1369
1370        /* free vf database */
1371        __bnx2x_iov_free_vfdb(bp);
1372}
1373
1374void bnx2x_iov_free_mem(struct bnx2x *bp)
1375{
1376        int i;
1377
1378        if (!IS_SRIOV(bp))
1379                return;
1380
1381        /* free vfs hw contexts */
1382        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1383                struct hw_dma *cxt = &bp->vfdb->context[i];
1384                BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
1385        }
1386
1387        BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
1388                       BP_VFDB(bp)->sp_dma.mapping,
1389                       BP_VFDB(bp)->sp_dma.size);
1390
1391        BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
1392                       BP_VF_MBX_DMA(bp)->mapping,
1393                       BP_VF_MBX_DMA(bp)->size);
1394
1395        BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
1396                       BP_VF_BULLETIN_DMA(bp)->mapping,
1397                       BP_VF_BULLETIN_DMA(bp)->size);
1398}
1399
1400int bnx2x_iov_alloc_mem(struct bnx2x *bp)
1401{
1402        size_t tot_size;
1403        int i, rc = 0;
1404
1405        if (!IS_SRIOV(bp))
1406                return rc;
1407
1408        /* allocate vfs hw contexts */
1409        tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
1410                BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
1411
1412        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1413                struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
1414                cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
1415
1416                if (cxt->size) {
1417                        cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
1418                        if (!cxt->addr)
1419                                goto alloc_mem_err;
1420                } else {
1421                        cxt->addr = NULL;
1422                        cxt->mapping = 0;
1423                }
1424                tot_size -= cxt->size;
1425        }
1426
1427        /* allocate vfs ramrods dma memory - client_init and set_mac */
1428        tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
1429        BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
1430                                                   tot_size);
1431        if (!BP_VFDB(bp)->sp_dma.addr)
1432                goto alloc_mem_err;
1433        BP_VFDB(bp)->sp_dma.size = tot_size;
1434
1435        /* allocate mailboxes */
1436        tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
1437        BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
1438                                                  tot_size);
1439        if (!BP_VF_MBX_DMA(bp)->addr)
1440                goto alloc_mem_err;
1441
1442        BP_VF_MBX_DMA(bp)->size = tot_size;
1443
1444        /* allocate local bulletin boards */
1445        tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
1446        BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
1447                                                       tot_size);
1448        if (!BP_VF_BULLETIN_DMA(bp)->addr)
1449                goto alloc_mem_err;
1450
1451        BP_VF_BULLETIN_DMA(bp)->size = tot_size;
1452
1453        return 0;
1454
1455alloc_mem_err:
1456        return -ENOMEM;
1457}
1458
1459static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
1460                           struct bnx2x_vf_queue *q)
1461{
1462        u8 cl_id = vfq_cl_id(vf, q);
1463        u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
1464        unsigned long q_type = 0;
1465
1466        set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1467        set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1468
1469        /* Queue State object */
1470        bnx2x_init_queue_obj(bp, &q->sp_obj,
1471                             cl_id, &q->cid, 1, func_id,
1472                             bnx2x_vf_sp(bp, vf, q_data),
1473                             bnx2x_vf_sp_map(bp, vf, q_data),
1474                             q_type);
1475
1476        /* sp indication is set only when vlan/mac/etc. are initialized */
1477        q->sp_initialized = false;
1478
1479        DP(BNX2X_MSG_IOV,
1480           "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
1481           vf->abs_vfid, q->sp_obj.func_id, q->cid);
1482}
1483
1484static int bnx2x_max_speed_cap(struct bnx2x *bp)
1485{
1486        u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];
1487
1488        if (supported &
1489            (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
1490                return 20000;
1491
1492        return 10000; /* assume lowest supported speed is 10G */
1493}
1494
1495int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
1496{
1497        struct bnx2x_link_report_data *state = &bp->last_reported_link;
1498        struct pf_vf_bulletin_content *bulletin;
1499        struct bnx2x_virtf *vf;
1500        bool update = true;
1501        int rc = 0;
1502
1503        /* sanity and init */
1504        rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
1505        if (rc)
1506                return rc;
1507
1508        mutex_lock(&bp->vfdb->bulletin_mutex);
1509
1510        if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
1511                bulletin->valid_bitmap |= 1 << LINK_VALID;
1512
1513                bulletin->link_speed = state->line_speed;
1514                bulletin->link_flags = 0;
1515                if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1516                             &state->link_report_flags))
1517                        bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1518                if (test_bit(BNX2X_LINK_REPORT_FD,
1519                             &state->link_report_flags))
1520                        bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
1521                if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1522                             &state->link_report_flags))
1523                        bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
1524                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1525                             &state->link_report_flags))
1526                        bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
1527        } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
1528                   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1529                bulletin->valid_bitmap |= 1 << LINK_VALID;
1530                bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1531        } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
1532                   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1533                bulletin->valid_bitmap |= 1 << LINK_VALID;
1534                bulletin->link_speed = bnx2x_max_speed_cap(bp);
1535                bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
1536        } else {
1537                update = false;
1538        }
1539
1540        if (update) {
1541                DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
1542                   "vf %d mode %u speed %d flags %x\n", idx,
1543                   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);
1544
1545                /* Post update on VF's bulletin board */
1546                rc = bnx2x_post_vf_bulletin(bp, idx);
1547                if (rc) {
1548                        BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
1549                        goto out;
1550                }
1551        }
1552
1553out:
1554        mutex_unlock(&bp->vfdb->bulletin_mutex);
1555        return rc;
1556}
1557
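/* Backs the .ndo_set_vf_link_state callback; typically reached from
 * userspace via "ip link set <pf-ifname> vf <n> state auto|enable|disable"
 * (illustrative command; <pf-ifname> and <n> are placeholders).
 */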
1558int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
1559{
1560        struct bnx2x *bp = netdev_priv(dev);
1561        struct bnx2x_virtf *vf = BP_VF(bp, idx);
1562
1563        if (!vf)
1564                return -EINVAL;
1565
1566        if (vf->link_cfg == link_state)
1567                return 0; /* nothing to do */
1568
1569        vf->link_cfg = link_state;
1570
1571        return bnx2x_iov_link_update_vf(bp, idx);
1572}
1573
1574void bnx2x_iov_link_update(struct bnx2x *bp)
1575{
1576        int vfid;
1577
1578        if (!IS_SRIOV(bp))
1579                return;
1580
1581        for_each_vf(bp, vfid)
1582                bnx2x_iov_link_update_vf(bp, vfid);
1583}
1584
1585/* called by bnx2x_nic_load */
1586int bnx2x_iov_nic_init(struct bnx2x *bp)
1587{
1588        int vfid;
1589
1590        if (!IS_SRIOV(bp)) {
1591                DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1592                return 0;
1593        }
1594
1595        DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
1596
1597        /* let FLR complete ... */
1598        msleep(100);
1599
1600        /* initialize vf database */
1601        for_each_vf(bp, vfid) {
1602                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1603
1604                int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1605                        BNX2X_CIDS_PER_VF;
1606
1607                union cdu_context *base_cxt = (union cdu_context *)
1608                        BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1609                        (base_vf_cid & (ILT_PAGE_CIDS-1));
1610
1611                DP(BNX2X_MSG_IOV,
1612                   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1613                   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1614                   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1615
1616                /* init statically provisioned resources */
1617                bnx2x_iov_static_resc(bp, vf);
1618
1619                /* queues are initialized during VF-ACQUIRE */
1620                vf->filter_state = 0;
1621                vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1622
1623                /*  init mcast object - This object will be re-initialized
1624                 *  during VF-ACQUIRE with the proper cl_id and cid.
1625                 *  It needs to be initialized here so that it can be safely
1626                 *  handled by a subsequent FLR flow.
1627                 */
1628                vf->mcast_list_len = 0;
1629                bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1630                                     0xFF, 0xFF, 0xFF,
1631                                     bnx2x_vf_sp(bp, vf, mcast_rdata),
1632                                     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1633                                     BNX2X_FILTER_MCAST_PENDING,
1634                                     &vf->filter_state,
1635                                     BNX2X_OBJ_TYPE_RX_TX);
1636
1637                /* set the mailbox message addresses */
1638                BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1639                        (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1640                        MBX_MSG_ALIGNED_SIZE);
1641
1642                BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1643                        vfid * MBX_MSG_ALIGNED_SIZE;
1644
1645                /* Enable vf mailbox */
1646                bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1647        }
1648
1649        /* Final VF init */
1650        for_each_vf(bp, vfid) {
1651                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1652
1653                /* fill in the BDF and bars */
1654                vf->bus = bnx2x_vf_bus(bp, vfid);
1655                vf->devfn = bnx2x_vf_devfn(bp, vfid);
1656                bnx2x_vf_set_bars(bp, vf);
1657
1658                DP(BNX2X_MSG_IOV,
1659                   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1660                   vf->abs_vfid, vf->bus, vf->devfn,
1661                   (unsigned)vf->bars[0].bar, vf->bars[0].size,
1662                   (unsigned)vf->bars[1].bar, vf->bars[1].size,
1663                   (unsigned)vf->bars[2].bar, vf->bars[2].size);
1664        }
1665
1666        return 0;
1667}
1668
1669/* called by bnx2x_chip_cleanup */
1670int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
1671{
1672        int i;
1673
1674        if (!IS_SRIOV(bp))
1675                return 0;
1676
1677        /* release all the VFs */
1678        for_each_vf(bp, i)
1679                bnx2x_vf_release(bp, BP_VF(bp, i));
1680
1681        return 0;
1682}
1683
1684/* called by bnx2x_init_hw_func, returns the next ilt line */
1685int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1686{
1687        int i;
1688        struct bnx2x_ilt *ilt = BP_ILT(bp);
1689
1690        if (!IS_SRIOV(bp))
1691                return line;
1692
1693        /* set vfs ilt lines */
1694        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1695                struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1696
1697                ilt->lines[line+i].page = hw_cxt->addr;
1698                ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1699                ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1700        }
1701        return line + i;
1702}
1703
1704static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
1705{
1706        return ((cid >= BNX2X_FIRST_VF_CID) &&
1707                ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1708}
1709
1710static
1711void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1712                                        struct bnx2x_vf_queue *vfq,
1713                                        union event_ring_elem *elem)
1714{
1715        unsigned long ramrod_flags = 0;
1716        int rc = 0;
1717
1718        /* Always push next commands out, don't wait here */
1719        set_bit(RAMROD_CONT, &ramrod_flags);
1720
1721        switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
1722        case BNX2X_FILTER_MAC_PENDING:
1723                rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1724                                           &ramrod_flags);
1725                break;
1726        case BNX2X_FILTER_VLAN_PENDING:
1727                rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1728                                            &ramrod_flags);
1729                break;
1730        default:
1731                BNX2X_ERR("Unsupported classification command: %d\n",
1732                          elem->message.data.eth_event.echo);
1733                return;
1734        }
1735        if (rc < 0)
1736                BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
1737        else if (rc > 0)
1738                DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
1739}
1740
1741static
1742void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
1743                               struct bnx2x_virtf *vf)
1744{
1745        struct bnx2x_mcast_ramrod_params rparam = {NULL};
1746        int rc;
1747
1748        rparam.mcast_obj = &vf->mcast_obj;
1749        vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
1750
1751        /* If there are pending mcast commands - send them */
1752        if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
1753                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1754                if (rc < 0)
1755                        BNX2X_ERR("Failed to send pending mcast commands: %d\n",
1756                                  rc);
1757        }
1758}
1759
1760static
1761void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
1762                                 struct bnx2x_virtf *vf)
1763{
1764        smp_mb__before_atomic();
1765        clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1766        smp_mb__after_atomic();
1767}
1768
1769static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
1770                                           struct bnx2x_virtf *vf)
1771{
1772        vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
1773}
1774
1775int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1776{
1777        struct bnx2x_virtf *vf;
1778        int qidx = 0, abs_vfid;
1779        u8 opcode;
1780        u16 cid = 0xffff;
1781
1782        if (!IS_SRIOV(bp))
1783                return 1;
1784
1785        /* first get the cid - the only events we handle here are cfc-delete
1786         * and set-mac completion
1787         */
1788        opcode = elem->message.opcode;
1789
1790        switch (opcode) {
1791        case EVENT_RING_OPCODE_CFC_DEL:
1792                cid = SW_CID((__force __le32)
1793                             elem->message.data.cfc_del_event.cid);
1794                DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1795                break;
1796        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1797        case EVENT_RING_OPCODE_MULTICAST_RULES:
1798        case EVENT_RING_OPCODE_FILTERS_RULES:
1799        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1800                cid = (elem->message.data.eth_event.echo &
1801                       BNX2X_SWCID_MASK);
1802                DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1803                break;
1804        case EVENT_RING_OPCODE_VF_FLR:
1805                abs_vfid = elem->message.data.vf_flr_event.vf_id;
1806                DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
1807                   abs_vfid);
1808                goto get_vf;
1809        case EVENT_RING_OPCODE_MALICIOUS_VF:
1810                abs_vfid = elem->message.data.malicious_vf_event.vf_id;
1811                BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
1812                          abs_vfid,
1813                          elem->message.data.malicious_vf_event.err_id);
1814                goto get_vf;
1815        default:
1816                return 1;
1817        }
1818
1819        /* check if the cid is the VF range */
1820        if (!bnx2x_iov_is_vf_cid(bp, cid)) {
1821                DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
1822                return 1;
1823        }
1824
1825        /* extract vf and rxq index from vf_cid - relies on the following:
1826         * 1. vfid on cid reflects the true abs_vfid
1827         * 2. The max number of VFs (per path) is 64
1828         */
1829        qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
1830        abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1831get_vf:
1832        vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1833
1834        if (!vf) {
1835                BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
1836                          cid, abs_vfid);
1837                return 0;
1838        }
1839
1840        switch (opcode) {
1841        case EVENT_RING_OPCODE_CFC_DEL:
1842                DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
1843                   vf->abs_vfid, qidx);
1844                vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
1845                                                       &vfq_get(vf,
1846                                                                qidx)->sp_obj,
1847                                                       BNX2X_Q_CMD_CFC_DEL);
1848                break;
1849        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1850                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
1851                   vf->abs_vfid, qidx);
1852                bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
1853                break;
1854        case EVENT_RING_OPCODE_MULTICAST_RULES:
1855                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
1856                   vf->abs_vfid, qidx);
1857                bnx2x_vf_handle_mcast_eqe(bp, vf);
1858                break;
1859        case EVENT_RING_OPCODE_FILTERS_RULES:
1860                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
1861                   vf->abs_vfid, qidx);
1862                bnx2x_vf_handle_filters_eqe(bp, vf);
1863                break;
1864        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1865                DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
1866                   vf->abs_vfid, qidx);
1867                bnx2x_vf_handle_rss_update_eqe(bp, vf);
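                /* fall through */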
1868        case EVENT_RING_OPCODE_VF_FLR:
1869        case EVENT_RING_OPCODE_MALICIOUS_VF:
1870                /* Do nothing for now */
1871                return 0;
1872        }
1873
1874        return 0;
1875}
1876
1877static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1878{
1879        /* extract the vf from vf_cid - relies on the following:
1880         * 1. vfid on cid reflects the true abs_vfid
1881         * 2. The max number of VFs (per path) is 64
1882         */
1883        int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1884        return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1885}
1886
1887void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1888                                struct bnx2x_queue_sp_obj **q_obj)
1889{
1890        struct bnx2x_virtf *vf;
1891
1892        if (!IS_SRIOV(bp))
1893                return;
1894
1895        vf = bnx2x_vf_by_cid(bp, vf_cid);
1896
1897        if (vf) {
1898                /* extract queue index from vf_cid - relies on the following:
1899                 * 1. vfid on cid reflects the true abs_vfid
1900                 * 2. The max number of VFs (per path) is 64
1901                 */
1902                int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1903                *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1904        } else {
1905                BNX2X_ERR("No vf matching cid %d\n", vf_cid);
1906        }
1907}
1908
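/* Append a per-queue statistics query entry for every active queue of every
 * enabled VF on top of the PF's own queries; firmware then DMAs the results
 * into each VF's stats buffer (descriptive summary of the loop below).
 */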
1909void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1910{
1911        int i;
1912        int first_queue_query_index, num_queues_req;
1913        dma_addr_t cur_data_offset;
1914        struct stats_query_entry *cur_query_entry;
1915        u8 stats_count = 0;
1916        bool is_fcoe = false;
1917
1918        if (!IS_SRIOV(bp))
1919                return;
1920
1921        if (!NO_FCOE(bp))
1922                is_fcoe = true;
1923
1924        /* fcoe adds one global request and one queue request */
1925        num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
1926        first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
1927                (is_fcoe ? 0 : 1);
1928
1929        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1930               "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
1931               BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
1932               first_queue_query_index + num_queues_req);
1933
1934        cur_data_offset = bp->fw_stats_data_mapping +
1935                offsetof(struct bnx2x_fw_stats_data, queue_stats) +
1936                num_queues_req * sizeof(struct per_queue_stats);
1937
1938        cur_query_entry = &bp->fw_stats_req->
1939                query[first_queue_query_index + num_queues_req];
1940
1941        for_each_vf(bp, i) {
1942                int j;
1943                struct bnx2x_virtf *vf = BP_VF(bp, i);
1944
1945                if (vf->state != VF_ENABLED) {
1946                        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1947                               "vf %d not enabled so no stats for it\n",
1948                               vf->abs_vfid);
1949                        continue;
1950                }
1951
1952                DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
1953                for_each_vfq(vf, j) {
1954                        struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
1955
1956                        dma_addr_t q_stats_addr =
1957                                vf->fw_stat_map + j * vf->stats_stride;
1958
1959                        /* collect stats for active queues only */
1960                        if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
1961                            BNX2X_Q_LOGICAL_STATE_STOPPED)
1962                                continue;
1963
1964                        /* create stats query entry for this queue */
1965                        cur_query_entry->kind = STATS_TYPE_QUEUE;
1966                        cur_query_entry->index = vfq_stat_id(vf, rxq);
1967                        cur_query_entry->funcID =
1968                                cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
1969                        cur_query_entry->address.hi =
1970                                cpu_to_le32(U64_HI(q_stats_addr));
1971                        cur_query_entry->address.lo =
1972                                cpu_to_le32(U64_LO(q_stats_addr));
1973                        DP(BNX2X_MSG_IOV,
1974                           "added address %x %x for vf %d queue %d client %d\n",
1975                           cur_query_entry->address.hi,
1976                           cur_query_entry->address.lo, cur_query_entry->funcID,
1977                           j, cur_query_entry->index);
1978                        cur_query_entry++;
1979                        cur_data_offset += sizeof(struct per_queue_stats);
1980                        stats_count++;
1981
1982                        /* all stats are coalesced to the leading queue */
1983                        if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
1984                                break;
1985                }
1986        }
1987        bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1988}
1989
1990/* VF API helpers */
1991static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
1992                                u8 enable)
1993{
1994        u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
1995        u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
1996
1997        REG_WR(bp, reg, val);
1998}
1999
2000static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2001{
2002        int i;
2003
2004        for_each_vfq(vf, i)
2005                bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2006                                    vfq_qzone_id(vf, vfq_get(vf, i)), false);
2007}
2008
2009static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2010{
2011        u32 val;
2012
2013        /* clear the VF configuration - pretend */
2014        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2015        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2016        val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2017                 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2018        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2019        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2020}
2021
2022u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2023{
2024        return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2025                     BNX2X_VF_MAX_QUEUES);
2026}
2027
2028static
2029int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2030                            struct vf_pf_resc_request *req_resc)
2031{
2032        u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2033        u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2034
2035        /* Save a vlan filter for the Hypervisor */
2036        return ((req_resc->num_rxqs <= rxq_cnt) &&
2037                (req_resc->num_txqs <= txq_cnt) &&
2038                (req_resc->num_sbs <= vf_sb_count(vf))   &&
2039                (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2040                (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
2041}
2042
2043/* CORE VF API */
2044int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2045                     struct vf_pf_resc_request *resc)
2046{
2047        int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2048                BNX2X_CIDS_PER_VF;
2049
2050        union cdu_context *base_cxt = (union cdu_context *)
2051                BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2052                (base_vf_cid & (ILT_PAGE_CIDS-1));
2053        int i;
2054
2055        /* if state is 'acquired' the VF was not released or FLR'd, in
2056         * which case the returned resources match the already acquired
2057         * resources. Verify that the requested numbers do not exceed
2058         * the already acquired numbers.
2059         */
2060        if (vf->state == VF_ACQUIRED) {
2061                DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2062                   vf->abs_vfid);
2063
2064                if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2065                        BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2066                                  vf->abs_vfid);
2067                        return -EINVAL;
2068                }
2069                return 0;
2070        }
2071
2072        /* Otherwise vf state must be 'free' or 'reset' */
2073        if (vf->state != VF_FREE && vf->state != VF_RESET) {
2074                BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2075                          vf->abs_vfid, vf->state);
2076                return -EINVAL;
2077        }
2078
2079        /* static allocation:
2080         * the global maximum numbers are fixed per VF. Fail the request if
2081         * the requested numbers exceed these globals
2082         */
2083        if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2084                DP(BNX2X_MSG_IOV,
2085                   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2086                /* set the max resource in the vf */
2087                return -ENOMEM;
2088        }
2089
2090        /* Set resources counters - 0 request means max available */
2091        vf_sb_count(vf) = resc->num_sbs;
2092        vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2093        vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2094        if (resc->num_mac_filters)
2095                vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2096        /* Add an additional vlan filter credit for the hypervisor */
2097        bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
2098
2099        DP(BNX2X_MSG_IOV,
2100           "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2101           vf_sb_count(vf), vf_rxq_count(vf),
2102           vf_txq_count(vf), vf_mac_rules_cnt(vf),
2103           vf_vlan_rules_visible_cnt(vf));
2104
2105        /* Initialize the queues */
2106        if (!vf->vfqs) {
2107                DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2108                return -EINVAL;
2109        }
2110
2111        for_each_vfq(vf, i) {
2112                struct bnx2x_vf_queue *q = vfq_get(vf, i);
2113
2114                if (!q) {
2115                        BNX2X_ERR("q number %d was not allocated\n", i);
2116                        return -EINVAL;
2117                }
2118
2119                q->index = i;
2120                q->cxt = &((base_cxt + i)->eth);
2121                q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2122
2123                DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2124                   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2125
2126                /* init SP objects */
2127                bnx2x_vfq_init(bp, vf, q);
2128        }
2129        vf->state = VF_ACQUIRED;
2130        return 0;
2131}
2132
2133int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2134{
2135        struct bnx2x_func_init_params func_init = {0};
2136        u16 flags = 0;
2137        int i;
2138
2139        /* the sb resources are initialized at this point, do the
2140         * FW/HW initializations
2141         */
2142        for_each_vf_sb(vf, i)
2143                bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2144                              vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2145
2146        /* Sanity checks */
2147        if (vf->state != VF_ACQUIRED) {
2148                DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2149                   vf->abs_vfid, vf->state);
2150                return -EINVAL;
2151        }
2152
2153        /* let FLR complete ... */
2154        msleep(100);
2155
2156        /* FLR cleanup epilogue */
2157        if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2158                return -EBUSY;
2159
2160        /* reset IGU VF statistics: MSIX */
2161        REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2162
2163        /* vf init */
2164        if (vf->cfg_flags & VF_CFG_STATS)
2165                flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2166
2167        if (vf->cfg_flags & VF_CFG_TPA)
2168                flags |= FUNC_FLG_TPA;
2169
2170        if (is_vf_multi(vf))
2171                flags |= FUNC_FLG_RSS;
2172
2173        /* function setup */
2174        func_init.func_flgs = flags;
2175        func_init.pf_id = BP_FUNC(bp);
2176        func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2177        func_init.fw_stat_map = vf->fw_stat_map;
2178        func_init.spq_map = vf->spq_map;
2179        func_init.spq_prod = 0;
2180        bnx2x_func_init(bp, &func_init);
2181
2182        /* Enable the vf */
2183        bnx2x_vf_enable_access(bp, vf->abs_vfid);
2184        bnx2x_vf_enable_traffic(bp, vf);
2185
2186        /* queue protection table */
2187        for_each_vfq(vf, i)
2188                bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2189                                    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2190
2191        vf->state = VF_ENABLED;
2192
2193        /* update vf bulletin board */
2194        bnx2x_post_vf_bulletin(bp, vf->index);
2195
2196        return 0;
2197}
2198
2199struct set_vf_state_cookie {
2200        struct bnx2x_virtf *vf;
2201        u8 state;
2202};
2203
2204static void bnx2x_set_vf_state(void *cookie)
2205{
2206        struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2207
2208        p->vf->state = p->state;
2209}
2210
2211int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2212{
2213        int rc = 0, i;
2214
2215        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2216
2217        /* Close all queues */
2218        for (i = 0; i < vf_rxq_count(vf); i++) {
2219                rc = bnx2x_vf_queue_teardown(bp, vf, i);
2220                if (rc)
2221                        goto op_err;
2222        }
2223
2224        /* disable the interrupts */
2225        DP(BNX2X_MSG_IOV, "disabling igu\n");
2226        bnx2x_vf_igu_disable(bp, vf);
2227
2228        /* disable the VF */
2229        DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2230        bnx2x_vf_clr_qtbl(bp, vf);
2231
2232        /* need to make sure there are no outstanding stats ramrods which may
2233         * cause the device to access the VF's stats buffer which it will free
2234         * as soon as we return from the close flow.
2235         */
2236        {
2237                struct set_vf_state_cookie cookie;
2238
2239                cookie.vf = vf;
2240                cookie.state = VF_ACQUIRED;
2241                rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2242                if (rc)
2243                        goto op_err;
2244        }
2245
2246        DP(BNX2X_MSG_IOV, "set state to acquired\n");
2247
2248        return 0;
2249op_err:
2250        BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
2251        return rc;
2252}
2253
2254/* VF release can be called either: 1. The VF was acquired but
2255 * not enabled 2. the vf was enabled or in the process of being
2256 * enabled
2257 */
2258int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
2259{
2260        int rc;
2261
2262        DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2263           vf->state == VF_FREE ? "Free" :
2264           vf->state == VF_ACQUIRED ? "Acquired" :
2265           vf->state == VF_ENABLED ? "Enabled" :
2266           vf->state == VF_RESET ? "Reset" :
2267           "Unknown");
2268
2269        switch (vf->state) {
2270        case VF_ENABLED:
2271                rc = bnx2x_vf_close(bp, vf);
2272                if (rc)
2273                        goto op_err;
2274                /* Fallthrough to release resources */
2275        case VF_ACQUIRED:
2276                DP(BNX2X_MSG_IOV, "about to free resources\n");
2277                bnx2x_vf_free_resc(bp, vf);
2278                break;
2279
2280        case VF_FREE:
2281        case VF_RESET:
2282        default:
2283                break;
2284        }
2285        return 0;
2286op_err:
2287        BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
2288        return rc;
2289}
2290
2291int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2292                        struct bnx2x_config_rss_params *rss)
2293{
2294        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2295        set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
2296        return bnx2x_config_rss(bp, rss);
2297}
2298
2299int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2300                        struct vfpf_tpa_tlv *tlv,
2301                        struct bnx2x_queue_update_tpa_params *params)
2302{
2303        aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
2304        struct bnx2x_queue_state_params qstate;
2305        int qid, rc = 0;
2306
2307        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2308
2309        /* Set ramrod params */
2310        memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
2311        memcpy(&qstate.params.update_tpa, params,
2312               sizeof(struct bnx2x_queue_update_tpa_params));
2313        qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
2314        set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
2315
2316        for (qid = 0; qid < vf_rxq_count(vf); qid++) {
2317                qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
2318                qstate.params.update_tpa.sge_map = sge_addr[qid];
2319                DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
2320                   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
2321                   U64_LO(sge_addr[qid]));
2322                rc = bnx2x_queue_state_change(bp, &qstate);
2323                if (rc) {
2324                        BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
2325                                  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
2326                                  vf->abs_vfid, qid);
2327                        return rc;
2328                }
2329        }
2330
2331        return rc;
2332}
2333
2334/* VF release ~ VF close + VF release-resources
2335 * Release is the ultimate SW shutdown and is called whenever an
2336 * irrecoverable error is encountered.
2337 */
2338int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2339{
2340        int rc;
2341
2342        DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
2343        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2344
2345        rc = bnx2x_vf_free(bp, vf);
2346        if (rc)
2347                WARN(rc,
2348                     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
2349                     vf->abs_vfid, rc);
2350        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2351        return rc;
2352}
2353
2354void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2355                              enum channel_tlvs tlv)
2356{
2357        /* we don't lock the channel for unsupported tlvs */
2358        if (!bnx2x_tlv_supported(tlv)) {
2359                BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
2360                return;
2361        }
2362
2363        /* lock the channel */
2364        mutex_lock(&vf->op_mutex);
2365
2366        /* record the locking op */
2367        vf->op_current = tlv;
2368
2369        /* log the lock */
2370        DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2371           vf->abs_vfid, tlv);
2372}
2373
2374void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2375                                enum channel_tlvs expected_tlv)
2376{
2377        enum channel_tlvs current_tlv;
2378
2379        if (!vf) {
2380                BNX2X_ERR("VF was %p\n", vf);
2381                return;
2382        }
2383
2384        current_tlv = vf->op_current;
2385
2386        /* we don't unlock the channel for unsupported tlvs */
2387        if (!bnx2x_tlv_supported(expected_tlv))
2388                return;
2389
2390        WARN(expected_tlv != vf->op_current,
2391             "lock mismatch: expected %d found %d", expected_tlv,
2392             vf->op_current);
2393
2394        /* record the locking op */
2395        vf->op_current = CHANNEL_TLV_NONE;
2396
2397        /* unlock the channel */
2398        mutex_unlock(&vf->op_mutex);
2399
2400        /* log the unlock */
2401        DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2402           vf->abs_vfid, current_tlv);
2403}
2404
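/* Toggle Tx switching on all of the PF's ETH queues via a queue UPDATE
 * ramrod per queue, so PF-originated traffic can be switched internally
 * towards local VFs (descriptive summary of the function below).
 */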
2405static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
2406{
2407        struct bnx2x_queue_state_params q_params;
2408        u32 prev_flags;
2409        int i, rc;
2410
2411        /* Verify changes are needed and record current Tx switching state */
2412        prev_flags = bp->flags;
2413        if (enable)
2414                bp->flags |= TX_SWITCHING;
2415        else
2416                bp->flags &= ~TX_SWITCHING;
2417        if (prev_flags == bp->flags)
2418                return 0;
2419
2420        /* Verify state enables the sending of queue ramrods */
2421        if ((bp->state != BNX2X_STATE_OPEN) ||
2422            (bnx2x_get_q_logical_state(bp,
2423                                      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
2424             BNX2X_Q_LOGICAL_STATE_ACTIVE))
2425                return 0;
2426
2427        /* send q. update ramrod to configure Tx switching */
2428        memset(&q_params, 0, sizeof(q_params));
2429        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2430        q_params.cmd = BNX2X_Q_CMD_UPDATE;
2431        __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
2432                  &q_params.params.update.update_flags);
2433        if (enable)
2434                __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2435                          &q_params.params.update.update_flags);
2436        else
2437                __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2438                            &q_params.params.update.update_flags);
2439
2440        /* send the ramrod on all the queues of the PF */
2441        for_each_eth_queue(bp, i) {
2442                struct bnx2x_fastpath *fp = &bp->fp[i];
2443
2444                /* Set the appropriate Queue object */
2445                q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
2446
2447                /* Update the Queue state */
2448                rc = bnx2x_queue_state_change(bp, &q_params);
2449                if (rc) {
2450                        BNX2X_ERR("Failed to configure Tx switching\n");
2451                        return rc;
2452                }
2453        }
2454
2455        DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
2456        return 0;
2457}
2458
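/* PCI sriov_configure hook; typically invoked when the admin writes a VF
 * count to sysfs, e.g. "echo 2 > /sys/bus/pci/devices/<BDF>/sriov_numvfs"
 * (illustrative path; <BDF> is a placeholder).
 */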
2459int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
2460{
2461        struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
2462
2463        if (!IS_SRIOV(bp)) {
2464                BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
2465                return -EINVAL;
2466        }
2467
2468        DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
2469           num_vfs_param, BNX2X_NR_VIRTFN(bp));
2470
2471        /* HW channel is only operational when PF is up */
2472        if (bp->state != BNX2X_STATE_OPEN) {
2473                BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
2474                return -EINVAL;
2475        }
2476
2477        /* we are always bound by the total_vfs in the configuration space */
2478        if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
2479                BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
2480                          num_vfs_param, BNX2X_NR_VIRTFN(bp));
2481                num_vfs_param = BNX2X_NR_VIRTFN(bp);
2482        }
2483
2484        bp->requested_nr_virtfn = num_vfs_param;
2485        if (num_vfs_param == 0) {
2486                bnx2x_set_pf_tx_switching(bp, false);
2487                bnx2x_disable_sriov(bp);
2488                return 0;
2489        } else {
2490                return bnx2x_enable_sriov(bp);
2491        }
2492}
2493
2494#define IGU_ENTRY_SIZE 4
2495
2496int bnx2x_enable_sriov(struct bnx2x *bp)
2497{
2498        int rc = 0, req_vfs = bp->requested_nr_virtfn;
2499        int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
2500        u32 igu_entry, address;
2501        u16 num_vf_queues;
2502
2503        if (req_vfs == 0)
2504                return 0;
2505
2506        first_vf = bp->vfdb->sriov.first_vf_in_pf;
2507
2508        /* statically distribute vf sb pool between VFs */
2509        num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
2510                              BP_VFDB(bp)->vf_sbs_pool / req_vfs);
2511
2512        /* zero previous values learned from igu cam */
2513        for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
2514                struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2515
2516                vf->sb_count = 0;
2517                vf_sb_count(BP_VF(bp, vf_idx)) = 0;
2518        }
2519        bp->vfdb->vf_sbs_pool = 0;
2520
2521        /* prepare IGU cam */
2522        sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
2523        address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
2524        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2525                for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
2526                        igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
2527                                vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
2528                                IGU_REG_MAPPING_MEMORY_VALID;
2529                        DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
2530                           sb_idx, vf_idx);
2531                        REG_WR(bp, address, igu_entry);
2532                        sb_idx++;
2533                        address += IGU_ENTRY_SIZE;
2534                }
2535        }
2536
2537        /* Reinitialize vf database according to igu cam */
2538        bnx2x_get_vf_igu_cam_info(bp);
2539
2540        DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
2541           BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
2542
2543        qcount = 0;
2544        for_each_vf(bp, vf_idx) {
2545                struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2546
2547                /* set local queue arrays */
2548                vf->vfqs = &bp->vfdb->vfqs[qcount];
2549                qcount += vf_sb_count(vf);
2550                bnx2x_iov_static_resc(bp, vf);
2551        }
2552
2553        /* prepare msix vectors in VF configuration space - the value in the
2554         * PCI configuration space should be the index of the last entry,
2555         * namely one less than the actual size of the table
2556         */
2557        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2558                bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
2559                REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
2560                       num_vf_queues - 1);
2561                DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
2562                   vf_idx, num_vf_queues - 1);
2563        }
2564        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2565
2566        /* enable sriov. This will probe all the VFs, and consequentially cause
2567         * the "acquire" messages to appear on the VF PF channel.
2568         */
2569        DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
2570        bnx2x_disable_sriov(bp);
2571
2572        rc = bnx2x_set_pf_tx_switching(bp, true);
2573        if (rc)
2574                return rc;
2575
2576        rc = pci_enable_sriov(bp->pdev, req_vfs);
2577        if (rc) {
2578                BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
2579                return rc;
2580        }
2581        DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
2582        return req_vfs;
2583}
2584
2585void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
2586{
2587        int vfidx;
2588        struct pf_vf_bulletin_content *bulletin;
2589
2590        DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
2591        for_each_vf(bp, vfidx) {
2592                bulletin = BP_VF_BULLETIN(bp, vfidx);
2593                if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
2594                        bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
2595        }
2596}
2597
2598void bnx2x_disable_sriov(struct bnx2x *bp)
2599{
2600        if (pci_vfs_assigned(bp->pdev)) {
2601                DP(BNX2X_MSG_IOV,
2602                   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2603                return;
2604        }
2605
2606        pci_disable_sriov(bp->pdev);
2607}
2608
2609static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
2610                            struct bnx2x_virtf **vf,
2611                            struct pf_vf_bulletin_content **bulletin,
2612                            bool test_queue)
2613{
2614        if (bp->state != BNX2X_STATE_OPEN) {
2615                BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
2616                return -EINVAL;
2617        }
2618
2619        if (!IS_SRIOV(bp)) {
2620                BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
2621                return -EINVAL;
2622        }
2623
2624        if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
2625                BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
2626                          vfidx, BNX2X_NR_VIRTFN(bp));
2627                return -EINVAL;
2628        }
2629
2630        /* init members */
2631        *vf = BP_VF(bp, vfidx);
2632        *bulletin = BP_VF_BULLETIN(bp, vfidx);
2633
2634        if (!*vf) {
2635                BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
2636                return -EINVAL;
2637        }
2638
2639        if (test_queue && !(*vf)->vfqs) {
2640                BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
2641                          vfidx);
2642                return -EINVAL;
2643        }
2644
2645        if (!*bulletin) {
2646                BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
2647                          vfidx);
2648                return -EINVAL;
2649        }
2650
2651        return 0;
2652}
2653
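/* Backs the .ndo_get_vf_config callback, i.e. the per-VF information shown
 * by "ip link show <pf-ifname>" (illustrative command).
 */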
2654int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2655                        struct ifla_vf_info *ivi)
2656{
2657        struct bnx2x *bp = netdev_priv(dev);
2658        struct bnx2x_virtf *vf = NULL;
2659        struct pf_vf_bulletin_content *bulletin = NULL;
2660        struct bnx2x_vlan_mac_obj *mac_obj;
2661        struct bnx2x_vlan_mac_obj *vlan_obj;
2662        int rc;
2663
2664        /* sanity and init */
2665        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2666        if (rc)
2667                return rc;
2668
2669        mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2670        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2671        if (!mac_obj || !vlan_obj) {
2672                BNX2X_ERR("VF partially initialized\n");
2673                return -EINVAL;
2674        }
2675
2676        ivi->vf = vfidx;
2677        ivi->qos = 0;
2678        ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2679        ivi->min_tx_rate = 0;
2680        ivi->spoofchk = 1; /*always enabled */
2681        if (vf->state == VF_ENABLED) {
2682                /* mac and vlan are in vlan_mac objects */
2683                if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
2684                        mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
2685                                                0, ETH_ALEN);
2686                        vlan_obj->get_n_elements(bp, vlan_obj, 1,
2687                                                 (u8 *)&ivi->vlan, 0,
2688                                                 VLAN_HLEN);
2689                }
2690        } else {
2691                mutex_lock(&bp->vfdb->bulletin_mutex);
2692                /* mac */
2693                if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
2694                        /* mac configured by ndo so it's in the bulletin board */
2695                        memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
2696                else
2697                        /* function has not been loaded yet. Show mac as 0s */
2698                        eth_zero_addr(ivi->mac);
2699
2700                /* vlan */
2701                if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2702                        /* vlan configured by ndo so it's in the bulletin board */
2703                        memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
2704                else
2705                        /* function has not been loaded yet. Show vlans as 0s */
2706                        memset(&ivi->vlan, 0, VLAN_HLEN);
2707
2708                mutex_unlock(&bp->vfdb->bulletin_mutex);
2709        }
2710
2711        return 0;
2712}
2713
2714/* New mac for VF. Consider these cases:
2715 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
2716 *    supply at acquire.
2717 * 2. VF has already been acquired but has not yet initialized - store in local
2718 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
2719 *    will configure this mac when it is ready.
2720 * 3. VF has already initialized but has not yet setup a queue - post the new
2721 *    mac on VF's bulletin board right now. VF will configure this mac when it
2722 *    is ready.
2723 * 4. VF has already set a queue - delete any macs already configured for this
2724 *    queue and manually config the new mac.
2725 * In any event, once this function has been called refuse any attempts by the
2726 * VF to configure any mac for itself except for this mac. In case of a race
2727 * where the VF fails to see the new post on its bulletin board before sending a
2728 * mac configuration request, the PF will simply fail the request and VF can try
2729 * again after consulting its bulletin board.
2730 */
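/* Typically reached via the .ndo_set_vf_mac callback, e.g.
 * "ip link set <pf-ifname> vf 0 mac 02:00:00:00:00:01" (illustrative values).
 */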
2731int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
2732{
2733        struct bnx2x *bp = netdev_priv(dev);
2734        int rc, q_logical_state;
2735        struct bnx2x_virtf *vf = NULL;
2736        struct pf_vf_bulletin_content *bulletin = NULL;
2737
2738        if (!is_valid_ether_addr(mac)) {
2739                BNX2X_ERR("mac address invalid\n");
2740                return -EINVAL;
2741        }
2742
2743        /* sanity and init */
2744        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2745        if (rc)
2746                return rc;
2747
2748        mutex_lock(&bp->vfdb->bulletin_mutex);
2749
2750        /* update PF's copy of the VF's bulletin. Will no longer accept mac
2751         * configuration requests from vf unless match this mac
2752         */
2753        bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
2754        memcpy(bulletin->mac, mac, ETH_ALEN);
2755
2756        /* Post update on VF's bulletin board */
2757        rc = bnx2x_post_vf_bulletin(bp, vfidx);
2758
2759        /* release lock before checking return code */
2760        mutex_unlock(&bp->vfdb->bulletin_mutex);
2761
2762        if (rc) {
2763                BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2764                return rc;
2765        }
2766
2767        q_logical_state =
2768                bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
2769        if (vf->state == VF_ENABLED &&
2770            q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
2771                /* configure the mac in device on this vf's queue */
2772                unsigned long ramrod_flags = 0;
2773                struct bnx2x_vlan_mac_obj *mac_obj;
2774
2775                /* User should be able to see failure reason in system logs */
2776                if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2777                        return -EINVAL;
2778
2779                /* must lock vfpf channel to protect against vf flows */
2780                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2781
2782                /* remove existing eth macs */
2783                mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2784                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
2785                if (rc) {
2786                        BNX2X_ERR("failed to delete eth macs\n");
2787                        rc = -EINVAL;
2788                        goto out;
2789                }
2790
2791                /* remove existing uc list macs */
2792                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
2793                if (rc) {
2794                        BNX2X_ERR("failed to delete uc_list macs\n");
2795                        rc = -EINVAL;
2796                        goto out;
2797                }
2798
2799                /* configure the new mac to device */
2800                __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2801                bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
2802                                  BNX2X_ETH_MAC, &ramrod_flags);
2803
2804out:
2805                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2806        }
2807
2808        return rc;
2809}
2810
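/* Backs the .ndo_set_vf_vlan callback; typically reached via
 * "ip link set <pf-ifname> vf <n> vlan <vid>" (illustrative command).
 */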
2811int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
2812{
2813        struct bnx2x_queue_state_params q_params = {NULL};
2814        struct bnx2x_vlan_mac_ramrod_params ramrod_param;
2815        struct bnx2x_queue_update_params *update_params;
2816        struct pf_vf_bulletin_content *bulletin = NULL;
2817        struct bnx2x_rx_mode_ramrod_params rx_ramrod;
2818        struct bnx2x *bp = netdev_priv(dev);
2819        struct bnx2x_vlan_mac_obj *vlan_obj;
2820        unsigned long vlan_mac_flags = 0;
2821        unsigned long ramrod_flags = 0;
2822        struct bnx2x_virtf *vf = NULL;
2823        unsigned long accept_flags;
2824        int rc;
2825
2826        if (vlan > 4095) {
2827                BNX2X_ERR("illegal vlan value %d\n", vlan);
2828                return -EINVAL;
2829        }
2830
2831        DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
2832           vfidx, vlan, 0);
2833
2834        /* sanity and init */
2835        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2836        if (rc)
2837                return rc;
2838
2839        /* update PF's copy of the VF's bulletin. There is no point in posting
2840         * the vlan to the VF since it has nothing to do with it; but it is
2841         * useful to store it here in case the VF is not up yet, so we can
2842         * configure the vlan later when it comes up. Treat vlan id 0 as a
2843         * request to remove the host tag.
2844         */
2845        mutex_lock(&bp->vfdb->bulletin_mutex);
2846
2847        if (vlan > 0)
2848                bulletin->valid_bitmap |= 1 << VLAN_VALID;
2849        else
2850                bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
2851        bulletin->vlan = vlan;
2852
2853        mutex_unlock(&bp->vfdb->bulletin_mutex);
2854
2855        /* is vf initialized and queue set up? */
2856        if (vf->state != VF_ENABLED ||
2857            bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2858            BNX2X_Q_LOGICAL_STATE_ACTIVE)
2859                return rc;
2860
2861        /* User should be able to see error in system logs */
2862        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2863                return -EINVAL;
2864
2865        /* must lock vfpf channel to protect against vf flows */
2866        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2867
2868        /* remove existing vlans */
2869        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2870        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2871        rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
2872                                  &ramrod_flags);
2873        if (rc) {
2874                BNX2X_ERR("failed to delete vlans\n");
2875                rc = -EINVAL;
2876                goto out;
2877        }
2878
2879        /* need to remove/add the VF's accept_any_vlan bit */
2880        accept_flags = bnx2x_leading_vfq(vf, accept_flags);
2881        if (vlan)
2882                clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2883        else
2884                set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2885
2886        bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
2887                              accept_flags);
2888        bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
2889        bnx2x_config_rx_mode(bp, &rx_ramrod);
2890
2891        /* configure the new vlan to device */
2892        memset(&ramrod_param, 0, sizeof(ramrod_param));
2893        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2894        ramrod_param.vlan_mac_obj = vlan_obj;
2895        ramrod_param.ramrod_flags = ramrod_flags;
2896        set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
2897                &ramrod_param.user_req.vlan_mac_flags);
2898        ramrod_param.user_req.u.vlan.vlan = vlan;
2899        ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
2900        rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
2901        if (rc) {
2902                BNX2X_ERR("failed to configure vlan\n");
2903                rc = -EINVAL;
2904                goto out;
2905        }
2906
2907        /* send queue update ramrod to configure default vlan and silent
2908         * vlan removal
2909         */
2910        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2911        q_params.cmd = BNX2X_Q_CMD_UPDATE;
2912        q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
2913        update_params = &q_params.params.update;
2914        __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
2915                  &update_params->update_flags);
2916        __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
2917                  &update_params->update_flags);
2918        if (vlan == 0) {
2919                /* if vlan is 0 then we want to leave the VF traffic
2920                 * untagged, and leave the incoming traffic untouched
2921                 * (i.e. do not remove any vlan tags).
2922                 */
2923                __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2924                            &update_params->update_flags);
2925                __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2926                            &update_params->update_flags);
2927        } else {
2928                /* configure default vlan to vf queue and set silent
2929                 * vlan removal (the vf remains unaware of this vlan).
2930                 */
2931                __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2932                          &update_params->update_flags);
2933                __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2934                          &update_params->update_flags);
2935                update_params->def_vlan = vlan;
2936                update_params->silent_removal_value =
2937                        vlan & VLAN_VID_MASK;
2938                update_params->silent_removal_mask = VLAN_VID_MASK;
2939        }
2940
2941        /* Update the Queue state */
2942        rc = bnx2x_queue_state_change(bp, &q_params);
2943        if (rc) {
2944                BNX2X_ERR("Failed to configure default VLAN\n");
2945                goto out;
2946        }
2947
2948
2949        /* clear the flag indicating that this VF needs its vlan
2950         * (it will only be set if the HV configured the vlan before the VF
2951         * was up and we were called because the VF came up later).
2952         */
2953out:
2954        vf->cfg_flags &= ~VF_CFG_VLAN;
2955        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2956
2957        return rc;
2958}
2959
2960/* crc is the first field in the bulletin board. Compute the crc over the
2961 * entire bulletin board excluding the crc field itself. Use the length field,
2962 * since the bulletin board was posted by a PF whose version may differ from
2963 * that of the VF which samples it. Therefore, the length is computed by the
2964 * PF and then used blindly by the VF.
2965 */
2966u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
2967{
2968        return crc32(BULLETIN_CRC_SEED,
2969                 ((u8 *)bulletin) + sizeof(bulletin->crc),
2970                 bulletin->length - sizeof(bulletin->crc));
2971}
2972
2973/* Check for new posts on the bulletin board */
2974enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
2975{
2976        struct pf_vf_bulletin_content *bulletin;
2977        int attempts;
2978
2979        /* sampling the structure in mid post may result in corrupted data;
2980         * validate the crc to ensure coherency.
2981         */
2982        for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
2983                u32 crc;
2984
2985                /* sample the bulletin board */
2986                memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
2987                       sizeof(union pf_vf_bulletin));
2988
2989                crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
2990
2991                if (bp->shadow_bulletin.content.crc == crc)
2992                        break;
2993
2994                BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
2995                          bp->shadow_bulletin.content.crc, crc);
2996        }
2997
2998        if (attempts >= BULLETIN_ATTEMPTS) {
2999                BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3000                          attempts);
3001                return PFVF_BULLETIN_CRC_ERR;
3002        }
3003        bulletin = &bp->shadow_bulletin.content;
3004
3005        /* bulletin board hasn't changed since last sample */
3006        if (bp->old_bulletin.version == bulletin->version)
3007                return PFVF_BULLETIN_UNCHANGED;
3008
3009        /* the mac address in bulletin board is valid and is new */
3010        if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
3011            !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
3012                /* update new mac to net device */
3013                memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
3014        }
3015
3016        if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
3017                DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
3018                   bulletin->link_speed, bulletin->link_flags);
3019
3020                bp->vf_link_vars.line_speed = bulletin->link_speed;
3021                bp->vf_link_vars.link_report_flags = 0;
3022                /* Link is down */
3023                if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
3024                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
3025                                  &bp->vf_link_vars.link_report_flags);
3026                /* Full DUPLEX */
3027                if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
3028                        __set_bit(BNX2X_LINK_REPORT_FD,
3029                                  &bp->vf_link_vars.link_report_flags);
3030                /* Rx Flow Control is ON */
3031                if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
3032                        __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
3033                                  &bp->vf_link_vars.link_report_flags);
3034                /* Tx Flow Control is ON */
3035                if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
3036                        __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
3037                                  &bp->vf_link_vars.link_report_flags);
3038                __bnx2x_link_report(bp);
3039        }
3040
3041        /* copy new bulletin board to bp */
3042        memcpy(&bp->old_bulletin, bulletin,
3043               sizeof(struct pf_vf_bulletin_content));
3044
3045        return PFVF_BULLETIN_UPDATED;
3046}
3047
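    /* Called from the VF's periodic timer: sample the bulletin board and, if
     * the PF has declared the vf-pf channel down, schedule self-teardown.
     */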
3048void bnx2x_timer_sriov(struct bnx2x *bp)
3049{
3050        bnx2x_sample_bulletin(bp);
3051
3052        /* if the channel is down we need to self-destruct */
3053        if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
3054                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3055                                       BNX2X_MSG_IOV);
3056}
3057
3058void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3059{
3060        /* vf doorbells are embedded within the regview */
3061        return bp->regview + PXP_VF_ADDR_DB_START;
3062}
3063
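    /* VF side: release the DMA-coherent buffers that back the vf2pf mailbox
     * and the pf2vf bulletin board.
     */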
3064void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
3065{
3066        BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3067                       sizeof(struct bnx2x_vf_mbx_msg));
3068        BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3069                       sizeof(union pf_vf_bulletin));
3070}
3071
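    /* VF side: allocate DMA-coherent memory for the vf2pf mailbox (the vf-pf
     * channel request/response buffer) and for the local copy of the pf2vf
     * bulletin board, then finalize the (still empty) bulletin content.
     */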
3072int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3073{
3074        mutex_init(&bp->vf2pf_mutex);
3075
3076        /* allocate vf2pf mailbox for vf to pf channel */
3077        bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3078                                         sizeof(struct bnx2x_vf_mbx_msg));
3079        if (!bp->vf2pf_mbox)
3080                goto alloc_mem_err;
3081
3082        /* allocate pf 2 vf bulletin board */
3083        bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3084                                             sizeof(union pf_vf_bulletin));
3085        if (!bp->pf2vf_bulletin)
3086                goto alloc_mem_err;
3087
3088        bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
3089
3090        return 0;
3091
3092alloc_mem_err:
3093        bnx2x_vf_pci_dealloc(bp);
3094        return -ENOMEM;
3095}
3096
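    /* PF side: mark the vf-pf channel as down in every VF's bulletin and post
     * the update, so VFs learn they must stop using the channel and tear
     * themselves down (see bnx2x_timer_sriov on the VF side).
     */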
3097void bnx2x_iov_channel_down(struct bnx2x *bp)
3098{
3099        int vf_idx;
3100        struct pf_vf_bulletin_content *bulletin;
3101
3102        if (!IS_SRIOV(bp))
3103                return;
3104
3105        for_each_vf(bp, vf_idx) {
3106                /* locate this VF's bulletin board and update the channel down
3107                 * bit
3108                 */
3109                bulletin = BP_VF_BULLETIN(bp, vf_idx);
3110                bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3111
3112                /* update vf bulletin board */
3113                bnx2x_post_vf_bulletin(bp, vf_idx);
3114        }
3115}
3116
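    /* Deferred IOV work: handle VF FLR indications and incoming vf-pf channel
     * messages outside of interrupt context.
     */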
3117void bnx2x_iov_task(struct work_struct *work)
3118{
3119        struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
3120
3121        if (!netif_running(bp->dev))
3122                return;
3123
3124        if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
3125                               &bp->iov_task_state))
3126                bnx2x_vf_handle_flr_event(bp);
3127
3128        if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
3129                               &bp->iov_task_state))
3130                bnx2x_vf_mbx(bp);
3131}
3132
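    /* Set the requested IOV event flag and kick the IOV workqueue; the
     * barriers ensure the flag is visible before the work item runs.
     */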
3133void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
3134{
3135        smp_mb__before_atomic();
3136        set_bit(flag, &bp->iov_task_state);
3137        smp_mb__after_atomic();
3138        DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3139        queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
3140}
3141