linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
   1/* bnx2x_sriov.c: QLogic Everest network driver.
   2 *
   3 * Copyright 2009-2013 Broadcom Corporation
   4 * Copyright 2014 QLogic Corporation
   5 * All rights reserved
   6 *
   7 * Unless you and QLogic execute a separate written software license
   8 * agreement governing use of this software, this software is licensed to you
   9 * under the terms of the GNU General Public License version 2, available
  10 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  11 *
  12 * Notwithstanding the above, under no circumstances may you combine this
  13 * software in any way with any other QLogic software provided under a
  14 * license other than the GPL, without QLogic's express prior written
  15 * consent.
  16 *
  17 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  18 * Written by: Shmulik Ravid
  19 *             Ariel Elior <ariel.elior@qlogic.com>
  20 *
  21 */
  22#include "bnx2x.h"
  23#include "bnx2x_init.h"
  24#include "bnx2x_cmn.h"
  25#include "bnx2x_sp.h"
  26#include <linux/crc32.h>
  27#include <linux/if_vlan.h>
  28
  29static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
  30                            struct bnx2x_virtf **vf,
  31                            struct pf_vf_bulletin_content **bulletin,
  32                            bool test_queue);
  33
  34/* General service functions */
  35static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
  36                                         u16 pf_id)
  37{
  38        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
  39                pf_id);
  40        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
  41                pf_id);
  42        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
  43                pf_id);
  44        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
  45                pf_id);
  46}
  47
  48static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
  49                                        u8 enable)
  50{
  51        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
  52                enable);
  53        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
  54                enable);
  55        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
  56                enable);
  57        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
  58                enable);
  59}
  60
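     /* Map an absolute VF id to the PF-local VF index. If no VF matches,
      * the loop runs to completion and the returned index is out of range
      * (>= BNX2X_NR_VIRTFN(bp)); bnx2x_vf_by_abs_fid() below relies on this
      * to return NULL for unknown ids.
      */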
  61int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
  62{
  63        int idx;
  64
  65        for_each_vf(bp, idx)
  66                if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
  67                        break;
  68        return idx;
  69}
  70
  71static
  72struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
  73{
  74        u16 idx =  (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
  75        return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
  76}
  77
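     /* Acknowledge a VF status block on the VF's behalf. The PF issues the
      * ack through GRC access to the IGU command registers: the data word
      * (SB index, segment, update and interrupt-enable bits) is written
      * first, followed by the control word carrying the command address and
      * the VF's function id.
      */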
  78static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
  79                                u8 igu_sb_id, u8 segment, u16 index, u8 op,
  80                                u8 update)
  81{
  82        /* acking a VF sb through the PF - use the GRC */
  83        u32 ctl;
  84        u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
  85        u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
  86        u32 func_encode = vf->abs_vfid;
  87        u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
  88        struct igu_regular cmd_data = {0};
  89
  90        cmd_data.sb_id_and_flags =
  91                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
  92                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
  93                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
  94                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));
  95
  96        ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
  97              func_encode << IGU_CTRL_REG_FID_SHIFT             |
  98              IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
  99
 100        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 101           cmd_data.sb_id_and_flags, igu_addr_data);
 102        REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
 103        mmiowb();
 104        barrier();
 105
 106        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 107           ctl, igu_addr_ctl);
 108        REG_WR(bp, igu_addr_ctl, ctl);
 109        mmiowb();
 110        barrier();
 111}
 112
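     /* Classification ramrods may only be sent once the slowpath objects of
      * the VF's leading queue have been initialized; depending on the
      * caller, a missing initialization is reported either as an error or
      * as a debug message.
      */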
 113static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
 114                                       struct bnx2x_virtf *vf,
 115                                       bool print_err)
 116{
 117        if (!bnx2x_leading_vfq(vf, sp_initialized)) {
 118                if (print_err)
 119                        BNX2X_ERR("Slowpath objects not yet initialized!\n");
 120                else
 121                        DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
 122                return false;
 123        }
 124        return true;
 125}
 126
 127/* VFOP operations states */
 128void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 129                              struct bnx2x_queue_init_params *init_params,
 130                              struct bnx2x_queue_setup_params *setup_params,
 131                              u16 q_idx, u16 sb_idx)
 132{
 133        DP(BNX2X_MSG_IOV,
  134           "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
 135           vf->abs_vfid,
 136           q_idx,
 137           sb_idx,
 138           init_params->tx.sb_cq_index,
 139           init_params->tx.hc_rate,
 140           setup_params->flags,
 141           setup_params->txq_params.traffic_type);
 142}
 143
 144void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 145                            struct bnx2x_queue_init_params *init_params,
 146                            struct bnx2x_queue_setup_params *setup_params,
 147                            u16 q_idx, u16 sb_idx)
 148{
 149        struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
 150
 151        DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
 152           "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
 153           vf->abs_vfid,
 154           q_idx,
 155           sb_idx,
 156           init_params->rx.sb_cq_index,
 157           init_params->rx.hc_rate,
 158           setup_params->gen_params.mtu,
 159           rxq_params->buf_sz,
 160           rxq_params->sge_buf_sz,
 161           rxq_params->max_sges_pkt,
 162           rxq_params->tpa_agg_sz,
 163           setup_params->flags,
 164           rxq_params->drop_flags,
 165           rxq_params->cache_line_log);
 166}
 167
 168void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
 169                           struct bnx2x_virtf *vf,
 170                           struct bnx2x_vf_queue *q,
 171                           struct bnx2x_vf_queue_construct_params *p,
 172                           unsigned long q_type)
 173{
 174        struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
 175        struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
 176
 177        /* INIT */
 178
 179        /* Enable host coalescing in the transition to INIT state */
 180        if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
 181                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
 182
 183        if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
 184                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
 185
 186        /* FW SB ID */
 187        init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 188        init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 189
 190        /* context */
 191        init_p->cxts[0] = q->cxt;
 192
 193        /* SETUP */
 194
 195        /* Setup-op general parameters */
 196        setup_p->gen_params.spcl_id = vf->sp_cl_id;
 197        setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
 198        setup_p->gen_params.fp_hsi = vf->fp_hsi;
 199
 200        /* Setup-op flags:
 201         * collect statistics, zero statistics, local-switching, security,
 202         * OV for Flex10, RSS and MCAST for leading
 203         */
 204        if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
 205                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
 206
 207        /* for VFs, enable tx switching, bd coherency, and mac address
 208         * anti-spoofing
 209         */
 210        __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
 211        __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
 212        __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
 213
 214        /* Setup-op rx parameters */
 215        if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
 216                struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
 217
 218                rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
 219                rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 220                rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
 221
 222                if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
 223                        rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
 224        }
 225
 226        /* Setup-op tx parameters */
 227        if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
 228                setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
 229                setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 230        }
 231}
 232
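     /* Bring up a single VF queue: issue the INIT and SETUP ramrods prepared
      * in 'qctor' and, on success, enable interrupts on the queue's status
      * block. A queue that is already ACTIVE is left untouched.
      */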
 233static int bnx2x_vf_queue_create(struct bnx2x *bp,
 234                                 struct bnx2x_virtf *vf, int qid,
 235                                 struct bnx2x_vf_queue_construct_params *qctor)
 236{
 237        struct bnx2x_queue_state_params *q_params;
 238        int rc = 0;
 239
 240        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 241
 242        /* Prepare ramrod information */
 243        q_params = &qctor->qstate;
 244        q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 245        set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
 246
 247        if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
 248            BNX2X_Q_LOGICAL_STATE_ACTIVE) {
 249                DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
 250                goto out;
 251        }
 252
 253        /* Run Queue 'construction' ramrods */
 254        q_params->cmd = BNX2X_Q_CMD_INIT;
 255        rc = bnx2x_queue_state_change(bp, q_params);
 256        if (rc)
 257                goto out;
 258
 259        memcpy(&q_params->params.setup, &qctor->prep_qsetup,
 260               sizeof(struct bnx2x_queue_setup_params));
 261        q_params->cmd = BNX2X_Q_CMD_SETUP;
 262        rc = bnx2x_queue_state_change(bp, q_params);
 263        if (rc)
 264                goto out;
 265
 266        /* enable interrupts */
 267        bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
 268                            USTORM_ID, 0, IGU_INT_ENABLE, 0);
 269out:
 270        return rc;
 271}
 272
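     /* Tear down a single VF queue by running the HALT -> TERMINATE ->
      * CFC_DEL ramrod sequence, then clear the CDU context fields. A queue
      * that is already STOPPED skips straight to the context cleanup.
      */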
 273static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
 274                                  int qid)
 275{
 276        enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
 277                                       BNX2X_Q_CMD_TERMINATE,
 278                                       BNX2X_Q_CMD_CFC_DEL};
 279        struct bnx2x_queue_state_params q_params;
 280        int rc, i;
 281
 282        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 283
 284        /* Prepare ramrod information */
 285        memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
 286        q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 287        set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 288
 289        if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
 290            BNX2X_Q_LOGICAL_STATE_STOPPED) {
 291                DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
 292                goto out;
 293        }
 294
 295        /* Run Queue 'destruction' ramrods */
 296        for (i = 0; i < ARRAY_SIZE(cmds); i++) {
 297                q_params.cmd = cmds[i];
 298                rc = bnx2x_queue_state_change(bp, &q_params);
 299                if (rc) {
 300                        BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
 301                        return rc;
 302                }
 303        }
 304out:
 305        /* Clean Context */
 306        if (bnx2x_vfq(vf, qid, cxt)) {
 307                bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
 308                bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
 309        }
 310
 311        return 0;
 312}
 313
 314static void
 315bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
 316{
 317        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 318        if (vf) {
 319                /* the first igu entry belonging to VFs of this PF */
 320                if (!BP_VFDB(bp)->first_vf_igu_entry)
 321                        BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
 322
 323                /* the first igu entry belonging to this VF */
 324                if (!vf_sb_count(vf))
 325                        vf->igu_base_id = igu_sb_id;
 326
 327                ++vf_sb_count(vf);
 328                ++vf->sb_count;
 329        }
 330        BP_VFDB(bp)->vf_sbs_pool++;
 331}
 332
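     /* Recount the entries currently held by a vlan_mac object by walking
      * its list (taking the read lock when possible) and publish the result
      * through the supplied atomic counter.
      */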
 333static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
 334                                        struct bnx2x_vlan_mac_obj *obj,
 335                                        atomic_t *counter)
 336{
 337        struct list_head *pos;
 338        int read_lock;
 339        int cnt = 0;
 340
 341        read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
 342        if (read_lock)
  343                DP(BNX2X_MSG_SP, "Failed to take vlan mac read lock; continuing anyway\n");
 344
 345        list_for_each(pos, &obj->head)
 346                cnt++;
 347
 348        if (!read_lock)
 349                bnx2x_vlan_mac_h_read_unlock(bp, obj);
 350
 351        atomic_set(counter, cnt);
 352}
 353
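     /* Delete all configured filters of the given type (MAC, VLAN or
      * VLAN-MAC pair) on a VF queue via the object's delete_all() method.
      * With 'drv_only' set, RAMROD_DRV_CLR_ONLY is used so only the driver's
      * bookkeeping is cleared, as done on the FLR path below.
      */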
 354static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
 355                                   int qid, bool drv_only, int type)
 356{
 357        struct bnx2x_vlan_mac_ramrod_params ramrod;
 358        int rc;
 359
 360        DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
 361                          (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
 362                          (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 363
 364        /* Prepare ramrod params */
 365        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
 366        if (type == BNX2X_VF_FILTER_VLAN_MAC) {
 367                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 368                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
 369        } else if (type == BNX2X_VF_FILTER_MAC) {
 370                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 371                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 372        } else {
 373                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 374        }
 375        ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
 376
 377        set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 378        if (drv_only)
 379                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
 380        else
 381                set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 382
 383        /* Start deleting */
 384        rc = ramrod.vlan_mac_obj->delete_all(bp,
 385                                             ramrod.vlan_mac_obj,
 386                                             &ramrod.user_req.vlan_mac_flags,
 387                                             &ramrod.ramrod_flags);
 388        if (rc) {
 389                BNX2X_ERR("Failed to delete all %s\n",
 390                          (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
 391                          (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 392                return rc;
 393        }
 394
 395        return 0;
 396}
 397
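     /* Add or remove a single classification filter on a VF queue. The
      * ramrod is built according to the filter type; -EEXIST from the
      * configuration call is tolerated so that re-adding an existing entry
      * is not treated as a failure.
      */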
 398static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 399                                    struct bnx2x_virtf *vf, int qid,
 400                                    struct bnx2x_vf_mac_vlan_filter *filter,
 401                                    bool drv_only)
 402{
 403        struct bnx2x_vlan_mac_ramrod_params ramrod;
 404        int rc;
 405
 406        DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
 407           vf->abs_vfid, filter->add ? "Adding" : "Deleting",
 408           (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
 409           (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
 410
 411        /* Prepare ramrod params */
 412        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
 413        if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
 414                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
 415                ramrod.user_req.u.vlan.vlan = filter->vid;
 416                memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
 417                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 418        } else if (filter->type == BNX2X_VF_FILTER_VLAN) {
 419                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 420                ramrod.user_req.u.vlan.vlan = filter->vid;
 421        } else {
 422                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 423                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 424                memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
 425        }
 426        ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
 427                                            BNX2X_VLAN_MAC_DEL;
 428
 429        set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 430        if (drv_only)
 431                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
 432        else
 433                set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 434
 435        /* Add/Remove the filter */
 436        rc = bnx2x_config_vlan_mac(bp, &ramrod);
 437        if (rc && rc != -EEXIST) {
 438                BNX2X_ERR("Failed to %s %s\n",
 439                          filter->add ? "add" : "delete",
 440                          (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
 441                                "VLAN-MAC" :
 442                          (filter->type == BNX2X_VF_FILTER_MAC) ?
 443                                "MAC" : "VLAN");
 444                return rc;
 445        }
 446
 447        return 0;
 448}
 449
 450int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
 451                                  struct bnx2x_vf_mac_vlan_filters *filters,
 452                                  int qid, bool drv_only)
 453{
 454        int rc = 0, i;
 455
 456        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 457
 458        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
 459                return -EINVAL;
 460
 461        /* Prepare ramrod params */
 462        for (i = 0; i < filters->count; i++) {
 463                rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
 464                                              &filters->filters[i], drv_only);
 465                if (rc)
 466                        break;
 467        }
 468
 469        /* Rollback if needed */
 470        if (i != filters->count) {
 471                BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
 472                          i, filters->count + 1);
 473                while (--i >= 0) {
 474                        filters->filters[i].add = !filters->filters[i].add;
 475                        bnx2x_vf_mac_vlan_config(bp, vf, qid,
 476                                                 &filters->filters[i],
 477                                                 drv_only);
 478                }
 479        }
 480
 481        /* It's our responsibility to free the filters */
 482        kfree(filters);
 483
 484        return rc;
 485}
 486
 487int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
 488                         struct bnx2x_vf_queue_construct_params *qctor)
 489{
 490        int rc;
 491
 492        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 493
 494        rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
 495        if (rc)
 496                goto op_err;
 497
 498        /* Schedule the configuration of any pending vlan filters */
 499        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 500                               BNX2X_MSG_IOV);
 501        return 0;
 502op_err:
 503        BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
 504        return rc;
 505}
 506
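     /* Per-queue FLR cleanup: for the leading queue, drop any classification
      * rules still recorded for it (driver bookkeeping only), then force a
      * TERMINATE ramrod if the queue object has not already reached the
      * RESET state.
      */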
 507static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
 508                               int qid)
 509{
 510        int rc;
 511
 512        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 513
 514        /* If needed, clean the filtering data base */
 515        if ((qid == LEADING_IDX) &&
 516            bnx2x_validate_vf_sp_objs(bp, vf, false)) {
 517                rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
 518                                             BNX2X_VF_FILTER_VLAN_MAC);
 519                if (rc)
 520                        goto op_err;
 521                rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
 522                                             BNX2X_VF_FILTER_VLAN);
 523                if (rc)
 524                        goto op_err;
 525                rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
 526                                             BNX2X_VF_FILTER_MAC);
 527                if (rc)
 528                        goto op_err;
 529        }
 530
 531        /* Terminate queue */
 532        if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
 533                struct bnx2x_queue_state_params qstate;
 534
 535                memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
 536                qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 537                qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
 538                qstate.cmd = BNX2X_Q_CMD_TERMINATE;
 539                set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
 540                rc = bnx2x_queue_state_change(bp, &qstate);
 541                if (rc)
 542                        goto op_err;
 543        }
 544
 545        return 0;
 546op_err:
 547        BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
 548        return rc;
 549}
 550
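     /* Replace or clear a VF's multicast configuration. A non-zero 'mc_num'
      * converts the supplied addresses into a bnx2x_mcast_list_elem list and
      * issues a SET command; otherwise a DEL command removes whatever was
      * previously configured.
      */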
 551int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
 552                   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
 553{
 554        struct bnx2x_mcast_list_elem *mc = NULL;
 555        struct bnx2x_mcast_ramrod_params mcast;
 556        int rc, i;
 557
 558        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 559
 560        /* Prepare Multicast command */
 561        memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
 562        mcast.mcast_obj = &vf->mcast_obj;
 563        if (drv_only)
 564                set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
 565        else
 566                set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
 567        if (mc_num) {
 568                mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
 569                             GFP_KERNEL);
 570                if (!mc) {
  571                        BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
 572                        return -ENOMEM;
 573                }
 574        }
 575
 576        if (mc_num) {
 577                INIT_LIST_HEAD(&mcast.mcast_list);
 578                for (i = 0; i < mc_num; i++) {
 579                        mc[i].mac = mcasts[i];
 580                        list_add_tail(&mc[i].link,
 581                                      &mcast.mcast_list);
 582                }
 583
 584                /* add new mcasts */
 585                mcast.mcast_list_len = mc_num;
 586                rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
 587                if (rc)
  588                        BNX2X_ERR("Failed to set multicasts\n");
 589        } else {
 590                /* clear existing mcasts */
 591                rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
 592                if (rc)
 593                        BNX2X_ERR("Failed to remove multicasts\n");
 594        }
 595
 596        kfree(mc);
 597
 598        return rc;
 599}
 600
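     /* Fill an rx_mode ramrod for a VF queue: the same accept flags are
      * applied to both the Rx and Tx sides, and the ramrod data is placed in
      * the VF's slowpath DMA area.
      */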
 601static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
 602                                  struct bnx2x_rx_mode_ramrod_params *ramrod,
 603                                  struct bnx2x_virtf *vf,
 604                                  unsigned long accept_flags)
 605{
 606        struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
 607
 608        memset(ramrod, 0, sizeof(*ramrod));
 609        ramrod->cid = vfq->cid;
 610        ramrod->cl_id = vfq_cl_id(vf, vfq);
 611        ramrod->rx_mode_obj = &bp->rx_mode_obj;
 612        ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
 613        ramrod->rx_accept_flags = accept_flags;
 614        ramrod->tx_accept_flags = accept_flags;
 615        ramrod->pstate = &vf->filter_state;
 616        ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
 617
 618        set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
 619        set_bit(RAMROD_RX, &ramrod->ramrod_flags);
 620        set_bit(RAMROD_TX, &ramrod->ramrod_flags);
 621
 622        ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
 623        ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
 624}
 625
 626int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
 627                    int qid, unsigned long accept_flags)
 628{
 629        struct bnx2x_rx_mode_ramrod_params ramrod;
 630
 631        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 632
 633        bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
 634        set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 635        vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
 636        return bnx2x_config_rx_mode(bp, &ramrod);
 637}
 638
 639int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
 640{
 641        int rc;
 642
 643        DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 644
 645        /* Remove all classification configuration for leading queue */
 646        if (qid == LEADING_IDX) {
 647                rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
 648                if (rc)
 649                        goto op_err;
 650
 651                /* Remove filtering if feasible */
 652                if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
 653                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
 654                                                     false,
 655                                                     BNX2X_VF_FILTER_VLAN_MAC);
 656                        if (rc)
 657                                goto op_err;
 658                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
 659                                                     false,
 660                                                     BNX2X_VF_FILTER_VLAN);
 661                        if (rc)
 662                                goto op_err;
 663                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
 664                                                     false,
 665                                                     BNX2X_VF_FILTER_MAC);
 666                        if (rc)
 667                                goto op_err;
 668                        rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
 669                        if (rc)
 670                                goto op_err;
 671                }
 672        }
 673
 674        /* Destroy queue */
 675        rc = bnx2x_vf_queue_destroy(bp, vf, qid);
 676        if (rc)
 677                goto op_err;
 678        return rc;
 679op_err:
 680        BNX2X_ERR("vf[%d:%d] error: rc %d\n",
 681                  vf->abs_vfid, qid, rc);
 682        return rc;
 683}
 684
 685/* VF enable primitives
  686 * When pretend is required, the caller is responsible
  687 * for calling pretend prior to calling these routines.
  688 */
 689
 690/* internal vf enable - until vf is enabled internally all transactions
 691 * are blocked. This routine should always be called last with pretend.
 692 */
 693static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
 694{
 695        REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
 696}
 697
 698/* clears vf error in all semi blocks */
 699static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
 700{
 701        REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
 702        REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
 703        REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
 704        REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
 705}
 706
 707static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
 708{
 709        u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
 710        u32 was_err_reg = 0;
 711
 712        switch (was_err_group) {
 713        case 0:
 714            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
 715            break;
 716        case 1:
 717            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
 718            break;
 719        case 2:
 720            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
 721            break;
 722        case 3:
 723            was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
 724            break;
 725        }
 726        REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
 727}
 728
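     /* Reset a VF's IGU state while pretending to be that VF: clear the SB
      * masks and pending-bit arrays, enable the function in MSI/MSI-X mode
      * with the correct parent PF, then bring each of the VF's status blocks
      * back to a clean, interrupt-disabled state.
      */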
 729static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
 730{
 731        int i;
 732        u32 val;
 733
 734        /* Set VF masks and configuration - pretend */
 735        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 736
 737        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
 738        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
 739        REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
 740        REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
 741        REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
 742        REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
 743
 744        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
 745        val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
 746        val &= ~IGU_VF_CONF_PARENT_MASK;
 747        val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
 748        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
 749
 750        DP(BNX2X_MSG_IOV,
 751           "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
 752           vf->abs_vfid, val);
 753
 754        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 755
 756        /* iterate over all queues, clear sb consumer */
 757        for (i = 0; i < vf_sb_count(vf); i++) {
 758                u8 igu_sb_id = vf_igu_sb(vf, i);
 759
 760                /* zero prod memory */
 761                REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
 762
 763                /* clear sb state machine */
 764                bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
 765                                       false /* VF */);
 766
 767                /* disable + update */
 768                bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
 769                                    IGU_INT_DISABLE, 1);
 770        }
 771}
 772
 773void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
 774{
 775        /* set the VF-PF association in the FW */
 776        storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
 777        storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
 778
  779        /* clear vf errors */
 780        bnx2x_vf_semi_clear_err(bp, abs_vfid);
 781        bnx2x_vf_pglue_clear_err(bp, abs_vfid);
 782
 783        /* internal vf-enable - pretend */
 784        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 785        DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
 786        bnx2x_vf_enable_internal(bp, true);
 787        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 788}
 789
 790static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
 791{
  792        /* Reset the VF in the IGU - interrupts are still disabled */
 793        bnx2x_vf_igu_reset(bp, vf);
 794
 795        /* pretend to enable the vf with the PBF */
 796        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 797        REG_WR(bp, PBF_REG_DISABLE_VF, 0);
 798        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 799}
 800
 801static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
 802{
 803        struct pci_dev *dev;
 804        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 805
 806        if (!vf)
 807                return false;
 808
 809        dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
 810        if (dev)
 811                return bnx2x_is_pcie_pending(dev);
 812        return false;
 813}
 814
 815int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
 816{
 817        /* Verify no pending pci transactions */
 818        if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
 819                BNX2X_ERR("PCIE Transactions still pending\n");
 820
 821        return 0;
 822}
 823
 824/* must be called after the number of PF queues and the number of VFs are
 825 * both known
 826 */
 827static void
 828bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 829{
 830        struct vf_pf_resc_request *resc = &vf->alloc_resc;
 831
 832        /* will be set only during VF-ACQUIRE */
 833        resc->num_rxqs = 0;
 834        resc->num_txqs = 0;
 835
 836        resc->num_mac_filters = VF_MAC_CREDIT_CNT;
 837        resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
 838
 839        /* no real limitation */
 840        resc->num_mc_filters = 0;
 841
 842        /* num_sbs already set */
 843        resc->num_sbs = vf->sb_count;
 844}
 845
 846/* FLR routines: */
 847static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 848{
 849        /* reset the state variables */
 850        bnx2x_iov_static_resc(bp, vf);
 851        vf->state = VF_FREE;
 852}
 853
 854static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
 855{
 856        u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
 857
 858        /* DQ usage counter */
 859        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 860        bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
 861                                        "DQ VF usage counter timed out",
 862                                        poll_cnt);
 863        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 864
 865        /* FW cleanup command - poll for the results */
 866        if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
 867                                   poll_cnt))
 868                BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
 869
 870        /* verify TX hw is flushed */
 871        bnx2x_tx_hw_flushed(bp, poll_cnt);
 872}
 873
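     /* Per-VF FLR cleanup: flush every Rx queue, remove multicasts, run the
      * HW final-cleanup sequence and release the VF's resources before
      * re-opening its mailbox.
      */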
 874static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 875{
 876        int rc, i;
 877
 878        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 879
 880        /* the cleanup operations are valid if and only if the VF
 881         * was first acquired.
 882         */
 883        for (i = 0; i < vf_rxq_count(vf); i++) {
 884                rc = bnx2x_vf_queue_flr(bp, vf, i);
 885                if (rc)
 886                        goto out;
 887        }
 888
 889        /* remove multicasts */
 890        bnx2x_vf_mcast(bp, vf, NULL, 0, true);
 891
 892        /* dispatch final cleanup and wait for HW queues to flush */
 893        bnx2x_vf_flr_clnup_hw(bp, vf);
 894
 895        /* release VF resources */
 896        bnx2x_vf_free_resc(bp, vf);
 897
 898        /* re-open the mailbox */
 899        bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
 900        return;
 901out:
 902        BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
 903                  vf->abs_vfid, i, rc);
 904}
 905
 906static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
 907{
 908        struct bnx2x_virtf *vf;
 909        int i;
 910
 911        for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
 912                /* VF should be RESET & in FLR cleanup states */
 913                if (bnx2x_vf(bp, i, state) != VF_RESET ||
 914                    !bnx2x_vf(bp, i, flr_clnup_stage))
 915                        continue;
 916
 917                DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
 918                   i, BNX2X_NR_VIRTFN(bp));
 919
 920                vf = BP_VF(bp, i);
 921
 922                /* lock the vf pf channel */
 923                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
 924
 925                /* invoke the VF FLR SM */
 926                bnx2x_vf_flr(bp, vf);
 927
 928                /* mark the VF to be ACKED and continue */
 929                vf->flr_clnup_stage = false;
 930                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
 931        }
 932
  933        /* Acknowledge the handled VFs.
  934         * We acknowledge all the VFs for which an FLR was requested, even
  935         * those we never opened, since the MCP will interrupt us immediately
  936         * again if we only ack some of the bits, resulting in an endless
  937         * loop. This can happen, for example, in KVM where an 'all ones' FLR
  938         * request is sometimes given by the hypervisor.
  939         */
 940        DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
 941           bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
 942        for (i = 0; i < FLRD_VFS_DWORDS; i++)
 943                SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
 944                          bp->vfdb->flrd_vfs[i]);
 945
 946        bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
 947
 948        /* clear the acked bits - better yet if the MCP implemented
 949         * write to clear semantics
 950         */
 951        for (i = 0; i < FLRD_VFS_DWORDS; i++)
 952                SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
 953}
 954
 955void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
 956{
 957        int i;
 958
 959        /* Read FLR'd VFs */
 960        for (i = 0; i < FLRD_VFS_DWORDS; i++)
 961                bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
 962
 963        DP(BNX2X_MSG_MCP,
 964           "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
 965           bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
 966
 967        for_each_vf(bp, i) {
 968                struct bnx2x_virtf *vf = BP_VF(bp, i);
 969                u32 reset = 0;
 970
 971                if (vf->abs_vfid < 32)
 972                        reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
 973                else
 974                        reset = bp->vfdb->flrd_vfs[1] &
 975                                (1 << (vf->abs_vfid - 32));
 976
 977                if (reset) {
 978                        /* set as reset and ready for cleanup */
 979                        vf->state = VF_RESET;
 980                        vf->flr_clnup_stage = true;
 981
 982                        DP(BNX2X_MSG_IOV,
 983                           "Initiating Final cleanup for VF %d\n",
 984                           vf->abs_vfid);
 985                }
 986        }
 987
 988        /* do the FLR cleanup for all marked VFs*/
 989        bnx2x_vf_flr_clnup(bp);
 990}
 991
 992/* IOV global initialization routines  */
 993void bnx2x_iov_init_dq(struct bnx2x *bp)
 994{
 995        if (!IS_SRIOV(bp))
 996                return;
 997
  998        /* Set the DQ such that the CID reflects the abs_vfid */
 999        REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1000        REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1001
 1002        /* Set the VFs' starting CID. If it is > 0, the preceding CIDs belong
 1003         * to the PF L2 queues.
 1004         */
1005        REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1006
1007        /* The VF window size is the log2 of the max number of CIDs per VF */
1008        REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1009
 1010        /* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
 1011         * the PF doorbell size, although the two are independent.
 1012         */
1013        REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
1014
1015        /* No security checks for now -
1016         * configure single rule (out of 16) mask = 0x1, value = 0x0,
1017         * CID range 0 - 0x1ffff
1018         */
1019        REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1020        REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1021        REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1022        REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1023
 1024        /* Set the VF doorbell threshold. This threshold represents the number
 1025         * of doorbells allowed in the main DORQ FIFO for a specific VF.
 1026         */
1027        REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
1028}
1029
1030void bnx2x_iov_init_dmae(struct bnx2x *bp)
1031{
1032        if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
1033                REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1034}
1035
1036static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1037{
1038        struct pci_dev *dev = bp->pdev;
1039        struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1040
1041        return dev->bus->number + ((dev->devfn + iov->offset +
1042                                    iov->stride * vfid) >> 8);
1043}
1044
1045static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1046{
1047        struct pci_dev *dev = bp->pdev;
1048        struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1049
1050        return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1051}
1052
1053static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1054{
1055        int i, n;
1056        struct pci_dev *dev = bp->pdev;
1057        struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1058
1059        for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1060                u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1061                u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1062
1063                size /= iov->total;
1064                vf->bars[n].bar = start + size * vf->abs_vfid;
1065                vf->bars[n].size = size;
1066        }
1067}
1068
1069static int bnx2x_ari_enabled(struct pci_dev *dev)
1070{
1071        return dev->bus->self && dev->bus->self->ari_enabled;
1072}
1073
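     /* Walk the IGU CAM and, for every valid entry that belongs to one of
      * this PF's VFs, record the status block in that VF's IGU info.
      * Returns the total number of VF status blocks found; the caller treats
      * zero as a fatal error for SR-IOV initialization.
      */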
1074static int
1075bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1076{
1077        int sb_id;
1078        u32 val;
1079        u8 fid, current_pf = 0;
1080
1081        /* IGU in normal mode - read CAM */
1082        for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1083                val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1084                if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1085                        continue;
1086                fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1087                if (fid & IGU_FID_ENCODE_IS_PF)
1088                        current_pf = fid & IGU_FID_PF_NUM_MASK;
1089                else if (current_pf == BP_FUNC(bp))
1090                        bnx2x_vf_set_igu_info(bp, sb_id,
1091                                              (fid & IGU_FID_VF_NUM_MASK));
1092                DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1093                   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1094                   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1095                   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1096                   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1097        }
1098        DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
1099        return BP_VFDB(bp)->vf_sbs_pool;
1100}
1101
1102static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1103{
1104        if (bp->vfdb) {
1105                kfree(bp->vfdb->vfqs);
1106                kfree(bp->vfdb->vfs);
1107                kfree(bp->vfdb);
1108        }
1109        bp->vfdb = NULL;
1110}
1111
1112static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1113{
1114        int pos;
1115        struct pci_dev *dev = bp->pdev;
1116
1117        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1118        if (!pos) {
1119                BNX2X_ERR("failed to find SRIOV capability in device\n");
1120                return -ENODEV;
1121        }
1122
1123        iov->pos = pos;
1124        DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1125        pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1126        pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1127        pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1128        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1129        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1130        pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1131        pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1132        pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1133
1134        return 0;
1135}
1136
1137static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1138{
1139        u32 val;
1140
1141        /* read the SRIOV capability structure
1142         * The fields can be read via configuration read or
1143         * directly from the device (starting at offset PCICFG_OFFSET)
1144         */
1145        if (bnx2x_sriov_pci_cfg_info(bp, iov))
1146                return -ENODEV;
1147
1148        /* get the number of SRIOV bars */
1149        iov->nres = 0;
1150
1151        /* read the first_vfid */
1152        val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1153        iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1154                               * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1155
1156        DP(BNX2X_MSG_IOV,
1157           "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1158           BP_FUNC(bp),
1159           iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1160           iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1161
1162        return 0;
1163}
1164
1165/* must be called after PF bars are mapped */
1166int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1167                       int num_vfs_param)
1168{
1169        int err, i;
1170        struct bnx2x_sriov *iov;
1171        struct pci_dev *dev = bp->pdev;
1172
1173        bp->vfdb = NULL;
1174
 1175        /* verify this is a PF */
1176        if (IS_VF(bp))
1177                return 0;
1178
1179        /* verify sriov capability is present in configuration space */
1180        if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1181                return 0;
1182
1183        /* verify chip revision */
1184        if (CHIP_IS_E1x(bp))
1185                return 0;
1186
1187        /* check if SRIOV support is turned off */
1188        if (!num_vfs_param)
1189                return 0;
1190
1191        /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1192        if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1193                BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1194                          BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1195                return 0;
1196        }
1197
1198        /* SRIOV can be enabled only with MSIX */
1199        if (int_mode_param == BNX2X_INT_MODE_MSI ||
1200            int_mode_param == BNX2X_INT_MODE_INTX) {
1201                BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1202                return 0;
1203        }
1204
1205        err = -EIO;
1206        /* verify ari is enabled */
1207        if (!bnx2x_ari_enabled(bp->pdev)) {
 1208                BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV cannot be enabled\n");
1209                return 0;
1210        }
1211
1212        /* verify igu is in normal mode */
1213        if (CHIP_INT_MODE_IS_BC(bp)) {
 1214                BNX2X_ERR("IGU is not in normal mode, SRIOV cannot be enabled\n");
1215                return 0;
1216        }
1217
1218        /* allocate the vfs database */
1219        bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1220        if (!bp->vfdb) {
1221                BNX2X_ERR("failed to allocate vf database\n");
1222                err = -ENOMEM;
1223                goto failed;
1224        }
1225
1226        /* get the sriov info - Linux already collected all the pertinent
1227         * information, however the sriov structure is for the private use
1228         * of the pci module. Also we want this information regardless
 1229         * of the hypervisor.
1230         */
1231        iov = &(bp->vfdb->sriov);
1232        err = bnx2x_sriov_info(bp, iov);
1233        if (err)
1234                goto failed;
1235
 1236        /* SR-IOV capability was enabled but there are no VFs */
1237        if (iov->total == 0)
1238                goto failed;
1239
1240        iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
1241
1242        DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
1243           num_vfs_param, iov->nr_virtfn);
1244
1245        /* allocate the vf array */
1246        bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
1247                                BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
1248        if (!bp->vfdb->vfs) {
1249                BNX2X_ERR("failed to allocate vf array\n");
1250                err = -ENOMEM;
1251                goto failed;
1252        }
1253
1254        /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
1255        for_each_vf(bp, i) {
1256                bnx2x_vf(bp, i, index) = i;
1257                bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
1258                bnx2x_vf(bp, i, state) = VF_FREE;
1259                mutex_init(&bnx2x_vf(bp, i, op_mutex));
1260                bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
1261        }
1262
1263        /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1264        if (!bnx2x_get_vf_igu_cam_info(bp)) {
1265                BNX2X_ERR("No entries in IGU CAM for vfs\n");
1266                err = -EINVAL;
1267                goto failed;
1268        }
1269
1270        /* allocate the queue arrays for all VFs */
1271        bp->vfdb->vfqs = kzalloc(
1272                BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
1273                GFP_KERNEL);
1274
1275        if (!bp->vfdb->vfqs) {
1276                BNX2X_ERR("failed to allocate vf queue array\n");
1277                err = -ENOMEM;
1278                goto failed;
1279        }
1280
1281        /* Prepare the VFs event synchronization mechanism */
1282        mutex_init(&bp->vfdb->event_mutex);
1283
1284        mutex_init(&bp->vfdb->bulletin_mutex);
1285
1286        if (SHMEM2_HAS(bp, sriov_switch_mode))
1287                SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
1288
1289        return 0;
1290failed:
1291        DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
1292        __bnx2x_iov_free_vfdb(bp);
1293        return err;
1294}
1295
1296void bnx2x_iov_remove_one(struct bnx2x *bp)
1297{
1298        int vf_idx;
1299
1300        /* if SRIOV is not enabled there's nothing to do */
1301        if (!IS_SRIOV(bp))
1302                return;
1303
1304        bnx2x_disable_sriov(bp);
1305
1306        /* disable access to all VFs */
1307        for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
1308                bnx2x_pretend_func(bp,
1309                                   HW_VF_HANDLE(bp,
1310                                                bp->vfdb->sriov.first_vf_in_pf +
1311                                                vf_idx));
1312                DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
1313                   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
1314                bnx2x_vf_enable_internal(bp, 0);
1315                bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1316        }
1317
1318        /* free vf database */
1319        __bnx2x_iov_free_vfdb(bp);
1320}
1321
1322void bnx2x_iov_free_mem(struct bnx2x *bp)
1323{
1324        int i;
1325
1326        if (!IS_SRIOV(bp))
1327                return;
1328
1329        /* free vfs hw contexts */
1330        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1331                struct hw_dma *cxt = &bp->vfdb->context[i];
1332                BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
1333        }
1334
1335        BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
1336                       BP_VFDB(bp)->sp_dma.mapping,
1337                       BP_VFDB(bp)->sp_dma.size);
1338
1339        BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
1340                       BP_VF_MBX_DMA(bp)->mapping,
1341                       BP_VF_MBX_DMA(bp)->size);
1342
1343        BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
1344                       BP_VF_BULLETIN_DMA(bp)->mapping,
1345                       BP_VF_BULLETIN_DMA(bp)->size);
1346}
1347
1348int bnx2x_iov_alloc_mem(struct bnx2x *bp)
1349{
1350        size_t tot_size;
1351        int i, rc = 0;
1352
1353        if (!IS_SRIOV(bp))
1354                return rc;
1355
1356        /* allocate vfs hw contexts */
1357        tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
1358                BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
1359
1360        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1361                struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
1362                cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
1363
1364                if (cxt->size) {
1365                        cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
1366                        if (!cxt->addr)
1367                                goto alloc_mem_err;
1368                } else {
1369                        cxt->addr = NULL;
1370                        cxt->mapping = 0;
1371                }
1372                tot_size -= cxt->size;
1373        }
1374
1375        /* allocate vfs ramrods dma memory - client_init and set_mac */
1376        tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
1377        BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
1378                                                   tot_size);
1379        if (!BP_VFDB(bp)->sp_dma.addr)
1380                goto alloc_mem_err;
1381        BP_VFDB(bp)->sp_dma.size = tot_size;
1382
1383        /* allocate mailboxes */
1384        tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
1385        BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
1386                                                  tot_size);
1387        if (!BP_VF_MBX_DMA(bp)->addr)
1388                goto alloc_mem_err;
1389
1390        BP_VF_MBX_DMA(bp)->size = tot_size;
1391
1392        /* allocate local bulletin boards */
1393        tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
1394        BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
1395                                                       tot_size);
1396        if (!BP_VF_BULLETIN_DMA(bp)->addr)
1397                goto alloc_mem_err;
1398
1399        BP_VF_BULLETIN_DMA(bp)->size = tot_size;
1400
1401        return 0;
1402
1403alloc_mem_err:
1404        return -ENOMEM;
1405}
1406
1407static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
1408                           struct bnx2x_vf_queue *q)
1409{
1410        u8 cl_id = vfq_cl_id(vf, q);
1411        u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
1412        unsigned long q_type = 0;
1413
1414        set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1415        set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1416
1417        /* Queue State object */
1418        bnx2x_init_queue_obj(bp, &q->sp_obj,
1419                             cl_id, &q->cid, 1, func_id,
1420                             bnx2x_vf_sp(bp, vf, q_data),
1421                             bnx2x_vf_sp_map(bp, vf, q_data),
1422                             q_type);
1423
1424        /* sp indication is set only when vlan/mac/etc. are initialized */
1425        q->sp_initialized = false;
1426
1427        DP(BNX2X_MSG_IOV,
1428           "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
1429           vf->abs_vfid, q->sp_obj.func_id, q->cid);
1430}
1431
1432static int bnx2x_max_speed_cap(struct bnx2x *bp)
1433{
1434        u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];
1435
1436        if (supported &
1437            (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
1438                return 20000;
1439
1440        return 10000; /* assume lowest supported speed is 10G */
1441}
1442
1443int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
1444{
1445        struct bnx2x_link_report_data *state = &bp->last_reported_link;
1446        struct pf_vf_bulletin_content *bulletin;
1447        struct bnx2x_virtf *vf;
1448        bool update = true;
1449        int rc = 0;
1450
1451        /* sanity and init */
1452        rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
1453        if (rc)
1454                return rc;
1455
1456        mutex_lock(&bp->vfdb->bulletin_mutex);
1457
1458        if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
1459                bulletin->valid_bitmap |= 1 << LINK_VALID;
1460
1461                bulletin->link_speed = state->line_speed;
1462                bulletin->link_flags = 0;
1463                if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1464                             &state->link_report_flags))
1465                        bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1466                if (test_bit(BNX2X_LINK_REPORT_FD,
1467                             &state->link_report_flags))
1468                        bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
1469                if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1470                             &state->link_report_flags))
1471                        bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
1472                if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1473                             &state->link_report_flags))
1474                        bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
1475        } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
1476                   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1477                bulletin->valid_bitmap |= 1 << LINK_VALID;
1478                bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1479        } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
1480                   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1481                bulletin->valid_bitmap |= 1 << LINK_VALID;
1482                bulletin->link_speed = bnx2x_max_speed_cap(bp);
1483                bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
1484        } else {
1485                update = false;
1486        }
1487
1488        if (update) {
1489                DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
1490                   "vf %d mode %u speed %d flags %x\n", idx,
1491                   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);
1492
1493                /* Post update on VF's bulletin board */
1494                rc = bnx2x_post_vf_bulletin(bp, idx);
1495                if (rc) {
1496                        BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
1497                        goto out;
1498                }
1499        }
1500
1501out:
1502        mutex_unlock(&bp->vfdb->bulletin_mutex);
1503        return rc;
1504}
1505
1506int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
1507{
1508        struct bnx2x *bp = netdev_priv(dev);
1509        struct bnx2x_virtf *vf = BP_VF(bp, idx);
1510
1511        if (!vf)
1512                return -EINVAL;
1513
1514        if (vf->link_cfg == link_state)
1515                return 0; /* nothing to do */
1516
1517        vf->link_cfg = link_state;
1518
1519        return bnx2x_iov_link_update_vf(bp, idx);
1520}
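/* Editor's note (not part of the original source): bnx2x_set_vf_link_state()
 * is registered elsewhere in the driver as the .ndo_set_vf_link_state
 * callback, so it is typically reached via the iproute2 command
 * "ip link set <pf-netdev> vf <n> state auto|enable|disable", which maps to
 * the IFLA_VF_LINK_STATE_* values handled in bnx2x_iov_link_update_vf().
 */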
1521
1522void bnx2x_iov_link_update(struct bnx2x *bp)
1523{
1524        int vfid;
1525
1526        if (!IS_SRIOV(bp))
1527                return;
1528
1529        for_each_vf(bp, vfid)
1530                bnx2x_iov_link_update_vf(bp, vfid);
1531}
1532
1533/* called by bnx2x_nic_load */
1534int bnx2x_iov_nic_init(struct bnx2x *bp)
1535{
1536        int vfid;
1537
1538        if (!IS_SRIOV(bp)) {
1539                DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1540                return 0;
1541        }
1542
1543        DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
1544
1545        /* let FLR complete ... */
1546        msleep(100);
1547
1548        /* initialize vf database */
1549        for_each_vf(bp, vfid) {
1550                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1551
1552                int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1553                        BNX2X_CIDS_PER_VF;
1554
1555                union cdu_context *base_cxt = (union cdu_context *)
1556                        BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1557                        (base_vf_cid & (ILT_PAGE_CIDS-1));
1558
1559                DP(BNX2X_MSG_IOV,
1560                   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1561                   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1562                   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1563
1564                /* init statically provisioned resources */
1565                bnx2x_iov_static_resc(bp, vf);
1566
1567                /* queues are initialized during VF-ACQUIRE */
1568                vf->filter_state = 0;
1569                vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1570
1571                bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
1572                                       vf_vlan_rules_cnt(vf));
1573                bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
1574                                       vf_mac_rules_cnt(vf));
1575
1576                /*  init mcast object - This object will be re-initialized
1577                 *  during VF-ACQUIRE with the proper cl_id and cid.
1578                 *  It needs to be initialized here so that it can be safely
1579                 *  handled by a subsequent FLR flow.
1580                 */
1581                bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1582                                     0xFF, 0xFF, 0xFF,
1583                                     bnx2x_vf_sp(bp, vf, mcast_rdata),
1584                                     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1585                                     BNX2X_FILTER_MCAST_PENDING,
1586                                     &vf->filter_state,
1587                                     BNX2X_OBJ_TYPE_RX_TX);
1588
1589                /* set the mailbox message addresses */
1590                BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1591                        (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1592                        MBX_MSG_ALIGNED_SIZE);
1593
1594                BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1595                        vfid * MBX_MSG_ALIGNED_SIZE;
1596
1597                /* Enable vf mailbox */
1598                bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1599        }
1600
1601        /* Final VF init */
1602        for_each_vf(bp, vfid) {
1603                struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1604
1605                /* fill in the BDF and bars */
1606                vf->bus = bnx2x_vf_bus(bp, vfid);
1607                vf->devfn = bnx2x_vf_devfn(bp, vfid);
1608                bnx2x_vf_set_bars(bp, vf);
1609
1610                DP(BNX2X_MSG_IOV,
1611                   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1612                   vf->abs_vfid, vf->bus, vf->devfn,
1613                   (unsigned)vf->bars[0].bar, vf->bars[0].size,
1614                   (unsigned)vf->bars[1].bar, vf->bars[1].size,
1615                   (unsigned)vf->bars[2].bar, vf->bars[2].size);
1616        }
1617
1618        return 0;
1619}
1620
1621/* called by bnx2x_chip_cleanup */
1622int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
1623{
1624        int i;
1625
1626        if (!IS_SRIOV(bp))
1627                return 0;
1628
1629        /* release all the VFs */
1630        for_each_vf(bp, i)
1631                bnx2x_vf_release(bp, BP_VF(bp, i));
1632
1633        return 0;
1634}
1635
1636/* called by bnx2x_init_hw_func, returns the next ilt line */
1637int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1638{
1639        int i;
1640        struct bnx2x_ilt *ilt = BP_ILT(bp);
1641
1642        if (!IS_SRIOV(bp))
1643                return line;
1644
1645        /* set vfs ilt lines */
1646        for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1647                struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1648
1649                ilt->lines[line+i].page = hw_cxt->addr;
1650                ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1651                ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1652        }
1653        return line + i;
1654}
1655
1656static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
1657{
1658        return ((cid >= BNX2X_FIRST_VF_CID) &&
1659                ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1660}
1661
1662static
1663void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1664                                        struct bnx2x_vf_queue *vfq,
1665                                        union event_ring_elem *elem)
1666{
1667        unsigned long ramrod_flags = 0;
1668        int rc = 0;
1669        u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
1670
1671        /* Always push next commands out, don't wait here */
1672        set_bit(RAMROD_CONT, &ramrod_flags);
1673
1674        switch (echo >> BNX2X_SWCID_SHIFT) {
1675        case BNX2X_FILTER_MAC_PENDING:
1676                rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1677                                           &ramrod_flags);
1678                break;
1679        case BNX2X_FILTER_VLAN_PENDING:
1680                rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1681                                            &ramrod_flags);
1682                break;
1683        default:
1684                BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
1685                return;
1686        }
1687        if (rc < 0)
1688                BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
1689        else if (rc > 0)
1690                DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
1691}
1692
1693static
1694void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
1695                               struct bnx2x_virtf *vf)
1696{
1697        struct bnx2x_mcast_ramrod_params rparam = {NULL};
1698        int rc;
1699
1700        rparam.mcast_obj = &vf->mcast_obj;
1701        vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
1702
1703        /* If there are pending mcast commands - send them */
1704        if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
1705                rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1706                if (rc < 0)
1707                        BNX2X_ERR("Failed to send pending mcast commands: %d\n",
1708                                  rc);
1709        }
1710}
1711
1712static
1713void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
1714                                 struct bnx2x_virtf *vf)
1715{
1716        smp_mb__before_atomic();
1717        clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1718        smp_mb__after_atomic();
1719}
1720
1721static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
1722                                           struct bnx2x_virtf *vf)
1723{
1724        vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
1725}
1726
1727int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1728{
1729        struct bnx2x_virtf *vf;
1730        int qidx = 0, abs_vfid;
1731        u8 opcode;
1732        u16 cid = 0xffff;
1733
1734        if (!IS_SRIOV(bp))
1735                return 1;
1736
1737        /* first get the cid - the only events we handle here are cfc-delete
1738         * and set-mac completion
1739         */
1740        opcode = elem->message.opcode;
1741
1742        switch (opcode) {
1743        case EVENT_RING_OPCODE_CFC_DEL:
1744                cid = SW_CID(elem->message.data.cfc_del_event.cid);
1745                DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1746                break;
1747        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1748        case EVENT_RING_OPCODE_MULTICAST_RULES:
1749        case EVENT_RING_OPCODE_FILTERS_RULES:
1750        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1751                cid = SW_CID(elem->message.data.eth_event.echo);
1752                DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1753                break;
1754        case EVENT_RING_OPCODE_VF_FLR:
1755                abs_vfid = elem->message.data.vf_flr_event.vf_id;
1756                DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
1757                   abs_vfid);
1758                goto get_vf;
1759        case EVENT_RING_OPCODE_MALICIOUS_VF:
1760                abs_vfid = elem->message.data.malicious_vf_event.vf_id;
1761                BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
1762                          abs_vfid,
1763                          elem->message.data.malicious_vf_event.err_id);
1764                goto get_vf;
1765        default:
1766                return 1;
1767        }
1768
1769        /* check if the cid is in the VF range */
1770        if (!bnx2x_iov_is_vf_cid(bp, cid)) {
1771                DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
1772                return 1;
1773        }
1774
1775        /* extract vf and rxq index from vf_cid - relies on the following:
1776         * 1. vfid on cid reflects the true abs_vfid
1777         * 2. The max number of VFs (per path) is 64
1778         */
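        /* Editor's note (illustrative, window width assumed): if
         * BNX2X_VF_CID_WND were 4, a cid of 0x25 would decode below to
         * qidx = 0x25 & 0xf = 5 and abs_vfid = (0x25 >> 4) & 63 = 2,
         * i.e. queue 5 of VF 2.
         */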
1779        qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
1780        abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1781get_vf:
1782        vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1783
1784        if (!vf) {
1785                BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
1786                          cid, abs_vfid);
1787                return 0;
1788        }
1789
1790        switch (opcode) {
1791        case EVENT_RING_OPCODE_CFC_DEL:
1792                DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
1793                   vf->abs_vfid, qidx);
1794                vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
1795                                                       &vfq_get(vf,
1796                                                                qidx)->sp_obj,
1797                                                       BNX2X_Q_CMD_CFC_DEL);
1798                break;
1799        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1800                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
1801                   vf->abs_vfid, qidx);
1802                bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
1803                break;
1804        case EVENT_RING_OPCODE_MULTICAST_RULES:
1805                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
1806                   vf->abs_vfid, qidx);
1807                bnx2x_vf_handle_mcast_eqe(bp, vf);
1808                break;
1809        case EVENT_RING_OPCODE_FILTERS_RULES:
1810                DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
1811                   vf->abs_vfid, qidx);
1812                bnx2x_vf_handle_filters_eqe(bp, vf);
1813                break;
1814        case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1815                DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
1816                   vf->abs_vfid, qidx);
1817                bnx2x_vf_handle_rss_update_eqe(bp, vf);
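                /* no break: falls through to the no-op cases below, which
                 * return 0 just as the end of the function would.
                 */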
1818        case EVENT_RING_OPCODE_VF_FLR:
1819        case EVENT_RING_OPCODE_MALICIOUS_VF:
1820                /* Do nothing for now */
1821                return 0;
1822        }
1823
1824        return 0;
1825}
1826
1827static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1828{
1829        /* extract the vf from vf_cid - relies on the following:
1830         * 1. vfid on cid reflects the true abs_vfid
1831         * 2. The max number of VFs (per path) is 64
1832         */
1833        int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1834        return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1835}
1836
1837void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1838                                struct bnx2x_queue_sp_obj **q_obj)
1839{
1840        struct bnx2x_virtf *vf;
1841
1842        if (!IS_SRIOV(bp))
1843                return;
1844
1845        vf = bnx2x_vf_by_cid(bp, vf_cid);
1846
1847        if (vf) {
1848                /* extract queue index from vf_cid - relies on the following:
1849                 * 1. vfid on cid reflects the true abs_vfid
1850                 * 2. The max number of VFs (per path) is 64
1851                 */
1852                int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1853                *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1854        } else {
1855                BNX2X_ERR("No vf matching cid %d\n", vf_cid);
1856        }
1857}
1858
1859void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1860{
1861        int i;
1862        int first_queue_query_index, num_queues_req;
1863        dma_addr_t cur_data_offset;
1864        struct stats_query_entry *cur_query_entry;
1865        u8 stats_count = 0;
1866        bool is_fcoe = false;
1867
1868        if (!IS_SRIOV(bp))
1869                return;
1870
1871        if (!NO_FCOE(bp))
1872                is_fcoe = true;
1873
1874        /* fcoe adds one global request and one queue request */
1875        num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
1876        first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
1877                (is_fcoe ? 0 : 1);
1878
1879        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1880               "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
1881               BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
1882               first_queue_query_index + num_queues_req);
1883
1884        cur_data_offset = bp->fw_stats_data_mapping +
1885                offsetof(struct bnx2x_fw_stats_data, queue_stats) +
1886                num_queues_req * sizeof(struct per_queue_stats);
1887
1888        cur_query_entry = &bp->fw_stats_req->
1889                query[first_queue_query_index + num_queues_req];
1890
1891        for_each_vf(bp, i) {
1892                int j;
1893                struct bnx2x_virtf *vf = BP_VF(bp, i);
1894
1895                if (vf->state != VF_ENABLED) {
1896                        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1897                               "vf %d not enabled so no stats for it\n",
1898                               vf->abs_vfid);
1899                        continue;
1900                }
1901
1902                DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
1903                for_each_vfq(vf, j) {
1904                        struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
1905
1906                        dma_addr_t q_stats_addr =
1907                                vf->fw_stat_map + j * vf->stats_stride;
1908
1909                        /* collect stats from active queues only */
1910                        if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
1911                            BNX2X_Q_LOGICAL_STATE_STOPPED)
1912                                continue;
1913
1914                        /* create stats query entry for this queue */
1915                        cur_query_entry->kind = STATS_TYPE_QUEUE;
1916                        cur_query_entry->index = vfq_stat_id(vf, rxq);
1917                        cur_query_entry->funcID =
1918                                cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
1919                        cur_query_entry->address.hi =
1920                                cpu_to_le32(U64_HI(q_stats_addr));
1921                        cur_query_entry->address.lo =
1922                                cpu_to_le32(U64_LO(q_stats_addr));
1923                        DP(BNX2X_MSG_IOV,
1924                           "added address %x %x for vf %d queue %d client %d\n",
1925                           cur_query_entry->address.hi,
1926                           cur_query_entry->address.lo, cur_query_entry->funcID,
1927                           j, cur_query_entry->index);
1928                        cur_query_entry++;
1929                        cur_data_offset += sizeof(struct per_queue_stats);
1930                        stats_count++;
1931
1932                        /* all stats are coalesced to the leading queue */
1933                        if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
1934                                break;
1935                }
1936        }
1937        bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1938}
1939
1940/* VF API helpers */
1941static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
1942                                u8 enable)
1943{
1944        u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
1945        u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
1946
1947        REG_WR(bp, reg, val);
1948}
1949
1950static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
1951{
1952        int i;
1953
1954        for_each_vfq(vf, i)
1955                bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
1956                                    vfq_qzone_id(vf, vfq_get(vf, i)), false);
1957}
1958
1959static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
1960{
1961        u32 val;
1962
1963        /* clear the VF configuration - pretend */
1964        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1965        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1966        val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
1967                 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
1968        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1969        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1970}
1971
1972u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
1973{
1974        return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
1975                     BNX2X_VF_MAX_QUEUES);
1976}
1977
1978static
1979int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
1980                            struct vf_pf_resc_request *req_resc)
1981{
1982        u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1983        u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1984
1985        return ((req_resc->num_rxqs <= rxq_cnt) &&
1986                (req_resc->num_txqs <= txq_cnt) &&
1987                (req_resc->num_sbs <= vf_sb_count(vf))   &&
1988                (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
1989                (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
1990}
1991
1992/* CORE VF API */
1993int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1994                     struct vf_pf_resc_request *resc)
1995{
1996        int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
1997                BNX2X_CIDS_PER_VF;
1998
1999        union cdu_context *base_cxt = (union cdu_context *)
2000                BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2001                (base_vf_cid & (ILT_PAGE_CIDS-1));
2002        int i;
2003
2004        /* if state is 'acquired' the VF was not released or FLR'd, in
2005         * which case the returned resources match the already acquired
2006         * resources. Verify that the requested numbers do not exceed
2007         * the already acquired numbers.
2008         */
2009        if (vf->state == VF_ACQUIRED) {
2010                DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2011                   vf->abs_vfid);
2012
2013                if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2014                        BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2015                                  vf->abs_vfid);
2016                        return -EINVAL;
2017                }
2018                return 0;
2019        }
2020
2021        /* Otherwise vf state must be 'free' or 'reset' */
2022        if (vf->state != VF_FREE && vf->state != VF_RESET) {
2023                BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2024                          vf->abs_vfid, vf->state);
2025                return -EINVAL;
2026        }
2027
2028        /* static allocation:
2029         * the global maximum numbers are fixed per VF. Fail the request if
2030         * the requested numbers exceed these globals
2031         */
2032        if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2033                DP(BNX2X_MSG_IOV,
2034                   "cannot fulfill vf resource request. Placing maximal available values in response\n");
2035                /* set the max resource in the vf */
2036                return -ENOMEM;
2037        }
2038
2039        /* Set resources counters - 0 request means max available */
2040        vf_sb_count(vf) = resc->num_sbs;
2041        vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2042        vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2043
2044        DP(BNX2X_MSG_IOV,
2045           "Fulfilling vf request: sb count %d, rx_count %d, tx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2046           vf_sb_count(vf), vf_rxq_count(vf),
2047           vf_txq_count(vf), vf_mac_rules_cnt(vf),
2048           vf_vlan_rules_cnt(vf));
2049
2050        /* Initialize the queues */
2051        if (!vf->vfqs) {
2052                DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2053                return -EINVAL;
2054        }
2055
2056        for_each_vfq(vf, i) {
2057                struct bnx2x_vf_queue *q = vfq_get(vf, i);
2058
2059                if (!q) {
2060                        BNX2X_ERR("q number %d was not allocated\n", i);
2061                        return -EINVAL;
2062                }
2063
2064                q->index = i;
2065                q->cxt = &((base_cxt + i)->eth);
2066                q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2067
2068                DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2069                   vf->abs_vfid, i, q->index, q->cid, q->cxt);
2070
2071                /* init SP objects */
2072                bnx2x_vfq_init(bp, vf, q);
2073        }
2074        vf->state = VF_ACQUIRED;
2075        return 0;
2076}
2077
2078int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2079{
2080        struct bnx2x_func_init_params func_init = {0};
2081        int i;
2082
2083        /* the sb resources are initialized at this point, do the
2084         * FW/HW initializations
2085         */
2086        for_each_vf_sb(vf, i)
2087                bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2088                              vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2089
2090        /* Sanity checks */
2091        if (vf->state != VF_ACQUIRED) {
2092                DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2093                   vf->abs_vfid, vf->state);
2094                return -EINVAL;
2095        }
2096
2097        /* let FLR complete ... */
2098        msleep(100);
2099
2100        /* FLR cleanup epilogue */
2101        if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2102                return -EBUSY;
2103
2104        /* reset IGU VF statistics: MSIX */
2105        REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2106
2107        /* function setup */
2108        func_init.pf_id = BP_FUNC(bp);
2109        func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2110        bnx2x_func_init(bp, &func_init);
2111
2112        /* Enable the vf */
2113        bnx2x_vf_enable_access(bp, vf->abs_vfid);
2114        bnx2x_vf_enable_traffic(bp, vf);
2115
2116        /* queue protection table */
2117        for_each_vfq(vf, i)
2118                bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2119                                    vfq_qzone_id(vf, vfq_get(vf, i)), true);
2120
2121        vf->state = VF_ENABLED;
2122
2123        /* update vf bulletin board */
2124        bnx2x_post_vf_bulletin(bp, vf->index);
2125
2126        return 0;
2127}
2128
2129struct set_vf_state_cookie {
2130        struct bnx2x_virtf *vf;
2131        u8 state;
2132};
2133
2134static void bnx2x_set_vf_state(void *cookie)
2135{
2136        struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2137
2138        p->vf->state = p->state;
2139}
2140
2141int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2142{
2143        int rc = 0, i;
2144
2145        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2146
2147        /* Close all queues */
2148        for (i = 0; i < vf_rxq_count(vf); i++) {
2149                rc = bnx2x_vf_queue_teardown(bp, vf, i);
2150                if (rc)
2151                        goto op_err;
2152        }
2153
2154        /* disable the interrupts */
2155        DP(BNX2X_MSG_IOV, "disabling igu\n");
2156        bnx2x_vf_igu_disable(bp, vf);
2157
2158        /* disable the VF */
2159        DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2160        bnx2x_vf_clr_qtbl(bp, vf);
2161
2162        /* need to make sure there are no outstanding stats ramrods which may
2163         * cause the device to access the VF's stats buffer which it will free
2164         * as soon as we return from the close flow.
2165         */
2166        {
2167                struct set_vf_state_cookie cookie;
2168
2169                cookie.vf = vf;
2170                cookie.state = VF_ACQUIRED;
2171                rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2172                if (rc)
2173                        goto op_err;
2174        }
2175
2176        DP(BNX2X_MSG_IOV, "set state to acquired\n");
2177
2178        return 0;
2179op_err:
2180        BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
2181        return rc;
2182}
2183
2184/* VF release can be called when either: 1. the VF was acquired but
2185 * not enabled, or 2. the VF was enabled or in the process of being
2186 * enabled.
2187 */
2188int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
2189{
2190        int rc;
2191
2192        DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2193           vf->state == VF_FREE ? "Free" :
2194           vf->state == VF_ACQUIRED ? "Acquired" :
2195           vf->state == VF_ENABLED ? "Enabled" :
2196           vf->state == VF_RESET ? "Reset" :
2197           "Unknown");
2198
2199        switch (vf->state) {
2200        case VF_ENABLED:
2201                rc = bnx2x_vf_close(bp, vf);
2202                if (rc)
2203                        goto op_err;
2204                /* Fallthrough to release resources */
2205        case VF_ACQUIRED:
2206                DP(BNX2X_MSG_IOV, "about to free resources\n");
2207                bnx2x_vf_free_resc(bp, vf);
2208                break;
2209
2210        case VF_FREE:
2211        case VF_RESET:
2212        default:
2213                break;
2214        }
2215        return 0;
2216op_err:
2217        BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
2218        return rc;
2219}
2220
2221int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2222                        struct bnx2x_config_rss_params *rss)
2223{
2224        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2225        set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
2226        return bnx2x_config_rss(bp, rss);
2227}
2228
2229int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2230                        struct vfpf_tpa_tlv *tlv,
2231                        struct bnx2x_queue_update_tpa_params *params)
2232{
2233        aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
2234        struct bnx2x_queue_state_params qstate;
2235        int qid, rc = 0;
2236
2237        DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2238
2239        /* Set ramrod params */
2240        memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
2241        memcpy(&qstate.params.update_tpa, params,
2242               sizeof(struct bnx2x_queue_update_tpa_params));
2243        qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
2244        set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
2245
2246        for (qid = 0; qid < vf_rxq_count(vf); qid++) {
2247                qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
2248                qstate.params.update_tpa.sge_map = sge_addr[qid];
2249                DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
2250                   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
2251                   U64_LO(sge_addr[qid]));
2252                rc = bnx2x_queue_state_change(bp, &qstate);
2253                if (rc) {
2254                        BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
2255                                  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
2256                                  vf->abs_vfid, qid);
2257                        return rc;
2258                }
2259        }
2260
2261        return rc;
2262}
2263
2264/* VF release ~ VF close + VF release-resources
2265 * Release is the ultimate SW shutdown and is called whenever an
2266 * irrecoverable error is encountered.
2267 */
2268int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2269{
2270        int rc;
2271
2272        DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
2273        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2274
2275        rc = bnx2x_vf_free(bp, vf);
2276        if (rc)
2277                WARN(rc,
2278                     "VF[%d] Failed to release resources - rc=%d\n",
2279                     vf->abs_vfid, rc);
2280        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2281        return rc;
2282}
2283
2284void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2285                              enum channel_tlvs tlv)
2286{
2287        /* we don't lock the channel for unsupported tlvs */
2288        if (!bnx2x_tlv_supported(tlv)) {
2289                BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
2290                return;
2291        }
2292
2293        /* lock the channel */
2294        mutex_lock(&vf->op_mutex);
2295
2296        /* record the locking op */
2297        vf->op_current = tlv;
2298
2299        /* log the lock */
2300        DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2301           vf->abs_vfid, tlv);
2302}
2303
2304void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2305                                enum channel_tlvs expected_tlv)
2306{
2307        enum channel_tlvs current_tlv;
2308
2309        if (!vf) {
2310                BNX2X_ERR("VF was %p\n", vf);
2311                return;
2312        }
2313
2314        current_tlv = vf->op_current;
2315
2316        /* we don't unlock the channel for unsupported tlvs */
2317        if (!bnx2x_tlv_supported(expected_tlv))
2318                return;
2319
2320        WARN(expected_tlv != vf->op_current,
2321             "lock mismatch: expected %d found %d", expected_tlv,
2322             vf->op_current);
2323
2324        /* clear the locking op */
2325        vf->op_current = CHANNEL_TLV_NONE;
2326
2327        /* unlock the channel */
2328        mutex_unlock(&vf->op_mutex);
2329
2330        /* log the unlock */
2331        DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2332           vf->abs_vfid, current_tlv);
2333}
2334
2335static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
2336{
2337        struct bnx2x_queue_state_params q_params;
2338        u32 prev_flags;
2339        int i, rc;
2340
2341        /* Verify changes are needed and record current Tx switching state */
2342        prev_flags = bp->flags;
2343        if (enable)
2344                bp->flags |= TX_SWITCHING;
2345        else
2346                bp->flags &= ~TX_SWITCHING;
2347        if (prev_flags == bp->flags)
2348                return 0;
2349
2350        /* Verify state enables the sending of queue ramrods */
2351        if ((bp->state != BNX2X_STATE_OPEN) ||
2352            (bnx2x_get_q_logical_state(bp,
2353                                      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
2354             BNX2X_Q_LOGICAL_STATE_ACTIVE))
2355                return 0;
2356
2357        /* send q. update ramrod to configure Tx switching */
2358        memset(&q_params, 0, sizeof(q_params));
2359        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2360        q_params.cmd = BNX2X_Q_CMD_UPDATE;
2361        __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
2362                  &q_params.params.update.update_flags);
2363        if (enable)
2364                __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2365                          &q_params.params.update.update_flags);
2366        else
2367                __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2368                            &q_params.params.update.update_flags);
2369
2370        /* send the ramrod on all the queues of the PF */
2371        for_each_eth_queue(bp, i) {
2372                struct bnx2x_fastpath *fp = &bp->fp[i];
2373
2374                /* Set the appropriate Queue object */
2375                q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
2376
2377                /* Update the Queue state */
2378                rc = bnx2x_queue_state_change(bp, &q_params);
2379                if (rc) {
2380                        BNX2X_ERR("Failed to configure Tx switching\n");
2381                        return rc;
2382                }
2383        }
2384
2385        DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
2386        return 0;
2387}
2388
2389int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
2390{
2391        struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
2392
2393        if (!IS_SRIOV(bp)) {
2394                BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
2395                return -EINVAL;
2396        }
2397
2398        DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
2399           num_vfs_param, BNX2X_NR_VIRTFN(bp));
2400
2401        /* HW channel is only operational when PF is up */
2402        if (bp->state != BNX2X_STATE_OPEN) {
2403                BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
2404                return -EINVAL;
2405        }
2406
2407        /* we are always bound by the total_vfs in the configuration space */
2408        if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
2409                BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
2410                          num_vfs_param, BNX2X_NR_VIRTFN(bp));
2411                num_vfs_param = BNX2X_NR_VIRTFN(bp);
2412        }
2413
2414        bp->requested_nr_virtfn = num_vfs_param;
2415        if (num_vfs_param == 0) {
2416                bnx2x_set_pf_tx_switching(bp, false);
2417                bnx2x_disable_sriov(bp);
2418                return 0;
2419        } else {
2420                return bnx2x_enable_sriov(bp);
2421        }
2422}
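/* Editor's note (not part of the original source): bnx2x_sriov_configure() is
 * the driver's PCI sriov_configure callback, so it is normally invoked by the
 * PCI core when the administrator writes to sysfs, e.g.
 * "echo 2 > /sys/bus/pci/devices/<bdf>/sriov_numvfs"; writing 0 takes the
 * bnx2x_disable_sriov() path above.
 */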
2423
2424#define IGU_ENTRY_SIZE 4
2425
2426int bnx2x_enable_sriov(struct bnx2x *bp)
2427{
2428        int rc = 0, req_vfs = bp->requested_nr_virtfn;
2429        int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
2430        u32 igu_entry, address;
2431        u16 num_vf_queues;
2432
2433        if (req_vfs == 0)
2434                return 0;
2435
2436        first_vf = bp->vfdb->sriov.first_vf_in_pf;
2437
2438        /* statically distribute vf sb pool between VFs */
2439        num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
2440                              BP_VFDB(bp)->vf_sbs_pool / req_vfs);
2441
2442        /* zero previous values learned from igu cam */
2443        for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
2444                struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2445
2446                vf->sb_count = 0;
2447                vf_sb_count(BP_VF(bp, vf_idx)) = 0;
2448        }
2449        bp->vfdb->vf_sbs_pool = 0;
2450
2451        /* prepare IGU cam */
2452        sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
2453        address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
2454        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2455                for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
2456                        igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
2457                                vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
2458                                IGU_REG_MAPPING_MEMORY_VALID;
2459                        DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
2460                           sb_idx, vf_idx);
2461                        REG_WR(bp, address, igu_entry);
2462                        sb_idx++;
2463                        address += IGU_ENTRY_SIZE;
2464                }
2465        }
2466
2467        /* Reinitialize vf database according to igu cam */
2468        bnx2x_get_vf_igu_cam_info(bp);
2469
2470        DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
2471           BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
2472
2473        qcount = 0;
2474        for_each_vf(bp, vf_idx) {
2475                struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2476
2477                /* set local queue arrays */
2478                vf->vfqs = &bp->vfdb->vfqs[qcount];
2479                qcount += vf_sb_count(vf);
2480                bnx2x_iov_static_resc(bp, vf);
2481        }
2482
2483        /* prepare msix vectors in VF configuration space - the value in the
2484         * PCI configuration space should be the index of the last entry,
2485         * namely one less than the actual size of the table
2486         */
2487        for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2488                bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
2489                REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
2490                       num_vf_queues - 1);
2491                DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
2492                   vf_idx, num_vf_queues - 1);
2493        }
2494        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2495
2496        /* enable sriov. This will probe all the VFs, and consequently cause
2497         * the "acquire" messages to appear on the VF PF channel.
2498         */
2499        DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
2500        bnx2x_disable_sriov(bp);
2501
2502        rc = bnx2x_set_pf_tx_switching(bp, true);
2503        if (rc)
2504                return rc;
2505
2506        rc = pci_enable_sriov(bp->pdev, req_vfs);
2507        if (rc) {
2508                BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
2509                return rc;
2510        }
2511        DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
2512        return req_vfs;
2513}
2514
2515void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
2516{
2517        int vfidx;
2518        struct pf_vf_bulletin_content *bulletin;
2519
2520        DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
2521        for_each_vf(bp, vfidx) {
2522                bulletin = BP_VF_BULLETIN(bp, vfidx);
2523                if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2524                        bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
2525                                          htons(ETH_P_8021Q));
2526        }
2527}
2528
2529void bnx2x_disable_sriov(struct bnx2x *bp)
2530{
2531        if (pci_vfs_assigned(bp->pdev)) {
2532                DP(BNX2X_MSG_IOV,
2533                   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2534                return;
2535        }
2536
2537        pci_disable_sriov(bp->pdev);
2538}
2539
2540static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
2541                            struct bnx2x_virtf **vf,
2542                            struct pf_vf_bulletin_content **bulletin,
2543                            bool test_queue)
2544{
2545        if (bp->state != BNX2X_STATE_OPEN) {
2546                BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
2547                return -EINVAL;
2548        }
2549
2550        if (!IS_SRIOV(bp)) {
2551                BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
2552                return -EINVAL;
2553        }
2554
2555        if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
2556                BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
2557                          vfidx, BNX2X_NR_VIRTFN(bp));
2558                return -EINVAL;
2559        }
2560
2561        /* init members */
2562        *vf = BP_VF(bp, vfidx);
2563        *bulletin = BP_VF_BULLETIN(bp, vfidx);
2564
2565        if (!*vf) {
2566                BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
2567                return -EINVAL;
2568        }
2569
2570        if (test_queue && !(*vf)->vfqs) {
2571                BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
2572                          vfidx);
2573                return -EINVAL;
2574        }
2575
2576        if (!*bulletin) {
2577                BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
2578                          vfidx);
2579                return -EINVAL;
2580        }
2581
2582        return 0;
2583}
2584
2585int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2586                        struct ifla_vf_info *ivi)
2587{
2588        struct bnx2x *bp = netdev_priv(dev);
2589        struct bnx2x_virtf *vf = NULL;
2590        struct pf_vf_bulletin_content *bulletin = NULL;
2591        struct bnx2x_vlan_mac_obj *mac_obj;
2592        struct bnx2x_vlan_mac_obj *vlan_obj;
2593        int rc;
2594
2595        /* sanity and init */
2596        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2597        if (rc)
2598                return rc;
2599
2600        mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2601        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2602        if (!mac_obj || !vlan_obj) {
2603                BNX2X_ERR("VF partially initialized\n");
2604                return -EINVAL;
2605        }
2606
2607        ivi->vf = vfidx;
2608        ivi->qos = 0;
2609        ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2610        ivi->min_tx_rate = 0;
2611        ivi->spoofchk = 1; /* always enabled */
2612        if (vf->state == VF_ENABLED) {
2613                /* mac and vlan are in vlan_mac objects */
2614                if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
2615                        mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
2616                                                0, ETH_ALEN);
2617                        vlan_obj->get_n_elements(bp, vlan_obj, 1,
2618                                                 (u8 *)&ivi->vlan, 0,
2619                                                 VLAN_HLEN);
2620                }
2621        } else {
2622                mutex_lock(&bp->vfdb->bulletin_mutex);
2623                /* mac */
2624                if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
2625                        /* mac configured by ndo so it's in the bulletin board */
2626                        memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
2627                else
2628                        /* function has not been loaded yet. Show mac as 0s */
2629                        eth_zero_addr(ivi->mac);
2630
2631                /* vlan */
2632                if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2633                        /* vlan configured by ndo so it's in the bulletin board */
2634                        memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
2635                else
2636                        /* function has not been loaded yet. Show vlans as 0s */
2637                        memset(&ivi->vlan, 0, VLAN_HLEN);
2638
2639                mutex_unlock(&bp->vfdb->bulletin_mutex);
2640        }
2641
2642        return 0;
2643}
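/* Editor's note (not part of the original source): bnx2x_get_vf_config() is
 * registered as the .ndo_get_vf_config callback, so the MAC, VLAN and rate
 * values it fills in are what "ip link show <pf-netdev>" reports per VF.
 */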
2644
2645/* New mac for VF. Consider these cases:
2646 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
2647 *    supply at acquire.
2648 * 2. VF has already been acquired but has not yet initialized - store in local
2649 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
2650 *    will configure this mac when it is ready.
2651 * 3. VF has already initialized but has not yet setup a queue - post the new
2652 *    mac on VF's bulletin board right now. VF will configure this mac when it
2653 *    is ready.
2654 * 4. VF has already set a queue - delete any macs already configured for this
2655 *    queue and manually config the new mac.
2656 * In any event, once this function has been called refuse any attempts by the
2657 * VF to configure any mac for itself except for this mac. In case of a race
2658 * where the VF fails to see the new post on its bulletin board before sending a
2659 * mac configuration request, the PF will simply fail the request and VF can try
2660 * again after consulting its bulletin board.
2661 */
2662int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
2663{
2664        struct bnx2x *bp = netdev_priv(dev);
2665        int rc, q_logical_state;
2666        struct bnx2x_virtf *vf = NULL;
2667        struct pf_vf_bulletin_content *bulletin = NULL;
2668
2669        if (!is_valid_ether_addr(mac)) {
2670                BNX2X_ERR("mac address invalid\n");
2671                return -EINVAL;
2672        }
2673
2674        /* sanity and init */
2675        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2676        if (rc)
2677                return rc;
2678
2679        mutex_lock(&bp->vfdb->bulletin_mutex);
2680
2681        /* update PF's copy of the VF's bulletin. Will no longer accept mac
2682         * configuration requests from the vf unless they match this mac
2683         */
2684        bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
2685        memcpy(bulletin->mac, mac, ETH_ALEN);
2686
2687        /* Post update on VF's bulletin board */
2688        rc = bnx2x_post_vf_bulletin(bp, vfidx);
2689
2690        /* release lock before checking return code */
2691        mutex_unlock(&bp->vfdb->bulletin_mutex);
2692
2693        if (rc) {
2694                BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2695                return rc;
2696        }
2697
2698        q_logical_state =
2699                bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
2700        if (vf->state == VF_ENABLED &&
2701            q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
2702                /* configure the mac in device on this vf's queue */
2703                unsigned long ramrod_flags = 0;
2704                struct bnx2x_vlan_mac_obj *mac_obj;
2705
2706                /* User should be able to see failure reason in system logs */
2707                if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2708                        return -EINVAL;
2709
2710                /* must lock vfpf channel to protect against vf flows */
2711                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2712
2713                /* remove existing eth macs */
2714                mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2715                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
2716                if (rc) {
2717                        BNX2X_ERR("failed to delete eth macs\n");
2718                        rc = -EINVAL;
2719                        goto out;
2720                }
2721
2722                /* remove existing uc list macs */
2723                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
2724                if (rc) {
2725                        BNX2X_ERR("failed to delete uc_list macs\n");
2726                        rc = -EINVAL;
2727                        goto out;
2728                }
2729
2730                /* configure the new mac to device */
2731                __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2732                bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
2733                                  BNX2X_ETH_MAC, &ramrod_flags);
2734
2735out:
2736                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2737        }
2738
2739        return rc;
2740}
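/* Editor's note (not part of the original source): bnx2x_set_vf_mac() is the
 * .ndo_set_vf_mac hook, typically reached via
 * "ip link set dev <pf-netdev> vf <n> mac <addr>"; per the comment above,
 * once it runs the PF only honours MAC configuration requests from that VF
 * which match the administratively set address.
 */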
2741
2742static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
2743                                         struct bnx2x_virtf *vf, bool accept)
2744{
2745        struct bnx2x_rx_mode_ramrod_params rx_ramrod;
2746        unsigned long accept_flags;
2747
2748        /* need to remove/add the VF's accept_any_vlan bit */
2749        accept_flags = bnx2x_leading_vfq(vf, accept_flags);
2750        if (accept)
2751                set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2752        else
2753                clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2754
2755        bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
2756                              accept_flags);
2757        bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
2758        bnx2x_config_rx_mode(bp, &rx_ramrod);
2759}
2760
2761static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
2762                                    u16 vlan, bool add)
2763{
2764        struct bnx2x_vlan_mac_ramrod_params ramrod_param;
2765        unsigned long ramrod_flags = 0;
2766        int rc = 0;
2767
2768        /* configure the new vlan to device */
2769        memset(&ramrod_param, 0, sizeof(ramrod_param));
2770        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2771        ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2772        ramrod_param.ramrod_flags = ramrod_flags;
2773        ramrod_param.user_req.u.vlan.vlan = vlan;
2774        ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
2775                                        : BNX2X_VLAN_MAC_DEL;
2776        rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
2777        if (rc) {
2778                BNX2X_ERR("failed to configure vlan\n");
2779                return -EINVAL;
2780        }
2781
2782        return 0;
2783}
2784
2785int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
2786                      __be16 vlan_proto)
2787{
2788        struct pf_vf_bulletin_content *bulletin = NULL;
2789        struct bnx2x *bp = netdev_priv(dev);
2790        struct bnx2x_vlan_mac_obj *vlan_obj;
2791        unsigned long vlan_mac_flags = 0;
2792        unsigned long ramrod_flags = 0;
2793        struct bnx2x_virtf *vf = NULL;
2794        int i, rc;
2795
2796        if (vlan > 4095) {
2797                BNX2X_ERR("illegal vlan value %d\n", vlan);
2798                return -EINVAL;
2799        }
2800
2801        if (vlan_proto != htons(ETH_P_8021Q))
2802                return -EPROTONOSUPPORT;
2803
2804        DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
2805           vfidx, vlan, 0);
2806
2807        /* sanity and init */
2808        rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2809        if (rc)
2810                return rc;
2811
2812        /* update PF's copy of the VF's bulletin. No point in posting the vlan
2813         * to the VF since it doesn't have anything to do with it. But it is useful
2814         * to store it here in case the VF is not up yet and we can only
2815         * configure the vlan later when it does. Treat vlan id 0 as remove the
2816         * Host tag.
2817         */
2818        mutex_lock(&bp->vfdb->bulletin_mutex);
2819
2820        if (vlan > 0)
2821                bulletin->valid_bitmap |= 1 << VLAN_VALID;
2822        else
2823                bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
2824        bulletin->vlan = vlan;
2825
2826        /* Post update on VF's bulletin board */
2827        rc = bnx2x_post_vf_bulletin(bp, vfidx);
2828        if (rc)
2829                BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2830        mutex_unlock(&bp->vfdb->bulletin_mutex);
2831
2832        /* is vf initialized and queue set up? */
2833        if (vf->state != VF_ENABLED ||
2834            bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2835            BNX2X_Q_LOGICAL_STATE_ACTIVE)
2836                return rc;
2837
2838        /* The user should be able to see the error in the system logs */
2839        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2840                return -EINVAL;
2841
2842        /* must lock vfpf channel to protect against vf flows */
2843        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2844
2845        /* remove existing vlans */
2846        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2847        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2848        rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
2849                                  &ramrod_flags);
2850        if (rc) {
2851                BNX2X_ERR("failed to delete vlans\n");
2852                rc = -EINVAL;
2853                goto out;
2854        }
2855
2856        /* Clear accept_any_vlan when the hypervisor forces a vlan; otherwise
2857         * set it according to the VF's capabilities.
2858         */
2859        if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
2860                bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
2861
2862        rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
2863        if (rc)
2864                goto out;
2865
2866        /* send queue update ramrods to configure default vlan and
2867         * silent vlan removal
2868         */
2869        for_each_vfq(vf, i) {
2870                struct bnx2x_queue_state_params q_params = {NULL};
2871                struct bnx2x_queue_update_params *update_params;
2872
2873                q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
2874
2875                /* validate the Q is UP */
2876                if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
2877                    BNX2X_Q_LOGICAL_STATE_ACTIVE)
2878                        continue;
2879
2880                __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2881                q_params.cmd = BNX2X_Q_CMD_UPDATE;
2882                update_params = &q_params.params.update;
2883                __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
2884                          &update_params->update_flags);
2885                __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
2886                          &update_params->update_flags);
2887                if (vlan == 0) {
2888                        /* if vlan is 0 then we want to leave the VF traffic
2889                         * untagged, and leave the incoming traffic untouched
2890                         * (i.e. do not remove any vlan tags).
2891                         */
2892                        __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2893                                    &update_params->update_flags);
2894                        __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2895                                    &update_params->update_flags);
2896                } else {
2897                        /* configure the default vlan for the vf queue and set
2898                         * silent vlan removal (the vf remains unaware of this vlan).
2899                         */
2900                        __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2901                                  &update_params->update_flags);
2902                        __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2903                                  &update_params->update_flags);
2904                        update_params->def_vlan = vlan;
2905                        update_params->silent_removal_value =
2906                                vlan & VLAN_VID_MASK;
2907                        update_params->silent_removal_mask = VLAN_VID_MASK;
2908                }
2909
2910                /* Update the Queue state */
2911                rc = bnx2x_queue_state_change(bp, &q_params);
2912                if (rc) {
2913                        BNX2X_ERR("Failed to configure default VLAN queue %d\n",
2914                                  i);
2915                        goto out;
2916                }
2917        }
2918out:
2919        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2920
2921        if (!rc)
2922                DP(BNX2X_MSG_IOV,
2923                   "updated VF[%d] vlan configuration (vlan = %d)\n",
2924                   vfidx, vlan);
2925
2926        return rc;
2927}
2928
2929/* crc is the first field in the bulletin board. Compute the crc over the
2930 * entire bulletin board excluding the crc field itself. Use the length field
2931 * because the bulletin board was posted by a PF whose driver version may
2932 * differ from that of the VF which samples it; the length is therefore
2933 * computed by the PF and then used blindly by the VF.
2934 */
2935u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
2936{
2937        return crc32(BULLETIN_CRC_SEED,
2938                 ((u8 *)bulletin) + sizeof(bulletin->crc),
2939                 bulletin->length - sizeof(bulletin->crc));
2940}
2941
2942/* Check for new posts on the bulletin board */
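/* The bulletin is copied into a local shadow and the read is retried while
 * the crc does not match (the PF may be in the middle of a post); any new mac
 * address or link state is then applied on the VF side.
 */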
2943enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
2944{
2945        struct pf_vf_bulletin_content *bulletin;
2946        int attempts;
2947
2948        /* Sampling the structure in the middle of a post may yield corrupted
2949         * data; validate the crc to ensure coherency.
2950         */
2951        for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
2952                u32 crc;
2953
2954                /* sample the bulletin board */
2955                memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
2956                       sizeof(union pf_vf_bulletin));
2957
2958                crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
2959
2960                if (bp->shadow_bulletin.content.crc == crc)
2961                        break;
2962
2963                BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
2964                          bp->shadow_bulletin.content.crc, crc);
2965        }
2966
2967        if (attempts >= BULLETIN_ATTEMPTS) {
2968                BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
2969                          attempts);
2970                return PFVF_BULLETIN_CRC_ERR;
2971        }
2972        bulletin = &bp->shadow_bulletin.content;
2973
2974        /* bulletin board hasn't changed since last sample */
2975        if (bp->old_bulletin.version == bulletin->version)
2976                return PFVF_BULLETIN_UNCHANGED;
2977
2978        /* the mac address in the bulletin board is valid and is new */
2979        if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
2980            !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
2981                /* update new mac to net device */
2982                memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
2983        }
2984
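        /* Propagate the PF-reported link state into the VF's link_vars and
         * report it up the stack.
         */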
2985        if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
2986                DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
2987                   bulletin->link_speed, bulletin->link_flags);
2988
2989                bp->vf_link_vars.line_speed = bulletin->link_speed;
2990                bp->vf_link_vars.link_report_flags = 0;
2991                /* Link is down */
2992                if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
2993                        __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2994                                  &bp->vf_link_vars.link_report_flags);
2995                /* Full DUPLEX */
2996                if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
2997                        __set_bit(BNX2X_LINK_REPORT_FD,
2998                                  &bp->vf_link_vars.link_report_flags);
2999                /* Rx Flow Control is ON */
3000                if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
3001                        __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
3002                                  &bp->vf_link_vars.link_report_flags);
3003                /* Tx Flow Control is ON */
3004                if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
3005                        __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
3006                                  &bp->vf_link_vars.link_report_flags);
3007                __bnx2x_link_report(bp);
3008        }
3009
3010        /* copy new bulletin board to bp */
3011        memcpy(&bp->old_bulletin, bulletin,
3012               sizeof(struct pf_vf_bulletin_content));
3013
3014        return PFVF_BULLETIN_UPDATED;
3015}
3016
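/* Periodic SR-IOV housekeeping: sample the bulletin board and, if the PF has
 * marked the vf-pf channel as down, schedule handling of the channel-down
 * event.
 */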
3017void bnx2x_timer_sriov(struct bnx2x *bp)
3018{
3019        bnx2x_sample_bulletin(bp);
3020
3021        /* if the channel is down we need to self-destruct */
3022        if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
3023                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3024                                       BNX2X_MSG_IOV);
3025}
3026
3027void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3028{
3029        /* vf doorbells are embedded within the regview */
3030        return bp->regview + PXP_VF_ADDR_DB_START;
3031}
3032
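/* Free the DMA-coherent buffers used for the vf2pf mailbox and the pf2vf
 * bulletin board.
 */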
3033void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
3034{
3035        BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3036                       sizeof(struct bnx2x_vf_mbx_msg));
3037        BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3038                       sizeof(union pf_vf_bulletin));
3039}
3040
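/* Allocate the DMA-coherent vf2pf mailbox and pf2vf bulletin board used for
 * the VF <-> PF channel; on failure whatever was allocated is released again.
 */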
3041int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3042{
3043        mutex_init(&bp->vf2pf_mutex);
3044
3045        /* allocate vf2pf mailbox for vf to pf channel */
3046        bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3047                                         sizeof(struct bnx2x_vf_mbx_msg));
3048        if (!bp->vf2pf_mbox)
3049                goto alloc_mem_err;
3050
3051        /* allocate pf 2 vf bulletin board */
3052        bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3053                                             sizeof(union pf_vf_bulletin));
3054        if (!bp->pf2vf_bulletin)
3055                goto alloc_mem_err;
3056
3057        bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
3058
3059        return 0;
3060
3061alloc_mem_err:
3062        bnx2x_vf_pci_dealloc(bp);
3063        return -ENOMEM;
3064}
3065
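/* Set the channel-down bit in every VF's bulletin board and re-post the
 * bulletins so the VFs can react (see bnx2x_timer_sriov()).
 */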
3066void bnx2x_iov_channel_down(struct bnx2x *bp)
3067{
3068        int vf_idx;
3069        struct pf_vf_bulletin_content *bulletin;
3070
3071        if (!IS_SRIOV(bp))
3072                return;
3073
3074        for_each_vf(bp, vf_idx) {
3075                /* locate this VF's bulletin board and update the channel down
3076                 * bit
3077                 */
3078                bulletin = BP_VF_BULLETIN(bp, vf_idx);
3079                bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3080
3081                /* update vf bulletin board */
3082                bnx2x_post_vf_bulletin(bp, vf_idx);
3083        }
3084}
3085
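/* Deferred IOV work: handle any pending VF FLR events and VF->PF mailbox
 * messages that were flagged via bnx2x_schedule_iov_task().
 */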
3086void bnx2x_iov_task(struct work_struct *work)
3087{
3088        struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
3089
3090        if (!netif_running(bp->dev))
3091                return;
3092
3093        if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
3094                               &bp->iov_task_state))
3095                bnx2x_vf_handle_flr_event(bp);
3096
3097        if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
3098                               &bp->iov_task_state))
3099                bnx2x_vf_mbx(bp);
3100}
3101
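/* Request deferred handling of an IOV event: set the corresponding flag with
 * full memory barriers so it is visible before the work runs, then queue the
 * iov task without delay.
 */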
3102void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
3103{
3104        smp_mb__before_atomic();
3105        set_bit(flag, &bp->iov_task_state);
3106        smp_mb__after_atomic();
3107        DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3108        queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
3109}
3110