linux/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
   1/*******************************************************************************
   2 *
   3 * Intel Ethernet Controller XL710 Family Linux Driver
   4 * Copyright(c) 2013 - 2014 Intel Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms and conditions of the GNU General Public License,
   8 * version 2, as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope it will be useful, but WITHOUT
  11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13 * more details.
  14 *
  15 * You should have received a copy of the GNU General Public License along
  16 * with this program.  If not, see <http://www.gnu.org/licenses/>.
  17 *
  18 * The full GNU General Public License is included in this distribution in
  19 * the file called "COPYING".
  20 *
  21 * Contact Information:
  22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24 *
  25 ******************************************************************************/
  26
  27#include "i40e.h"
  28
  29/***********************misc routines*****************************/
  30
  31/**
  32 * i40e_vc_disable_vf
  33 * @pf: pointer to the pf info
  34 * @vf: pointer to the vf info
  35 *
  36 * Disable the VF through a SW reset
  37 **/
  38static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
  39{
  40        struct i40e_hw *hw = &pf->hw;
  41        u32 reg;
  42
  43        reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
  44        reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
  45        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
  46        i40e_flush(hw);
  47}
  48
  49/**
  50 * i40e_vc_isvalid_vsi_id
  51 * @vf: pointer to the vf info
  52 * @vsi_id: vf relative vsi id
  53 *
  54 * check for the valid vsi id
  55 **/
  56static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
  57{
  58        struct i40e_pf *pf = vf->pf;
  59
  60        return pf->vsi[vsi_id]->vf_id == vf->vf_id;
  61}
  62
  63/**
  64 * i40e_vc_isvalid_queue_id
  65 * @vf: pointer to the vf info
  66 * @vsi_id: vsi id
  67 * @qid: vsi relative queue id
  68 *
  69 * check for the valid queue id
  70 **/
  71static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
  72                                            u8 qid)
  73{
  74        struct i40e_pf *pf = vf->pf;
  75
  76        return qid < pf->vsi[vsi_id]->num_queue_pairs;
  77}
  78
  79/**
  80 * i40e_vc_isvalid_vector_id
  81 * @vf: pointer to the vf info
  82 * @vector_id: vf relative vector id
  83 *
  84 * check for the valid vector id
  85 **/
  86static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
  87{
  88        struct i40e_pf *pf = vf->pf;
  89
  90        return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
  91}
  92
  93/***********************vf resource mgmt routines*****************/
  94
  95/**
  96 * i40e_vc_get_pf_queue_id
  97 * @vf: pointer to the vf info
  98 * @vsi_idx: index of VSI in PF struct
  99 * @vsi_queue_id: vsi relative queue id
 100 *
 101 * return pf relative queue id
 102 **/
 103static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
 104                                   u8 vsi_queue_id)
 105{
 106        struct i40e_pf *pf = vf->pf;
 107        struct i40e_vsi *vsi = pf->vsi[vsi_idx];
 108        u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
 109
 110        if (le16_to_cpu(vsi->info.mapping_flags) &
 111            I40E_AQ_VSI_QUE_MAP_NONCONTIG)
 112                pf_queue_id =
 113                        le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
 114        else
 115                pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
 116                              vsi_queue_id;
 117
 118        return pf_queue_id;
 119}
 120
 121/**
 122 * i40e_config_irq_link_list
 123 * @vf: pointer to the vf info
 124 * @vsi_idx: index of VSI in PF struct
 125 * @vecmap: irq map info
 126 *
 127 * configure irq link list from the map
 128 **/
 129static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
 130                                      struct i40e_virtchnl_vector_map *vecmap)
 131{
 132        unsigned long linklistmap = 0, tempmap;
 133        struct i40e_pf *pf = vf->pf;
 134        struct i40e_hw *hw = &pf->hw;
 135        u16 vsi_queue_id, pf_queue_id;
 136        enum i40e_queue_type qtype;
 137        u16 next_q, vector_id;
 138        u32 reg, reg_idx;
 139        u16 itr_idx = 0;
 140
 141        vector_id = vecmap->vector_id;
 142        /* setup the head */
 143        if (0 == vector_id)
 144                reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 145        else
 146                reg_idx = I40E_VPINT_LNKLSTN(
 147                     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
 148                     (vector_id - 1));
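             /* Vector 0 is the VF's misc/mailbox vector and has its own
              * LNKLST0 register; vectors 1..N-1 share the LNKLSTN array,
              * which is laid out as (num_msix_vectors_vf - 1) consecutive
              * entries per VF, hence the index arithmetic above.
              */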
 149
 150        if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
 151                /* Special case - No queues mapped on this vector */
 152                wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
 153                goto irq_list_done;
 154        }
 155        tempmap = vecmap->rxq_map;
 156        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 157                linklistmap |= (1 <<
 158                                (I40E_VIRTCHNL_SUPPORTED_QTYPES *
 159                                 vsi_queue_id));
 160        }
 161
 162        tempmap = vecmap->txq_map;
 163        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 164                linklistmap |= (1 <<
 165                                (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
 166                                 + 1));
 167        }
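             /* linklistmap now interleaves the two queue types: bit
              * (q * I40E_VIRTCHNL_SUPPORTED_QTYPES) marks queue q's RX ring
              * and the next bit its TX ring.  Below, dividing a bit index by
              * the same macro recovers the VSI queue id and the remainder
              * gives the queue type.
              */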
 168
 169        next_q = find_first_bit(&linklistmap,
 170                                (I40E_MAX_VSI_QP *
 171                                 I40E_VIRTCHNL_SUPPORTED_QTYPES));
 172        vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
 173        qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
 174        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
 175        reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 176
 177        wr32(hw, reg_idx, reg);
 178
 179        while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
 180                switch (qtype) {
 181                case I40E_QUEUE_TYPE_RX:
 182                        reg_idx = I40E_QINT_RQCTL(pf_queue_id);
 183                        itr_idx = vecmap->rxitr_idx;
 184                        break;
 185                case I40E_QUEUE_TYPE_TX:
 186                        reg_idx = I40E_QINT_TQCTL(pf_queue_id);
 187                        itr_idx = vecmap->txitr_idx;
 188                        break;
 189                default:
 190                        break;
 191                }
 192
 193                next_q = find_next_bit(&linklistmap,
 194                                       (I40E_MAX_VSI_QP *
 195                                        I40E_VIRTCHNL_SUPPORTED_QTYPES),
 196                                       next_q + 1);
 197                if (next_q <
 198                    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
 199                        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 200                        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 201                        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
 202                                                              vsi_queue_id);
 203                } else {
 204                        pf_queue_id = I40E_QUEUE_END_OF_LIST;
 205                        qtype = 0;
 206                }
 207
 208                /* format for the RQCTL & TQCTL regs is same */
 209                reg = (vector_id) |
 210                    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
 211                    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
 212                    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
 213                    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
 214                wr32(hw, reg_idx, reg);
 215        }
 216
 217irq_list_done:
 218        i40e_flush(hw);
 219}
 220
 221/**
 222 * i40e_config_vsi_tx_queue
 223 * @vf: pointer to the vf info
 224 * @vsi_idx: index of VSI in PF struct
 225 * @vsi_queue_id: vsi relative queue index
 226 * @info: config. info
 227 *
 228 * configure tx queue
 229 **/
 230static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
 231                                    u16 vsi_queue_id,
 232                                    struct i40e_virtchnl_txq_info *info)
 233{
 234        struct i40e_pf *pf = vf->pf;
 235        struct i40e_hw *hw = &pf->hw;
 236        struct i40e_hmc_obj_txq tx_ctx;
 237        u16 pf_queue_id;
 238        u32 qtx_ctl;
 239        int ret = 0;
 240
 241        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
 242
 243        /* clear the context structure first */
 244        memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
 245
 246        /* only set the required fields */
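             /* the HMC queue context stores the ring base address in
              * 128-byte units, hence the divide by 128
              */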
 247        tx_ctx.base = info->dma_ring_addr / 128;
 248        tx_ctx.qlen = info->ring_len;
 249        tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
 250        tx_ctx.rdylist_act = 0;
 251        tx_ctx.head_wb_ena = info->headwb_enabled;
 252        tx_ctx.head_wb_addr = info->dma_headwb_addr;
 253
 254        /* clear the context in the HMC */
 255        ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
 256        if (ret) {
 257                dev_err(&pf->pdev->dev,
 258                        "Failed to clear VF LAN Tx queue context %d, error: %d\n",
 259                        pf_queue_id, ret);
 260                ret = -ENOENT;
 261                goto error_context;
 262        }
 263
 264        /* set the context in the HMC */
 265        ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
 266        if (ret) {
 267                dev_err(&pf->pdev->dev,
 268                        "Failed to set VF LAN Tx queue context %d error: %d\n",
 269                        pf_queue_id, ret);
 270                ret = -ENOENT;
 271                goto error_context;
 272        }
 273
 274        /* associate this queue with the PCI VF function */
 275        qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
 276        qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
 277                    & I40E_QTX_CTL_PF_INDX_MASK);
 278        qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
 279                     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
 280                    & I40E_QTX_CTL_VFVM_INDX_MASK);
 281        wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
 282        i40e_flush(hw);
 283
 284error_context:
 285        return ret;
 286}
 287
 288/**
 289 * i40e_config_vsi_rx_queue
 290 * @vf: pointer to the vf info
 291 * @vsi_idx: index of VSI in PF struct
 292 * @vsi_queue_id: vsi relative queue index
 293 * @info: config. info
 294 *
 295 * configure rx queue
 296 **/
 297static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
 298                                    u16 vsi_queue_id,
 299                                    struct i40e_virtchnl_rxq_info *info)
 300{
 301        struct i40e_pf *pf = vf->pf;
 302        struct i40e_hw *hw = &pf->hw;
 303        struct i40e_hmc_obj_rxq rx_ctx;
 304        u16 pf_queue_id;
 305        int ret = 0;
 306
 307        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
 308
 309        /* clear the context structure first */
 310        memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
 311
 312        /* only set the required fields */
 313        rx_ctx.base = info->dma_ring_addr / 128;
 314        rx_ctx.qlen = info->ring_len;
 315
 316        if (info->splithdr_enabled) {
 317                rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
 318                                  I40E_RX_SPLIT_IP      |
 319                                  I40E_RX_SPLIT_TCP_UDP |
 320                                  I40E_RX_SPLIT_SCTP;
 321                /* header length validation */
 322                if (info->hdr_size > ((2 * 1024) - 64)) {
 323                        ret = -EINVAL;
 324                        goto error_param;
 325                }
 326                rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 327
 328                /* set splitalways mode 10b */
 329                rx_ctx.dtype = 0x2;
 330        }
 331
 332        /* databuffer length validation */
 333        if (info->databuffer_size > ((16 * 1024) - 128)) {
 334                ret = -EINVAL;
 335                goto error_param;
 336        }
 337        rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
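             /* the HBUFF/DBUFF context fields are kept in the hardware's
              * buffer-size units; the shifts convert the byte counts
              * supplied by the VF
              */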
 338
 339        /* max pkt. length validation */
 340        if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
 341                ret = -EINVAL;
 342                goto error_param;
 343        }
 344        rx_ctx.rxmax = info->max_pkt_size;
 345
 346        /* enable 32bytes desc always */
 347        rx_ctx.dsize = 1;
 348
 349        /* default values */
 350        rx_ctx.lrxqthresh = 2;
 351        rx_ctx.crcstrip = 1;
 352        rx_ctx.prefena = 1;
 353
 354        /* clear the context in the HMC */
 355        ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
 356        if (ret) {
 357                dev_err(&pf->pdev->dev,
 358                        "Failed to clear VF LAN Rx queue context %d, error: %d\n",
 359                        pf_queue_id, ret);
 360                ret = -ENOENT;
 361                goto error_param;
 362        }
 363
 364        /* set the context in the HMC */
 365        ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
 366        if (ret) {
 367                dev_err(&pf->pdev->dev,
 368                        "Failed to set VF LAN Rx queue context %d error: %d\n",
 369                        pf_queue_id, ret);
 370                ret = -ENOENT;
 371                goto error_param;
 372        }
 373
 374error_param:
 375        return ret;
 376}
 377
 378/**
 379 * i40e_alloc_vsi_res
 380 * @vf: pointer to the vf info
 381 * @type: type of VSI to allocate
 382 *
 383 * alloc vf vsi context & resources
 384 **/
 385static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 386{
 387        struct i40e_mac_filter *f = NULL;
 388        struct i40e_pf *pf = vf->pf;
 389        struct i40e_vsi *vsi;
 390        int ret = 0;
 391
 392        vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
 393
 394        if (!vsi) {
 395                dev_err(&pf->pdev->dev,
 396                        "add vsi failed for vf %d, aq_err %d\n",
 397                        vf->vf_id, pf->hw.aq.asq_last_status);
 398                ret = -ENOENT;
 399                goto error_alloc_vsi_res;
 400        }
 401        if (type == I40E_VSI_SRIOV) {
 402                u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 403                vf->lan_vsi_index = vsi->idx;
 404                vf->lan_vsi_id = vsi->id;
 405                dev_info(&pf->pdev->dev,
 406                         "VF %d assigned LAN VSI index %d, VSI id %d\n",
 407                         vf->vf_id, vsi->idx, vsi->id);
 408                /* If the port VLAN has been configured and then the
 409                 * VF driver was removed then the VSI port VLAN
 410                 * configuration was destroyed.  Check if there is
 411                 * a port VLAN and restore the VSI configuration if
 412                 * needed.
 413                 */
 414                if (vf->port_vlan_id)
 415                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 416                f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
 417                                    vf->port_vlan_id, true, false);
 418                if (!f)
 419                        dev_info(&pf->pdev->dev,
 420                                 "Could not allocate VF MAC addr\n");
 421                f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
 422                                    true, false);
 423                if (!f)
 424                        dev_info(&pf->pdev->dev,
 425                                 "Could not allocate VF broadcast filter\n");
 426        }
 427
 428        /* program mac filter */
 429        ret = i40e_sync_vsi_filters(vsi);
 430        if (ret)
 431                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 432
 433        /* Set VF bandwidth if specified */
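             /* the admin queue command takes the limit in bandwidth
              * credits of 50 Mbps each, hence the divide by 50 below
              */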
 434        if (vf->tx_rate) {
 435                ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
 436                                                  vf->tx_rate / 50, 0, NULL);
 437                if (ret)
 438                        dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
 439                                vf->vf_id, ret);
 440        }
 441
 442error_alloc_vsi_res:
 443        return ret;
 444}
 445
 446/**
 447 * i40e_enable_vf_mappings
 448 * @vf: pointer to the vf info
 449 *
 450 * enable vf mappings
 451 **/
 452static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 453{
 454        struct i40e_pf *pf = vf->pf;
 455        struct i40e_hw *hw = &pf->hw;
 456        u32 reg, total_queue_pairs = 0;
 457        int j;
 458
 459        /* Tell the hardware we're using noncontiguous mapping. HW requires
 460         * that VF queues be mapped using this method, even when they are
 461         * contiguous in real life
 462         */
 463        wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
 464             I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
 465
 466        /* enable VF vplan_qtable mappings */
 467        reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
 468        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
 469
 470        /* map PF queues to VF queues */
 471        for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
 472                u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
 473                reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
 474                wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
 475                total_queue_pairs++;
 476        }
 477
 478        /* map PF queues to VSI */
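             /* each VSILAN_QTABLE register carries two queue indexes: the
              * even VSI queue in the low 16 bits and the odd one in the
              * high 16 bits; 0x7FF in a field marks it unused
              */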
 479        for (j = 0; j < 7; j++) {
 480                if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
 481                        reg = 0x07FF07FF;       /* unused */
 482                } else {
 483                        u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
 484                                                          j * 2);
 485                        reg = qid;
 486                        qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
 487                                                      (j * 2) + 1);
 488                        reg |= qid << 16;
 489                }
 490                wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
 491        }
 492
 493        i40e_flush(hw);
 494}
 495
 496/**
 497 * i40e_disable_vf_mappings
 498 * @vf: pointer to the vf info
 499 *
 500 * disable vf mappings
 501 **/
 502static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 503{
 504        struct i40e_pf *pf = vf->pf;
 505        struct i40e_hw *hw = &pf->hw;
 506        int i;
 507
 508        /* disable qp mappings */
 509        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
 510        for (i = 0; i < I40E_MAX_VSI_QP; i++)
 511                wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
 512                     I40E_QUEUE_END_OF_LIST);
 513        i40e_flush(hw);
 514}
 515
 516/**
 517 * i40e_free_vf_res
 518 * @vf: pointer to the vf info
 519 *
 520 * free vf resources
 521 **/
 522static void i40e_free_vf_res(struct i40e_vf *vf)
 523{
 524        struct i40e_pf *pf = vf->pf;
 525        struct i40e_hw *hw = &pf->hw;
 526        u32 reg_idx, reg;
 527        int i, msix_vf;
 528
 529        /* free vsi & disconnect it from the parent uplink */
 530        if (vf->lan_vsi_index) {
 531                i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
 532                vf->lan_vsi_index = 0;
 533                vf->lan_vsi_id = 0;
 534        }
 535        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 536
 537        /* disable interrupts so the VF starts in a known state */
 538        for (i = 0; i < msix_vf; i++) {
 539                /* format is same for both registers */
 540                if (0 == i)
 541                        reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
 542                else
 543                        reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
 544                                                      (vf->vf_id))
 545                                                     + (i - 1));
 546                wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
 547                i40e_flush(hw);
 548        }
 549
 550        /* clear the irq settings */
 551        for (i = 0; i < msix_vf; i++) {
 552                /* format is same for both registers */
 553                if (0 == i)
 554                        reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 555                else
 556                        reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
 557                                                      (vf->vf_id))
 558                                                     + (i - 1));
 559                reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
 560                       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
 561                wr32(hw, reg_idx, reg);
 562                i40e_flush(hw);
 563        }
  564        /* reset some of the state variables keeping
 565         * track of the resources
 566         */
 567        vf->num_queue_pairs = 0;
 568        vf->vf_states = 0;
 569}
 570
 571/**
 572 * i40e_alloc_vf_res
 573 * @vf: pointer to the vf info
 574 *
 575 * allocate vf resources
 576 **/
 577static int i40e_alloc_vf_res(struct i40e_vf *vf)
 578{
 579        struct i40e_pf *pf = vf->pf;
 580        int total_queue_pairs = 0;
 581        int ret;
 582
 583        /* allocate hw vsi context & associated resources */
 584        ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
 585        if (ret)
 586                goto error_alloc;
 587        total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
 588        set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 589
 590        /* store the total qps number for the runtime
 591         * vf req validation
 592         */
 593        vf->num_queue_pairs = total_queue_pairs;
 594
 595        /* vf is now completely initialized */
 596        set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 597
 598error_alloc:
 599        if (ret)
 600                i40e_free_vf_res(vf);
 601
 602        return ret;
 603}
 604
 605#define VF_DEVICE_STATUS 0xAA
 606#define VF_TRANS_PENDING_MASK 0x20
 607/**
 608 * i40e_quiesce_vf_pci
 609 * @vf: pointer to the vf structure
 610 *
 611 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 612 * if the transactions never clear.
 613 **/
 614static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
 615{
 616        struct i40e_pf *pf = vf->pf;
 617        struct i40e_hw *hw = &pf->hw;
 618        int vf_abs_id, i;
 619        u32 reg;
 620
 621        vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
 622
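             /* CIAA/CIAD are an indirect window into a VF's PCI config
              * space: write the config offset and VF number to CIAA, then
              * read the value back through CIAD.  The macros above suggest
              * offset 0xAA is the PCIe Device Status register, whose
              * Transactions Pending bit is 0x20.
              */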
 623        wr32(hw, I40E_PF_PCI_CIAA,
 624             VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
 625        for (i = 0; i < 100; i++) {
 626                reg = rd32(hw, I40E_PF_PCI_CIAD);
 627                if ((reg & VF_TRANS_PENDING_MASK) == 0)
 628                        return 0;
 629                udelay(1);
 630        }
 631        return -EIO;
 632}
 633
 634/**
 635 * i40e_reset_vf
 636 * @vf: pointer to the vf structure
 637 * @flr: VFLR was issued or not
 638 *
 639 * reset the vf
 640 **/
 641void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 642{
 643        struct i40e_pf *pf = vf->pf;
 644        struct i40e_hw *hw = &pf->hw;
 645        bool rsd = false;
 646        int i;
 647        u32 reg;
 648
 649        /* warn the VF */
 650        clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 651
 652        /* In the case of a VFLR, the HW has already reset the VF and we
 653         * just need to clean up, so don't hit the VFRTRIG register.
 654         */
 655        if (!flr) {
 656                /* reset vf using VPGEN_VFRTRIG reg */
 657                reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
 658                reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
 659                wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 660                i40e_flush(hw);
 661        }
 662
 663        if (i40e_quiesce_vf_pci(vf))
 664                dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
 665                        vf->vf_id);
 666
 667        /* poll VPGEN_VFRSTAT reg to make sure
 668         * that reset is complete
 669         */
 670        for (i = 0; i < 100; i++) {
 671                /* vf reset requires driver to first reset the
 672                 * vf and then poll the status register to make sure
 673                 * that the requested op was completed
 674                 * successfully
 675                 */
 676                udelay(10);
 677                reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
 678                if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
 679                        rsd = true;
 680                        break;
 681                }
 682        }
 683
 684        if (!rsd)
 685                dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
 686                        vf->vf_id);
 687        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
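             /* VFGEN_RSTAT1 is the PF's write access to the VF's reset
              * status register: COMPLETED here tells the VF driver the
              * hardware reset has finished; VFACTIVE, written at the end of
              * this function, tells it the PF has re-provisioned its
              * resources.
              */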
 688        /* clear the reset bit in the VPGEN_VFRTRIG reg */
 689        reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
 690        reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
 691        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 692
 693        /* On initial reset, we won't have any queues */
 694        if (vf->lan_vsi_index == 0)
 695                goto complete_reset;
 696
 697        i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
 698complete_reset:
 699        /* reallocate vf resources to reset the VSI state */
 700        i40e_free_vf_res(vf);
 701        i40e_alloc_vf_res(vf);
 702        i40e_enable_vf_mappings(vf);
 703        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 704
 705        /* tell the VF the reset is done */
 706        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
 707        i40e_flush(hw);
 708}
 709
 710/**
 711 * i40e_vfs_are_assigned
 712 * @pf: pointer to the pf structure
 713 *
 714 * Determine if any VFs are assigned to VMs
 715 **/
 716static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
 717{
 718        struct pci_dev *pdev = pf->pdev;
 719        struct pci_dev *vfdev;
 720
 721        /* loop through all the VFs to see if we own any that are assigned */
  722        vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF, NULL);
 723        while (vfdev) {
 724                /* if we don't own it we don't care */
 725                if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
 726                        /* if it is assigned we cannot release it */
 727                        if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
 728                                return true;
 729                }
 730
 731                vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
 732                                       I40E_DEV_ID_VF,
 733                                       vfdev);
 734        }
 735
 736        return false;
 737}
 738#ifdef CONFIG_PCI_IOV
 739
 740/**
 741 * i40e_enable_pf_switch_lb
 742 * @pf: pointer to the pf structure
 743 *
 744 * enable switch loop back or die - no point in a return value
 745 **/
 746static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
 747{
 748        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 749        struct i40e_vsi_context ctxt;
 750        int aq_ret;
 751
 752        ctxt.seid = pf->main_vsi_seid;
 753        ctxt.pf_num = pf->hw.pf_id;
 754        ctxt.vf_num = 0;
 755        aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
 756        if (aq_ret) {
 757                dev_info(&pf->pdev->dev,
 758                         "%s couldn't get pf vsi config, err %d, aq_err %d\n",
 759                         __func__, aq_ret, pf->hw.aq.asq_last_status);
 760                return;
 761        }
 762        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 763        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 764        ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 765
 766        aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 767        if (aq_ret) {
 768                dev_info(&pf->pdev->dev,
 769                         "%s: update vsi switch failed, aq_err=%d\n",
 770                         __func__, vsi->back->hw.aq.asq_last_status);
 771        }
 772}
 773#endif
 774
 775/**
 776 * i40e_disable_pf_switch_lb
 777 * @pf: pointer to the pf structure
 778 *
 779 * disable switch loop back or die - no point in a return value
 780 **/
 781static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
 782{
 783        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 784        struct i40e_vsi_context ctxt;
 785        int aq_ret;
 786
 787        ctxt.seid = pf->main_vsi_seid;
 788        ctxt.pf_num = pf->hw.pf_id;
 789        ctxt.vf_num = 0;
 790        aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
 791        if (aq_ret) {
 792                dev_info(&pf->pdev->dev,
 793                         "%s couldn't get pf vsi config, err %d, aq_err %d\n",
 794                         __func__, aq_ret, pf->hw.aq.asq_last_status);
 795                return;
 796        }
 797        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 798        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 799        ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 800
 801        aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 802        if (aq_ret) {
 803                dev_info(&pf->pdev->dev,
 804                         "%s: update vsi switch failed, aq_err=%d\n",
 805                         __func__, vsi->back->hw.aq.asq_last_status);
 806        }
 807}
 808
 809/**
 810 * i40e_free_vfs
 811 * @pf: pointer to the pf structure
 812 *
 813 * free vf resources
 814 **/
 815void i40e_free_vfs(struct i40e_pf *pf)
 816{
 817        struct i40e_hw *hw = &pf->hw;
 818        u32 reg_idx, bit_idx;
 819        int i, tmp, vf_id;
 820
 821        if (!pf->vf)
 822                return;
 823
 824        /* Disable interrupt 0 so we don't try to handle the VFLR. */
 825        i40e_irq_dynamic_disable_icr0(pf);
 826
 827        mdelay(10); /* let any messages in transit get finished up */
 828        /* free up vf resources */
 829        tmp = pf->num_alloc_vfs;
 830        pf->num_alloc_vfs = 0;
 831        for (i = 0; i < tmp; i++) {
 832                if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
 833                        i40e_free_vf_res(&pf->vf[i]);
 834                /* disable qp mappings */
 835                i40e_disable_vf_mappings(&pf->vf[i]);
 836        }
 837
 838        kfree(pf->vf);
 839        pf->vf = NULL;
 840
 841        /* This check is for when the driver is unloaded while VFs are
 842         * assigned. Setting the number of VFs to 0 through sysfs is caught
 843         * before this function ever gets called.
 844         */
 845        if (!i40e_vfs_are_assigned(pf)) {
 846                pci_disable_sriov(pf->pdev);
 847                /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
 848                 * work correctly when SR-IOV gets re-enabled.
 849                 */
 850                for (vf_id = 0; vf_id < tmp; vf_id++) {
 851                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
 852                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
 853                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
 854                }
 855                i40e_disable_pf_switch_lb(pf);
 856        } else {
 857                dev_warn(&pf->pdev->dev,
 858                         "unable to disable SR-IOV because VFs are assigned.\n");
 859        }
 860
 861        /* Re-enable interrupt 0. */
 862        i40e_irq_dynamic_enable_icr0(pf);
 863}
 864
 865#ifdef CONFIG_PCI_IOV
 866/**
 867 * i40e_alloc_vfs
 868 * @pf: pointer to the pf structure
 869 * @num_alloc_vfs: number of vfs to allocate
 870 *
 871 * allocate vf resources
 872 **/
 873int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 874{
 875        struct i40e_vf *vfs;
 876        int i, ret = 0;
 877
 878        /* Disable interrupt 0 so we don't try to handle the VFLR. */
 879        i40e_irq_dynamic_disable_icr0(pf);
 880
 881        /* Check to see if we're just allocating resources for extant VFs */
 882        if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
 883                ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
 884                if (ret) {
 885                        dev_err(&pf->pdev->dev,
 886                                "Failed to enable SR-IOV, error %d.\n", ret);
 887                        pf->num_alloc_vfs = 0;
 888                        goto err_iov;
 889                }
 890        }
 891        /* allocate memory */
 892        vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
 893        if (!vfs) {
 894                ret = -ENOMEM;
 895                goto err_alloc;
 896        }
 897        pf->vf = vfs;
 898
 899        /* apply default profile */
 900        for (i = 0; i < num_alloc_vfs; i++) {
 901                vfs[i].pf = pf;
 902                vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
 903                vfs[i].vf_id = i;
 904
 905                /* assign default capabilities */
 906                set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
 907                vfs[i].spoofchk = true;
 908                /* vf resources get allocated during reset */
 909                i40e_reset_vf(&vfs[i], false);
 910
 911                /* enable vf vplan_qtable mappings */
 912                i40e_enable_vf_mappings(&vfs[i]);
 913        }
 914        pf->num_alloc_vfs = num_alloc_vfs;
 915
 916        i40e_enable_pf_switch_lb(pf);
 917err_alloc:
 918        if (ret)
 919                i40e_free_vfs(pf);
 920err_iov:
 921        /* Re-enable interrupt 0. */
 922        i40e_irq_dynamic_enable_icr0(pf);
 923        return ret;
 924}
 925
 926#endif
 927/**
 928 * i40e_pci_sriov_enable
 929 * @pdev: pointer to a pci_dev structure
 930 * @num_vfs: number of vfs to allocate
 931 *
 932 * Enable or change the number of VFs
 933 **/
 934static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
 935{
 936#ifdef CONFIG_PCI_IOV
 937        struct i40e_pf *pf = pci_get_drvdata(pdev);
 938        int pre_existing_vfs = pci_num_vf(pdev);
 939        int err = 0;
 940
 941        dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
 942        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 943                i40e_free_vfs(pf);
 944        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
 945                goto out;
 946
 947        if (num_vfs > pf->num_req_vfs) {
 948                err = -EPERM;
 949                goto err_out;
 950        }
 951
 952        err = i40e_alloc_vfs(pf, num_vfs);
 953        if (err) {
 954                dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
 955                goto err_out;
 956        }
 957
 958out:
 959        return num_vfs;
 960
 961err_out:
 962        return err;
 963#endif
 964        return 0;
 965}
 966
 967/**
 968 * i40e_pci_sriov_configure
 969 * @pdev: pointer to a pci_dev structure
 970 * @num_vfs: number of vfs to allocate
 971 *
 972 * Enable or change the number of VFs. Called when the user updates the number
 973 * of VFs in sysfs.
 974 **/
 975int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 976{
 977        struct i40e_pf *pf = pci_get_drvdata(pdev);
 978
 979        if (num_vfs)
 980                return i40e_pci_sriov_enable(pdev, num_vfs);
 981
 982        if (!i40e_vfs_are_assigned(pf)) {
 983                i40e_free_vfs(pf);
 984        } else {
 985                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
 986                return -EINVAL;
 987        }
 988        return 0;
 989}
 990
 991/***********************virtual channel routines******************/
 992
 993/**
 994 * i40e_vc_send_msg_to_vf
 995 * @vf: pointer to the vf info
 996 * @v_opcode: virtual channel opcode
 997 * @v_retval: virtual channel return value
 998 * @msg: pointer to the msg buffer
 999 * @msglen: msg length
1000 *
1001 * send msg to vf
1002 **/
1003static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1004                                  u32 v_retval, u8 *msg, u16 msglen)
1005{
1006        struct i40e_pf *pf;
1007        struct i40e_hw *hw;
1008        int abs_vf_id;
1009        i40e_status aq_ret;
1010
1011        /* validate the request */
1012        if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1013                return -EINVAL;
1014
1015        pf = vf->pf;
1016        hw = &pf->hw;
1017        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
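             /* the admin queue addresses VFs by their absolute, device-wide
              * number, so offset the per-PF vf_id by the function's
              * vf_base_id
              */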
1018
1019        /* single place to detect unsuccessful return values */
1020        if (v_retval) {
1021                vf->num_invalid_msgs++;
1022                dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
1023                        v_opcode, v_retval);
1024                if (vf->num_invalid_msgs >
1025                    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1026                        dev_err(&pf->pdev->dev,
1027                                "Number of invalid messages exceeded for VF %d\n",
1028                                vf->vf_id);
1029                        dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1030                        set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
1031                }
1032        } else {
1033                vf->num_valid_msgs++;
1034        }
1035
1036        aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,  v_opcode, v_retval,
1037                                        msg, msglen, NULL);
1038        if (aq_ret) {
1039                dev_err(&pf->pdev->dev,
1040                        "Unable to send the message to VF %d aq_err %d\n",
1041                        vf->vf_id, pf->hw.aq.asq_last_status);
1042                return -EIO;
1043        }
1044
1045        return 0;
1046}
1047
1048/**
1049 * i40e_vc_send_resp_to_vf
1050 * @vf: pointer to the vf info
1051 * @opcode: operation code
1052 * @retval: return value
1053 *
1054 * send resp msg to vf
1055 **/
1056static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1057                                   enum i40e_virtchnl_ops opcode,
1058                                   i40e_status retval)
1059{
1060        return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1061}
1062
1063/**
1064 * i40e_vc_get_version_msg
1065 * @vf: pointer to the vf info
1066 *
1067 * called from the vf to request the API version used by the PF
1068 **/
1069static int i40e_vc_get_version_msg(struct i40e_vf *vf)
1070{
1071        struct i40e_virtchnl_version_info info = {
1072                I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
1073        };
1074
1075        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
1076                                      I40E_SUCCESS, (u8 *)&info,
1077                                      sizeof(struct
1078                                             i40e_virtchnl_version_info));
1079}
1080
1081/**
1082 * i40e_vc_get_vf_resources_msg
1083 * @vf: pointer to the vf info
1086 *
1087 * called from the vf to request its resources
1088 **/
1089static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
1090{
1091        struct i40e_virtchnl_vf_resource *vfres = NULL;
1092        struct i40e_pf *pf = vf->pf;
1093        i40e_status aq_ret = 0;
1094        struct i40e_vsi *vsi;
1095        int i = 0, len = 0;
1096        int num_vsis = 1;
1097        int ret;
1098
1099        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
1100                aq_ret = I40E_ERR_PARAM;
1101                goto err;
1102        }
1103
1104        len = (sizeof(struct i40e_virtchnl_vf_resource) +
1105               sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
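             /* size the reply buffer up front: the vf_resource struct plus
              * room for a vsi_resource entry per VSI, all sent in a single
              * virtchnl message below
              */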
1106
1107        vfres = kzalloc(len, GFP_KERNEL);
1108        if (!vfres) {
1109                aq_ret = I40E_ERR_NO_MEMORY;
1110                len = 0;
1111                goto err;
1112        }
1113
1114        vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
1115        vsi = pf->vsi[vf->lan_vsi_index];
1116        if (!vsi->info.pvid)
1117                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
1118
1119        vfres->num_vsis = num_vsis;
1120        vfres->num_queue_pairs = vf->num_queue_pairs;
1121        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1122        if (vf->lan_vsi_index) {
1123                vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
1124                vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
1125                vfres->vsi_res[i].num_queue_pairs =
1126                    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
1127                memcpy(vfres->vsi_res[i].default_mac_addr,
1128                       vf->default_lan_addr.addr, ETH_ALEN);
1129                i++;
1130        }
1131        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
1132
1133err:
1134        /* send the response back to the vf */
1135        ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
1136                                     aq_ret, (u8 *)vfres, len);
1137
1138        kfree(vfres);
1139        return ret;
1140}
1141
1142/**
1143 * i40e_vc_reset_vf_msg
1144 * @vf: pointer to the vf info
 1147 *
 1148 * called from the vf to reset itself;
 1149 * unlike other virtchnl messages, the pf driver
 1150 * doesn't send a response back to the vf
1151 **/
1152static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1153{
1154        if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
1155                i40e_reset_vf(vf, false);
1156}
1157
1158/**
1159 * i40e_vc_config_promiscuous_mode_msg
1160 * @vf: pointer to the vf info
1161 * @msg: pointer to the msg buffer
1162 * @msglen: msg length
1163 *
1164 * called from the vf to configure the promiscuous mode of
1165 * vf vsis
1166 **/
1167static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1168                                               u8 *msg, u16 msglen)
1169{
1170        struct i40e_virtchnl_promisc_info *info =
1171            (struct i40e_virtchnl_promisc_info *)msg;
1172        struct i40e_pf *pf = vf->pf;
1173        struct i40e_hw *hw = &pf->hw;
1174        struct i40e_vsi *vsi;
1175        bool allmulti = false;
1176        i40e_status aq_ret;
1177
1178        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1179            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1180            !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
1181            (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
1182                aq_ret = I40E_ERR_PARAM;
1183                goto error_param;
1184        }
1185        vsi = pf->vsi[info->vsi_id];
1186        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
1187                allmulti = true;
1188        aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1189                                                       allmulti, NULL);
1190
1191error_param:
1192        /* send the response to the vf */
1193        return i40e_vc_send_resp_to_vf(vf,
1194                                       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1195                                       aq_ret);
1196}
1197
1198/**
1199 * i40e_vc_config_queues_msg
1200 * @vf: pointer to the vf info
1201 * @msg: pointer to the msg buffer
1202 * @msglen: msg length
1203 *
1204 * called from the vf to configure the rx/tx
1205 * queues
1206 **/
1207static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1208{
1209        struct i40e_virtchnl_vsi_queue_config_info *qci =
1210            (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1211        struct i40e_virtchnl_queue_pair_info *qpi;
1212        u16 vsi_id, vsi_queue_id;
1213        i40e_status aq_ret = 0;
1214        int i;
1215
1216        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1217                aq_ret = I40E_ERR_PARAM;
1218                goto error_param;
1219        }
1220
1221        vsi_id = qci->vsi_id;
1222        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1223                aq_ret = I40E_ERR_PARAM;
1224                goto error_param;
1225        }
1226        for (i = 0; i < qci->num_queue_pairs; i++) {
1227                qpi = &qci->qpair[i];
1228                vsi_queue_id = qpi->txq.queue_id;
1229                if ((qpi->txq.vsi_id != vsi_id) ||
1230                    (qpi->rxq.vsi_id != vsi_id) ||
1231                    (qpi->rxq.queue_id != vsi_queue_id) ||
1232                    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
1233                        aq_ret = I40E_ERR_PARAM;
1234                        goto error_param;
1235                }
1236
1237                if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
1238                                             &qpi->rxq) ||
1239                    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
1240                                             &qpi->txq)) {
1241                        aq_ret = I40E_ERR_PARAM;
1242                        goto error_param;
1243                }
1244        }
1245
1246error_param:
1247        /* send the response to the vf */
1248        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1249                                       aq_ret);
1250}
1251
1252/**
1253 * i40e_vc_config_irq_map_msg
1254 * @vf: pointer to the vf info
1255 * @msg: pointer to the msg buffer
1256 * @msglen: msg length
1257 *
1258 * called from the vf to configure the irq to
1259 * queue map
1260 **/
1261static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1262{
1263        struct i40e_virtchnl_irq_map_info *irqmap_info =
1264            (struct i40e_virtchnl_irq_map_info *)msg;
1265        struct i40e_virtchnl_vector_map *map;
1266        u16 vsi_id, vsi_queue_id, vector_id;
1267        i40e_status aq_ret = 0;
1268        unsigned long tempmap;
1269        int i;
1270
1271        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1272                aq_ret = I40E_ERR_PARAM;
1273                goto error_param;
1274        }
1275
1276        for (i = 0; i < irqmap_info->num_vectors; i++) {
1277                map = &irqmap_info->vecmap[i];
1278
1279                vector_id = map->vector_id;
1280                vsi_id = map->vsi_id;
1281                /* validate msg params */
1282                if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
1283                    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1284                        aq_ret = I40E_ERR_PARAM;
1285                        goto error_param;
1286                }
1287
 1288                /* look out for an invalid queue index */
1289                tempmap = map->rxq_map;
1290                for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1291                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1292                                                      vsi_queue_id)) {
1293                                aq_ret = I40E_ERR_PARAM;
1294                                goto error_param;
1295                        }
1296                }
1297
1298                tempmap = map->txq_map;
1299                for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1300                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1301                                                      vsi_queue_id)) {
1302                                aq_ret = I40E_ERR_PARAM;
1303                                goto error_param;
1304                        }
1305                }
1306
1307                i40e_config_irq_link_list(vf, vsi_id, map);
1308        }
1309error_param:
1310        /* send the response to the vf */
1311        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
1312                                       aq_ret);
1313}
1314
1315/**
1316 * i40e_vc_enable_queues_msg
1317 * @vf: pointer to the vf info
1318 * @msg: pointer to the msg buffer
1319 * @msglen: msg length
1320 *
1321 * called from the vf to enable all or specific queue(s)
1322 **/
1323static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1324{
1325        struct i40e_virtchnl_queue_select *vqs =
1326            (struct i40e_virtchnl_queue_select *)msg;
1327        struct i40e_pf *pf = vf->pf;
1328        u16 vsi_id = vqs->vsi_id;
1329        i40e_status aq_ret = 0;
1330
1331        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1332                aq_ret = I40E_ERR_PARAM;
1333                goto error_param;
1334        }
1335
1336        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1337                aq_ret = I40E_ERR_PARAM;
1338                goto error_param;
1339        }
1340
1341        if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1342                aq_ret = I40E_ERR_PARAM;
1343                goto error_param;
1344        }
1345        if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
1346                aq_ret = I40E_ERR_TIMEOUT;
1347error_param:
1348        /* send the response to the vf */
1349        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
1350                                       aq_ret);
1351}
1352
1353/**
1354 * i40e_vc_disable_queues_msg
1355 * @vf: pointer to the vf info
1356 * @msg: pointer to the msg buffer
1357 * @msglen: msg length
1358 *
1359 * called from the vf to disable all or specific
1360 * queue(s)
1361 **/
1362static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1363{
1364        struct i40e_virtchnl_queue_select *vqs =
1365            (struct i40e_virtchnl_queue_select *)msg;
1366        struct i40e_pf *pf = vf->pf;
1367        u16 vsi_id = vqs->vsi_id;
1368        i40e_status aq_ret = 0;
1369
1370        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1371                aq_ret = I40E_ERR_PARAM;
1372                goto error_param;
1373        }
1374
1375        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1376                aq_ret = I40E_ERR_PARAM;
1377                goto error_param;
1378        }
1379
1380        if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1381                aq_ret = I40E_ERR_PARAM;
1382                goto error_param;
1383        }
1384        if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
1385                aq_ret = I40E_ERR_TIMEOUT;
1386
1387error_param:
1388        /* send the response to the vf */
1389        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1390                                       aq_ret);
1391}
1392
1393/**
1394 * i40e_vc_get_stats_msg
1395 * @vf: pointer to the vf info
1396 * @msg: pointer to the msg buffer
1397 * @msglen: msg length
1398 *
1399 * called from the vf to get vsi stats
1400 **/
1401static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1402{
1403        struct i40e_virtchnl_queue_select *vqs =
1404            (struct i40e_virtchnl_queue_select *)msg;
1405        struct i40e_pf *pf = vf->pf;
1406        struct i40e_eth_stats stats;
1407        i40e_status aq_ret = 0;
1408        struct i40e_vsi *vsi;
1409
1410        memset(&stats, 0, sizeof(struct i40e_eth_stats));
1411
1412        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1413                aq_ret = I40E_ERR_PARAM;
1414                goto error_param;
1415        }
1416
1417        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1418                aq_ret = I40E_ERR_PARAM;
1419                goto error_param;
1420        }
1421
1422        vsi = pf->vsi[vqs->vsi_id];
1423        if (!vsi) {
1424                aq_ret = I40E_ERR_PARAM;
1425                goto error_param;
1426        }
1427        i40e_update_eth_stats(vsi);
1428        stats = vsi->eth_stats;
1429
1430error_param:
1431        /* send the response back to the vf */
1432        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
1433                                      (u8 *)&stats, sizeof(stats));
1434}
1435
1436/**
1437 * i40e_check_vf_permission
1438 * @vf: pointer to the vf info
1439 * @macaddr: pointer to the MAC Address being checked
1440 *
1441 * Check if the VF has permission to add or delete unicast MAC address
1442 * filters and return error code -EPERM if not.  Then check if the
1443 * address filter requested is broadcast or zero and if so return
1444 * an invalid MAC address error code.
1445 **/
1446static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
1447{
1448        struct i40e_pf *pf = vf->pf;
1449        int ret = 0;
1450
1451        if (is_broadcast_ether_addr(macaddr) ||
1452                   is_zero_ether_addr(macaddr)) {
1453                dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
1454                ret = I40E_ERR_INVALID_MAC_ADDR;
1455        } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
1456                   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
1457                /* If the host VMM administrator has set the VF MAC address
1458                 * administratively via the ndo_set_vf_mac command then deny
1459                 * permission to the VF to add or delete unicast MAC addresses.
1460                 * The VF may request to set the MAC address filter already
1461                 * assigned to it so do not return an error in that case.
1462                 */
1463                dev_err(&pf->pdev->dev,
1464                        "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
1465                ret = -EPERM;
1466        }
1467        return ret;
1468}
1469
1470/**
1471 * i40e_vc_add_mac_addr_msg
1472 * @vf: pointer to the vf info
1473 * @msg: pointer to the msg buffer
1474 * @msglen: msg length
1475 *
1476 * add guest mac address filter
1477 **/
1478static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1479{
1480        struct i40e_virtchnl_ether_addr_list *al =
1481            (struct i40e_virtchnl_ether_addr_list *)msg;
1482        struct i40e_pf *pf = vf->pf;
1483        struct i40e_vsi *vsi = NULL;
1484        u16 vsi_id = al->vsi_id;
1485        i40e_status ret = 0;
1486        int i;
1487
1488        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1489            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1490            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1491                ret = I40E_ERR_PARAM;
1492                goto error_param;
1493        }
1494
1495        for (i = 0; i < al->num_elements; i++) {
1496                ret = i40e_check_vf_permission(vf, al->list[i].addr);
1497                if (ret)
1498                        goto error_param;
1499        }
1500        vsi = pf->vsi[vsi_id];
1501
1502        /* add new addresses to the list */
1503        for (i = 0; i < al->num_elements; i++) {
1504                struct i40e_mac_filter *f;
1505
1506                f = i40e_find_mac(vsi, al->list[i].addr, true, false);
1507                if (!f) {
1508                        if (i40e_is_vsi_in_vlan(vsi))
1509                                f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
1510                                                         true, false);
1511                        else
1512                                f = i40e_add_filter(vsi, al->list[i].addr, -1,
1513                                                    true, false);
1514                }
1515
1516                if (!f) {
1517                        dev_err(&pf->pdev->dev,
1518                                "Unable to add VF MAC filter\n");
1519                        ret = I40E_ERR_PARAM;
1520                        goto error_param;
1521                }
1522        }
1523
1524        /* program the updated filter list */
1525        if (i40e_sync_vsi_filters(vsi))
1526                dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1527
1528error_param:
1529        /* send the response to the vf */
1530        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1531                                       ret);
1532}
1533
1534/**
1535 * i40e_vc_del_mac_addr_msg
1536 * @vf: pointer to the vf info
1537 * @msg: pointer to the msg buffer
1538 * @msglen: msg length
1539 *
1540 * remove guest mac address filter
1541 **/
1542static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1543{
1544        struct i40e_virtchnl_ether_addr_list *al =
1545            (struct i40e_virtchnl_ether_addr_list *)msg;
1546        struct i40e_pf *pf = vf->pf;
1547        struct i40e_vsi *vsi = NULL;
1548        u16 vsi_id = al->vsi_id;
1549        i40e_status ret = 0;
1550        int i;
1551
1552        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1553            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1554            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1555                ret = I40E_ERR_PARAM;
1556                goto error_param;
1557        }
1558
1559        for (i = 0; i < al->num_elements; i++) {
1560                if (is_broadcast_ether_addr(al->list[i].addr) ||
1561                    is_zero_ether_addr(al->list[i].addr)) {
1562                        dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
1563                                al->list[i].addr);
1564                        ret = I40E_ERR_INVALID_MAC_ADDR;
1565                        goto error_param;
1566                }
1567        }
1568        vsi = pf->vsi[vsi_id];
1569
1570        /* delete addresses from the list */
1571        for (i = 0; i < al->num_elements; i++)
1572                i40e_del_filter(vsi, al->list[i].addr,
1573                                I40E_VLAN_ANY, true, false);
1574
1575        /* program the updated filter list */
1576        if (i40e_sync_vsi_filters(vsi))
1577                dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1578
1579error_param:
1580        /* send the response to the vf */
1581        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1582                                       ret);
1583}
1584
1585/**
1586 * i40e_vc_add_vlan_msg
1587 * @vf: pointer to the vf info
1588 * @msg: pointer to the msg buffer
1589 * @msglen: msg length
1590 *
1591 * program guest vlan id
1592 **/
1593static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1594{
1595        struct i40e_virtchnl_vlan_filter_list *vfl =
1596            (struct i40e_virtchnl_vlan_filter_list *)msg;
1597        struct i40e_pf *pf = vf->pf;
1598        struct i40e_vsi *vsi = NULL;
1599        u16 vsi_id = vfl->vsi_id;
1600        i40e_status aq_ret = 0;
1601        int i;
1602
1603        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1604            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1605            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1606                aq_ret = I40E_ERR_PARAM;
1607                goto error_param;
1608        }
1609
1610        for (i = 0; i < vfl->num_elements; i++) {
1611                if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1612                        aq_ret = I40E_ERR_PARAM;
1613                        dev_err(&pf->pdev->dev,
1614                                "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
1615                        goto error_param;
1616                }
1617        }
1618        vsi = pf->vsi[vsi_id];
1619        if (vsi->info.pvid) {
1620                aq_ret = I40E_ERR_PARAM;
1621                goto error_param;
1622        }
1623
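            /* Requests are rejected above when a port VLAN (PVID) is set,
             * since the PVID owns the VLAN configuration.  For VF-managed
             * VLANs, enable stripping on the VSI and program each filter.
             */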
1624        i40e_vlan_stripping_enable(vsi);
1625        for (i = 0; i < vfl->num_elements; i++) {
1626                /* add new VLAN filter */
1627                int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
1628                if (ret)
1629                        dev_err(&pf->pdev->dev,
1630                                "Unable to add VF vlan filter %d, error %d\n",
1631                                vfl->vlan_id[i], ret);
1632        }
1633
1634error_param:
1635        /* send the response to the vf */
1636        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
1637}
1638
1639/**
1640 * i40e_vc_remove_vlan_msg
1641 * @vf: pointer to the vf info
1642 * @msg: pointer to the msg buffer
1643 * @msglen: msg length
1644 *
1645 * remove programmed guest vlan id
1646 **/
1647static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1648{
1649        struct i40e_virtchnl_vlan_filter_list *vfl =
1650            (struct i40e_virtchnl_vlan_filter_list *)msg;
1651        struct i40e_pf *pf = vf->pf;
1652        struct i40e_vsi *vsi = NULL;
1653        u16 vsi_id = vfl->vsi_id;
1654        i40e_status aq_ret = 0;
1655        int i;
1656
1657        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1658            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1659            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1660                aq_ret = I40E_ERR_PARAM;
1661                goto error_param;
1662        }
1663
1664        for (i = 0; i < vfl->num_elements; i++) {
1665                if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1666                        aq_ret = I40E_ERR_PARAM;
1667                        goto error_param;
1668                }
1669        }
1670
1671        vsi = pf->vsi[vsi_id];
1672        if (vsi->info.pvid) {
1673                aq_ret = I40E_ERR_PARAM;
1674                goto error_param;
1675        }
1676
1677        for (i = 0; i < vfl->num_elements; i++) {
1678                int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
1679                if (ret)
1680                        dev_err(&pf->pdev->dev,
1681                                "Unable to delete VF vlan filter %d, error %d\n",
1682                                vfl->vlan_id[i], ret);
1683        }
1684
1685error_param:
1686        /* send the response to the vf */
1687        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
1688}
1689
1690/**
1691 * i40e_vc_validate_vf_msg
1692 * @vf: pointer to the vf info
1693 * @v_opcode: virtchnl operation code
1694 * @v_retval: return value
1695 * @msg: pointer to the msg buffer
     * @msglen: msg length
1696 *
1697 * validate msg
1698 **/
1699static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
1700                                   u32 v_retval, u8 *msg, u16 msglen)
1701{
1702        bool err_msg_format = false;
1703        int valid_len;
1704
1705        /* Check if VF is disabled. */
1706        if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
1707                return I40E_ERR_PARAM;
1708
1709        /* Validate message length. */
1710        switch (v_opcode) {
1711        case I40E_VIRTCHNL_OP_VERSION:
1712                valid_len = sizeof(struct i40e_virtchnl_version_info);
1713                break;
1714        case I40E_VIRTCHNL_OP_RESET_VF:
1715        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1716                valid_len = 0;
1717                break;
1718        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1719                valid_len = sizeof(struct i40e_virtchnl_txq_info);
1720                break;
1721        case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1722                valid_len = sizeof(struct i40e_virtchnl_rxq_info);
1723                break;
1724        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1725                valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
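                    /* Variable-length message: the fixed header carries an
                     * element count followed by an array, so grow the
                     * expected length by the per-element size and reject an
                     * empty request.
                     */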
1726                if (msglen >= valid_len) {
1727                        struct i40e_virtchnl_vsi_queue_config_info *vqc =
1728                            (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1729                        valid_len += (vqc->num_queue_pairs *
1730                                      sizeof(struct
1731                                             i40e_virtchnl_queue_pair_info));
1732                        if (vqc->num_queue_pairs == 0)
1733                                err_msg_format = true;
1734                }
1735                break;
1736        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1737                valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
1738                if (msglen >= valid_len) {
1739                        struct i40e_virtchnl_irq_map_info *vimi =
1740                            (struct i40e_virtchnl_irq_map_info *)msg;
1741                        valid_len += (vimi->num_vectors *
1742                                      sizeof(struct i40e_virtchnl_vector_map));
1743                        if (vimi->num_vectors == 0)
1744                                err_msg_format = true;
1745                }
1746                break;
1747        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1748        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1749                valid_len = sizeof(struct i40e_virtchnl_queue_select);
1750                break;
1751        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1752        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1753                valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
1754                if (msglen >= valid_len) {
1755                        struct i40e_virtchnl_ether_addr_list *veal =
1756                            (struct i40e_virtchnl_ether_addr_list *)msg;
1757                        valid_len += veal->num_elements *
1758                            sizeof(struct i40e_virtchnl_ether_addr);
1759                        if (veal->num_elements == 0)
1760                                err_msg_format = true;
1761                }
1762                break;
1763        case I40E_VIRTCHNL_OP_ADD_VLAN:
1764        case I40E_VIRTCHNL_OP_DEL_VLAN:
1765                valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
1766                if (msglen >= valid_len) {
1767                        struct i40e_virtchnl_vlan_filter_list *vfl =
1768                            (struct i40e_virtchnl_vlan_filter_list *)msg;
1769                        valid_len += vfl->num_elements * sizeof(u16);
1770                        if (vfl->num_elements == 0)
1771                                err_msg_format = true;
1772                }
1773                break;
1774        case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1775                valid_len = sizeof(struct i40e_virtchnl_promisc_info);
1776                break;
1777        case I40E_VIRTCHNL_OP_GET_STATS:
1778                valid_len = sizeof(struct i40e_virtchnl_queue_select);
1779                break;
1780        /* These are always errors coming from the VF. */
1781        case I40E_VIRTCHNL_OP_EVENT:
1782        case I40E_VIRTCHNL_OP_UNKNOWN:
1783        default:
1784                return -EPERM;
1786        }
1787        /* few more checks */
1788        if ((valid_len != msglen) || (err_msg_format)) {
1789                i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
1790                return -EINVAL;
1791        } else {
1792                return 0;
1793        }
1794}
1795
1796/**
1797 * i40e_vc_process_vf_msg
1798 * @pf: pointer to the pf structure
1799 * @vf_id: source vf id
1800 * @v_opcode: virtchnl operation code
1801 * @v_retval: return value
1802 * @msg: pointer to the msg buffer
     * @msglen: msg length
1803 *
1804 * called from the common aeq/arq handler to
1805 * process request from vf
1806 **/
1807int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
1808                           u32 v_retval, u8 *msg, u16 msglen)
1809{
1810        struct i40e_hw *hw = &pf->hw;
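            /* The ARQ event carries the absolute VF id; convert it to an
             * index relative to this PF before range checking and indexing
             * pf->vf[].
             */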
1811        unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
1812        struct i40e_vf *vf;
1813        int ret;
1814
1815        pf->vf_aq_requests++;
1816        if (local_vf_id >= pf->num_alloc_vfs)
1817                return -EINVAL;
1818        vf = &(pf->vf[local_vf_id]);
1819        /* perform basic checks on the msg */
1820        ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
1821
1822        if (ret) {
1823                dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
1824                        local_vf_id, v_opcode, msglen);
1825                return ret;
1826        }
1827
1828        switch (v_opcode) {
1829        case I40E_VIRTCHNL_OP_VERSION:
1830                ret = i40e_vc_get_version_msg(vf);
1831                break;
1832        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1833                ret = i40e_vc_get_vf_resources_msg(vf);
1834                break;
1835        case I40E_VIRTCHNL_OP_RESET_VF:
1836                i40e_vc_reset_vf_msg(vf);
1837                ret = 0;
1838                break;
1839        case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1840                ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
1841                break;
1842        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1843                ret = i40e_vc_config_queues_msg(vf, msg, msglen);
1844                break;
1845        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1846                ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
1847                break;
1848        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1849                ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
1850                break;
1851        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1852                ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
1853                break;
1854        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1855                ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
1856                break;
1857        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1858                ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
1859                break;
1860        case I40E_VIRTCHNL_OP_ADD_VLAN:
1861                ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
1862                break;
1863        case I40E_VIRTCHNL_OP_DEL_VLAN:
1864                ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
1865                break;
1866        case I40E_VIRTCHNL_OP_GET_STATS:
1867                ret = i40e_vc_get_stats_msg(vf, msg, msglen);
1868                break;
1869        case I40E_VIRTCHNL_OP_UNKNOWN:
1870        default:
1871                dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
1872                        v_opcode, local_vf_id);
1873                ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
1874                                              I40E_ERR_NOT_IMPLEMENTED);
1875                break;
1876        }
1877
1878        return ret;
1879}
1880
1881/**
1882 * i40e_vc_process_vflr_event
1883 * @pf: pointer to the pf structure
1884 *
1885 * called from the VFLR irq handler to
1886 * free up vf resources and state variables
1887 **/
1888int i40e_vc_process_vflr_event(struct i40e_pf *pf)
1889{
1890        u32 reg, reg_idx, bit_idx, vf_id;
1891        struct i40e_hw *hw = &pf->hw;
1892        struct i40e_vf *vf;
1893
1894        if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
1895                return 0;
1896
1897        clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
1898        for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
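                    /* GLGEN_VFLRSTAT is an array of 32-bit registers with one
                     * bit per absolute VF id, so derive the register index and
                     * bit position from this VF's absolute id.
                     */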
1899                reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1900                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1901                /* read GLGEN_VFLRSTAT register to find out the flr vfs */
1902                vf = &pf->vf[vf_id];
1903                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
1904                if (reg & (1 << bit_idx)) {
1905                        /* clear the bit in GLGEN_VFLRSTAT */
1906                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
1907
1908                        if (!test_bit(__I40E_DOWN, &pf->state))
1909                                i40e_reset_vf(vf, true);
1910                }
1911        }
1912
1913        /* re-enable vflr interrupt cause */
1914        reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1915        reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1916        wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1917        i40e_flush(hw);
1918
1919        return 0;
1920}
1921
1922/**
1923 * i40e_vc_vf_broadcast
1924 * @pf: pointer to the pf structure
1925 * @v_opcode: operation code
1926 * @v_retval: return value
1927 * @msg: pointer to the msg buffer
1928 * @msglen: msg length
1929 *
1930 * send a message to all VFs on a given PF
1931 **/
1932static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
1933                                 enum i40e_virtchnl_ops v_opcode,
1934                                 i40e_status v_retval, u8 *msg,
1935                                 u16 msglen)
1936{
1937        struct i40e_hw *hw = &pf->hw;
1938        struct i40e_vf *vf = pf->vf;
1939        int i;
1940
1941        for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
1942                int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1943                /* Not all vfs are enabled so skip the ones that are not */
1944                if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
1945                    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
1946                        continue;
1947
1948                /* Ignore return value on purpose - a given VF may fail, but
1949                 * we need to keep going and send to all of them
1950                 */
1951                i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1952                                       msg, msglen, NULL);
1953        }
1954}
1955
1956/**
1957 * i40e_vc_notify_link_state
1958 * @pf: pointer to the pf structure
1959 *
1960 * send a link status message to all VFs on a given PF
1961 **/
1962void i40e_vc_notify_link_state(struct i40e_pf *pf)
1963{
1964        struct i40e_virtchnl_pf_event pfe;
1965        struct i40e_hw *hw = &pf->hw;
1966        struct i40e_vf *vf = pf->vf;
1967        struct i40e_link_status *ls = &pf->hw.phy.link_info;
1968        int i;
1969
1970        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
1971        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
1972        for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
1973                int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
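                    /* When the host has forced this VF's link state, report
                     * that state (advertising 40G when up) rather than the
                     * physical link status.
                     */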
1974                if (vf->link_forced) {
1975                        pfe.event_data.link_event.link_status = vf->link_up;
1976                        pfe.event_data.link_event.link_speed =
1977                                (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
1978                } else {
1979                        pfe.event_data.link_event.link_status =
1980                                ls->link_info & I40E_AQ_LINK_UP;
1981                        pfe.event_data.link_event.link_speed = ls->link_speed;
1982                }
1983                i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
1984                                       0, (u8 *)&pfe, sizeof(pfe),
1985                                       NULL);
1986        }
1987}
1988
1989/**
1990 * i40e_vc_notify_reset
1991 * @pf: pointer to the pf structure
1992 *
1993 * indicate a pending reset to all VFs on a given PF
1994 **/
1995void i40e_vc_notify_reset(struct i40e_pf *pf)
1996{
1997        struct i40e_virtchnl_pf_event pfe;
1998
1999        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
2000        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
2001        i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
2002                             (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
2003}
2004
2005/**
2006 * i40e_vc_notify_vf_reset
2007 * @vf: pointer to the vf structure
2008 *
2009 * indicate a pending reset to the given VF
2010 **/
2011void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
2012{
2013        struct i40e_virtchnl_pf_event pfe;
2014        int abs_vf_id;
2015
2016        /* validate the request */
2017        if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
2018                return;
2019
2020        /* verify if the VF is in either init or active before proceeding */
2021        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
2022            !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
2023                return;
2024
2025        abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
2026
2027        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
2028        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
2029        i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
2030                               I40E_SUCCESS, (u8 *)&pfe,
2031                               sizeof(struct i40e_virtchnl_pf_event), NULL);
2032}
2033
2034/**
2035 * i40e_ndo_set_vf_mac
2036 * @netdev: network interface device structure
2037 * @vf_id: vf identifier
2038 * @mac: mac address
2039 *
2040 * program vf mac address
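     * Typically reached from the host via "ip link set <pf> vf <vf_id> mac <addr>".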
2041 **/
2042int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2043{
2044        struct i40e_netdev_priv *np = netdev_priv(netdev);
2045        struct i40e_vsi *vsi = np->vsi;
2046        struct i40e_pf *pf = vsi->back;
2047        struct i40e_mac_filter *f;
2048        struct i40e_vf *vf;
2049        int ret = 0;
2050
2051        /* validate the request */
2052        if (vf_id >= pf->num_alloc_vfs) {
2053                dev_err(&pf->pdev->dev,
2054                        "Invalid VF Identifier %d\n", vf_id);
2055                ret = -EINVAL;
2056                goto error_param;
2057        }
2058
2059        vf = &(pf->vf[vf_id]);
2060        vsi = pf->vsi[vf->lan_vsi_index];
2061        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2062                dev_err(&pf->pdev->dev,
2063                        "Uninitialized VF %d\n", vf_id);
2064                ret = -EINVAL;
2065                goto error_param;
2066        }
2067
2068        if (!is_valid_ether_addr(mac)) {
2069                dev_err(&pf->pdev->dev,
2070                        "Invalid VF ethernet address\n");
2071                ret = -EINVAL;
2072                goto error_param;
2073        }
2074
2075        /* delete the temporary mac address */
2076        i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
2077                        true, false);
2078
2079        /* Delete all the filters for this VSI - we're going to kill it
2080         * anyway.
2081         */
2082        list_for_each_entry(f, &vsi->mac_filter_list, list)
2083                i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
2084
2085        dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
2086        /* program mac filter */
2087        if (i40e_sync_vsi_filters(vsi)) {
2088                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
2089                ret = -EIO;
2090                goto error_param;
2091        }
2092        ether_addr_copy(vf->default_lan_addr.addr, mac);
2093        vf->pf_set_mac = true;
2094        /* Force the VF driver stop so it has to reload with new MAC address */
2095        i40e_vc_disable_vf(pf, vf);
2096        dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2097        ret = 0;
2098
2099error_param:
2100        return ret;
2101}
2102
2103/**
2104 * i40e_ndo_set_vf_port_vlan
2105 * @netdev: network interface device structure
2106 * @vf_id: vf identifier
2107 * @vlan_id: VLAN id to be set on the VF
2108 * @qos: priority setting
2109 *
2110 * program vf vlan id and/or qos
2111 **/
2112int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
2113                              int vf_id, u16 vlan_id, u8 qos)
2114{
2115        struct i40e_netdev_priv *np = netdev_priv(netdev);
2116        struct i40e_pf *pf = np->vsi->back;
2117        struct i40e_vsi *vsi;
2118        struct i40e_vf *vf;
2119        int ret = 0;
2120
2121        /* validate the request */
2122        if (vf_id >= pf->num_alloc_vfs) {
2123                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2124                ret = -EINVAL;
2125                goto error_pvid;
2126        }
2127
2128        if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
2129                dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2130                ret = -EINVAL;
2131                goto error_pvid;
2132        }
2133
2134        vf = &(pf->vf[vf_id]);
2135        vsi = pf->vsi[vf->lan_vsi_index];
2136        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2137                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2138                ret = -EINVAL;
2139                goto error_pvid;
2140        }
2141
2142        if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
2143                dev_err(&pf->pdev->dev,
2144                        "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2145                        vf_id);
2146                /* Administrator error - knock the VF offline until the
2147                 * administrator reconfigures the network correctly and
2148                 * then reloads the VF driver.
2149                 */
2150                i40e_vc_disable_vf(pf, vf);
2151        }
2152
2153        /* Check for condition where there was already a port VLAN ID
2154         * filter set and now it is being deleted by setting it to zero.
2155         * Additionally check for the condition where there was a port
2156         * VLAN but now there is a new and different port VLAN being set.
2157         * Before deleting all the old VLAN filters we must add new ones
2158         * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
2159         * MAC addresses deleted.
2160         */
2161        if ((!(vlan_id || qos) ||
2162            (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
2163            vsi->info.pvid)
2164                ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
2165
2166        if (vsi->info.pvid) {
2167                /* kill old VLAN */
2168                ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2169                                               VLAN_VID_MASK));
2170                if (ret) {
2171                        dev_info(&vsi->back->pdev->dev,
2172                                 "remove VLAN failed, ret=%d, aq_err=%d\n",
2173                                 ret, pf->hw.aq.asq_last_status);
2174                }
2175        }
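            /* The PVID packs the 12-bit VLAN id in the low bits with the
             * qos/priority value shifted up by I40E_VLAN_PRIORITY_SHIFT,
             * matching the 802.1Q TCI layout.
             */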
2176        if (vlan_id || qos)
2177                ret = i40e_vsi_add_pvid(vsi,
2178                                vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
2179        else
2180                i40e_vsi_remove_pvid(vsi);
2181
2182        if (vlan_id) {
2183                dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2184                         vlan_id, qos, vf_id);
2185
2186                /* add new VLAN filter */
2187                ret = i40e_vsi_add_vlan(vsi, vlan_id);
2188                if (ret) {
2189                        dev_info(&vsi->back->pdev->dev,
2190                                 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
2191                                 vsi->back->hw.aq.asq_last_status);
2192                        goto error_pvid;
2193                }
2194                /* Kill non-vlan MAC filters - ignore error return since
2195                 * there might not be any non-vlan MAC filters.
2196                 */
2197                i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
2198        }
2199
2200        if (ret) {
2201                dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
2202                goto error_pvid;
2203        }
2204        /* The Port VLAN needs to be saved across resets the same as the
2205         * default LAN MAC address.
2206         */
2207        vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2208        ret = 0;
2209
2210error_pvid:
2211        return ret;
2212}
2213
2214#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */
2215#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */
2216/**
2217 * i40e_ndo_set_vf_bw
2218 * @netdev: network interface device structure
2219 * @vf_id: vf identifier
2220 * @min_tx_rate: minimum Tx rate, in Mbps (not supported, must be 0)
     * @max_tx_rate: maximum Tx rate, in Mbps
2221 *
2222 * configure vf tx rate
2223 **/
2224int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
2225                       int max_tx_rate)
2226{
2227        struct i40e_netdev_priv *np = netdev_priv(netdev);
2228        struct i40e_pf *pf = np->vsi->back;
2229        struct i40e_vsi *vsi;
2230        struct i40e_vf *vf;
2231        int speed = 0;
2232        int ret = 0;
2233
2234        /* validate the request */
2235        if (vf_id >= pf->num_alloc_vfs) {
2236                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
2237                ret = -EINVAL;
2238                goto error;
2239        }
2240
2241        if (min_tx_rate) {
2242                dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
2243                        min_tx_rate, vf_id);
2244                return -EINVAL;
2245        }
2246
2247        vf = &(pf->vf[vf_id]);
2248        vsi = pf->vsi[vf->lan_vsi_index];
2249        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2250                dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
2251                ret = -EINVAL;
2252                goto error;
2253        }
2254
2255        switch (pf->hw.phy.link_info.link_speed) {
2256        case I40E_LINK_SPEED_40GB:
2257                speed = 40000;
2258                break;
2259        case I40E_LINK_SPEED_10GB:
2260                speed = 10000;
2261                break;
2262        case I40E_LINK_SPEED_1GB:
2263                speed = 1000;
2264                break;
2265        default:
2266                break;
2267        }
2268
2269        if (max_tx_rate > speed) {
2270                dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.",
2271                        max_tx_rate, vf->vf_id);
2272                ret = -EINVAL;
2273                goto error;
2274        }
2275
2276        if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
2277                dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
2278                max_tx_rate = 50;
2279        }
2280
2281        /* Tx rate credits are in units of 50 Mbps; 0 disables the limit. */
2282        ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
2283                                          max_tx_rate / I40E_BW_CREDIT_DIVISOR,
2284                                          I40E_MAX_BW_INACTIVE_ACCUM, NULL);
2285        if (ret) {
2286                dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
2287                        ret);
2288                ret = -EIO;
2289                goto error;
2290        }
2291        vf->tx_rate = max_tx_rate;
2292error:
2293        return ret;
2294}
2295
2296/**
2297 * i40e_ndo_get_vf_config
2298 * @netdev: network interface device structure
2299 * @vf_id: vf identifier
2300 * @ivi: vf configuration structure
2301 *
2302 * return vf configuration
2303 **/
2304int i40e_ndo_get_vf_config(struct net_device *netdev,
2305                           int vf_id, struct ifla_vf_info *ivi)
2306{
2307        struct i40e_netdev_priv *np = netdev_priv(netdev);
2308        struct i40e_vsi *vsi = np->vsi;
2309        struct i40e_pf *pf = vsi->back;
2310        struct i40e_vf *vf;
2311        int ret = 0;
2312
2313        /* validate the request */
2314        if (vf_id >= pf->num_alloc_vfs) {
2315                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2316                ret = -EINVAL;
2317                goto error_param;
2318        }
2319
2320        vf = &(pf->vf[vf_id]);
2321        /* first vsi is always the LAN vsi */
2322        vsi = pf->vsi[vf->lan_vsi_index];
2323        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2324                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2325                ret = -EINVAL;
2326                goto error_param;
2327        }
2328
2329        ivi->vf = vf_id;
2330
2331        memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
2332
2333        ivi->max_tx_rate = vf->tx_rate;
2334        ivi->min_tx_rate = 0;
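            /* Unpack the stored PVID into separate VLAN id and priority (qos)
             * fields for the netlink report.
             */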
2335        ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
2336        ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
2337                   I40E_VLAN_PRIORITY_SHIFT;
2338        if (!vf->link_forced)
2339                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
2340        else if (vf->link_up)
2341                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2342        else
2343                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2344        ivi->spoofchk = vf->spoofchk;
2345        ret = 0;
2346
2347error_param:
2348        return ret;
2349}
2350
2351/**
2352 * i40e_ndo_set_vf_link_state
2353 * @netdev: network interface device structure
2354 * @vf_id: vf identifier
2355 * @link: required link state
2356 *
2357 * Set the link state of a specified VF, regardless of physical link state
2358 **/
2359int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2360{
2361        struct i40e_netdev_priv *np = netdev_priv(netdev);
2362        struct i40e_pf *pf = np->vsi->back;
2363        struct i40e_virtchnl_pf_event pfe;
2364        struct i40e_hw *hw = &pf->hw;
2365        struct i40e_vf *vf;
2366        int abs_vf_id;
2367        int ret = 0;
2368
2369        /* validate the request */
2370        if (vf_id >= pf->num_alloc_vfs) {
2371                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2372                ret = -EINVAL;
2373                goto error_out;
2374        }
2375
2376        vf = &pf->vf[vf_id];
2377        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
2378
2379        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
2380        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
2381
2382        switch (link) {
2383        case IFLA_VF_LINK_STATE_AUTO:
2384                vf->link_forced = false;
2385                pfe.event_data.link_event.link_status =
2386                        pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
2387                pfe.event_data.link_event.link_speed =
2388                        pf->hw.phy.link_info.link_speed;
2389                break;
2390        case IFLA_VF_LINK_STATE_ENABLE:
2391                vf->link_forced = true;
2392                vf->link_up = true;
2393                pfe.event_data.link_event.link_status = true;
2394                pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
2395                break;
2396        case IFLA_VF_LINK_STATE_DISABLE:
2397                vf->link_forced = true;
2398                vf->link_up = false;
2399                pfe.event_data.link_event.link_status = false;
2400                pfe.event_data.link_event.link_speed = 0;
2401                break;
2402        default:
2403                ret = -EINVAL;
2404                goto error_out;
2405        }
2406        /* Notify the VF of its new link state */
2407        i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
2408                               0, (u8 *)&pfe, sizeof(pfe), NULL);
2409
2410error_out:
2411        return ret;
2412}
2413
2414/**
2415 * i40e_ndo_set_vf_spoofck
2416 * @netdev: network interface device structure
2417 * @vf_id: vf identifier
2418 * @enable: flag to enable or disable feature
2419 *
2420 * Enable or disable VF spoof checking
2421 **/
2422int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable)
2423{
2424        struct i40e_netdev_priv *np = netdev_priv(netdev);
2425        struct i40e_vsi *vsi = np->vsi;
2426        struct i40e_pf *pf = vsi->back;
2427        struct i40e_vsi_context ctxt;
2428        struct i40e_hw *hw = &pf->hw;
2429        struct i40e_vf *vf;
2430        int ret = 0;
2431
2432        /* validate the request */
2433        if (vf_id >= pf->num_alloc_vfs) {
2434                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2435                ret = -EINVAL;
2436                goto out;
2437        }
2438
2439        vf = &(pf->vf[vf_id]);
2440
2441        if (enable == vf->spoofchk)
2442                goto out;
2443
2444        vf->spoofchk = enable;
2445        memset(&ctxt, 0, sizeof(ctxt));
2446        ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
2447        ctxt.pf_num = pf->hw.pf_id;
2448        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
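            /* Only the security section is updated; setting the MAC check
             * flag enables MAC anti-spoof filtering on this VF's VSI.
             */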
2449        if (enable)
2450                ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
2451        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2452        if (ret) {
2453                dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
2454                        ret);
2455                ret = -EIO;
2456        }
2457out:
2458        return ret;
2459}
2460