linux/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
   1/*******************************************************************************
   2 *
   3 * Intel Ethernet Controller XL710 Family Linux Driver
   4 * Copyright(c) 2013 - 2014 Intel Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms and conditions of the GNU General Public License,
   8 * version 2, as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope it will be useful, but WITHOUT
  11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13 * more details.
  14 *
  15 * You should have received a copy of the GNU General Public License along
  16 * with this program.  If not, see <http://www.gnu.org/licenses/>.
  17 *
  18 * The full GNU General Public License is included in this distribution in
  19 * the file called "COPYING".
  20 *
  21 * Contact Information:
  22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24 *
  25 ******************************************************************************/
  26
  27#include "i40e.h"
  28
  29/***********************misc routines*****************************/
  30
  31/**
  32 * i40e_vc_disable_vf
  33 * @pf: pointer to the pf info
  34 * @vf: pointer to the vf info
  35 *
  36 * Disable the VF through a SW reset
  37 **/
  38static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
  39{
  40        struct i40e_hw *hw = &pf->hw;
  41        u32 reg;
  42
  43        reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
  44        reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
  45        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
  46        i40e_flush(hw);
  47}
  48
  49/**
  50 * i40e_vc_isvalid_vsi_id
  51 * @vf: pointer to the vf info
  52 * @vsi_id: vf relative vsi id
  53 *
  54 * check for the valid vsi id
  55 **/
  56static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
  57{
  58        struct i40e_pf *pf = vf->pf;
  59
  60        return pf->vsi[vsi_id]->vf_id == vf->vf_id;
  61}
  62
  63/**
  64 * i40e_vc_isvalid_queue_id
  65 * @vf: pointer to the vf info
  66 * @vsi_id: vsi id
  67 * @qid: vsi relative queue id
  68 *
  69 * check for the valid queue id
  70 **/
  71static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
  72                                            u8 qid)
  73{
  74        struct i40e_pf *pf = vf->pf;
  75
  76        return qid < pf->vsi[vsi_id]->num_queue_pairs;
  77}
  78
  79/**
  80 * i40e_vc_isvalid_vector_id
  81 * @vf: pointer to the vf info
  82 * @vector_id: vf relative vector id
  83 *
  84 * check for the valid vector id
  85 **/
  86static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
  87{
  88        struct i40e_pf *pf = vf->pf;
  89
  90        return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
  91}
  92
  93/***********************vf resource mgmt routines*****************/
  94
  95/**
  96 * i40e_vc_get_pf_queue_id
  97 * @vf: pointer to the vf info
  98 * @vsi_idx: index of VSI in PF struct
  99 * @vsi_queue_id: vsi relative queue id
 100 *
 101 * return pf relative queue id
 102 **/
 103static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
 104                                   u8 vsi_queue_id)
 105{
 106        struct i40e_pf *pf = vf->pf;
 107        struct i40e_vsi *vsi = pf->vsi[vsi_idx];
 108        u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
 109
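             /* a noncontiguous mapping carries a full per-queue lookup
              * table; a contiguous mapping stores only the first PF queue
              * and the rest follow it sequentially
              */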
 110        if (le16_to_cpu(vsi->info.mapping_flags) &
 111            I40E_AQ_VSI_QUE_MAP_NONCONTIG)
 112                pf_queue_id =
 113                        le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
 114        else
 115                pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
 116                              vsi_queue_id;
 117
 118        return pf_queue_id;
 119}
 120
 121/**
 122 * i40e_config_irq_link_list
 123 * @vf: pointer to the vf info
 124 * @vsi_idx: index of VSI in PF struct
 125 * @vecmap: irq map info
 126 *
 127 * configure irq link list from the map
 128 **/
 129static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
 130                                      struct i40e_virtchnl_vector_map *vecmap)
 131{
 132        unsigned long linklistmap = 0, tempmap;
 133        struct i40e_pf *pf = vf->pf;
 134        struct i40e_hw *hw = &pf->hw;
 135        u16 vsi_queue_id, pf_queue_id;
 136        enum i40e_queue_type qtype;
 137        u16 next_q, vector_id;
 138        u32 reg, reg_idx;
 139        u16 itr_idx = 0;
 140
 141        vector_id = vecmap->vector_id;
 142        /* setup the head */
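             /* vector 0 uses the dedicated VPINT_LNKLST0 register for this
              * VF; vectors 1..N-1 index into the flat VPINT_LNKLSTN array,
              * which reserves (num_msix_vectors_vf - 1) entries per VF
              */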
 143        if (0 == vector_id)
 144                reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 145        else
 146                reg_idx = I40E_VPINT_LNKLSTN(
 147                     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
 148                     (vector_id - 1));
 149
 150        if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
 151                /* Special case - No queues mapped on this vector */
 152                wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
 153                goto irq_list_done;
 154        }
 155        tempmap = vecmap->rxq_map;
 156        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 157                linklistmap |= (1 <<
 158                                (I40E_VIRTCHNL_SUPPORTED_QTYPES *
 159                                 vsi_queue_id));
 160        }
 161
 162        tempmap = vecmap->txq_map;
 163        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 164                linklistmap |= (1 <<
 165                                (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
 166                                 + 1));
 167        }
 168
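             /* linklistmap now interleaves rx/tx bits per VSI queue:
              * bit (2 * q) is set for rx queue q, bit (2 * q + 1) for tx
              * queue q.  e.g. rxq_map = 0x3 and txq_map = 0x1 yield
              * linklistmap = 0x7 (rx0, tx0, rx1)
              */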
 169        next_q = find_first_bit(&linklistmap,
 170                                (I40E_MAX_VSI_QP *
 171                                 I40E_VIRTCHNL_SUPPORTED_QTYPES));
 172        vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
 173        qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
 174        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
 175        reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 176
 177        wr32(hw, reg_idx, reg);
 178
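             /* walk the remaining bits: each queue's QINT_[RT]QCTL register
              * names the next queue in the chain, and the final queue's
              * NEXTQ_INDX field is set to END_OF_LIST to terminate it
              */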
 179        while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
 180                switch (qtype) {
 181                case I40E_QUEUE_TYPE_RX:
 182                        reg_idx = I40E_QINT_RQCTL(pf_queue_id);
 183                        itr_idx = vecmap->rxitr_idx;
 184                        break;
 185                case I40E_QUEUE_TYPE_TX:
 186                        reg_idx = I40E_QINT_TQCTL(pf_queue_id);
 187                        itr_idx = vecmap->txitr_idx;
 188                        break;
 189                default:
 190                        break;
 191                }
 192
 193                next_q = find_next_bit(&linklistmap,
 194                                       (I40E_MAX_VSI_QP *
 195                                        I40E_VIRTCHNL_SUPPORTED_QTYPES),
 196                                       next_q + 1);
 197                if (next_q <
 198                    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
 199                        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 200                        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 201                        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
 202                                                              vsi_queue_id);
 203                } else {
 204                        pf_queue_id = I40E_QUEUE_END_OF_LIST;
 205                        qtype = 0;
 206                }
 207
  208                /* format for the RQCTL & TQCTL regs is the same */
 209                reg = (vector_id) |
 210                    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
 211                    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
 212                    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
 213                    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
 214                wr32(hw, reg_idx, reg);
 215        }
 216
 217irq_list_done:
 218        i40e_flush(hw);
 219}
 220
 221/**
 222 * i40e_config_vsi_tx_queue
 223 * @vf: pointer to the vf info
 224 * @vsi_idx: index of VSI in PF struct
 225 * @vsi_queue_id: vsi relative queue index
 226 * @info: config. info
 227 *
 228 * configure tx queue
 229 **/
 230static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
 231                                    u16 vsi_queue_id,
 232                                    struct i40e_virtchnl_txq_info *info)
 233{
 234        struct i40e_pf *pf = vf->pf;
 235        struct i40e_hw *hw = &pf->hw;
 236        struct i40e_hmc_obj_txq tx_ctx;
 237        u16 pf_queue_id;
 238        u32 qtx_ctl;
 239        int ret = 0;
 240
 241        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
 242
 243        /* clear the context structure first */
 244        memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
 245
 246        /* only set the required fields */
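             /* the HMC context stores the ring base address in 128-byte
              * units, hence the division below
              */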
 247        tx_ctx.base = info->dma_ring_addr / 128;
 248        tx_ctx.qlen = info->ring_len;
 249        tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
 250        tx_ctx.rdylist_act = 0;
 251        tx_ctx.head_wb_ena = info->headwb_enabled;
 252        tx_ctx.head_wb_addr = info->dma_headwb_addr;
 253
 254        /* clear the context in the HMC */
 255        ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
 256        if (ret) {
 257                dev_err(&pf->pdev->dev,
 258                        "Failed to clear VF LAN Tx queue context %d, error: %d\n",
 259                        pf_queue_id, ret);
 260                ret = -ENOENT;
 261                goto error_context;
 262        }
 263
 264        /* set the context in the HMC */
 265        ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
 266        if (ret) {
 267                dev_err(&pf->pdev->dev,
 268                        "Failed to set VF LAN Tx queue context %d error: %d\n",
 269                        pf_queue_id, ret);
 270                ret = -ENOENT;
 271                goto error_context;
 272        }
 273
 274        /* associate this queue with the PCI VF function */
 275        qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
 276        qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
 277                    & I40E_QTX_CTL_PF_INDX_MASK);
 278        qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
 279                     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
 280                    & I40E_QTX_CTL_VFVM_INDX_MASK);
 281        wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
 282        i40e_flush(hw);
 283
 284error_context:
 285        return ret;
 286}
 287
 288/**
 289 * i40e_config_vsi_rx_queue
 290 * @vf: pointer to the vf info
 291 * @vsi_idx: index of VSI in PF struct
 292 * @vsi_queue_id: vsi relative queue index
 293 * @info: config. info
 294 *
 295 * configure rx queue
 296 **/
 297static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
 298                                    u16 vsi_queue_id,
 299                                    struct i40e_virtchnl_rxq_info *info)
 300{
 301        struct i40e_pf *pf = vf->pf;
 302        struct i40e_hw *hw = &pf->hw;
 303        struct i40e_hmc_obj_rxq rx_ctx;
 304        u16 pf_queue_id;
 305        int ret = 0;
 306
 307        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
 308
 309        /* clear the context structure first */
 310        memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
 311
 312        /* only set the required fields */
 313        rx_ctx.base = info->dma_ring_addr / 128;
 314        rx_ctx.qlen = info->ring_len;
 315
 316        if (info->splithdr_enabled) {
 317                rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
 318                                  I40E_RX_SPLIT_IP      |
 319                                  I40E_RX_SPLIT_TCP_UDP |
 320                                  I40E_RX_SPLIT_SCTP;
 321                /* header length validation */
 322                if (info->hdr_size > ((2 * 1024) - 64)) {
 323                        ret = -EINVAL;
 324                        goto error_param;
 325                }
 326                rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 327
 328                /* set splitalways mode 10b */
 329                rx_ctx.dtype = 0x2;
 330        }
 331
 332        /* databuffer length validation */
 333        if (info->databuffer_size > ((16 * 1024) - 128)) {
 334                ret = -EINVAL;
 335                goto error_param;
 336        }
 337        rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
 338
 339        /* max pkt. length validation */
 340        if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
 341                ret = -EINVAL;
 342                goto error_param;
 343        }
 344        rx_ctx.rxmax = info->max_pkt_size;
 345
  346        /* always enable 32 byte descriptors */
 347        rx_ctx.dsize = 1;
 348
 349        /* default values */
 350        rx_ctx.tphrdesc_ena = 1;
 351        rx_ctx.tphwdesc_ena = 1;
 352        rx_ctx.tphdata_ena = 1;
 353        rx_ctx.tphhead_ena = 1;
 354        rx_ctx.lrxqthresh = 2;
 355        rx_ctx.crcstrip = 1;
 356        rx_ctx.prefena = 1;
 357
 358        /* clear the context in the HMC */
 359        ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
 360        if (ret) {
 361                dev_err(&pf->pdev->dev,
 362                        "Failed to clear VF LAN Rx queue context %d, error: %d\n",
 363                        pf_queue_id, ret);
 364                ret = -ENOENT;
 365                goto error_param;
 366        }
 367
 368        /* set the context in the HMC */
 369        ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
 370        if (ret) {
 371                dev_err(&pf->pdev->dev,
 372                        "Failed to set VF LAN Rx queue context %d error: %d\n",
 373                        pf_queue_id, ret);
 374                ret = -ENOENT;
 375                goto error_param;
 376        }
 377
 378error_param:
 379        return ret;
 380}
 381
 382/**
 383 * i40e_alloc_vsi_res
 384 * @vf: pointer to the vf info
 385 * @type: type of VSI to allocate
 386 *
 387 * alloc vf vsi context & resources
 388 **/
 389static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 390{
 391        struct i40e_mac_filter *f = NULL;
 392        struct i40e_pf *pf = vf->pf;
 393        struct i40e_vsi *vsi;
 394        int ret = 0;
 395
 396        vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
 397
 398        if (!vsi) {
 399                dev_err(&pf->pdev->dev,
 400                        "add vsi failed for vf %d, aq_err %d\n",
 401                        vf->vf_id, pf->hw.aq.asq_last_status);
 402                ret = -ENOENT;
 403                goto error_alloc_vsi_res;
 404        }
 405        if (type == I40E_VSI_SRIOV) {
 406                u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 407                vf->lan_vsi_index = vsi->idx;
 408                vf->lan_vsi_id = vsi->id;
 409                dev_info(&pf->pdev->dev,
 410                         "VF %d assigned LAN VSI index %d, VSI id %d\n",
 411                         vf->vf_id, vsi->idx, vsi->id);
  412                /* If the port VLAN has been configured and the VF
  413                 * driver was then removed, the VSI port VLAN
  414                 * configuration was destroyed.  Check if there is
 415                 * a port VLAN and restore the VSI configuration if
 416                 * needed.
 417                 */
 418                if (vf->port_vlan_id)
 419                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 420                f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
 421                                    vf->port_vlan_id, true, false);
 422                if (!f)
 423                        dev_info(&pf->pdev->dev,
 424                                 "Could not allocate VF MAC addr\n");
 425                f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
 426                                    true, false);
 427                if (!f)
 428                        dev_info(&pf->pdev->dev,
 429                                 "Could not allocate VF broadcast filter\n");
 430        }
 431
 432        /* program mac filter */
 433        ret = i40e_sync_vsi_filters(vsi);
 434        if (ret)
 435                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 436
 437        /* Set VF bandwidth if specified */
 438        if (vf->tx_rate) {
 439                ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
 440                                                  vf->tx_rate / 50, 0, NULL);
 441                if (ret)
 442                        dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
 443                                vf->vf_id, ret);
 444        }
 445
 446error_alloc_vsi_res:
 447        return ret;
 448}
 449
 450/**
 451 * i40e_enable_vf_mappings
 452 * @vf: pointer to the vf info
 453 *
 454 * enable vf mappings
 455 **/
 456static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 457{
 458        struct i40e_pf *pf = vf->pf;
 459        struct i40e_hw *hw = &pf->hw;
 460        u32 reg, total_queue_pairs = 0;
 461        int j;
 462
 463        /* Tell the hardware we're using noncontiguous mapping. HW requires
 464         * that VF queues be mapped using this method, even when they are
 465         * contiguous in real life
 466         */
 467        wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
 468             I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
 469
 470        /* enable VF vplan_qtable mappings */
 471        reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
 472        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
 473
 474        /* map PF queues to VF queues */
 475        for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
 476                u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
 477                reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
 478                wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
 479                total_queue_pairs++;
 480        }
 481
 482        /* map PF queues to VSI */
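             /* each VSILAN_QTABLE register packs two queue indices, one in
              * the low half and one in the high half; 0x7FF marks a slot
              * as unused
              */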
 483        for (j = 0; j < 7; j++) {
 484                if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
 485                        reg = 0x07FF07FF;       /* unused */
 486                } else {
 487                        u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
 488                                                          j * 2);
 489                        reg = qid;
 490                        qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
 491                                                      (j * 2) + 1);
 492                        reg |= qid << 16;
 493                }
 494                wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
 495        }
 496
 497        i40e_flush(hw);
 498}
 499
 500/**
 501 * i40e_disable_vf_mappings
 502 * @vf: pointer to the vf info
 503 *
 504 * disable vf mappings
 505 **/
 506static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 507{
 508        struct i40e_pf *pf = vf->pf;
 509        struct i40e_hw *hw = &pf->hw;
 510        int i;
 511
 512        /* disable qp mappings */
 513        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
 514        for (i = 0; i < I40E_MAX_VSI_QP; i++)
 515                wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
 516                     I40E_QUEUE_END_OF_LIST);
 517        i40e_flush(hw);
 518}
 519
 520/**
 521 * i40e_free_vf_res
 522 * @vf: pointer to the vf info
 523 *
 524 * free vf resources
 525 **/
 526static void i40e_free_vf_res(struct i40e_vf *vf)
 527{
 528        struct i40e_pf *pf = vf->pf;
 529        struct i40e_hw *hw = &pf->hw;
 530        u32 reg_idx, reg;
 531        int i, msix_vf;
 532
 533        /* free vsi & disconnect it from the parent uplink */
 534        if (vf->lan_vsi_index) {
 535                i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
 536                vf->lan_vsi_index = 0;
 537                vf->lan_vsi_id = 0;
 538        }
 539        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 540
 541        /* disable interrupts so the VF starts in a known state */
 542        for (i = 0; i < msix_vf; i++) {
  543                /* format is the same for both registers */
 544                if (0 == i)
 545                        reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
 546                else
 547                        reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
 548                                                      (vf->vf_id))
 549                                                     + (i - 1));
 550                wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
 551                i40e_flush(hw);
 552        }
 553
 554        /* clear the irq settings */
 555        for (i = 0; i < msix_vf; i++) {
  556                /* format is the same for both registers */
 557                if (0 == i)
 558                        reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 559                else
 560                        reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
 561                                                      (vf->vf_id))
 562                                                     + (i - 1));
 563                reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
 564                       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
 565                wr32(hw, reg_idx, reg);
 566                i40e_flush(hw);
 567        }
  568        /* reset some of the state variables keeping
 569         * track of the resources
 570         */
 571        vf->num_queue_pairs = 0;
 572        vf->vf_states = 0;
 573}
 574
 575/**
 576 * i40e_alloc_vf_res
 577 * @vf: pointer to the vf info
 578 *
 579 * allocate vf resources
 580 **/
 581static int i40e_alloc_vf_res(struct i40e_vf *vf)
 582{
 583        struct i40e_pf *pf = vf->pf;
 584        int total_queue_pairs = 0;
 585        int ret;
 586
 587        /* allocate hw vsi context & associated resources */
 588        ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
 589        if (ret)
 590                goto error_alloc;
 591        total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
 592        set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 593
  594        /* store the total queue pair count for the runtime
  595         * vf request validation
  596         */
 597        vf->num_queue_pairs = total_queue_pairs;
 598
 599        /* vf is now completely initialized */
 600        set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 601
 602error_alloc:
 603        if (ret)
 604                i40e_free_vf_res(vf);
 605
 606        return ret;
 607}
 608
 609#define VF_DEVICE_STATUS 0xAA
 610#define VF_TRANS_PENDING_MASK 0x20
 611/**
 612 * i40e_quiesce_vf_pci
 613 * @vf: pointer to the vf structure
 614 *
 615 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 616 * if the transactions never clear.
 617 **/
 618static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
 619{
 620        struct i40e_pf *pf = vf->pf;
 621        struct i40e_hw *hw = &pf->hw;
 622        int vf_abs_id, i;
 623        u32 reg;
 624
 625        vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
 626
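             /* use the PF's indirect config space access registers
              * (PF_PCI_CIAA/CIAD) to read the VF's Device Status and poll
              * the Transactions Pending bit until outstanding requests
              * drain (up to ~100 usecs)
              */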
 627        wr32(hw, I40E_PF_PCI_CIAA,
 628             VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
 629        for (i = 0; i < 100; i++) {
 630                reg = rd32(hw, I40E_PF_PCI_CIAD);
 631                if ((reg & VF_TRANS_PENDING_MASK) == 0)
 632                        return 0;
 633                udelay(1);
 634        }
 635        return -EIO;
 636}
 637
 638/**
 639 * i40e_reset_vf
 640 * @vf: pointer to the vf structure
 641 * @flr: VFLR was issued or not
 642 *
 643 * reset the vf
 644 **/
 645void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 646{
 647        struct i40e_pf *pf = vf->pf;
 648        struct i40e_hw *hw = &pf->hw;
 649        bool rsd = false;
 650        int i;
 651        u32 reg;
 652
 653        /* warn the VF */
 654        clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 655
 656        /* In the case of a VFLR, the HW has already reset the VF and we
 657         * just need to clean up, so don't hit the VFRTRIG register.
 658         */
 659        if (!flr) {
 660                /* reset vf using VPGEN_VFRTRIG reg */
 661                reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
 662                reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
 663                wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 664                i40e_flush(hw);
 665        }
 666
 667        if (i40e_quiesce_vf_pci(vf))
 668                dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
 669                        vf->vf_id);
 670
 671        /* poll VPGEN_VFRSTAT reg to make sure
 672         * that reset is complete
 673         */
 674        for (i = 0; i < 100; i++) {
 675                /* vf reset requires driver to first reset the
  676         * vf & then poll the status register to make sure
 677                 * that the requested op was completed
 678                 * successfully
 679                 */
 680                udelay(10);
 681                reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
 682                if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
 683                        rsd = true;
 684                        break;
 685                }
 686        }
 687
 688        if (!rsd)
 689                dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
 690                        vf->vf_id);
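             /* report VFR_COMPLETED while the VF's resources are rebuilt;
              * VFR_VFACTIVE is written below once the reset has finished
              */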
 691        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
 692        /* clear the reset bit in the VPGEN_VFRTRIG reg */
 693        reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
 694        reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
 695        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 696
 697        /* On initial reset, we won't have any queues */
 698        if (vf->lan_vsi_index == 0)
 699                goto complete_reset;
 700
 701        i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
 702complete_reset:
 703        /* reallocate vf resources to reset the VSI state */
 704        i40e_free_vf_res(vf);
 705        i40e_alloc_vf_res(vf);
 706        i40e_enable_vf_mappings(vf);
 707        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 708
 709        /* tell the VF the reset is done */
 710        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
 711        i40e_flush(hw);
 712}
 713
 714/**
 715 * i40e_vfs_are_assigned
 716 * @pf: pointer to the pf structure
 717 *
 718 * Determine if any VFs are assigned to VMs
 719 **/
 720static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
 721{
 722        struct pci_dev *pdev = pf->pdev;
 723        struct pci_dev *vfdev;
 724
 725        /* loop through all the VFs to see if we own any that are assigned */
  726        vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF, NULL);
 727        while (vfdev) {
 728                /* if we don't own it we don't care */
 729                if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
 730                        /* if it is assigned we cannot release it */
 731                        if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
 732                                return true;
 733                }
 734
 735                vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
 736                                       I40E_DEV_ID_VF,
 737                                       vfdev);
 738        }
 739
 740        return false;
 741}
 742#ifdef CONFIG_PCI_IOV
 743
 744/**
 745 * i40e_enable_pf_switch_lb
 746 * @pf: pointer to the pf structure
 747 *
 748 * enable switch loop back or die - no point in a return value
 749 **/
 750static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
 751{
 752        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 753        struct i40e_vsi_context ctxt;
 754        int aq_ret;
 755
 756        ctxt.seid = pf->main_vsi_seid;
 757        ctxt.pf_num = pf->hw.pf_id;
 758        ctxt.vf_num = 0;
 759        aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
 760        if (aq_ret) {
 761                dev_info(&pf->pdev->dev,
 762                         "%s couldn't get pf vsi config, err %d, aq_err %d\n",
 763                         __func__, aq_ret, pf->hw.aq.asq_last_status);
 764                return;
 765        }
 766        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 767        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 768        ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 769
 770        aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 771        if (aq_ret) {
 772                dev_info(&pf->pdev->dev,
 773                         "%s: update vsi switch failed, aq_err=%d\n",
 774                         __func__, vsi->back->hw.aq.asq_last_status);
 775        }
 776}
 777#endif
 778
 779/**
 780 * i40e_disable_pf_switch_lb
 781 * @pf: pointer to the pf structure
 782 *
 783 * disable switch loop back or die - no point in a return value
 784 **/
 785static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
 786{
 787        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 788        struct i40e_vsi_context ctxt;
 789        int aq_ret;
 790
 791        ctxt.seid = pf->main_vsi_seid;
 792        ctxt.pf_num = pf->hw.pf_id;
 793        ctxt.vf_num = 0;
 794        aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
 795        if (aq_ret) {
 796                dev_info(&pf->pdev->dev,
 797                         "%s couldn't get pf vsi config, err %d, aq_err %d\n",
 798                         __func__, aq_ret, pf->hw.aq.asq_last_status);
 799                return;
 800        }
 801        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 802        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 803        ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 804
 805        aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 806        if (aq_ret) {
 807                dev_info(&pf->pdev->dev,
 808                         "%s: update vsi switch failed, aq_err=%d\n",
 809                         __func__, vsi->back->hw.aq.asq_last_status);
 810        }
 811}
 812
 813/**
 814 * i40e_free_vfs
 815 * @pf: pointer to the pf structure
 816 *
 817 * free vf resources
 818 **/
 819void i40e_free_vfs(struct i40e_pf *pf)
 820{
 821        struct i40e_hw *hw = &pf->hw;
 822        u32 reg_idx, bit_idx;
 823        int i, tmp, vf_id;
 824
 825        if (!pf->vf)
 826                return;
 827
 828        /* Disable interrupt 0 so we don't try to handle the VFLR. */
 829        i40e_irq_dynamic_disable_icr0(pf);
 830
 831        mdelay(10); /* let any messages in transit get finished up */
 832        /* free up vf resources */
 833        tmp = pf->num_alloc_vfs;
 834        pf->num_alloc_vfs = 0;
 835        for (i = 0; i < tmp; i++) {
 836                if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
 837                        i40e_free_vf_res(&pf->vf[i]);
 838                /* disable qp mappings */
 839                i40e_disable_vf_mappings(&pf->vf[i]);
 840        }
 841
 842        kfree(pf->vf);
 843        pf->vf = NULL;
 844
 845        /* This check is for when the driver is unloaded while VFs are
 846         * assigned. Setting the number of VFs to 0 through sysfs is caught
 847         * before this function ever gets called.
 848         */
 849        if (!i40e_vfs_are_assigned(pf)) {
 850                pci_disable_sriov(pf->pdev);
 851                /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
 852                 * work correctly when SR-IOV gets re-enabled.
 853                 */
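                     /* GLGEN_VFLRSTAT packs 32 VF bits per register,
                      * indexed by absolute (vf_base_id + vf_id) VF number
                      */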
 854                for (vf_id = 0; vf_id < tmp; vf_id++) {
 855                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
 856                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
 857                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
 858                }
 859                i40e_disable_pf_switch_lb(pf);
 860        } else {
 861                dev_warn(&pf->pdev->dev,
 862                         "unable to disable SR-IOV because VFs are assigned.\n");
 863        }
 864
 865        /* Re-enable interrupt 0. */
 866        i40e_irq_dynamic_enable_icr0(pf);
 867}
 868
 869#ifdef CONFIG_PCI_IOV
 870/**
 871 * i40e_alloc_vfs
 872 * @pf: pointer to the pf structure
 873 * @num_alloc_vfs: number of vfs to allocate
 874 *
 875 * allocate vf resources
 876 **/
 877int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 878{
 879        struct i40e_vf *vfs;
 880        int i, ret = 0;
 881
 882        /* Disable interrupt 0 so we don't try to handle the VFLR. */
 883        i40e_irq_dynamic_disable_icr0(pf);
 884
 885        /* Check to see if we're just allocating resources for extant VFs */
 886        if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
 887                ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
 888                if (ret) {
 889                        dev_err(&pf->pdev->dev,
 890                                "Failed to enable SR-IOV, error %d.\n", ret);
 891                        pf->num_alloc_vfs = 0;
 892                        goto err_iov;
 893                }
 894        }
 895        /* allocate memory */
 896        vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
 897        if (!vfs) {
 898                ret = -ENOMEM;
 899                goto err_alloc;
 900        }
 901        pf->vf = vfs;
 902
 903        /* apply default profile */
 904        for (i = 0; i < num_alloc_vfs; i++) {
 905                vfs[i].pf = pf;
 906                vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
 907                vfs[i].vf_id = i;
 908
 909                /* assign default capabilities */
 910                set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
 911                vfs[i].spoofchk = true;
 912                /* vf resources get allocated during reset */
 913                i40e_reset_vf(&vfs[i], false);
 914
 915                /* enable vf vplan_qtable mappings */
 916                i40e_enable_vf_mappings(&vfs[i]);
 917        }
 918        pf->num_alloc_vfs = num_alloc_vfs;
 919
 920        i40e_enable_pf_switch_lb(pf);
 921err_alloc:
 922        if (ret)
 923                i40e_free_vfs(pf);
 924err_iov:
 925        /* Re-enable interrupt 0. */
 926        i40e_irq_dynamic_enable_icr0(pf);
 927        return ret;
 928}
 929
 930#endif
 931/**
 932 * i40e_pci_sriov_enable
 933 * @pdev: pointer to a pci_dev structure
 934 * @num_vfs: number of vfs to allocate
 935 *
 936 * Enable or change the number of VFs
 937 **/
 938static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
 939{
 940#ifdef CONFIG_PCI_IOV
 941        struct i40e_pf *pf = pci_get_drvdata(pdev);
 942        int pre_existing_vfs = pci_num_vf(pdev);
 943        int err = 0;
 944
 945        dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
 946        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 947                i40e_free_vfs(pf);
 948        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
 949                goto out;
 950
 951        if (num_vfs > pf->num_req_vfs) {
 952                err = -EPERM;
 953                goto err_out;
 954        }
 955
 956        err = i40e_alloc_vfs(pf, num_vfs);
 957        if (err) {
 958                dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
 959                goto err_out;
 960        }
 961
 962out:
 963        return num_vfs;
 964
 965err_out:
 966        return err;
 967#endif
 968        return 0;
 969}
 970
 971/**
 972 * i40e_pci_sriov_configure
 973 * @pdev: pointer to a pci_dev structure
 974 * @num_vfs: number of vfs to allocate
 975 *
 976 * Enable or change the number of VFs. Called when the user updates the number
 977 * of VFs in sysfs.
 978 **/
 979int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 980{
 981        struct i40e_pf *pf = pci_get_drvdata(pdev);
 982
 983        if (num_vfs)
 984                return i40e_pci_sriov_enable(pdev, num_vfs);
 985
 986        if (!i40e_vfs_are_assigned(pf)) {
 987                i40e_free_vfs(pf);
 988        } else {
 989                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
 990                return -EINVAL;
 991        }
 992        return 0;
 993}
 994
 995/***********************virtual channel routines******************/
 996
 997/**
 998 * i40e_vc_send_msg_to_vf
 999 * @vf: pointer to the vf info
1000 * @v_opcode: virtual channel opcode
1001 * @v_retval: virtual channel return value
1002 * @msg: pointer to the msg buffer
1003 * @msglen: msg length
1004 *
1005 * send msg to vf
1006 **/
1007static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1008                                  u32 v_retval, u8 *msg, u16 msglen)
1009{
1010        struct i40e_pf *pf = vf->pf;
1011        struct i40e_hw *hw = &pf->hw;
1012        int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1013        i40e_status aq_ret;
1014
1015        /* single place to detect unsuccessful return values */
1016        if (v_retval) {
1017                vf->num_invalid_msgs++;
1018                dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
1019                        v_opcode, v_retval);
1020                if (vf->num_invalid_msgs >
1021                    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1022                        dev_err(&pf->pdev->dev,
1023                                "Number of invalid messages exceeded for VF %d\n",
1024                                vf->vf_id);
1025                        dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1026                        set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
1027                }
1028        } else {
1029                vf->num_valid_msgs++;
1030        }
1031
1032        aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
1033                                        msg, msglen, NULL);
1034        if (aq_ret) {
1035                dev_err(&pf->pdev->dev,
1036                        "Unable to send the message to VF %d aq_err %d\n",
1037                        vf->vf_id, pf->hw.aq.asq_last_status);
1038                return -EIO;
1039        }
1040
1041        return 0;
1042}
1043
1044/**
1045 * i40e_vc_send_resp_to_vf
1046 * @vf: pointer to the vf info
1047 * @opcode: operation code
1048 * @retval: return value
1049 *
1050 * send resp msg to vf
1051 **/
1052static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1053                                   enum i40e_virtchnl_ops opcode,
1054                                   i40e_status retval)
1055{
1056        return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1057}
1058
1059/**
1060 * i40e_vc_get_version_msg
1061 * @vf: pointer to the vf info
1062 *
1063 * called from the vf to request the API version used by the PF
1064 **/
1065static int i40e_vc_get_version_msg(struct i40e_vf *vf)
1066{
1067        struct i40e_virtchnl_version_info info = {
1068                I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
1069        };
1070
1071        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
1072                                      I40E_SUCCESS, (u8 *)&info,
1073                                      sizeof(struct
1074                                             i40e_virtchnl_version_info));
1075}
1076
1077/**
1078 * i40e_vc_get_vf_resources_msg
1079 * @vf: pointer to the vf info
1082 *
1083 * called from the vf to request its resources
1084 **/
1085static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
1086{
1087        struct i40e_virtchnl_vf_resource *vfres = NULL;
1088        struct i40e_pf *pf = vf->pf;
1089        i40e_status aq_ret = 0;
1090        struct i40e_vsi *vsi;
1091        int i = 0, len = 0;
1092        int num_vsis = 1;
1093        int ret;
1094
1095        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
1096                aq_ret = I40E_ERR_PARAM;
1097                goto err;
1098        }
1099
1100        len = (sizeof(struct i40e_virtchnl_vf_resource) +
1101               sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
1102
1103        vfres = kzalloc(len, GFP_KERNEL);
1104        if (!vfres) {
1105                aq_ret = I40E_ERR_NO_MEMORY;
1106                len = 0;
1107                goto err;
1108        }
1109
1110        vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
1111        vsi = pf->vsi[vf->lan_vsi_index];
1112        if (!vsi->info.pvid)
1113                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
1114
1115        vfres->num_vsis = num_vsis;
1116        vfres->num_queue_pairs = vf->num_queue_pairs;
1117        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1118        if (vf->lan_vsi_index) {
1119                vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
1120                vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
1121                vfres->vsi_res[i].num_queue_pairs =
1122                    pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
1123                memcpy(vfres->vsi_res[i].default_mac_addr,
1124                       vf->default_lan_addr.addr, ETH_ALEN);
1125                i++;
1126        }
1127        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
1128
1129err:
1130        /* send the response back to the vf */
1131        ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
1132                                     aq_ret, (u8 *)vfres, len);
1133
1134        kfree(vfres);
1135        return ret;
1136}
1137
1138/**
1139 * i40e_vc_reset_vf_msg
1140 * @vf: pointer to the vf info
1143 *
 1144 * called from the vf to reset itself; unlike other virtchnl
 1145 * messages, the pf driver doesn't send a response back to the vf
1147 **/
1148static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1149{
1150        if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
1151                i40e_reset_vf(vf, false);
1152}
1153
1154/**
1155 * i40e_vc_config_promiscuous_mode_msg
1156 * @vf: pointer to the vf info
1157 * @msg: pointer to the msg buffer
1158 * @msglen: msg length
1159 *
1160 * called from the vf to configure the promiscuous mode of
1161 * vf vsis
1162 **/
1163static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1164                                               u8 *msg, u16 msglen)
1165{
1166        struct i40e_virtchnl_promisc_info *info =
1167            (struct i40e_virtchnl_promisc_info *)msg;
1168        struct i40e_pf *pf = vf->pf;
1169        struct i40e_hw *hw = &pf->hw;
1170        bool allmulti = false;
1171        bool promisc = false;
1172        i40e_status aq_ret;
1173
1174        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1175            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1176            !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
1177            (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
1178                aq_ret = I40E_ERR_PARAM;
1179                goto error_param;
1180        }
1181
1182        if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
1183                promisc = true;
1184        aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
1185                                                     promisc, NULL);
1186        if (aq_ret)
1187                goto error_param;
1188
1189        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
1190                allmulti = true;
1191        aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
1192                                                       allmulti, NULL);
1193
1194error_param:
1195        /* send the response to the vf */
1196        return i40e_vc_send_resp_to_vf(vf,
1197                                       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1198                                       aq_ret);
1199}
1200
1201/**
1202 * i40e_vc_config_queues_msg
1203 * @vf: pointer to the vf info
1204 * @msg: pointer to the msg buffer
1205 * @msglen: msg length
1206 *
1207 * called from the vf to configure the rx/tx
1208 * queues
1209 **/
1210static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1211{
1212        struct i40e_virtchnl_vsi_queue_config_info *qci =
1213            (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1214        struct i40e_virtchnl_queue_pair_info *qpi;
1215        u16 vsi_id, vsi_queue_id;
1216        i40e_status aq_ret = 0;
1217        int i;
1218
1219        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1220                aq_ret = I40E_ERR_PARAM;
1221                goto error_param;
1222        }
1223
1224        vsi_id = qci->vsi_id;
1225        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1226                aq_ret = I40E_ERR_PARAM;
1227                goto error_param;
1228        }
1229        for (i = 0; i < qci->num_queue_pairs; i++) {
1230                qpi = &qci->qpair[i];
1231                vsi_queue_id = qpi->txq.queue_id;
1232                if ((qpi->txq.vsi_id != vsi_id) ||
1233                    (qpi->rxq.vsi_id != vsi_id) ||
1234                    (qpi->rxq.queue_id != vsi_queue_id) ||
1235                    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
1236                        aq_ret = I40E_ERR_PARAM;
1237                        goto error_param;
1238                }
1239
1240                if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
1241                                             &qpi->rxq) ||
1242                    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
1243                                             &qpi->txq)) {
1244                        aq_ret = I40E_ERR_PARAM;
1245                        goto error_param;
1246                }
1247        }
1248
1249error_param:
1250        /* send the response to the vf */
1251        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1252                                       aq_ret);
1253}
1254
1255/**
1256 * i40e_vc_config_irq_map_msg
1257 * @vf: pointer to the vf info
1258 * @msg: pointer to the msg buffer
1259 * @msglen: msg length
1260 *
1261 * called from the vf to configure the irq to
1262 * queue map
1263 **/
1264static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1265{
1266        struct i40e_virtchnl_irq_map_info *irqmap_info =
1267            (struct i40e_virtchnl_irq_map_info *)msg;
1268        struct i40e_virtchnl_vector_map *map;
1269        u16 vsi_id, vsi_queue_id, vector_id;
1270        i40e_status aq_ret = 0;
1271        unsigned long tempmap;
1272        int i;
1273
1274        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1275                aq_ret = I40E_ERR_PARAM;
1276                goto error_param;
1277        }
1278
1279        for (i = 0; i < irqmap_info->num_vectors; i++) {
1280                map = &irqmap_info->vecmap[i];
1281
1282                vector_id = map->vector_id;
1283                vsi_id = map->vsi_id;
1284                /* validate msg params */
1285                if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
1286                    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1287                        aq_ret = I40E_ERR_PARAM;
1288                        goto error_param;
1289                }
1290
 1291                /* look out for an invalid queue index */
1292                tempmap = map->rxq_map;
1293                for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1294                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1295                                                      vsi_queue_id)) {
1296                                aq_ret = I40E_ERR_PARAM;
1297                                goto error_param;
1298                        }
1299                }
1300
1301                tempmap = map->txq_map;
1302                for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1303                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1304                                                      vsi_queue_id)) {
1305                                aq_ret = I40E_ERR_PARAM;
1306                                goto error_param;
1307                        }
1308                }
1309
1310                i40e_config_irq_link_list(vf, vsi_id, map);
1311        }
1312error_param:
1313        /* send the response to the vf */
1314        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
1315                                       aq_ret);
1316}
1317
1318/**
1319 * i40e_vc_enable_queues_msg
1320 * @vf: pointer to the vf info
1321 * @msg: pointer to the msg buffer
1322 * @msglen: msg length
1323 *
1324 * called from the vf to enable all or specific queue(s)
1325 **/
1326static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1327{
1328        struct i40e_virtchnl_queue_select *vqs =
1329            (struct i40e_virtchnl_queue_select *)msg;
1330        struct i40e_pf *pf = vf->pf;
1331        u16 vsi_id = vqs->vsi_id;
1332        i40e_status aq_ret = 0;
1333
1334        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1335                aq_ret = I40E_ERR_PARAM;
1336                goto error_param;
1337        }
1338
1339        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1340                aq_ret = I40E_ERR_PARAM;
1341                goto error_param;
1342        }
1343
1344        if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1345                aq_ret = I40E_ERR_PARAM;
1346                goto error_param;
1347        }
1348        if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
1349                aq_ret = I40E_ERR_TIMEOUT;
1350error_param:
1351        /* send the response to the vf */
1352        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
1353                                       aq_ret);
1354}
1355
1356/**
1357 * i40e_vc_disable_queues_msg
1358 * @vf: pointer to the vf info
1359 * @msg: pointer to the msg buffer
1360 * @msglen: msg length
1361 *
1362 * called from the vf to disable all or specific
1363 * queue(s)
1364 **/
1365static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1366{
1367        struct i40e_virtchnl_queue_select *vqs =
1368            (struct i40e_virtchnl_queue_select *)msg;
1369        struct i40e_pf *pf = vf->pf;
1370        u16 vsi_id = vqs->vsi_id;
1371        i40e_status aq_ret = 0;
1372
1373        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1374                aq_ret = I40E_ERR_PARAM;
1375                goto error_param;
1376        }
1377
1378        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1379                aq_ret = I40E_ERR_PARAM;
1380                goto error_param;
1381        }
1382
1383        if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1384                aq_ret = I40E_ERR_PARAM;
1385                goto error_param;
1386        }
1387        if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
1388                aq_ret = I40E_ERR_TIMEOUT;
1389
1390error_param:
1391        /* send the response to the vf */
1392        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1393                                       aq_ret);
1394}
1395
1396/**
1397 * i40e_vc_get_stats_msg
1398 * @vf: pointer to the vf info
1399 * @msg: pointer to the msg buffer
1400 * @msglen: msg length
1401 *
1402 * called from the vf to get vsi stats
1403 **/
1404static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1405{
1406        struct i40e_virtchnl_queue_select *vqs =
1407            (struct i40e_virtchnl_queue_select *)msg;
1408        struct i40e_pf *pf = vf->pf;
1409        struct i40e_eth_stats stats;
1410        i40e_status aq_ret = 0;
1411        struct i40e_vsi *vsi;
1412
1413        memset(&stats, 0, sizeof(struct i40e_eth_stats));
1414
1415        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1416                aq_ret = I40E_ERR_PARAM;
1417                goto error_param;
1418        }
1419
1420        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1421                aq_ret = I40E_ERR_PARAM;
1422                goto error_param;
1423        }
1424
1425        vsi = pf->vsi[vqs->vsi_id];
1426        if (!vsi) {
1427                aq_ret = I40E_ERR_PARAM;
1428                goto error_param;
1429        }
1430        i40e_update_eth_stats(vsi);
1431        stats = vsi->eth_stats;
1432
1433error_param:
1434        /* send the response back to the vf */
1435        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
1436                                      (u8 *)&stats, sizeof(stats));
1437}
1438
1439/**
1440 * i40e_check_vf_permission
1441 * @vf: pointer to the vf info
1442 * @macaddr: pointer to the MAC Address being checked
1443 *
1444 * Check if the VF has permission to add or delete unicast MAC address
1445 * filters and return error code -EPERM if not.  Then check if the
1446 * address filter requested is broadcast or zero and if so return
1447 * an invalid MAC address error code.
1448 **/
1449static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
1450{
1451        struct i40e_pf *pf = vf->pf;
1452        int ret = 0;
1453
1454        if (is_broadcast_ether_addr(macaddr) ||
1455                   is_zero_ether_addr(macaddr)) {
1456                dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
1457                ret = I40E_ERR_INVALID_MAC_ADDR;
1458        } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
1459                   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
1460                /* If the host VMM administrator has set the VF MAC address
1461                 * administratively via the ndo_set_vf_mac command then deny
1462                 * permission to the VF to add or delete unicast MAC addresses.
1463                 * The VF may request to set the MAC address filter already
1464                 * assigned to it so do not return an error in that case.
1465                 */
1466                dev_err(&pf->pdev->dev,
1467                        "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
1468                ret = -EPERM;
1469        }
1470        return ret;
1471}
1472
1473/**
1474 * i40e_vc_add_mac_addr_msg
1475 * @vf: pointer to the vf info
1476 * @msg: pointer to the msg buffer
1477 * @msglen: msg length
1478 *
1479 * add guest mac address filter
1480 **/
1481static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1482{
1483        struct i40e_virtchnl_ether_addr_list *al =
1484            (struct i40e_virtchnl_ether_addr_list *)msg;
1485        struct i40e_pf *pf = vf->pf;
1486        struct i40e_vsi *vsi = NULL;
1487        u16 vsi_id = al->vsi_id;
1488        i40e_status ret = 0;
1489        int i;
1490
1491        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1492            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1493            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1494                ret = I40E_ERR_PARAM;
1495                goto error_param;
1496        }
1497
1498        for (i = 0; i < al->num_elements; i++) {
1499                ret = i40e_check_vf_permission(vf, al->list[i].addr);
1500                if (ret)
1501                        goto error_param;
1502        }
1503        vsi = pf->vsi[vsi_id];
1504
1505        /* add new addresses to the list */
1506        for (i = 0; i < al->num_elements; i++) {
1507                struct i40e_mac_filter *f;
1508
1509                f = i40e_find_mac(vsi, al->list[i].addr, true, false);
1510                if (!f) {
1511                        if (i40e_is_vsi_in_vlan(vsi))
1512                                f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
1513                                                         true, false);
1514                        else
1515                                f = i40e_add_filter(vsi, al->list[i].addr, -1,
1516                                                    true, false);
1517                }
1518
1519                if (!f) {
1520                        dev_err(&pf->pdev->dev,
1521                                "Unable to add VF MAC filter\n");
1522                        ret = I40E_ERR_PARAM;
1523                        goto error_param;
1524                }
1525        }
1526
1527        /* program the updated filter list */
1528        if (i40e_sync_vsi_filters(vsi))
1529                dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1530
1531error_param:
1532        /* send the response to the vf */
1533        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1534                                       ret);
1535}
1536
1537/**
1538 * i40e_vc_del_mac_addr_msg
1539 * @vf: pointer to the vf info
1540 * @msg: pointer to the msg buffer
1541 * @msglen: msg length
1542 *
1543 * remove guest mac address filter
1544 **/
1545static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1546{
1547        struct i40e_virtchnl_ether_addr_list *al =
1548            (struct i40e_virtchnl_ether_addr_list *)msg;
1549        struct i40e_pf *pf = vf->pf;
1550        struct i40e_vsi *vsi = NULL;
1551        u16 vsi_id = al->vsi_id;
1552        i40e_status ret = 0;
1553        int i;
1554
1555        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1556            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1557            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1558                ret = I40E_ERR_PARAM;
1559                goto error_param;
1560        }
1561
1562        for (i = 0; i < al->num_elements; i++) {
1563                if (is_broadcast_ether_addr(al->list[i].addr) ||
1564                    is_zero_ether_addr(al->list[i].addr)) {
1565                        dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
1566                                al->list[i].addr);
1567                        ret = I40E_ERR_INVALID_MAC_ADDR;
1568                        goto error_param;
1569                }
1570        }
1571        vsi = pf->vsi[vsi_id];
1572
1573        /* delete addresses from the list */
1574        for (i = 0; i < al->num_elements; i++)
1575                i40e_del_filter(vsi, al->list[i].addr,
1576                                I40E_VLAN_ANY, true, false);
1577
1578        /* program the updated filter list */
1579        if (i40e_sync_vsi_filters(vsi))
1580                dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1581
1582error_param:
1583        /* send the response to the vf */
1584        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1585                                       ret);
1586}
1587
1588/**
1589 * i40e_vc_add_vlan_msg
1590 * @vf: pointer to the vf info
1591 * @msg: pointer to the msg buffer
1592 * @msglen: msg length
1593 *
1594 * program guest vlan id
1595 **/
1596static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1597{
1598        struct i40e_virtchnl_vlan_filter_list *vfl =
1599            (struct i40e_virtchnl_vlan_filter_list *)msg;
1600        struct i40e_pf *pf = vf->pf;
1601        struct i40e_vsi *vsi = NULL;
1602        u16 vsi_id = vfl->vsi_id;
1603        i40e_status aq_ret = 0;
1604        int i;
1605
1606        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1607            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1608            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1609                aq_ret = I40E_ERR_PARAM;
1610                goto error_param;
1611        }
1612
1613        for (i = 0; i < vfl->num_elements; i++) {
1614                if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1615                        aq_ret = I40E_ERR_PARAM;
1616                        dev_err(&pf->pdev->dev,
1617                                "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
1618                        goto error_param;
1619                }
1620        }
1621        vsi = pf->vsi[vsi_id];
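            /* a port VLAN set by the host owns the VLAN configuration for
             * this VSI, so guest VLAN requests are rejected while a PVID
             * is present; the delete path below applies the same check
             */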
1622        if (vsi->info.pvid) {
1623                aq_ret = I40E_ERR_PARAM;
1624                goto error_param;
1625        }
1626
1627        i40e_vlan_stripping_enable(vsi);
1628        for (i = 0; i < vfl->num_elements; i++) {
1629                /* add new VLAN filter */
1630                int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
1631                if (ret)
1632                        dev_err(&pf->pdev->dev,
1633                                "Unable to add VF vlan filter %d, error %d\n",
1634                                vfl->vlan_id[i], ret);
1635        }
1636
1637error_param:
1638        /* send the response to the vf */
1639        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
1640}
1641
1642/**
1643 * i40e_vc_remove_vlan_msg
1644 * @vf: pointer to the vf info
1645 * @msg: pointer to the msg buffer
1646 * @msglen: msg length
1647 *
1648 * remove programmed guest vlan id
1649 **/
1650static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1651{
1652        struct i40e_virtchnl_vlan_filter_list *vfl =
1653            (struct i40e_virtchnl_vlan_filter_list *)msg;
1654        struct i40e_pf *pf = vf->pf;
1655        struct i40e_vsi *vsi = NULL;
1656        u16 vsi_id = vfl->vsi_id;
1657        i40e_status aq_ret = 0;
1658        int i;
1659
1660        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1661            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1662            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1663                aq_ret = I40E_ERR_PARAM;
1664                goto error_param;
1665        }
1666
1667        for (i = 0; i < vfl->num_elements; i++) {
1668                if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1669                        aq_ret = I40E_ERR_PARAM;
1670                        goto error_param;
1671                }
1672        }
1673
1674        vsi = pf->vsi[vsi_id];
1675        if (vsi->info.pvid) {
1676                aq_ret = I40E_ERR_PARAM;
1677                goto error_param;
1678        }
1679
1680        for (i = 0; i < vfl->num_elements; i++) {
1681                int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
1682                if (ret)
1683                        dev_err(&pf->pdev->dev,
1684                                "Unable to delete VF vlan filter %d, error %d\n",
1685                                vfl->vlan_id[i], ret);
1686        }
1687
1688error_param:
1689        /* send the response to the vf */
1690        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
1691}
1692
1693/**
1694 * i40e_vc_validate_vf_msg
1695 * @vf: pointer to the vf info
1696 * @v_opcode: virtchnl operation code
1697 * @v_retval: return value carried in the message (unused here)
1698 * @msg: pointer to the msg buffer
     * @msglen: msg length
1699 *
1700 * validate msg
1701 **/
1702static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
1703                                   u32 v_retval, u8 *msg, u16 msglen)
1704{
1705        bool err_msg_format = false;
1706        int valid_len;
1707
1708        /* Check if VF is disabled. */
1709        if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
1710                return I40E_ERR_PARAM;
1711
1712        /* Validate message length. */
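            /* for opcodes that carry a variable-length list, the expected
             * length is the fixed header plus num_elements (or
             * num_queue_pairs/num_vectors) times the per-element size,
             * computed per opcode below
             */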
1713        switch (v_opcode) {
1714        case I40E_VIRTCHNL_OP_VERSION:
1715                valid_len = sizeof(struct i40e_virtchnl_version_info);
1716                break;
1717        case I40E_VIRTCHNL_OP_RESET_VF:
1718        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1719                valid_len = 0;
1720                break;
1721        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1722                valid_len = sizeof(struct i40e_virtchnl_txq_info);
1723                break;
1724        case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1725                valid_len = sizeof(struct i40e_virtchnl_rxq_info);
1726                break;
1727        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1728                valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
1729                if (msglen >= valid_len) {
1730                        struct i40e_virtchnl_vsi_queue_config_info *vqc =
1731                            (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1732                        valid_len += (vqc->num_queue_pairs *
1733                                      sizeof(struct
1734                                             i40e_virtchnl_queue_pair_info));
1735                        if (vqc->num_queue_pairs == 0)
1736                                err_msg_format = true;
1737                }
1738                break;
1739        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1740                valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
1741                if (msglen >= valid_len) {
1742                        struct i40e_virtchnl_irq_map_info *vimi =
1743                            (struct i40e_virtchnl_irq_map_info *)msg;
1744                        valid_len += (vimi->num_vectors *
1745                                      sizeof(struct i40e_virtchnl_vector_map));
1746                        if (vimi->num_vectors == 0)
1747                                err_msg_format = true;
1748                }
1749                break;
1750        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1751        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1752                valid_len = sizeof(struct i40e_virtchnl_queue_select);
1753                break;
1754        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1755        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1756                valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
1757                if (msglen >= valid_len) {
1758                        struct i40e_virtchnl_ether_addr_list *veal =
1759                            (struct i40e_virtchnl_ether_addr_list *)msg;
1760                        valid_len += veal->num_elements *
1761                            sizeof(struct i40e_virtchnl_ether_addr);
1762                        if (veal->num_elements == 0)
1763                                err_msg_format = true;
1764                }
1765                break;
1766        case I40E_VIRTCHNL_OP_ADD_VLAN:
1767        case I40E_VIRTCHNL_OP_DEL_VLAN:
1768                valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
1769                if (msglen >= valid_len) {
1770                        struct i40e_virtchnl_vlan_filter_list *vfl =
1771                            (struct i40e_virtchnl_vlan_filter_list *)msg;
1772                        valid_len += vfl->num_elements * sizeof(u16);
1773                        if (vfl->num_elements == 0)
1774                                err_msg_format = true;
1775                }
1776                break;
1777        case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1778                valid_len = sizeof(struct i40e_virtchnl_promisc_info);
1779                break;
1780        case I40E_VIRTCHNL_OP_GET_STATS:
1781                valid_len = sizeof(struct i40e_virtchnl_queue_select);
1782                break;
1783        /* These are always errors coming from the VF. */
1784        case I40E_VIRTCHNL_OP_EVENT:
1785        case I40E_VIRTCHNL_OP_UNKNOWN:
1786        default:
1787                return -EPERM;
1789        }
1790        /* finally, verify the length matches the opcode and the message is well formed */
1791        if ((valid_len != msglen) || (err_msg_format)) {
1792                i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
1793                return -EINVAL;
1794        } else {
1795                return 0;
1796        }
1797}
1798
1799/**
1800 * i40e_vc_process_vf_msg
1801 * @pf: pointer to the pf structure
1802 * @vf_id: source vf id
1803 * @v_opcode: virtchnl operation code
1804 * @v_retval: return value carried in the message (unused)
1805 * @msg: pointer to the msg buffer
     * @msglen: msg length
1806 *
1807 * called from the common aeq/arq handler to
1808 * process request from vf
1809 **/
1810int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
1811                           u32 v_retval, u8 *msg, u16 msglen)
1812{
1813        struct i40e_hw *hw = &pf->hw;
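            /* the aeq/arq handler passes the absolute vf_id; convert it to
             * an index into this PF's vf array before any lookups
             */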
1814        unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
1815        struct i40e_vf *vf;
1816        int ret;
1817
1818        pf->vf_aq_requests++;
1819        if (local_vf_id >= pf->num_alloc_vfs)
1820                return -EINVAL;
1821        vf = &(pf->vf[local_vf_id]);
1822        /* perform basic checks on the msg */
1823        ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
1824
1825        if (ret) {
1826                dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
1827                        local_vf_id, v_opcode, msglen);
1828                return ret;
1829        }
1830
1831        switch (v_opcode) {
1832        case I40E_VIRTCHNL_OP_VERSION:
1833                ret = i40e_vc_get_version_msg(vf);
1834                break;
1835        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1836                ret = i40e_vc_get_vf_resources_msg(vf);
1837                break;
1838        case I40E_VIRTCHNL_OP_RESET_VF:
1839                i40e_vc_reset_vf_msg(vf);
1840                ret = 0;
1841                break;
1842        case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1843                ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
1844                break;
1845        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1846                ret = i40e_vc_config_queues_msg(vf, msg, msglen);
1847                break;
1848        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1849                ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
1850                break;
1851        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1852                ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
1853                break;
1854        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1855                ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
1856                break;
1857        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1858                ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
1859                break;
1860        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1861                ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
1862                break;
1863        case I40E_VIRTCHNL_OP_ADD_VLAN:
1864                ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
1865                break;
1866        case I40E_VIRTCHNL_OP_DEL_VLAN:
1867                ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
1868                break;
1869        case I40E_VIRTCHNL_OP_GET_STATS:
1870                ret = i40e_vc_get_stats_msg(vf, msg, msglen);
1871                break;
1872        case I40E_VIRTCHNL_OP_UNKNOWN:
1873        default:
1874                dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
1875                        v_opcode, local_vf_id);
1876                ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
1877                                              I40E_ERR_NOT_IMPLEMENTED);
1878                break;
1879        }
1880
1881        return ret;
1882}
1883
1884/**
1885 * i40e_vc_process_vflr_event
1886 * @pf: pointer to the pf structure
1887 *
1888 * called from the VFLR irq handler to
1889 * free up vf resources and state variables
1890 **/
1891int i40e_vc_process_vflr_event(struct i40e_pf *pf)
1892{
1893        u32 reg, reg_idx, bit_idx, vf_id;
1894        struct i40e_hw *hw = &pf->hw;
1895        struct i40e_vf *vf;
1896
1897        if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
1898                return 0;
1899
1900        clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
1901        for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
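                    /* GLGEN_VFLRSTAT is a bit array indexed by absolute VF
                     * id, 32 VFs per 32-bit register, hence the /32 and %32
                     */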
1902                reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1903                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1904                /* read GLGEN_VFLRSTAT register to find out the flr vfs */
1905                vf = &pf->vf[vf_id];
1906                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
1907                if (reg & (1 << bit_idx)) {
1908                        /* clear the bit in GLGEN_VFLRSTAT */
1909                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
1910
1911                        if (!test_bit(__I40E_DOWN, &pf->state))
1912                                i40e_reset_vf(vf, true);
1913                }
1914        }
1915
1916        /* re-enable vflr interrupt cause */
1917        reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1918        reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1919        wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1920        i40e_flush(hw);
1921
1922        return 0;
1923}
1924
1925/**
1926 * i40e_vc_vf_broadcast
1927 * @pf: pointer to the pf structure
1928 * @v_opcode: operation code
1929 * @v_retval: return value
1930 * @msg: pointer to the msg buffer
1931 * @msglen: msg length
1932 *
1933 * send a message to all VFs on a given PF
1934 **/
1935static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
1936                                 enum i40e_virtchnl_ops v_opcode,
1937                                 i40e_status v_retval, u8 *msg,
1938                                 u16 msglen)
1939{
1940        struct i40e_hw *hw = &pf->hw;
1941        struct i40e_vf *vf = pf->vf;
1942        int i;
1943
1944        for (i = 0; i < pf->num_alloc_vfs; i++) {
1945                /* Ignore return value on purpose - a given VF may fail, but
1946                 * we need to keep going and send to all of them
1947                 */
1948                i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
1949                                       msg, msglen, NULL);
1950                vf++;
1951        }
1952}
1953
1954/**
1955 * i40e_vc_notify_link_state
1956 * @pf: pointer to the pf structure
1957 *
1958 * send a link status message to all VFs on a given PF
1959 **/
1960void i40e_vc_notify_link_state(struct i40e_pf *pf)
1961{
1962        struct i40e_virtchnl_pf_event pfe;
1963        struct i40e_hw *hw = &pf->hw;
1964        struct i40e_vf *vf = pf->vf;
1965        struct i40e_link_status *ls = &pf->hw.phy.link_info;
1966        int i;
1967
1968        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
1969        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
1970        for (i = 0; i < pf->num_alloc_vfs; i++) {
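                    /* a link state forced by the host through
                     * ndo_set_vf_link_state overrides the physical link
                     * status for that VF
                     */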
1971                if (vf->link_forced) {
1972                        pfe.event_data.link_event.link_status = vf->link_up;
1973                        pfe.event_data.link_event.link_speed =
1974                                (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
1975                } else {
1976                        pfe.event_data.link_event.link_status =
1977                                ls->link_info & I40E_AQ_LINK_UP;
1978                        pfe.event_data.link_event.link_speed = ls->link_speed;
1979                }
1980                i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
1981                                       0, (u8 *)&pfe, sizeof(pfe),
1982                                       NULL);
1983                vf++;
1984        }
1985}
1986
1987/**
1988 * i40e_vc_notify_reset
1989 * @pf: pointer to the pf structure
1990 *
1991 * indicate a pending reset to all VFs on a given PF
1992 **/
1993void i40e_vc_notify_reset(struct i40e_pf *pf)
1994{
1995        struct i40e_virtchnl_pf_event pfe;
1996
1997        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
1998        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
1999        i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
2000                             (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
2001}
2002
2003/**
2004 * i40e_vc_notify_vf_reset
2005 * @vf: pointer to the vf structure
2006 *
2007 * indicate a pending reset to the given VF
2008 **/
2009void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
2010{
2011        struct i40e_virtchnl_pf_event pfe;
2012
2013        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
2014        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
2015        i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
2016                               I40E_SUCCESS, (u8 *)&pfe,
2017                               sizeof(struct i40e_virtchnl_pf_event), NULL);
2018}
2019
2020/**
2021 * i40e_ndo_set_vf_mac
2022 * @netdev: network interface device structure
2023 * @vf_id: vf identifier
2024 * @mac: mac address
2025 *
2026 * program vf mac address
2027 **/
2028int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2029{
2030        struct i40e_netdev_priv *np = netdev_priv(netdev);
2031        struct i40e_vsi *vsi = np->vsi;
2032        struct i40e_pf *pf = vsi->back;
2033        struct i40e_mac_filter *f;
2034        struct i40e_vf *vf;
2035        int ret = 0;
2036
2037        /* validate the request */
2038        if (vf_id >= pf->num_alloc_vfs) {
2039                dev_err(&pf->pdev->dev,
2040                        "Invalid VF Identifier %d\n", vf_id);
2041                ret = -EINVAL;
2042                goto error_param;
2043        }
2044
2045        vf = &(pf->vf[vf_id]);
2046        vsi = pf->vsi[vf->lan_vsi_index];
2047        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2048                dev_err(&pf->pdev->dev,
2049                        "Uninitialized VF %d\n", vf_id);
2050                ret = -EINVAL;
2051                goto error_param;
2052        }
2053
2054        if (!is_valid_ether_addr(mac)) {
2055                dev_err(&pf->pdev->dev,
2056                        "Invalid VF ethernet address\n");
2057                ret = -EINVAL;
2058                goto error_param;
2059        }
2060
2061        /* delete the temporary mac address */
2062        i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
2063                        true, false);
2064
2065        /* Delete all the filters for this VSI - we're going to kill it
2066         * anyway.
2067         */
2068        list_for_each_entry(f, &vsi->mac_filter_list, list)
2069                i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
2070
2071        dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
2072        /* program mac filter */
2073        if (i40e_sync_vsi_filters(vsi)) {
2074                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
2075                ret = -EIO;
2076                goto error_param;
2077        }
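            /* remember the host-assigned MAC; i40e_check_vf_permission()
             * uses pf_set_mac to keep the VF from overriding it later
             */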
2078        ether_addr_copy(vf->default_lan_addr.addr, mac);
2079        vf->pf_set_mac = true;
2080        dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2081        ret = 0;
2082
2083error_param:
2084        return ret;
2085}
2086
2087/**
2088 * i40e_ndo_set_vf_port_vlan
2089 * @netdev: network interface device structure
2090 * @vf_id: vf identifier
2091 * @vlan_id: VLAN id to be set
2092 * @qos: priority setting
2093 *
2094 * program vf vlan id and/or qos
2095 **/
2096int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
2097                              int vf_id, u16 vlan_id, u8 qos)
2098{
2099        struct i40e_netdev_priv *np = netdev_priv(netdev);
2100        struct i40e_pf *pf = np->vsi->back;
2101        struct i40e_vsi *vsi;
2102        struct i40e_vf *vf;
2103        int ret = 0;
2104
2105        /* validate the request */
2106        if (vf_id >= pf->num_alloc_vfs) {
2107                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2108                ret = -EINVAL;
2109                goto error_pvid;
2110        }
2111
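            /* VLAN id is 12 bits wide, 802.1p priority (qos) is 3 bits (0-7) */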
2112        if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
2113                dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2114                ret = -EINVAL;
2115                goto error_pvid;
2116        }
2117
2118        vf = &(pf->vf[vf_id]);
2119        vsi = pf->vsi[vf->lan_vsi_index];
2120        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2121                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2122                ret = -EINVAL;
2123                goto error_pvid;
2124        }
2125
2126        if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
2127                dev_err(&pf->pdev->dev,
2128                        "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2129                        vf_id);
2130                /* Administrator Error - knock the VF offline until the
2131                 * administrator reconfigures the VF network correctly
2132                 * and then reloads the VF driver.
2133                 */
2134                i40e_vc_disable_vf(pf, vf);
2135        }
2136
2137        /* Check for condition where there was already a port VLAN ID
2138         * filter set and now it is being deleted by setting it to zero.
2139         * Additionally check for the condition where there was a port
2140         * VLAN but now there is a new and different port VLAN being set.
2141         * Before deleting all the old VLAN filters we must add new ones
2142         * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
2143         * MAC addresses deleted.
2144         */
2145        if ((!(vlan_id || qos) ||
2146            (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)) !=
2147                        le16_to_cpu(vsi->info.pvid)) && vsi->info.pvid)
2148                ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
2149
2150        if (vsi->info.pvid) {
2151                /* kill old VLAN */
2152                ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2153                                               VLAN_VID_MASK));
2154                if (ret) {
2155                        dev_info(&vsi->back->pdev->dev,
2156                                 "remove VLAN failed, ret=%d, aq_err=%d\n",
2157                                 ret, pf->hw.aq.asq_last_status);
2158                }
2159        }
2160        if (vlan_id || qos)
2161                ret = i40e_vsi_add_pvid(vsi,
2162                                vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
2163        else
2164                i40e_vsi_remove_pvid(vsi);
2165
2166        if (vlan_id) {
2167                dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2168                         vlan_id, qos, vf_id);
2169
2170                /* add new VLAN filter */
2171                ret = i40e_vsi_add_vlan(vsi, vlan_id);
2172                if (ret) {
2173                        dev_info(&vsi->back->pdev->dev,
2174                                 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
2175                                 vsi->back->hw.aq.asq_last_status);
2176                        goto error_pvid;
2177                }
2178                /* Kill non-vlan MAC filters - ignore error return since
2179                 * there might not be any non-vlan MAC filters.
2180                 */
2181                i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
2182        }
2183
2184        if (ret) {
2185                dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
2186                goto error_pvid;
2187        }
2188        /* The Port VLAN needs to be saved across resets the same as the
2189         * default LAN MAC address.
2190         */
2191        vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2192        ret = 0;
2193
2194error_pvid:
2195        return ret;
2196}
2197
2198#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */
2199#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */
2200/**
2201 * i40e_ndo_set_vf_bw
2202 * @netdev: network interface device structure
2203 * @vf_id: vf identifier
2204 * @min_tx_rate: minimum tx rate in Mbps (not supported, must be zero)
     * @max_tx_rate: maximum tx rate in Mbps
2205 *
2206 * configure vf tx rate
2207 **/
2208int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
2209                       int max_tx_rate)
2210{
2211        struct i40e_netdev_priv *np = netdev_priv(netdev);
2212        struct i40e_pf *pf = np->vsi->back;
2213        struct i40e_vsi *vsi;
2214        struct i40e_vf *vf;
2215        int speed = 0;
2216        int ret = 0;
2217
2218        /* validate the request */
2219        if (vf_id >= pf->num_alloc_vfs) {
2220                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
2221                ret = -EINVAL;
2222                goto error;
2223        }
2224
2225        if (min_tx_rate) {
2226                dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
2227                        min_tx_rate, vf_id);
2228                return -EINVAL;
2229        }
2230
2231        vf = &(pf->vf[vf_id]);
2232        vsi = pf->vsi[vf->lan_vsi_index];
2233        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2234                dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
2235                ret = -EINVAL;
2236                goto error;
2237        }
2238
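            /* convert the negotiated link speed to Mbps so the requested
             * max_tx_rate (also in Mbps) can be checked against it
             */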
2239        switch (pf->hw.phy.link_info.link_speed) {
2240        case I40E_LINK_SPEED_40GB:
2241                speed = 40000;
2242                break;
2243        case I40E_LINK_SPEED_10GB:
2244                speed = 10000;
2245                break;
2246        case I40E_LINK_SPEED_1GB:
2247                speed = 1000;
2248                break;
2249        default:
2250                break;
2251        }
2252
2253        if (max_tx_rate > speed) {
2254                dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.\n",
2255                        max_tx_rate, vf->vf_id);
2256                ret = -EINVAL;
2257                goto error;
2258        }
2259
2260        if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
2261                dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
2262                max_tx_rate = 50;
2263        }
2264
2265        /* Tx rate credits are in values of 50Mbps, 0 is disabled */
2266        ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
2267                                          max_tx_rate / I40E_BW_CREDIT_DIVISOR,
2268                                          I40E_MAX_BW_INACTIVE_ACCUM, NULL);
2269        if (ret) {
2270                dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
2271                        ret);
2272                ret = -EIO;
2273                goto error;
2274        }
2275        vf->tx_rate = max_tx_rate;
2276error:
2277        return ret;
2278}
2279
2280/**
2281 * i40e_ndo_get_vf_config
2282 * @netdev: network interface device structure
2283 * @vf_id: vf identifier
2284 * @ivi: vf configuration structure
2285 *
2286 * return vf configuration
2287 **/
2288int i40e_ndo_get_vf_config(struct net_device *netdev,
2289                           int vf_id, struct ifla_vf_info *ivi)
2290{
2291        struct i40e_netdev_priv *np = netdev_priv(netdev);
2292        struct i40e_vsi *vsi = np->vsi;
2293        struct i40e_pf *pf = vsi->back;
2294        struct i40e_vf *vf;
2295        int ret = 0;
2296
2297        /* validate the request */
2298        if (vf_id >= pf->num_alloc_vfs) {
2299                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2300                ret = -EINVAL;
2301                goto error_param;
2302        }
2303
2304        vf = &(pf->vf[vf_id]);
2305        /* first vsi is always the LAN vsi */
2306        vsi = pf->vsi[vf->lan_vsi_index];
2307        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2308                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2309                ret = -EINVAL;
2310                goto error_param;
2311        }
2312
2313        ivi->vf = vf_id;
2314
2315        memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
2316
2317        ivi->max_tx_rate = vf->tx_rate;
2318        ivi->min_tx_rate = 0;
2319        ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
2320        ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
2321                   I40E_VLAN_PRIORITY_SHIFT;
2322        if (!vf->link_forced)
2323                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
2324        else if (vf->link_up)
2325                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2326        else
2327                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2328        ivi->spoofchk = vf->spoofchk;
2329        ret = 0;
2330
2331error_param:
2332        return ret;
2333}
2334
2335/**
2336 * i40e_ndo_set_vf_link_state
2337 * @netdev: network interface device structure
2338 * @vf_id: vf identifier
2339 * @link: required link state
2340 *
2341 * Set the link state of a specified VF, regardless of physical link state
2342 **/
2343int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2344{
2345        struct i40e_netdev_priv *np = netdev_priv(netdev);
2346        struct i40e_pf *pf = np->vsi->back;
2347        struct i40e_virtchnl_pf_event pfe;
2348        struct i40e_hw *hw = &pf->hw;
2349        struct i40e_vf *vf;
2350        int ret = 0;
2351
2352        /* validate the request */
2353        if (vf_id >= pf->num_alloc_vfs) {
2354                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2355                ret = -EINVAL;
2356                goto error_out;
2357        }
2358
2359        vf = &pf->vf[vf_id];
2360
2361        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
2362        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
2363
2364        switch (link) {
2365        case IFLA_VF_LINK_STATE_AUTO:
2366                vf->link_forced = false;
2367                pfe.event_data.link_event.link_status =
2368                        pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
2369                pfe.event_data.link_event.link_speed =
2370                        pf->hw.phy.link_info.link_speed;
2371                break;
2372        case IFLA_VF_LINK_STATE_ENABLE:
2373                vf->link_forced = true;
2374                vf->link_up = true;
2375                pfe.event_data.link_event.link_status = true;
2376                pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
2377                break;
2378        case IFLA_VF_LINK_STATE_DISABLE:
2379                vf->link_forced = true;
2380                vf->link_up = false;
2381                pfe.event_data.link_event.link_status = false;
2382                pfe.event_data.link_event.link_speed = 0;
2383                break;
2384        default:
2385                ret = -EINVAL;
2386                goto error_out;
2387        }
2388        /* Notify the VF of its new link state */
2389        i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
2390                               0, (u8 *)&pfe, sizeof(pfe), NULL);
2391
2392error_out:
2393        return ret;
2394}
2395
2396/**
2397 * i40e_ndo_set_vf_spoofchk
2398 * @netdev: network interface device structure
2399 * @vf_id: vf identifier
2400 * @enable: flag to enable or disable feature
2401 *
2402 * Enable or disable VF spoof checking
2403 **/
2404int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable)
2405{
2406        struct i40e_netdev_priv *np = netdev_priv(netdev);
2407        struct i40e_vsi *vsi = np->vsi;
2408        struct i40e_pf *pf = vsi->back;
2409        struct i40e_vsi_context ctxt;
2410        struct i40e_hw *hw = &pf->hw;
2411        struct i40e_vf *vf;
2412        int ret = 0;
2413
2414        /* validate the request */
2415        if (vf_id >= pf->num_alloc_vfs) {
2416                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2417                ret = -EINVAL;
2418                goto out;
2419        }
2420
2421        vf = &(pf->vf[vf_id]);
2422
2423        if (enable == vf->spoofchk)
2424                goto out;
2425
2426        vf->spoofchk = enable;
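            /* update only the security section of the VSI context; when
             * disabling, sec_flags is left zero so the MAC check flag is
             * cleared by this update
             */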
2427        memset(&ctxt, 0, sizeof(ctxt));
2428        ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
2429        ctxt.pf_num = pf->hw.pf_id;
2430        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
2431        if (enable)
2432                ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
2433        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2434        if (ret) {
2435                dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
2436                        ret);
2437                ret = -EIO;
2438        }
2439out:
2440        return ret;
2441}
2442