linux/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
   1/*******************************************************************************
   2 *
   3 * Intel Ethernet Controller XL710 Family Linux Driver
   4 * Copyright(c) 2013 - 2015 Intel Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms and conditions of the GNU General Public License,
   8 * version 2, as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope it will be useful, but WITHOUT
  11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13 * more details.
  14 *
  15 * You should have received a copy of the GNU General Public License along
  16 * with this program.  If not, see <http://www.gnu.org/licenses/>.
  17 *
  18 * The full GNU General Public License is included in this distribution in
  19 * the file called "COPYING".
  20 *
  21 * Contact Information:
  22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24 *
  25 ******************************************************************************/
  26
  27#include "i40e.h"
  28
  29/*********************notification routines***********************/
  30
  31/**
  32 * i40e_vc_vf_broadcast
  33 * @pf: pointer to the PF structure
   34 * @v_opcode: operation code
   35 * @v_retval: return value
  36 * @msg: pointer to the msg buffer
  37 * @msglen: msg length
  38 *
  39 * send a message to all VFs on a given PF
  40 **/
  41static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
  42                                 enum i40e_virtchnl_ops v_opcode,
  43                                 i40e_status v_retval, u8 *msg,
  44                                 u16 msglen)
  45{
  46        struct i40e_hw *hw = &pf->hw;
  47        struct i40e_vf *vf = pf->vf;
  48        int i;
  49
  50        for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
  51                int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
   52                /* Not all VFs are enabled, so skip the ones that are not */
  53                if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
  54                    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
  55                        continue;
  56
  57                /* Ignore return value on purpose - a given VF may fail, but
  58                 * we need to keep going and send to all of them
  59                 */
  60                i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
  61                                       msg, msglen, NULL);
  62        }
  63}
  64
  65/**
   66 * i40e_vc_notify_vf_link_state
  67 * @vf: pointer to the VF structure
  68 *
  69 * send a link status message to a single VF
  70 **/
  71static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
  72{
  73        struct i40e_virtchnl_pf_event pfe;
  74        struct i40e_pf *pf = vf->pf;
  75        struct i40e_hw *hw = &pf->hw;
  76        struct i40e_link_status *ls = &pf->hw.phy.link_info;
  77        int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
  78
  79        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
  80        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
  81        if (vf->link_forced) {
  82                pfe.event_data.link_event.link_status = vf->link_up;
  83                pfe.event_data.link_event.link_speed =
  84                        (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
  85        } else {
  86                pfe.event_data.link_event.link_status =
  87                        ls->link_info & I40E_AQ_LINK_UP;
  88                pfe.event_data.link_event.link_speed = ls->link_speed;
  89        }
  90        i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
  91                               0, (u8 *)&pfe, sizeof(pfe), NULL);
  92}
  93
  94/**
  95 * i40e_vc_notify_link_state
  96 * @pf: pointer to the PF structure
  97 *
  98 * send a link status message to all VFs on a given PF
  99 **/
 100void i40e_vc_notify_link_state(struct i40e_pf *pf)
 101{
 102        int i;
 103
 104        for (i = 0; i < pf->num_alloc_vfs; i++)
 105                i40e_vc_notify_vf_link_state(&pf->vf[i]);
 106}
 107
 108/**
 109 * i40e_vc_notify_reset
 110 * @pf: pointer to the PF structure
 111 *
 112 * indicate a pending reset to all VFs on a given PF
 113 **/
 114void i40e_vc_notify_reset(struct i40e_pf *pf)
 115{
 116        struct i40e_virtchnl_pf_event pfe;
 117
 118        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
 119        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
 120        i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
 121                             (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
 122}
 123
 124/**
 125 * i40e_vc_notify_vf_reset
 126 * @vf: pointer to the VF structure
 127 *
 128 * indicate a pending reset to the given VF
 129 **/
 130void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 131{
 132        struct i40e_virtchnl_pf_event pfe;
 133        int abs_vf_id;
 134
 135        /* validate the request */
 136        if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
 137                return;
 138
 139        /* verify if the VF is in either init or active before proceeding */
 140        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
 141            !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
 142                return;
 143
 144        abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
 145
 146        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
 147        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
 148        i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
 149                               0, (u8 *)&pfe,
 150                               sizeof(struct i40e_virtchnl_pf_event), NULL);
 151}
 152/***********************misc routines*****************************/
 153
 154/**
 155 * i40e_vc_disable_vf
 156 * @pf: pointer to the PF info
 157 * @vf: pointer to the VF info
 158 *
 159 * Disable the VF through a SW reset
 160 **/
 161static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
 162{
 163        struct i40e_hw *hw = &pf->hw;
 164        u32 reg;
 165
 166        reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
 167        reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
 168        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 169        i40e_flush(hw);
 170}
 171
 172/**
 173 * i40e_vc_isvalid_vsi_id
 174 * @vf: pointer to the VF info
 175 * @vsi_id: VF relative VSI id
 176 *
 177 * check for the valid VSI id
 178 **/
 179static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
 180{
 181        struct i40e_pf *pf = vf->pf;
 182        struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 183
 184        return (vsi && (vsi->vf_id == vf->vf_id));
 185}
 186
 187/**
 188 * i40e_vc_isvalid_queue_id
 189 * @vf: pointer to the VF info
 190 * @vsi_id: vsi id
 191 * @qid: vsi relative queue id
 192 *
 193 * check for the valid queue id
 194 **/
 195static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
 196                                            u8 qid)
 197{
 198        struct i40e_pf *pf = vf->pf;
 199        struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 200
 201        return (vsi && (qid < vsi->alloc_queue_pairs));
 202}
 203
 204/**
 205 * i40e_vc_isvalid_vector_id
 206 * @vf: pointer to the VF info
 207 * @vector_id: VF relative vector id
 208 *
 209 * check for the valid vector id
 210 **/
 211static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
 212{
 213        struct i40e_pf *pf = vf->pf;
 214
 215        return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
 216}
 217
 218/***********************vf resource mgmt routines*****************/
 219
 220/**
 221 * i40e_vc_get_pf_queue_id
 222 * @vf: pointer to the VF info
 223 * @vsi_id: id of VSI as provided by the FW
 224 * @vsi_queue_id: vsi relative queue id
 225 *
 226 * return PF relative queue id
 227 **/
 228static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
 229                                   u8 vsi_queue_id)
 230{
 231        struct i40e_pf *pf = vf->pf;
 232        struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 233        u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
 234
 235        if (!vsi)
 236                return pf_queue_id;
 237
 238        if (le16_to_cpu(vsi->info.mapping_flags) &
 239            I40E_AQ_VSI_QUE_MAP_NONCONTIG)
 240                pf_queue_id =
 241                        le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
 242        else
 243                pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
 244                              vsi_queue_id;
 245
 246        return pf_queue_id;
 247}
 248
 249/**
 250 * i40e_config_irq_link_list
 251 * @vf: pointer to the VF info
 252 * @vsi_id: id of VSI as given by the FW
 253 * @vecmap: irq map info
 254 *
 255 * configure irq link list from the map
 256 **/
 257static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
 258                                      struct i40e_virtchnl_vector_map *vecmap)
 259{
 260        unsigned long linklistmap = 0, tempmap;
 261        struct i40e_pf *pf = vf->pf;
 262        struct i40e_hw *hw = &pf->hw;
 263        u16 vsi_queue_id, pf_queue_id;
 264        enum i40e_queue_type qtype;
 265        u16 next_q, vector_id;
 266        u32 reg, reg_idx;
 267        u16 itr_idx = 0;
 268
 269        vector_id = vecmap->vector_id;
 270        /* setup the head */
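             /* vector 0 maps to the per-VF LNKLST0 register; the remaining
              * vectors index into the LNKLSTN array, which reserves
              * (num_msix_vectors_vf - 1) entries per VF
              */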
 271        if (0 == vector_id)
 272                reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 273        else
 274                reg_idx = I40E_VPINT_LNKLSTN(
 275                     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
 276                     (vector_id - 1));
 277
 278        if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
 279                /* Special case - No queues mapped on this vector */
 280                wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
 281                goto irq_list_done;
 282        }
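             /* Build a bitmap of the queues to chain: bit
              * (queue * I40E_VIRTCHNL_SUPPORTED_QTYPES) marks the Rx queue and
              * the following bit marks the Tx queue for that VSI queue index.
              */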
 283        tempmap = vecmap->rxq_map;
 284        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 285                linklistmap |= (1 <<
 286                                (I40E_VIRTCHNL_SUPPORTED_QTYPES *
 287                                 vsi_queue_id));
 288        }
 289
 290        tempmap = vecmap->txq_map;
 291        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
 292                linklistmap |= (1 <<
 293                                (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
 294                                 + 1));
 295        }
 296
 297        next_q = find_first_bit(&linklistmap,
 298                                (I40E_MAX_VSI_QP *
 299                                 I40E_VIRTCHNL_SUPPORTED_QTYPES));
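             /* decode the first set bit into a VSI-relative queue index and
              * queue type, then translate it to the PF-absolute queue id that
              * heads the link list
              */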
  300        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
  301        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 302        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 303        reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
 304
 305        wr32(hw, reg_idx, reg);
 306
 307        while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
 308                switch (qtype) {
 309                case I40E_QUEUE_TYPE_RX:
 310                        reg_idx = I40E_QINT_RQCTL(pf_queue_id);
 311                        itr_idx = vecmap->rxitr_idx;
 312                        break;
 313                case I40E_QUEUE_TYPE_TX:
 314                        reg_idx = I40E_QINT_TQCTL(pf_queue_id);
 315                        itr_idx = vecmap->txitr_idx;
 316                        break;
 317                default:
 318                        break;
 319                }
 320
 321                next_q = find_next_bit(&linklistmap,
 322                                       (I40E_MAX_VSI_QP *
 323                                        I40E_VIRTCHNL_SUPPORTED_QTYPES),
 324                                       next_q + 1);
 325                if (next_q <
 326                    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
 327                        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
 328                        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
 329                        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
 330                                                              vsi_queue_id);
 331                } else {
 332                        pf_queue_id = I40E_QUEUE_END_OF_LIST;
 333                        qtype = 0;
 334                }
 335
  336                /* format for the RQCTL & TQCTL regs is the same */
 337                reg = (vector_id) |
 338                    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
 339                    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
 340                    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
 341                    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
 342                wr32(hw, reg_idx, reg);
 343        }
 344
 345irq_list_done:
 346        i40e_flush(hw);
 347}
 348
 349/**
 350 * i40e_config_vsi_tx_queue
 351 * @vf: pointer to the VF info
 352 * @vsi_id: id of VSI as provided by the FW
 353 * @vsi_queue_id: vsi relative queue index
 354 * @info: config. info
 355 *
 356 * configure tx queue
 357 **/
 358static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
 359                                    u16 vsi_queue_id,
 360                                    struct i40e_virtchnl_txq_info *info)
 361{
 362        struct i40e_pf *pf = vf->pf;
 363        struct i40e_hw *hw = &pf->hw;
 364        struct i40e_hmc_obj_txq tx_ctx;
 365        struct i40e_vsi *vsi;
 366        u16 pf_queue_id;
 367        u32 qtx_ctl;
 368        int ret = 0;
 369
 370        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 371        vsi = i40e_find_vsi_from_id(pf, vsi_id);
 372
 373        /* clear the context structure first */
 374        memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
 375
 376        /* only set the required fields */
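             /* the HMC context stores the ring base address in 128-byte units */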
 377        tx_ctx.base = info->dma_ring_addr / 128;
 378        tx_ctx.qlen = info->ring_len;
 379        tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
 380        tx_ctx.rdylist_act = 0;
 381        tx_ctx.head_wb_ena = info->headwb_enabled;
 382        tx_ctx.head_wb_addr = info->dma_headwb_addr;
 383
 384        /* clear the context in the HMC */
 385        ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
 386        if (ret) {
 387                dev_err(&pf->pdev->dev,
 388                        "Failed to clear VF LAN Tx queue context %d, error: %d\n",
 389                        pf_queue_id, ret);
 390                ret = -ENOENT;
 391                goto error_context;
 392        }
 393
 394        /* set the context in the HMC */
 395        ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
 396        if (ret) {
 397                dev_err(&pf->pdev->dev,
 398                        "Failed to set VF LAN Tx queue context %d error: %d\n",
 399                        pf_queue_id, ret);
 400                ret = -ENOENT;
 401                goto error_context;
 402        }
 403
 404        /* associate this queue with the PCI VF function */
 405        qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
 406        qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
 407                    & I40E_QTX_CTL_PF_INDX_MASK);
 408        qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
 409                     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
 410                    & I40E_QTX_CTL_VFVM_INDX_MASK);
 411        wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
 412        i40e_flush(hw);
 413
 414error_context:
 415        return ret;
 416}
 417
 418/**
 419 * i40e_config_vsi_rx_queue
 420 * @vf: pointer to the VF info
 421 * @vsi_id: id of VSI  as provided by the FW
 422 * @vsi_queue_id: vsi relative queue index
 423 * @info: config. info
 424 *
 425 * configure rx queue
 426 **/
 427static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 428                                    u16 vsi_queue_id,
 429                                    struct i40e_virtchnl_rxq_info *info)
 430{
 431        struct i40e_pf *pf = vf->pf;
 432        struct i40e_hw *hw = &pf->hw;
 433        struct i40e_hmc_obj_rxq rx_ctx;
 434        u16 pf_queue_id;
 435        int ret = 0;
 436
 437        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
 438
 439        /* clear the context structure first */
 440        memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
 441
 442        /* only set the required fields */
 443        rx_ctx.base = info->dma_ring_addr / 128;
 444        rx_ctx.qlen = info->ring_len;
 445
 446        if (info->splithdr_enabled) {
 447                rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
 448                                  I40E_RX_SPLIT_IP      |
 449                                  I40E_RX_SPLIT_TCP_UDP |
 450                                  I40E_RX_SPLIT_SCTP;
 451                /* header length validation */
 452                if (info->hdr_size > ((2 * 1024) - 64)) {
 453                        ret = -EINVAL;
 454                        goto error_param;
 455                }
 456                rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 457
 458                /* set splitalways mode 10b */
 459                rx_ctx.dtype = 0x2;
 460        }
 461
 462        /* databuffer length validation */
 463        if (info->databuffer_size > ((16 * 1024) - 128)) {
 464                ret = -EINVAL;
 465                goto error_param;
 466        }
 467        rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
 468
 469        /* max pkt. length validation */
 470        if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
 471                ret = -EINVAL;
 472                goto error_param;
 473        }
 474        rx_ctx.rxmax = info->max_pkt_size;
 475
 476        /* enable 32bytes desc always */
 477        rx_ctx.dsize = 1;
 478
 479        /* default values */
 480        rx_ctx.lrxqthresh = 2;
 481        rx_ctx.crcstrip = 1;
 482        rx_ctx.prefena = 1;
 483        rx_ctx.l2tsel = 1;
 484
 485        /* clear the context in the HMC */
 486        ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
 487        if (ret) {
 488                dev_err(&pf->pdev->dev,
 489                        "Failed to clear VF LAN Rx queue context %d, error: %d\n",
 490                        pf_queue_id, ret);
 491                ret = -ENOENT;
 492                goto error_param;
 493        }
 494
 495        /* set the context in the HMC */
 496        ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
 497        if (ret) {
 498                dev_err(&pf->pdev->dev,
 499                        "Failed to set VF LAN Rx queue context %d error: %d\n",
 500                        pf_queue_id, ret);
 501                ret = -ENOENT;
 502                goto error_param;
 503        }
 504
 505error_param:
 506        return ret;
 507}
 508
 509/**
 510 * i40e_alloc_vsi_res
 511 * @vf: pointer to the VF info
 512 * @type: type of VSI to allocate
 513 *
 514 * alloc VF vsi context & resources
 515 **/
 516static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 517{
 518        struct i40e_mac_filter *f = NULL;
 519        struct i40e_pf *pf = vf->pf;
 520        struct i40e_vsi *vsi;
 521        int ret = 0;
 522
 523        vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
 524
 525        if (!vsi) {
 526                dev_err(&pf->pdev->dev,
 527                        "add vsi failed for VF %d, aq_err %d\n",
 528                        vf->vf_id, pf->hw.aq.asq_last_status);
 529                ret = -ENOENT;
 530                goto error_alloc_vsi_res;
 531        }
 532        if (type == I40E_VSI_SRIOV) {
 533                u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 534                vf->lan_vsi_idx = vsi->idx;
 535                vf->lan_vsi_id = vsi->id;
 536                /* If the port VLAN has been configured and then the
 537                 * VF driver was removed then the VSI port VLAN
 538                 * configuration was destroyed.  Check if there is
 539                 * a port VLAN and restore the VSI configuration if
 540                 * needed.
 541                 */
 542                if (vf->port_vlan_id)
 543                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 544                f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
 545                                    vf->port_vlan_id, true, false);
 546                if (!f)
 547                        dev_info(&pf->pdev->dev,
 548                                 "Could not allocate VF MAC addr\n");
 549                f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
 550                                    true, false);
 551                if (!f)
 552                        dev_info(&pf->pdev->dev,
 553                                 "Could not allocate VF broadcast filter\n");
 554        }
 555
 556        /* program mac filter */
 557        ret = i40e_sync_vsi_filters(vsi);
 558        if (ret)
 559                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
 560
 561        /* Set VF bandwidth if specified */
 562        if (vf->tx_rate) {
 563                ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
 564                                                  vf->tx_rate / 50, 0, NULL);
 565                if (ret)
 566                        dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
 567                                vf->vf_id, ret);
 568        }
 569
 570error_alloc_vsi_res:
 571        return ret;
 572}
 573
 574/**
 575 * i40e_enable_vf_mappings
 576 * @vf: pointer to the VF info
 577 *
 578 * enable VF mappings
 579 **/
 580static void i40e_enable_vf_mappings(struct i40e_vf *vf)
 581{
 582        struct i40e_pf *pf = vf->pf;
 583        struct i40e_hw *hw = &pf->hw;
 584        u32 reg, total_queue_pairs = 0;
 585        int j;
 586
 587        /* Tell the hardware we're using noncontiguous mapping. HW requires
 588         * that VF queues be mapped using this method, even when they are
 589         * contiguous in real life
 590         */
 591        wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
 592             I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
 593
 594        /* enable VF vplan_qtable mappings */
 595        reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
 596        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
 597
 598        /* map PF queues to VF queues */
 599        for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
 600                u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
 601                reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
 602                wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
 603                total_queue_pairs++;
 604        }
 605
 606        /* map PF queues to VSI */
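             /* each VSILAN_QTABLE register carries two queue indexes, one in
              * the low and one in the high 16 bits; 0x7FF marks an unused slot
              */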
 607        for (j = 0; j < 7; j++) {
 608                if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
 609                        reg = 0x07FF07FF;       /* unused */
 610                } else {
 611                        u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
 612                                                          j * 2);
 613                        reg = qid;
 614                        qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
 615                                                      (j * 2) + 1);
 616                        reg |= qid << 16;
 617                }
 618                wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
 619        }
 620
 621        i40e_flush(hw);
 622}
 623
 624/**
 625 * i40e_disable_vf_mappings
 626 * @vf: pointer to the VF info
 627 *
 628 * disable VF mappings
 629 **/
 630static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 631{
 632        struct i40e_pf *pf = vf->pf;
 633        struct i40e_hw *hw = &pf->hw;
 634        int i;
 635
 636        /* disable qp mappings */
 637        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
 638        for (i = 0; i < I40E_MAX_VSI_QP; i++)
 639                wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
 640                     I40E_QUEUE_END_OF_LIST);
 641        i40e_flush(hw);
 642}
 643
 644/**
 645 * i40e_free_vf_res
 646 * @vf: pointer to the VF info
 647 *
 648 * free VF resources
 649 **/
 650static void i40e_free_vf_res(struct i40e_vf *vf)
 651{
 652        struct i40e_pf *pf = vf->pf;
 653        struct i40e_hw *hw = &pf->hw;
 654        u32 reg_idx, reg;
 655        int i, msix_vf;
 656
 657        /* free vsi & disconnect it from the parent uplink */
 658        if (vf->lan_vsi_idx) {
 659                i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
 660                vf->lan_vsi_idx = 0;
 661                vf->lan_vsi_id = 0;
 662        }
 663        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 664
 665        /* disable interrupts so the VF starts in a known state */
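             /* VFINT_DYN_CTL0 covers vector 0; vectors 1..(msix_vf - 1) live
              * in the DYN_CTLN array at ((msix_vf - 1) * vf_id) + (i - 1),
              * the same layout used for the link lists below
              */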
 666        for (i = 0; i < msix_vf; i++) {
  667                /* format is the same for both registers */
 668                if (0 == i)
 669                        reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
 670                else
 671                        reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
 672                                                      (vf->vf_id))
 673                                                     + (i - 1));
 674                wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
 675                i40e_flush(hw);
 676        }
 677
 678        /* clear the irq settings */
 679        for (i = 0; i < msix_vf; i++) {
  680                /* format is the same for both registers */
 681                if (0 == i)
 682                        reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
 683                else
 684                        reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
 685                                                      (vf->vf_id))
 686                                                     + (i - 1));
 687                reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
 688                       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
 689                wr32(hw, reg_idx, reg);
 690                i40e_flush(hw);
 691        }
  692        /* reset some of the state variables keeping
 693         * track of the resources
 694         */
 695        vf->num_queue_pairs = 0;
 696        vf->vf_states = 0;
 697}
 698
 699/**
 700 * i40e_alloc_vf_res
 701 * @vf: pointer to the VF info
 702 *
 703 * allocate VF resources
 704 **/
 705static int i40e_alloc_vf_res(struct i40e_vf *vf)
 706{
 707        struct i40e_pf *pf = vf->pf;
 708        int total_queue_pairs = 0;
 709        int ret;
 710
 711        /* allocate hw vsi context & associated resources */
 712        ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
 713        if (ret)
 714                goto error_alloc;
 715        total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
 716        set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 717
 718        /* store the total qps number for the runtime
 719         * VF req validation
 720         */
 721        vf->num_queue_pairs = total_queue_pairs;
 722
 723        /* VF is now completely initialized */
 724        set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 725
 726error_alloc:
 727        if (ret)
 728                i40e_free_vf_res(vf);
 729
 730        return ret;
 731}
 732
 733#define VF_DEVICE_STATUS 0xAA
 734#define VF_TRANS_PENDING_MASK 0x20
 735/**
 736 * i40e_quiesce_vf_pci
 737 * @vf: pointer to the VF structure
 738 *
 739 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 740 * if the transactions never clear.
 741 **/
 742static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
 743{
 744        struct i40e_pf *pf = vf->pf;
 745        struct i40e_hw *hw = &pf->hw;
 746        int vf_abs_id, i;
 747        u32 reg;
 748
 749        vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
 750
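             /* Use the PF_PCI_CIAA/CIAD indirect interface to read the VF's
              * PCIe Device Status register and poll its Transactions Pending
              * bit (100 attempts, 1 usec apart).
              */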
 751        wr32(hw, I40E_PF_PCI_CIAA,
 752             VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
 753        for (i = 0; i < 100; i++) {
 754                reg = rd32(hw, I40E_PF_PCI_CIAD);
 755                if ((reg & VF_TRANS_PENDING_MASK) == 0)
 756                        return 0;
 757                udelay(1);
 758        }
 759        return -EIO;
 760}
 761
 762/**
 763 * i40e_reset_vf
 764 * @vf: pointer to the VF structure
 765 * @flr: VFLR was issued or not
 766 *
 767 * reset the VF
 768 **/
 769void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 770{
 771        struct i40e_pf *pf = vf->pf;
 772        struct i40e_hw *hw = &pf->hw;
 773        bool rsd = false;
 774        int i;
 775        u32 reg;
 776
 777        if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
 778                return;
 779
 780        /* warn the VF */
 781        clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 782
 783        /* In the case of a VFLR, the HW has already reset the VF and we
 784         * just need to clean up, so don't hit the VFRTRIG register.
 785         */
 786        if (!flr) {
 787                /* reset VF using VPGEN_VFRTRIG reg */
 788                reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
 789                reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
 790                wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 791                i40e_flush(hw);
 792        }
 793
 794        if (i40e_quiesce_vf_pci(vf))
 795                dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
 796                        vf->vf_id);
 797
 798        /* poll VPGEN_VFRSTAT reg to make sure
 799         * that reset is complete
 800         */
 801        for (i = 0; i < 10; i++) {
 802                /* VF reset requires driver to first reset the VF and then
 803                 * poll the status register to make sure that the reset
 804                 * completed successfully. Due to internal HW FIFO flushes,
 805                 * we must wait 10ms before the register will be valid.
 806                 */
 807                usleep_range(10000, 20000);
 808                reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
 809                if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
 810                        rsd = true;
 811                        break;
 812                }
 813        }
 814
 815        if (flr)
 816                usleep_range(10000, 20000);
 817
 818        if (!rsd)
 819                dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
 820                        vf->vf_id);
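             /* signal the VF that the hardware reset has completed;
              * I40E_VFR_VFACTIVE is written below once the VF's resources
              * have been reallocated
              */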
 821        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
 822        /* clear the reset bit in the VPGEN_VFRTRIG reg */
 823        reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
 824        reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
 825        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
 826
 827        /* On initial reset, we won't have any queues */
 828        if (vf->lan_vsi_idx == 0)
 829                goto complete_reset;
 830
 831        i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
 832complete_reset:
 833        /* reallocate VF resources to reset the VSI state */
 834        i40e_free_vf_res(vf);
 835        i40e_alloc_vf_res(vf);
 836        i40e_enable_vf_mappings(vf);
 837        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 838
 839        /* tell the VF the reset is done */
 840        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
 841        i40e_flush(hw);
 842        clear_bit(__I40E_VF_DISABLE, &pf->state);
 843}
 844
 845/**
 846 * i40e_free_vfs
 847 * @pf: pointer to the PF structure
 848 *
 849 * free VF resources
 850 **/
 851void i40e_free_vfs(struct i40e_pf *pf)
 852{
 853        struct i40e_hw *hw = &pf->hw;
 854        u32 reg_idx, bit_idx;
 855        int i, tmp, vf_id;
 856
 857        if (!pf->vf)
 858                return;
 859        while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
 860                usleep_range(1000, 2000);
 861
 862        for (i = 0; i < pf->num_alloc_vfs; i++)
 863                if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
 864                        i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
 865                                               false);
 866
 867        /* Disable IOV before freeing resources. This lets any VF drivers
 868         * running in the host get themselves cleaned up before we yank
 869         * the carpet out from underneath their feet.
 870         */
 871        if (!pci_vfs_assigned(pf->pdev))
 872                pci_disable_sriov(pf->pdev);
 873        else
 874                dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
 875
 876        msleep(20); /* let any messages in transit get finished up */
 877
 878        /* free up VF resources */
 879        tmp = pf->num_alloc_vfs;
 880        pf->num_alloc_vfs = 0;
 881        for (i = 0; i < tmp; i++) {
 882                if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
 883                        i40e_free_vf_res(&pf->vf[i]);
 884                /* disable qp mappings */
 885                i40e_disable_vf_mappings(&pf->vf[i]);
 886        }
 887
 888        kfree(pf->vf);
 889        pf->vf = NULL;
 890
 891        /* This check is for when the driver is unloaded while VFs are
 892         * assigned. Setting the number of VFs to 0 through sysfs is caught
 893         * before this function ever gets called.
 894         */
 895        if (!pci_vfs_assigned(pf->pdev)) {
 896                /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
 897                 * work correctly when SR-IOV gets re-enabled.
 898                 */
 899                for (vf_id = 0; vf_id < tmp; vf_id++) {
 900                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
 901                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
 902                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
 903                }
 904        }
 905        clear_bit(__I40E_VF_DISABLE, &pf->state);
 906}
 907
 908#ifdef CONFIG_PCI_IOV
 909/**
 910 * i40e_alloc_vfs
 911 * @pf: pointer to the PF structure
 912 * @num_alloc_vfs: number of VFs to allocate
 913 *
 914 * allocate VF resources
 915 **/
 916int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 917{
 918        struct i40e_vf *vfs;
 919        int i, ret = 0;
 920
 921        /* Disable interrupt 0 so we don't try to handle the VFLR. */
 922        i40e_irq_dynamic_disable_icr0(pf);
 923
 924        /* Check to see if we're just allocating resources for extant VFs */
 925        if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
 926                ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
 927                if (ret) {
 928                        dev_err(&pf->pdev->dev,
 929                                "Failed to enable SR-IOV, error %d.\n", ret);
 930                        pf->num_alloc_vfs = 0;
 931                        goto err_iov;
 932                }
 933        }
 934        /* allocate memory */
 935        vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
 936        if (!vfs) {
 937                ret = -ENOMEM;
 938                goto err_alloc;
 939        }
 940        pf->vf = vfs;
 941
 942        /* apply default profile */
 943        for (i = 0; i < num_alloc_vfs; i++) {
 944                vfs[i].pf = pf;
 945                vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
 946                vfs[i].vf_id = i;
 947
 948                /* assign default capabilities */
 949                set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
 950                vfs[i].spoofchk = true;
 951                /* VF resources get allocated during reset */
 952                i40e_reset_vf(&vfs[i], false);
 953
 954                /* enable VF vplan_qtable mappings */
 955                i40e_enable_vf_mappings(&vfs[i]);
 956        }
 957        pf->num_alloc_vfs = num_alloc_vfs;
 958
 959err_alloc:
 960        if (ret)
 961                i40e_free_vfs(pf);
 962err_iov:
 963        /* Re-enable interrupt 0. */
 964        i40e_irq_dynamic_enable_icr0(pf);
 965        return ret;
 966}
 967
 968#endif
 969/**
 970 * i40e_pci_sriov_enable
 971 * @pdev: pointer to a pci_dev structure
 972 * @num_vfs: number of VFs to allocate
 973 *
 974 * Enable or change the number of VFs
 975 **/
 976static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
 977{
 978#ifdef CONFIG_PCI_IOV
 979        struct i40e_pf *pf = pci_get_drvdata(pdev);
 980        int pre_existing_vfs = pci_num_vf(pdev);
 981        int err = 0;
 982
 983        dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
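             /* If a different number of VFs is already enabled, free them so
              * the allocation below starts clean; if the count already
              * matches, there is nothing more to do.
              */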
 984        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 985                i40e_free_vfs(pf);
 986        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
 987                goto out;
 988
 989        if (num_vfs > pf->num_req_vfs) {
 990                err = -EPERM;
 991                goto err_out;
 992        }
 993
 994        err = i40e_alloc_vfs(pf, num_vfs);
 995        if (err) {
 996                dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
 997                goto err_out;
 998        }
 999
1000out:
1001        return num_vfs;
1002
1003err_out:
1004        return err;
1005#endif
1006        return 0;
1007}
1008
1009/**
1010 * i40e_pci_sriov_configure
1011 * @pdev: pointer to a pci_dev structure
1012 * @num_vfs: number of VFs to allocate
1013 *
1014 * Enable or change the number of VFs. Called when the user updates the number
1015 * of VFs in sysfs.
1016 **/
1017int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1018{
1019        struct i40e_pf *pf = pci_get_drvdata(pdev);
1020
1021        if (num_vfs) {
1022                if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1023                        pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1024                        i40e_do_reset_safe(pf,
1025                                           BIT_ULL(__I40E_PF_RESET_REQUESTED));
1026                }
1027                return i40e_pci_sriov_enable(pdev, num_vfs);
1028        }
1029
1030        if (!pci_vfs_assigned(pf->pdev)) {
1031                i40e_free_vfs(pf);
1032                pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1033                i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
1034        } else {
1035                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1036                return -EINVAL;
1037        }
1038        return 0;
1039}
1040
1041/***********************virtual channel routines******************/
1042
1043/**
1044 * i40e_vc_send_msg_to_vf
1045 * @vf: pointer to the VF info
1046 * @v_opcode: virtual channel opcode
1047 * @v_retval: virtual channel return value
1048 * @msg: pointer to the msg buffer
1049 * @msglen: msg length
1050 *
1051 * send msg to VF
1052 **/
1053static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1054                                  u32 v_retval, u8 *msg, u16 msglen)
1055{
1056        struct i40e_pf *pf;
1057        struct i40e_hw *hw;
1058        int abs_vf_id;
1059        i40e_status aq_ret;
1060
1061        /* validate the request */
1062        if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1063                return -EINVAL;
1064
1065        pf = vf->pf;
1066        hw = &pf->hw;
1067        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1068
1069        /* single place to detect unsuccessful return values */
1070        if (v_retval) {
1071                vf->num_invalid_msgs++;
1072                dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
1073                        v_opcode, v_retval);
1074                if (vf->num_invalid_msgs >
1075                    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1076                        dev_err(&pf->pdev->dev,
1077                                "Number of invalid messages exceeded for VF %d\n",
1078                                vf->vf_id);
1079                        dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1080                        set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
1081                }
1082        } else {
1083                vf->num_valid_msgs++;
1084        }
1085
1086        aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,  v_opcode, v_retval,
1087                                        msg, msglen, NULL);
1088        if (aq_ret) {
1089                dev_err(&pf->pdev->dev,
1090                        "Unable to send the message to VF %d aq_err %d\n",
1091                        vf->vf_id, pf->hw.aq.asq_last_status);
1092                return -EIO;
1093        }
1094
1095        return 0;
1096}
1097
1098/**
1099 * i40e_vc_send_resp_to_vf
1100 * @vf: pointer to the VF info
1101 * @opcode: operation code
1102 * @retval: return value
1103 *
1104 * send resp msg to VF
1105 **/
1106static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1107                                   enum i40e_virtchnl_ops opcode,
1108                                   i40e_status retval)
1109{
1110        return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1111}
1112
1113/**
1114 * i40e_vc_get_version_msg
1115 * @vf: pointer to the VF info
1116 *
1117 * called from the VF to request the API version used by the PF
1118 **/
1119static int i40e_vc_get_version_msg(struct i40e_vf *vf)
1120{
1121        struct i40e_virtchnl_version_info info = {
1122                I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
1123        };
1124
1125        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
1126                                      I40E_SUCCESS, (u8 *)&info,
1127                                      sizeof(struct
1128                                             i40e_virtchnl_version_info));
1129}
1130
1131/**
1132 * i40e_vc_get_vf_resources_msg
 1133 * @vf: pointer to the VF info
1136 *
1137 * called from the VF to request its resources
1138 **/
1139static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
1140{
1141        struct i40e_virtchnl_vf_resource *vfres = NULL;
1142        struct i40e_pf *pf = vf->pf;
1143        i40e_status aq_ret = 0;
1144        struct i40e_vsi *vsi;
1145        int i = 0, len = 0;
1146        int num_vsis = 1;
1147        int ret;
1148
1149        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
1150                aq_ret = I40E_ERR_PARAM;
1151                goto err;
1152        }
1153
1154        len = (sizeof(struct i40e_virtchnl_vf_resource) +
1155               sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
1156
1157        vfres = kzalloc(len, GFP_KERNEL);
1158        if (!vfres) {
1159                aq_ret = I40E_ERR_NO_MEMORY;
1160                len = 0;
1161                goto err;
1162        }
1163
1164        vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
1165        vsi = pf->vsi[vf->lan_vsi_idx];
1166        if (!vsi->info.pvid)
1167                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
1168
1169        vfres->num_vsis = num_vsis;
1170        vfres->num_queue_pairs = vf->num_queue_pairs;
1171        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1172        if (vf->lan_vsi_idx) {
1173                vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
1174                vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
1175                vfres->vsi_res[i].num_queue_pairs =
1176                    pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1177                memcpy(vfres->vsi_res[i].default_mac_addr,
1178                       vf->default_lan_addr.addr, ETH_ALEN);
1179                i++;
1180        }
1181        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
1182
1183err:
1184        /* send the response back to the VF */
1185        ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
1186                                     aq_ret, (u8 *)vfres, len);
1187
1188        kfree(vfres);
1189        return ret;
1190}
1191
1192/**
1193 * i40e_vc_reset_vf_msg
1194 * @vf: pointer to the VF info
 1197 *
 1198 * called from the VF to reset itself; unlike other virtchnl messages,
 1199 * the PF driver doesn't send a response back to the VF
1201 **/
1202static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1203{
1204        if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
1205                i40e_reset_vf(vf, false);
1206}
1207
1208/**
1209 * i40e_vc_config_promiscuous_mode_msg
1210 * @vf: pointer to the VF info
1211 * @msg: pointer to the msg buffer
1212 * @msglen: msg length
1213 *
1214 * called from the VF to configure the promiscuous mode of
 1215 * VF VSIs
1216 **/
1217static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1218                                               u8 *msg, u16 msglen)
1219{
1220        struct i40e_virtchnl_promisc_info *info =
1221            (struct i40e_virtchnl_promisc_info *)msg;
1222        struct i40e_pf *pf = vf->pf;
1223        struct i40e_hw *hw = &pf->hw;
1224        struct i40e_vsi *vsi;
1225        bool allmulti = false;
1226        i40e_status aq_ret;
1227
1228        vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
1229        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1230            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1231            !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
1232            (vsi->type != I40E_VSI_FCOE)) {
1233                aq_ret = I40E_ERR_PARAM;
1234                goto error_param;
1235        }
1236        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
1237                allmulti = true;
1238        aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1239                                                       allmulti, NULL);
1240
1241error_param:
1242        /* send the response to the VF */
1243        return i40e_vc_send_resp_to_vf(vf,
1244                                       I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1245                                       aq_ret);
1246}
1247
1248/**
1249 * i40e_vc_config_queues_msg
1250 * @vf: pointer to the VF info
1251 * @msg: pointer to the msg buffer
1252 * @msglen: msg length
1253 *
1254 * called from the VF to configure the rx/tx
1255 * queues
1256 **/
1257static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1258{
1259        struct i40e_virtchnl_vsi_queue_config_info *qci =
1260            (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1261        struct i40e_virtchnl_queue_pair_info *qpi;
1262        struct i40e_pf *pf = vf->pf;
1263        u16 vsi_id, vsi_queue_id;
1264        i40e_status aq_ret = 0;
1265        int i;
1266
1267        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1268                aq_ret = I40E_ERR_PARAM;
1269                goto error_param;
1270        }
1271
1272        vsi_id = qci->vsi_id;
1273        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1274                aq_ret = I40E_ERR_PARAM;
1275                goto error_param;
1276        }
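             /* each queue pair must reference the request's VSI and a valid
              * queue id before its Tx and Rx contexts are programmed
              */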
1277        for (i = 0; i < qci->num_queue_pairs; i++) {
1278                qpi = &qci->qpair[i];
1279                vsi_queue_id = qpi->txq.queue_id;
1280                if ((qpi->txq.vsi_id != vsi_id) ||
1281                    (qpi->rxq.vsi_id != vsi_id) ||
1282                    (qpi->rxq.queue_id != vsi_queue_id) ||
1283                    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
1284                        aq_ret = I40E_ERR_PARAM;
1285                        goto error_param;
1286                }
1287
1288                if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
1289                                             &qpi->rxq) ||
1290                    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
1291                                             &qpi->txq)) {
1292                        aq_ret = I40E_ERR_PARAM;
1293                        goto error_param;
1294                }
1295        }
1296        /* set vsi num_queue_pairs in use to num configured by VF */
1297        pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
1298
1299error_param:
1300        /* send the response to the VF */
1301        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1302                                       aq_ret);
1303}
1304
1305/**
1306 * i40e_vc_config_irq_map_msg
1307 * @vf: pointer to the VF info
1308 * @msg: pointer to the msg buffer
1309 * @msglen: msg length
1310 *
1311 * called from the VF to configure the irq to
1312 * queue map
1313 **/
1314static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1315{
1316        struct i40e_virtchnl_irq_map_info *irqmap_info =
1317            (struct i40e_virtchnl_irq_map_info *)msg;
1318        struct i40e_virtchnl_vector_map *map;
1319        u16 vsi_id, vsi_queue_id, vector_id;
1320        i40e_status aq_ret = 0;
1321        unsigned long tempmap;
1322        int i;
1323
1324        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1325                aq_ret = I40E_ERR_PARAM;
1326                goto error_param;
1327        }
1328
1329        for (i = 0; i < irqmap_info->num_vectors; i++) {
1330                map = &irqmap_info->vecmap[i];
1331
1332                vector_id = map->vector_id;
1333                vsi_id = map->vsi_id;
1334                /* validate msg params */
1335                if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
1336                    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1337                        aq_ret = I40E_ERR_PARAM;
1338                        goto error_param;
1339                }
1340
 1341                /* look out for invalid queue indexes */
1342                tempmap = map->rxq_map;
1343                for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1344                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1345                                                      vsi_queue_id)) {
1346                                aq_ret = I40E_ERR_PARAM;
1347                                goto error_param;
1348                        }
1349                }
1350
1351                tempmap = map->txq_map;
1352                for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1353                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1354                                                      vsi_queue_id)) {
1355                                aq_ret = I40E_ERR_PARAM;
1356                                goto error_param;
1357                        }
1358                }
1359
1360                i40e_config_irq_link_list(vf, vsi_id, map);
1361        }
1362error_param:
1363        /* send the response to the VF */
1364        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
1365                                       aq_ret);
1366}
1367
1368/**
1369 * i40e_vc_enable_queues_msg
1370 * @vf: pointer to the VF info
1371 * @msg: pointer to the msg buffer
1372 * @msglen: msg length
1373 *
1374 * called from the VF to enable all or specific queue(s)
1375 **/
1376static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1377{
1378        struct i40e_virtchnl_queue_select *vqs =
1379            (struct i40e_virtchnl_queue_select *)msg;
1380        struct i40e_pf *pf = vf->pf;
1381        u16 vsi_id = vqs->vsi_id;
1382        i40e_status aq_ret = 0;
1383
1384        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1385                aq_ret = I40E_ERR_PARAM;
1386                goto error_param;
1387        }
1388
1389        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1390                aq_ret = I40E_ERR_PARAM;
1391                goto error_param;
1392        }
1393
1394        if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1395                aq_ret = I40E_ERR_PARAM;
1396                goto error_param;
1397        }
1398
1399        if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
1400                aq_ret = I40E_ERR_TIMEOUT;
1401error_param:
1402        /* send the response to the VF */
1403        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
1404                                       aq_ret);
1405}
1406
1407/**
1408 * i40e_vc_disable_queues_msg
1409 * @vf: pointer to the VF info
1410 * @msg: pointer to the msg buffer
1411 * @msglen: msg length
1412 *
1413 * called from the VF to disable all or specific
1414 * queue(s)
1415 **/
1416static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1417{
1418        struct i40e_virtchnl_queue_select *vqs =
1419            (struct i40e_virtchnl_queue_select *)msg;
1420        struct i40e_pf *pf = vf->pf;
1421        i40e_status aq_ret = 0;
1422
1423        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1424                aq_ret = I40E_ERR_PARAM;
1425                goto error_param;
1426        }
1427
1428        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1429                aq_ret = I40E_ERR_PARAM;
1430                goto error_param;
1431        }
1432
1433        if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1434                aq_ret = I40E_ERR_PARAM;
1435                goto error_param;
1436        }
1437
1438        if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
1439                aq_ret = I40E_ERR_TIMEOUT;
1440
1441error_param:
1442        /* send the response to the VF */
1443        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1444                                       aq_ret);
1445}
1446
1447/**
1448 * i40e_vc_get_stats_msg
1449 * @vf: pointer to the VF info
1450 * @msg: pointer to the msg buffer
1451 * @msglen: msg length
1452 *
1453 * called from the VF to get vsi stats
1454 **/
1455static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1456{
1457        struct i40e_virtchnl_queue_select *vqs =
1458            (struct i40e_virtchnl_queue_select *)msg;
1459        struct i40e_pf *pf = vf->pf;
1460        struct i40e_eth_stats stats;
1461        i40e_status aq_ret = 0;
1462        struct i40e_vsi *vsi;
1463
1464        memset(&stats, 0, sizeof(struct i40e_eth_stats));
1465
1466        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
1467                aq_ret = I40E_ERR_PARAM;
1468                goto error_param;
1469        }
1470
1471        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1472                aq_ret = I40E_ERR_PARAM;
1473                goto error_param;
1474        }
1475
1476        vsi = pf->vsi[vf->lan_vsi_idx];
1477        if (!vsi) {
1478                aq_ret = I40E_ERR_PARAM;
1479                goto error_param;
1480        }
1481        i40e_update_eth_stats(vsi);
1482        stats = vsi->eth_stats;
1483
1484error_param:
1485        /* send the response back to the VF */
1486        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
1487                                      (u8 *)&stats, sizeof(stats));
1488}
1489
1490/**
1491 * i40e_check_vf_permission
1492 * @vf: pointer to the VF info
1493 * @macaddr: pointer to the MAC Address being checked
1494 *
1495 * Check if the VF has permission to add or delete unicast MAC address
1496 * filters and return error code -EPERM if not.  Then check if the
1497 * address filter requested is broadcast or zero and if so return
1498 * an invalid MAC address error code.
1499 **/
1500static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
1501{
1502        struct i40e_pf *pf = vf->pf;
1503        int ret = 0;
1504
1505        if (is_broadcast_ether_addr(macaddr) ||
1506                   is_zero_ether_addr(macaddr)) {
1507                dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
1508                ret = I40E_ERR_INVALID_MAC_ADDR;
1509        } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
1510                   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
1511                /* If the host VMM administrator has set the VF MAC address
1512                 * administratively via the ndo_set_vf_mac command then deny
1513                 * permission to the VF to add or delete unicast MAC addresses.
1514                 * The VF may request to set the MAC address filter already
1515                 * assigned to it so do not return an error in that case.
1516                 */
1517                dev_err(&pf->pdev->dev,
1518                        "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
1519                ret = -EPERM;
1520        }
1521        return ret;
1522}
1523
1524/**
1525 * i40e_vc_add_mac_addr_msg
1526 * @vf: pointer to the VF info
1527 * @msg: pointer to the msg buffer
1528 * @msglen: msg length
1529 *
1530 * add guest mac address filter
1531 **/
1532static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1533{
1534        struct i40e_virtchnl_ether_addr_list *al =
1535            (struct i40e_virtchnl_ether_addr_list *)msg;
1536        struct i40e_pf *pf = vf->pf;
1537        struct i40e_vsi *vsi = NULL;
1538        u16 vsi_id = al->vsi_id;
1539        i40e_status ret = 0;
1540        int i;
1541
1542        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1543            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1544            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1545                ret = I40E_ERR_PARAM;
1546                goto error_param;
1547        }
1548
1549        for (i = 0; i < al->num_elements; i++) {
1550                ret = i40e_check_vf_permission(vf, al->list[i].addr);
1551                if (ret)
1552                        goto error_param;
1553        }
1554        vsi = pf->vsi[vf->lan_vsi_idx];
1555
1556        /* add new addresses to the list */
1557        for (i = 0; i < al->num_elements; i++) {
1558                struct i40e_mac_filter *f;
1559
1560                f = i40e_find_mac(vsi, al->list[i].addr, true, false);
1561                if (!f) {
1562                        if (i40e_is_vsi_in_vlan(vsi))
1563                                f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
1564                                                         true, false);
1565                        else
1566                                f = i40e_add_filter(vsi, al->list[i].addr, -1,
1567                                                    true, false);
1568                }
1569
1570                if (!f) {
1571                        dev_err(&pf->pdev->dev,
1572                                "Unable to add VF MAC filter\n");
1573                        ret = I40E_ERR_PARAM;
1574                        goto error_param;
1575                }
1576        }
1577
1578        /* program the updated filter list */
1579        if (i40e_sync_vsi_filters(vsi))
1580                dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1581
1582error_param:
1583        /* send the response to the VF */
1584        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1585                                       ret);
1586}
1587
1588/**
1589 * i40e_vc_del_mac_addr_msg
1590 * @vf: pointer to the VF info
1591 * @msg: pointer to the msg buffer
1592 * @msglen: msg length
1593 *
1594 * remove guest mac address filter
1595 **/
1596static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1597{
1598        struct i40e_virtchnl_ether_addr_list *al =
1599            (struct i40e_virtchnl_ether_addr_list *)msg;
1600        struct i40e_pf *pf = vf->pf;
1601        struct i40e_vsi *vsi = NULL;
1602        u16 vsi_id = al->vsi_id;
1603        i40e_status ret = 0;
1604        int i;
1605
1606        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1607            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1608            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1609                ret = I40E_ERR_PARAM;
1610                goto error_param;
1611        }
1612
1613        for (i = 0; i < al->num_elements; i++) {
1614                if (is_broadcast_ether_addr(al->list[i].addr) ||
1615                    is_zero_ether_addr(al->list[i].addr)) {
1616                        dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
1617                                al->list[i].addr);
1618                        ret = I40E_ERR_INVALID_MAC_ADDR;
1619                        goto error_param;
1620                }
1621        }
1622        vsi = pf->vsi[vf->lan_vsi_idx];
1623
1624        /* delete addresses from the list */
1625        for (i = 0; i < al->num_elements; i++)
1626                i40e_del_filter(vsi, al->list[i].addr,
1627                                I40E_VLAN_ANY, true, false);
1628
1629        /* program the updated filter list */
1630        if (i40e_sync_vsi_filters(vsi))
1631                dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
1632
1633error_param:
1634        /* send the response to the VF */
1635        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1636                                       ret);
1637}
1638
1639/**
1640 * i40e_vc_add_vlan_msg
1641 * @vf: pointer to the VF info
1642 * @msg: pointer to the msg buffer
1643 * @msglen: msg length
1644 *
1645 * program guest vlan id
1646 **/
1647static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1648{
1649        struct i40e_virtchnl_vlan_filter_list *vfl =
1650            (struct i40e_virtchnl_vlan_filter_list *)msg;
1651        struct i40e_pf *pf = vf->pf;
1652        struct i40e_vsi *vsi = NULL;
1653        u16 vsi_id = vfl->vsi_id;
1654        i40e_status aq_ret = 0;
1655        int i;
1656
1657        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1658            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1659            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1660                aq_ret = I40E_ERR_PARAM;
1661                goto error_param;
1662        }
1663
1664        for (i = 0; i < vfl->num_elements; i++) {
1665                if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1666                        aq_ret = I40E_ERR_PARAM;
1667                        dev_err(&pf->pdev->dev,
1668                                "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
1669                        goto error_param;
1670                }
1671        }
1672        vsi = pf->vsi[vf->lan_vsi_idx];
1673        if (vsi->info.pvid) {
1674                aq_ret = I40E_ERR_PARAM;
1675                goto error_param;
1676        }
1677
1678        i40e_vlan_stripping_enable(vsi);
1679        for (i = 0; i < vfl->num_elements; i++) {
1680                /* add new VLAN filter */
1681                int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
1682                if (ret)
1683                        dev_err(&pf->pdev->dev,
1684                                "Unable to add VF vlan filter %d, error %d\n",
1685                                vfl->vlan_id[i], ret);
1686        }
1687
1688error_param:
1689        /* send the response to the VF */
1690        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
1691}
1692
1693/**
1694 * i40e_vc_remove_vlan_msg
1695 * @vf: pointer to the VF info
1696 * @msg: pointer to the msg buffer
1697 * @msglen: msg length
1698 *
1699 * remove programmed guest vlan id
1700 **/
1701static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1702{
1703        struct i40e_virtchnl_vlan_filter_list *vfl =
1704            (struct i40e_virtchnl_vlan_filter_list *)msg;
1705        struct i40e_pf *pf = vf->pf;
1706        struct i40e_vsi *vsi = NULL;
1707        u16 vsi_id = vfl->vsi_id;
1708        i40e_status aq_ret = 0;
1709        int i;
1710
1711        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
1712            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
1713            !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1714                aq_ret = I40E_ERR_PARAM;
1715                goto error_param;
1716        }
1717
1718        for (i = 0; i < vfl->num_elements; i++) {
1719                if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
1720                        aq_ret = I40E_ERR_PARAM;
1721                        goto error_param;
1722                }
1723        }
1724
1725        vsi = pf->vsi[vf->lan_vsi_idx];
1726        if (vsi->info.pvid) {
1727                aq_ret = I40E_ERR_PARAM;
1728                goto error_param;
1729        }
1730
1731        for (i = 0; i < vfl->num_elements; i++) {
1732                int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
1733                if (ret)
1734                        dev_err(&pf->pdev->dev,
1735                                "Unable to delete VF vlan filter %d, error %d\n",
1736                                vfl->vlan_id[i], ret);
1737        }
1738
1739error_param:
1740        /* send the response to the VF */
1741        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
1742}
1743
1744/**
1745 * i40e_vc_validate_vf_msg
1746 * @vf: pointer to the VF info
1747 * @v_opcode: operation code
1748 * @msg: pointer to the msg buffer
1749 * @msglen: msg length
1750 *
1751 * validate msg
1752 **/
1753static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
1754                                   u32 v_retval, u8 *msg, u16 msglen)
1755{
1756        bool err_msg_format = false;
1757        int valid_len;
1758
1759        /* Check if VF is disabled. */
1760        if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
1761                return I40E_ERR_PARAM;
1762
1763        /* Validate message length. */
1764        switch (v_opcode) {
1765        case I40E_VIRTCHNL_OP_VERSION:
1766                valid_len = sizeof(struct i40e_virtchnl_version_info);
1767                break;
1768        case I40E_VIRTCHNL_OP_RESET_VF:
1769        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1770                valid_len = 0;
1771                break;
1772        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1773                valid_len = sizeof(struct i40e_virtchnl_txq_info);
1774                break;
1775        case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1776                valid_len = sizeof(struct i40e_virtchnl_rxq_info);
1777                break;
1778        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1779                valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
1780                if (msglen >= valid_len) {
1781                        struct i40e_virtchnl_vsi_queue_config_info *vqc =
1782                            (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1783                        valid_len += (vqc->num_queue_pairs *
1784                                      sizeof(struct
1785                                             i40e_virtchnl_queue_pair_info));
1786                        if (vqc->num_queue_pairs == 0)
1787                                err_msg_format = true;
1788                }
1789                break;
1790        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1791                valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
1792                if (msglen >= valid_len) {
1793                        struct i40e_virtchnl_irq_map_info *vimi =
1794                            (struct i40e_virtchnl_irq_map_info *)msg;
1795                        valid_len += (vimi->num_vectors *
1796                                      sizeof(struct i40e_virtchnl_vector_map));
1797                        if (vimi->num_vectors == 0)
1798                                err_msg_format = true;
1799                }
1800                break;
1801        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1802        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1803                valid_len = sizeof(struct i40e_virtchnl_queue_select);
1804                break;
1805        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1806        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1807                valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
1808                if (msglen >= valid_len) {
1809                        struct i40e_virtchnl_ether_addr_list *veal =
1810                            (struct i40e_virtchnl_ether_addr_list *)msg;
1811                        valid_len += veal->num_elements *
1812                            sizeof(struct i40e_virtchnl_ether_addr);
1813                        if (veal->num_elements == 0)
1814                                err_msg_format = true;
1815                }
1816                break;
1817        case I40E_VIRTCHNL_OP_ADD_VLAN:
1818        case I40E_VIRTCHNL_OP_DEL_VLAN:
1819                valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
1820                if (msglen >= valid_len) {
1821                        struct i40e_virtchnl_vlan_filter_list *vfl =
1822                            (struct i40e_virtchnl_vlan_filter_list *)msg;
1823                        valid_len += vfl->num_elements * sizeof(u16);
1824                        if (vfl->num_elements == 0)
1825                                err_msg_format = true;
1826                }
1827                break;
1828        case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1829                valid_len = sizeof(struct i40e_virtchnl_promisc_info);
1830                break;
1831        case I40E_VIRTCHNL_OP_GET_STATS:
1832                valid_len = sizeof(struct i40e_virtchnl_queue_select);
1833                break;
1834        /* These are always errors coming from the VF. */
1835        case I40E_VIRTCHNL_OP_EVENT:
1836        case I40E_VIRTCHNL_OP_UNKNOWN:
1837        default:
1838                return -EPERM;
1839                break;
1840        }
1841        /* reject length mismatches and malformed variable-length messages */
1842        if ((valid_len != msglen) || (err_msg_format)) {
1843                i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
1844                return -EINVAL;
1845        } else {
1846                return 0;
1847        }
1848}
1849
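/* A stand-alone sketch of the length rule enforced above: a variable-length
 * virtchnl message is valid only when msglen equals the fixed header size
 * plus num_elements times the per-element size, and the element count is
 * non-zero.  The structures below are hypothetical stand-ins, not the real
 * i40e_virtchnl definitions, and the snippet is ordinary user-space C.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ether_addr { uint8_t addr[6]; uint8_t pad[2]; };

struct demo_ether_addr_list {
	uint16_t vsi_id;
	uint16_t num_elements;
	struct demo_ether_addr list[];
};

/* expected length = header + one entry per element; zero elements is invalid */
static bool demo_msg_len_ok(uint16_t num_elements, uint16_t msglen)
{
	size_t valid_len = sizeof(struct demo_ether_addr_list) +
			   num_elements * sizeof(struct demo_ether_addr);

	return num_elements != 0 && msglen == valid_len;
}

int main(void)
{
	/* 4-byte header + 2 * 8-byte entries = 20 bytes expected */
	printf("len 20, 2 elements -> %s\n",
	       demo_msg_len_ok(2, 20) ? "valid" : "invalid");
	printf("len 20, 0 elements -> %s\n",
	       demo_msg_len_ok(0, 20) ? "valid" : "invalid");
	return 0;
}
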
1850/**
1851 * i40e_vc_process_vf_msg
1852 * @pf: pointer to the PF structure
1853 * @vf_id: source VF id
1854 * @v_opcode: operation code
1855 * @msg: pointer to the msg buffer
1856 * @msglen: msg length
1857 *
1858 * called from the common aeq/arq handler to
1859 * process request from VF
1860 **/
1861int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
1862                           u32 v_retval, u8 *msg, u16 msglen)
1863{
1864        struct i40e_hw *hw = &pf->hw;
1865        unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
1866        struct i40e_vf *vf;
1867        int ret;
1868
1869        pf->vf_aq_requests++;
1870        if (local_vf_id >= pf->num_alloc_vfs)
1871                return -EINVAL;
1872        vf = &(pf->vf[local_vf_id]);
1873        /* perform basic checks on the msg */
1874        ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
1875
1876        if (ret) {
1877                dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
1878                        local_vf_id, v_opcode, msglen);
1879                return ret;
1880        }
1881
1882        switch (v_opcode) {
1883        case I40E_VIRTCHNL_OP_VERSION:
1884                ret = i40e_vc_get_version_msg(vf);
1885                break;
1886        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1887                ret = i40e_vc_get_vf_resources_msg(vf);
1888                break;
1889        case I40E_VIRTCHNL_OP_RESET_VF:
1890                i40e_vc_reset_vf_msg(vf);
1891                ret = 0;
1892                break;
1893        case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1894                ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
1895                break;
1896        case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1897                ret = i40e_vc_config_queues_msg(vf, msg, msglen);
1898                break;
1899        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1900                ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
1901                break;
1902        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1903                ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
1904                i40e_vc_notify_vf_link_state(vf);
1905                break;
1906        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1907                ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
1908                break;
1909        case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1910                ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
1911                break;
1912        case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1913                ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
1914                break;
1915        case I40E_VIRTCHNL_OP_ADD_VLAN:
1916                ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
1917                break;
1918        case I40E_VIRTCHNL_OP_DEL_VLAN:
1919                ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
1920                break;
1921        case I40E_VIRTCHNL_OP_GET_STATS:
1922                ret = i40e_vc_get_stats_msg(vf, msg, msglen);
1923                break;
1924        case I40E_VIRTCHNL_OP_UNKNOWN:
1925        default:
1926                dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
1927                        v_opcode, local_vf_id);
1928                ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
1929                                              I40E_ERR_NOT_IMPLEMENTED);
1930                break;
1931        }
1932
1933        return ret;
1934}
1935
1936/**
1937 * i40e_vc_process_vflr_event
1938 * @pf: pointer to the PF structure
1939 *
1940 * called from the VFLR irq handler to
1941 * free up VF resources and state variables
1942 **/
1943int i40e_vc_process_vflr_event(struct i40e_pf *pf)
1944{
1945        u32 reg, reg_idx, bit_idx, vf_id;
1946        struct i40e_hw *hw = &pf->hw;
1947        struct i40e_vf *vf;
1948
1949        if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
1950                return 0;
1951
1952        /* re-enable vflr interrupt cause */
1953        reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1954        reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1955        wr32(hw, I40E_PFINT_ICR0_ENA, reg);
1956        i40e_flush(hw);
1957
1958        clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
1959        for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
1960                reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1961                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1962                /* read GLGEN_VFLRSTAT register to find out the flr VFs */
1963                vf = &pf->vf[vf_id];
1964                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
1965                if (reg & (1 << bit_idx)) {
1966                        /* clear the bit in GLGEN_VFLRSTAT */
1967                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
1968
1969                        if (!test_bit(__I40E_DOWN, &pf->state))
1970                                i40e_reset_vf(vf, true);
1971                }
1972        }
1973
1974        return 0;
1975}
1976
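/* A small stand-alone illustration of the index math used above when
 * scanning GLGEN_VFLRSTAT: the absolute VF number is split into a 32-bit
 * register index and a bit position.  The base id of 64 in main() is just
 * an example input; only the /32 and %32 arithmetic mirrors the driver.
 */
#include <stdint.h>
#include <stdio.h>

static void demo_vflr_index(uint32_t vf_base_id, uint32_t vf_id)
{
	uint32_t abs_vf = vf_base_id + vf_id;
	uint32_t reg_idx = abs_vf / 32;	/* which 32-bit VFLRSTAT register */
	uint32_t bit_idx = abs_vf % 32;	/* which bit inside that register */

	printf("VF %u (abs %u): reg_idx=%u bit_idx=%u mask=0x%08x\n",
	       vf_id, abs_vf, reg_idx, bit_idx, 1u << bit_idx);
}

int main(void)
{
	demo_vflr_index(64, 0);		/* abs 64  -> register 2, bit 0 */
	demo_vflr_index(64, 37);	/* abs 101 -> register 3, bit 5 */
	return 0;
}
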
1977/**
1978 * i40e_ndo_set_vf_mac
1979 * @netdev: network interface device structure
1980 * @vf_id: VF identifier
1981 * @mac: mac address
1982 *
1983 * program VF mac address
1984 **/
1985int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
1986{
1987        struct i40e_netdev_priv *np = netdev_priv(netdev);
1988        struct i40e_vsi *vsi = np->vsi;
1989        struct i40e_pf *pf = vsi->back;
1990        struct i40e_mac_filter *f;
1991        struct i40e_vf *vf;
1992        int ret = 0;
1993
1994        /* validate the request */
1995        if (vf_id >= pf->num_alloc_vfs) {
1996                dev_err(&pf->pdev->dev,
1997                        "Invalid VF Identifier %d\n", vf_id);
1998                ret = -EINVAL;
1999                goto error_param;
2000        }
2001
2002        vf = &(pf->vf[vf_id]);
2003        vsi = pf->vsi[vf->lan_vsi_idx];
2004        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2005                dev_err(&pf->pdev->dev,
2006                        "Uninitialized VF %d\n", vf_id);
2007                ret = -EINVAL;
2008                goto error_param;
2009        }
2010
2011        if (!is_valid_ether_addr(mac)) {
2012                dev_err(&pf->pdev->dev,
2013                        "Invalid VF ethernet address\n");
2014                ret = -EINVAL;
2015                goto error_param;
2016        }
2017
2018        /* delete the temporary mac address */
2019        i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
2020                        true, false);
2021
2022        /* Delete all the filters for this VSI - we're going to kill it
2023         * anyway.
2024         */
2025        list_for_each_entry(f, &vsi->mac_filter_list, list)
2026                i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
2027
2028        dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
2029        /* program mac filter */
2030        if (i40e_sync_vsi_filters(vsi)) {
2031                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
2032                ret = -EIO;
2033                goto error_param;
2034        }
2035        ether_addr_copy(vf->default_lan_addr.addr, mac);
2036        vf->pf_set_mac = true;
2037        /* Force the VF driver to stop so it reloads with the new MAC address */
2038        i40e_vc_disable_vf(pf, vf);
2039        dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2040
2041error_param:
2042        return ret;
2043}
2044
2045/**
2046 * i40e_ndo_set_vf_port_vlan
2047 * @netdev: network interface device structure
2048 * @vf_id: VF identifier
2049 * @vlan_id: VLAN identifier
2050 * @qos: priority setting
2051 *
2052 * program VF vlan id and/or qos
2053 **/
2054int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
2055                              int vf_id, u16 vlan_id, u8 qos)
2056{
2057        struct i40e_netdev_priv *np = netdev_priv(netdev);
2058        struct i40e_pf *pf = np->vsi->back;
2059        struct i40e_vsi *vsi;
2060        struct i40e_vf *vf;
2061        int ret = 0;
2062
2063        /* validate the request */
2064        if (vf_id >= pf->num_alloc_vfs) {
2065                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2066                ret = -EINVAL;
2067                goto error_pvid;
2068        }
2069
2070        if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
2071                dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2072                ret = -EINVAL;
2073                goto error_pvid;
2074        }
2075
2076        vf = &(pf->vf[vf_id]);
2077        vsi = pf->vsi[vf->lan_vsi_idx];
2078        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2079                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2080                ret = -EINVAL;
2081                goto error_pvid;
2082        }
2083
2084        if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
2085                dev_err(&pf->pdev->dev,
2086                        "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2087                        vf_id);
2088                /* Administrator error - knock the VF offline until the
2089                 * administrator reconfigures the network correctly and
2090                 * then reloads the VF driver.
2091                 */
2092                i40e_vc_disable_vf(pf, vf);
2093        }
2094
2095        /* Check for condition where there was already a port VLAN ID
2096         * filter set and now it is being deleted by setting it to zero.
2097         * Additionally check for the condition where there was a port
2098         * VLAN but now there is a new and different port VLAN being set.
2099         * Before deleting all the old VLAN filters we must add new ones
2100         * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
2101         * MAC addresses deleted.
2102         */
2103        if ((!(vlan_id || qos) ||
2104            (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)) !=
2105            le16_to_cpu(vsi->info.pvid)) && vsi->info.pvid)
2106                ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
2107
2108        if (vsi->info.pvid) {
2109                /* kill old VLAN */
2110                ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2111                                               VLAN_VID_MASK));
2112                if (ret) {
2113                        dev_info(&vsi->back->pdev->dev,
2114                                 "remove VLAN failed, ret=%d, aq_err=%d\n",
2115                                 ret, pf->hw.aq.asq_last_status);
2116                }
2117        }
2118        if (vlan_id || qos)
2119                ret = i40e_vsi_add_pvid(vsi,
2120                                vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
2121        else
2122                i40e_vsi_remove_pvid(vsi);
2123
2124        if (vlan_id) {
2125                dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2126                         vlan_id, qos, vf_id);
2127
2128                /* add new VLAN filter */
2129                ret = i40e_vsi_add_vlan(vsi, vlan_id);
2130                if (ret) {
2131                        dev_info(&vsi->back->pdev->dev,
2132                                 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
2133                                 vsi->back->hw.aq.asq_last_status);
2134                        goto error_pvid;
2135                }
2136                /* Kill non-vlan MAC filters - ignore error return since
2137                 * there might not be any non-vlan MAC filters.
2138                 */
2139                i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
2140        }
2141
2142        if (ret) {
2143                dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
2144                goto error_pvid;
2145        }
2146        /* The Port VLAN needs to be saved across resets the same as the
2147         * default LAN MAC address.
2148         */
2149        vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2150        ret = 0;
2151
2152error_pvid:
2153        return ret;
2154}
2155
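/* A stand-alone sketch of how the port VLAN programmed above is packed into
 * the 16-bit pvid and later unpacked by i40e_ndo_get_vf_config.  The DEMO_*
 * shift and masks are illustrative placeholders based on the standard 802.1Q
 * TCI layout; the driver itself uses I40E_VLAN_PRIORITY_SHIFT, I40E_VLAN_MASK
 * and I40E_PRIORITY_MASK from its headers.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_VLAN_PRIORITY_SHIFT	13	/* assumed 802.1Q-style layout */
#define DEMO_VLAN_MASK			0x0FFF
#define DEMO_PRIORITY_MASK		0xE000

int main(void)
{
	uint16_t vlan_id = 100, qos = 5;
	uint16_t pvid = vlan_id | (qos << DEMO_VLAN_PRIORITY_SHIFT);

	printf("pvid = 0x%04x\n", (unsigned int)pvid);		/* 0xa064 */
	printf("vlan back = %u\n", (unsigned int)(pvid & DEMO_VLAN_MASK));
	printf("qos back  = %u\n",
	       (unsigned int)((pvid & DEMO_PRIORITY_MASK) >>
			      DEMO_VLAN_PRIORITY_SHIFT));	/* 5 */
	return 0;
}
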
2156#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */
2157#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */
2158/**
2159 * i40e_ndo_set_vf_bw
2160 * @netdev: network interface device structure
2161 * @vf_id: VF identifier
2162 * @min_tx_rate: minimum Tx rate in Mbps (not supported, must be zero)
2163 * @max_tx_rate: maximum Tx rate in Mbps, 0 disables the limit
2164 * configure VF Tx rate
2165 **/
2166int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
2167                       int max_tx_rate)
2168{
2169        struct i40e_netdev_priv *np = netdev_priv(netdev);
2170        struct i40e_pf *pf = np->vsi->back;
2171        struct i40e_vsi *vsi;
2172        struct i40e_vf *vf;
2173        int speed = 0;
2174        int ret = 0;
2175
2176        /* validate the request */
2177        if (vf_id >= pf->num_alloc_vfs) {
2178                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
2179                ret = -EINVAL;
2180                goto error;
2181        }
2182
2183        if (min_tx_rate) {
2184                dev_err(&pf->pdev->dev, "Invalid min Tx rate %d specified for VF %d: minimum rate limiting is not supported.\n",
2185                        min_tx_rate, vf_id);
2186                return -EINVAL;
2187        }
2188
2189        vf = &(pf->vf[vf_id]);
2190        vsi = pf->vsi[vf->lan_vsi_idx];
2191        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2192                dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
2193                ret = -EINVAL;
2194                goto error;
2195        }
2196
2197        switch (pf->hw.phy.link_info.link_speed) {
2198        case I40E_LINK_SPEED_40GB:
2199                speed = 40000;
2200                break;
2201        case I40E_LINK_SPEED_10GB:
2202                speed = 10000;
2203                break;
2204        case I40E_LINK_SPEED_1GB:
2205                speed = 1000;
2206                break;
2207        default:
2208                break;
2209        }
2210
2211        if (max_tx_rate > speed) {
2212                dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
2213                        max_tx_rate, vf->vf_id);
2214                ret = -EINVAL;
2215                goto error;
2216        }
2217
2218        if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
2219                dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
2220                max_tx_rate = 50;
2221        }
2222
2223        /* Tx rate credits are in values of 50Mbps, 0 is disabled */
2224        ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
2225                                          max_tx_rate / I40E_BW_CREDIT_DIVISOR,
2226                                          I40E_MAX_BW_INACTIVE_ACCUM, NULL);
2227        if (ret) {
2228                dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
2229                        ret);
2230                ret = -EIO;
2231                goto error;
2232        }
2233        vf->tx_rate = max_tx_rate;
2234error:
2235        return ret;
2236}
2237
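/* A quick stand-alone sketch of the rate conversion performed above: the
 * ndo max_tx_rate (in Mbps) becomes firmware BW credits of 50 Mbps each
 * (I40E_BW_CREDIT_DIVISOR), rates below 50 Mbps are bumped to the minimum,
 * and 0 leaves the limiter disabled.
 */
#include <stdio.h>

#define DEMO_BW_CREDIT_DIVISOR 50	/* 50 Mbps per credit, as in the driver */

static int demo_rate_to_credits(int max_tx_rate)
{
	if (max_tx_rate == 0)
		return 0;				/* 0 disables the limit */
	if (max_tx_rate < DEMO_BW_CREDIT_DIVISOR)
		max_tx_rate = DEMO_BW_CREDIT_DIVISOR;	/* clamp up to 50 Mbps */
	return max_tx_rate / DEMO_BW_CREDIT_DIVISOR;	/* truncating divide */
}

int main(void)
{
	printf("1000 Mbps -> %d credits\n", demo_rate_to_credits(1000)); /* 20 */
	printf("  30 Mbps -> %d credits\n", demo_rate_to_credits(30));   /*  1 */
	printf("   0 Mbps -> %d credits\n", demo_rate_to_credits(0));    /*  0 */
	return 0;
}
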
2238/**
2239 * i40e_ndo_get_vf_config
2240 * @netdev: network interface device structure
2241 * @vf_id: VF identifier
2242 * @ivi: VF configuration structure
2243 *
2244 * return VF configuration
2245 **/
2246int i40e_ndo_get_vf_config(struct net_device *netdev,
2247                           int vf_id, struct ifla_vf_info *ivi)
2248{
2249        struct i40e_netdev_priv *np = netdev_priv(netdev);
2250        struct i40e_vsi *vsi = np->vsi;
2251        struct i40e_pf *pf = vsi->back;
2252        struct i40e_vf *vf;
2253        int ret = 0;
2254
2255        /* validate the request */
2256        if (vf_id >= pf->num_alloc_vfs) {
2257                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2258                ret = -EINVAL;
2259                goto error_param;
2260        }
2261
2262        vf = &(pf->vf[vf_id]);
2263        /* first vsi is always the LAN vsi */
2264        vsi = pf->vsi[vf->lan_vsi_idx];
2265        if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
2266                dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
2267                ret = -EINVAL;
2268                goto error_param;
2269        }
2270
2271        ivi->vf = vf_id;
2272
2273        memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
2274
2275        ivi->max_tx_rate = vf->tx_rate;
2276        ivi->min_tx_rate = 0;
2277        ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
2278        ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
2279                   I40E_VLAN_PRIORITY_SHIFT;
2280        if (!vf->link_forced)
2281                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
2282        else if (vf->link_up)
2283                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2284        else
2285                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2286        ivi->spoofchk = vf->spoofchk;
2287        ret = 0;
2288
2289error_param:
2290        return ret;
2291}
2292
2293/**
2294 * i40e_ndo_set_vf_link_state
2295 * @netdev: network interface device structure
2296 * @vf_id: VF identifier
2297 * @link: required link state
2298 *
2299 * Set the link state of a specified VF, regardless of physical link state
2300 **/
2301int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2302{
2303        struct i40e_netdev_priv *np = netdev_priv(netdev);
2304        struct i40e_pf *pf = np->vsi->back;
2305        struct i40e_virtchnl_pf_event pfe;
2306        struct i40e_hw *hw = &pf->hw;
2307        struct i40e_vf *vf;
2308        int abs_vf_id;
2309        int ret = 0;
2310
2311        /* validate the request */
2312        if (vf_id >= pf->num_alloc_vfs) {
2313                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2314                ret = -EINVAL;
2315                goto error_out;
2316        }
2317
2318        vf = &pf->vf[vf_id];
2319        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
2320
2321        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
2322        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
2323
2324        switch (link) {
2325        case IFLA_VF_LINK_STATE_AUTO:
2326                vf->link_forced = false;
2327                pfe.event_data.link_event.link_status =
2328                        pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
2329                pfe.event_data.link_event.link_speed =
2330                        pf->hw.phy.link_info.link_speed;
2331                break;
2332        case IFLA_VF_LINK_STATE_ENABLE:
2333                vf->link_forced = true;
2334                vf->link_up = true;
2335                pfe.event_data.link_event.link_status = true;
2336                pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
2337                break;
2338        case IFLA_VF_LINK_STATE_DISABLE:
2339                vf->link_forced = true;
2340                vf->link_up = false;
2341                pfe.event_data.link_event.link_status = false;
2342                pfe.event_data.link_event.link_speed = 0;
2343                break;
2344        default:
2345                ret = -EINVAL;
2346                goto error_out;
2347        }
2348        /* Notify the VF of its new link state */
2349        i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
2350                               0, (u8 *)&pfe, sizeof(pfe), NULL);
2351
2352error_out:
2353        return ret;
2354}
2355
2356/**
2357 * i40e_ndo_set_vf_spoofchk
2358 * @netdev: network interface device structure
2359 * @vf_id: VF identifier
2360 * @enable: flag to enable or disable feature
2361 *
2362 * Enable or disable VF spoof checking
2363 **/
2364int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
2365{
2366        struct i40e_netdev_priv *np = netdev_priv(netdev);
2367        struct i40e_vsi *vsi = np->vsi;
2368        struct i40e_pf *pf = vsi->back;
2369        struct i40e_vsi_context ctxt;
2370        struct i40e_hw *hw = &pf->hw;
2371        struct i40e_vf *vf;
2372        int ret = 0;
2373
2374        /* validate the request */
2375        if (vf_id >= pf->num_alloc_vfs) {
2376                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2377                ret = -EINVAL;
2378                goto out;
2379        }
2380
2381        vf = &(pf->vf[vf_id]);
2382
2383        if (enable == vf->spoofchk)
2384                goto out;
2385
2386        vf->spoofchk = enable;
2387        memset(&ctxt, 0, sizeof(ctxt));
2388        ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
2389        ctxt.pf_num = pf->hw.pf_id;
2390        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
2391        if (enable)
2392                ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
2393                                        I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
2394        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
2395        if (ret) {
2396                dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
2397                        ret);
2398                ret = -EIO;
2399        }
2400out:
2401        return ret;
2402}
2403
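/* None of the i40e_ndo_* handlers above are called from this file; they are
 * plugged into the PF netdev's struct net_device_ops in i40e_main.c, which is
 * how "ip link set <pf> vf <n> ..." requests reach them.  A sketch of that
 * hookup, abbreviated to the VF-related callbacks only:
 */
static const struct net_device_ops i40e_netdev_ops_sketch = {
	/* ... regular netdev callbacks elided ... */
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
};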