/* linux/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c */
   1/*******************************************************************************
   2 *
   3 * Intel Ethernet Controller XL710 Family Linux Driver
   4 * Copyright(c) 2013 - 2016 Intel Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms and conditions of the GNU General Public License,
   8 * version 2, as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope it will be useful, but WITHOUT
  11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13 * more details.
  14 *
  15 * You should have received a copy of the GNU General Public License along
  16 * with this program.  If not, see <http://www.gnu.org/licenses/>.
  17 *
  18 * The full GNU General Public License is included in this distribution in
  19 * the file called "COPYING".
  20 *
  21 * Contact Information:
  22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24 *
  25 ******************************************************************************/
  26
  27#include "i40e.h"
  28
  29/*********************notification routines***********************/
  30
  31/**
  32 * i40e_vc_vf_broadcast
  33 * @pf: pointer to the PF structure
  34 * @opcode: operation code
  35 * @retval: return value
  36 * @msg: pointer to the msg buffer
  37 * @msglen: msg length
  38 *
  39 * send a message to all VFs on a given PF
  40 **/
  41static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
  42                                 enum virtchnl_ops v_opcode,
  43                                 i40e_status v_retval, u8 *msg,
  44                                 u16 msglen)
  45{
  46        struct i40e_hw *hw = &pf->hw;
  47        struct i40e_vf *vf = pf->vf;
  48        int i;
  49
  50        for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
  51                int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
  52                /* Not all vfs are enabled so skip the ones that are not */
  53                if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
  54                    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
  55                        continue;
  56
  57                /* Ignore return value on purpose - a given VF may fail, but
  58                 * we need to keep going and send to all of them
  59                 */
  60                i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
  61                                       msg, msglen, NULL);
  62        }
  63}
  64
  65/**
  66 * i40e_vc_notify_vf_link_state
  67 * @vf: pointer to the VF structure
  68 *
  69 * send a link status message to a single VF
  70 **/
  71static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
  72{
  73        struct virtchnl_pf_event pfe;
  74        struct i40e_pf *pf = vf->pf;
  75        struct i40e_hw *hw = &pf->hw;
  76        struct i40e_link_status *ls = &pf->hw.phy.link_info;
  77        int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
  78
  79        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
  80        pfe.severity = PF_EVENT_SEVERITY_INFO;
  81        if (vf->link_forced) {
  82                pfe.event_data.link_event.link_status = vf->link_up;
  83                pfe.event_data.link_event.link_speed =
  84                        (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
  85        } else {
  86                pfe.event_data.link_event.link_status =
  87                        ls->link_info & I40E_AQ_LINK_UP;
  88                pfe.event_data.link_event.link_speed =
  89                        (enum virtchnl_link_speed)ls->link_speed;
  90        }
  91        i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
  92                               0, (u8 *)&pfe, sizeof(pfe), NULL);
  93}
  94
  95/**
  96 * i40e_vc_notify_link_state
  97 * @pf: pointer to the PF structure
  98 *
  99 * send a link status message to all VFs on a given PF
 100 **/
 101void i40e_vc_notify_link_state(struct i40e_pf *pf)
 102{
 103        int i;
 104
 105        for (i = 0; i < pf->num_alloc_vfs; i++)
 106                i40e_vc_notify_vf_link_state(&pf->vf[i]);
 107}
 108
 109/**
 110 * i40e_vc_notify_reset
 111 * @pf: pointer to the PF structure
 112 *
 113 * indicate a pending reset to all VFs on a given PF
 114 **/
 115void i40e_vc_notify_reset(struct i40e_pf *pf)
 116{
 117        struct virtchnl_pf_event pfe;
 118
 119        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
 120        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
 121        i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
 122                             (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
 123}
 124
 125/**
 126 * i40e_vc_notify_vf_reset
 127 * @vf: pointer to the VF structure
 128 *
 129 * indicate a pending reset to the given VF
 130 **/
 131void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 132{
 133        struct virtchnl_pf_event pfe;
 134        int abs_vf_id;
 135
 136        /* validate the request */
 137        if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
 138                return;
 139
 140        /* verify if the VF is in either init or active before proceeding */
 141        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
 142            !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
 143                return;
 144
 145        abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
 146
 147        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
 148        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
 149        i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
 150                               0, (u8 *)&pfe,
 151                               sizeof(struct virtchnl_pf_event), NULL);
 152}
 153/***********************misc routines*****************************/
 154
 155/**
 156 * i40e_vc_disable_vf
 157 * @vf: pointer to the VF info
 158 *
 159 * Disable the VF through a SW reset.
 160 **/
 161static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
 162{
 163        int i;
 164
 165        i40e_vc_notify_vf_reset(vf);
 166
 167        /* We want to ensure that an actual reset occurs initiated after this
 168         * function was called. However, we do not want to wait forever, so
 169         * we'll give a reasonable time and print a message if we failed to
 170         * ensure a reset.
 171         */
 172        for (i = 0; i < 20; i++) {
 173                if (i40e_reset_vf(vf, false))
 174                        return;
 175                usleep_range(10000, 20000);
 176        }
 177
 178        dev_warn(&vf->pf->pdev->dev,
 179                 "Failed to initiate reset for VF %d after 200 milliseconds\n",
 180                 vf->vf_id);
 181}
 182
 183/**
 184 * i40e_vc_isvalid_vsi_id
 185 * @vf: pointer to the VF info
 186 * @vsi_id: VF relative VSI id
 187 *
 188 * check for the valid VSI id
 189 **/
 190static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
 191{
 192        struct i40e_pf *pf = vf->pf;
 193        struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 194
 195        return (vsi && (vsi->vf_id == vf->vf_id));
 196}
 197
 198/**
 199 * i40e_vc_isvalid_queue_id
 200 * @vf: pointer to the VF info
 201 * @vsi_id: vsi id
 202 * @qid: vsi relative queue id
 203 *
 204 * check for the valid queue id
 205 **/
 206static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
 207                                            u8 qid)
 208{
 209        struct i40e_pf *pf = vf->pf;
 210        struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 211
 212        return (vsi && (qid < vsi->alloc_queue_pairs));
 213}
 214
 215/**
 216 * i40e_vc_isvalid_vector_id
 217 * @vf: pointer to the VF info
 218 * @vector_id: VF relative vector id
 219 *
 220 * check for the valid vector id
 221 **/
 222static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
 223{
 224        struct i40e_pf *pf = vf->pf;
 225
 226        return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
 227}
 228
 229/***********************vf resource mgmt routines*****************/
 230
 231/**
 232 * i40e_vc_get_pf_queue_id
 233 * @vf: pointer to the VF info
 234 * @vsi_id: id of VSI as provided by the FW
 235 * @vsi_queue_id: vsi relative queue id
 236 *
 237 * return PF relative queue id
 238 **/
 239static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
 240                                   u8 vsi_queue_id)
 241{
 242        struct i40e_pf *pf = vf->pf;
 243        struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
 244        u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
 245
 246        if (!vsi)
 247                return pf_queue_id;
 248
 249        if (le16_to_cpu(vsi->info.mapping_flags) &
 250            I40E_AQ_VSI_QUE_MAP_NONCONTIG)
 251                pf_queue_id =
 252                        le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
 253        else
 254                pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
 255                              vsi_queue_id;
 256
 257        return pf_queue_id;
 258}
 259
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 *
 * Builds the hardware's per-vector queue linked list: a bitmap of
 * (queue, qtype) slots is assembled from the VF-supplied rx/tx queue
 * maps, then walked to chain each queue's RQCTL/TQCTL register to the
 * next entry, terminated with I40E_QUEUE_END_OF_LIST.
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head: vector 0 uses LNKLST0, all others index into the
	 * per-VF LNKLSTN register block
	 */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	/* fold the rx queue map into the combined bitmap; each queue
	 * occupies I40E_VIRTCHNL_SUPPORTED_QTYPES adjacent bits (rx slot
	 * first, tx slot next)
	 */
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	/* fold the tx queue map in at the +1 (tx) slot */
	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				     vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	/* decode the first bitmap slot back into (queue id, queue type)
	 * and write it as the head of the hardware linked list
	 */
	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	/* walk the remaining slots: each iteration programs the current
	 * queue's cause-control register to point at the next entry
	 */
	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      vsi_queue_id);
		} else {
			/* last entry: terminate the list */
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
 367
/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 * Undo i40e_config_iwarp_qvlist: for every vector that had a CEQ spliced
 * in at the head of its queue linked list, restore the queue that the CEQ
 * currently points at as the new list head, then free the cached qvlist.
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	/* nothing was configured for this VF */
	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			/* re-point the vector's LNKLSTN head at the queue
			 * that followed the CEQ
			 */
			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}
 417
 418/**
 419 * i40e_config_iwarp_qvlist
 420 * @vf: pointer to the VF info
 421 * @qvlist_info: queue and vector list
 422 *
 423 * Return 0 on success or < 0 on error
 424 **/
 425static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
 426                                    struct virtchnl_iwarp_qvlist_info *qvlist_info)
 427{
 428        struct i40e_pf *pf = vf->pf;
 429        struct i40e_hw *hw = &pf->hw;
 430        struct virtchnl_iwarp_qv_info *qv_info;
 431        u32 v_idx, i, reg_idx, reg;
 432        u32 next_q_idx, next_q_type;
 433        u32 msix_vf, size;
 434
 435        size = sizeof(struct virtchnl_iwarp_qvlist_info) +
 436               (sizeof(struct virtchnl_iwarp_qv_info) *
 437                                                (qvlist_info->num_vectors - 1));
 438        vf->qvlist_info = kzalloc(size, GFP_KERNEL);
 439        if (!vf->qvlist_info)
 440                return -ENOMEM;
 441
 442        vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
 443
 444        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
 445        for (i = 0; i < qvlist_info->num_vectors; i++) {
 446                qv_info = &qvlist_info->qv_info[i];
 447                if (!qv_info)
 448                        continue;
 449                v_idx = qv_info->v_idx;
 450
 451                /* Validate vector id belongs to this vf */
 452                if (!i40e_vc_isvalid_vector_id(vf, v_idx))
 453                        goto err;
 454
 455                vf->qvlist_info->qv_info[i] = *qv_info;
 456
 457                reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 458                /* We might be sharing the interrupt, so get the first queue
 459                 * index and type, push it down the list by adding the new
 460                 * queue on top. Also link it with the new queue in CEQCTL.
 461                 */
 462                reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
 463                next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
 464                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
 465                next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
 466                                I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 467
 468                if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
 469                        reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
 470                        reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
 471                        (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
 472                        (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
 473                        (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
 474                        (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
 475                        wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
 476
 477                        reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
 478                        reg = (qv_info->ceq_idx &
 479                               I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
 480                               (I40E_QUEUE_TYPE_PE_CEQ <<
 481                               I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
 482                        wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
 483                }
 484
 485                if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
 486                        reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
 487                        (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
 488                        (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
 489
 490                        wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
 491                }
 492        }
 493
 494        return 0;
 495err:
 496        kfree(vf->qvlist_info);
 497        vf->qvlist_info = NULL;
 498        return -EINVAL;
 499}
 500
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 *
 * Translates the VF-relative queue to a PF queue, writes the Tx HMC
 * context from the VF-supplied parameters, and ties the queue to the
 * VF's PCI function via QTX_CTL.  Returns 0 or -ENOENT.
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	/* NOTE(review): validates info->vsi_id but performs the lookup below
	 * with the vsi_id parameter; callers appear to pass matching values —
	 * confirm the two can never diverge.
	 */
	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;	/* base is in 128-byte units */
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
 577
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI  as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 *
 * Validates the VF-supplied buffer/packet sizes and writes the Rx HMC
 * queue context.  Returns 0, -EINVAL on bad sizes, or -ENOENT on HMC
 * failure.
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;	/* base is in 128-byte units */
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
 668
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @type: type of VSI to allocate
 *
 * alloc VF vsi context & resources
 *
 * Creates a VSI under the PF's LAN VSI for this VF.  For SR-IOV VSIs,
 * also restores a configured port VLAN, installs the VF's unicast and
 * broadcast MAC filters (under the filter hash lock), and programs the
 * default RSS hash-enable registers.  Finally syncs filters to hardware
 * and applies any configured Tx rate limit.
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		/* MAC filter hash is shared; both adds happen under the lock */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		/* program the default RSS hash-enable bits (64-bit value
		 * split across two 32-bit registers)
		 */
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		/* tx_rate is in Mbps; hardware credits are 50 Mbps units */
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  vf->tx_rate / 50, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}
 743
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 *
 * Programs the PF-queue-to-VF-queue translation tables (VPLAN_QTABLE)
 * and the per-VSI queue table (VSILAN_QTABLE), then enables the mapping.
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);

		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI; each of the 7 VSILAN_QTABLE registers packs
	 * two 16-bit queue ids (even queue in the low half, odd in the high)
	 */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
			/* 0x7FF in both halves marks the entry unused */
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
				  reg);
	}

	i40e_flush(hw);
}
 795
 796/**
 797 * i40e_disable_vf_mappings
 798 * @vf: pointer to the VF info
 799 *
 800 * disable VF mappings
 801 **/
 802static void i40e_disable_vf_mappings(struct i40e_vf *vf)
 803{
 804        struct i40e_pf *pf = vf->pf;
 805        struct i40e_hw *hw = &pf->hw;
 806        int i;
 807
 808        /* disable qp mappings */
 809        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
 810        for (i = 0; i < I40E_MAX_VSI_QP; i++)
 811                wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
 812                     I40E_QUEUE_END_OF_LIST);
 813        i40e_flush(hw);
 814}
 815
/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers; vector 0 lives in
		 * VFINT_DYN_CTL0, the remaining vectors in the flat
		 * VFINT_DYN_CTLN array indexed by (msix_vf - 1) per VF
		 */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		/* setting both FIRSTQ masks marks the vector's queue list
		 * as empty / end-of-list
		 */
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
 883
 884/**
 885 * i40e_alloc_vf_res
 886 * @vf: pointer to the VF info
 887 *
 888 * allocate VF resources
 889 **/
 890static int i40e_alloc_vf_res(struct i40e_vf *vf)
 891{
 892        struct i40e_pf *pf = vf->pf;
 893        int total_queue_pairs = 0;
 894        int ret;
 895
 896        if (vf->num_req_queues &&
 897            vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
 898                pf->num_vf_qps = vf->num_req_queues;
 899        else
 900                pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
 901
 902        /* allocate hw vsi context & associated resources */
 903        ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
 904        if (ret)
 905                goto error_alloc;
 906        total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
 907
 908        /* We account for each VF to get a default number of queue pairs.  If
 909         * the VF has now requested more, we need to account for that to make
 910         * certain we never request more queues than we actually have left in
 911         * HW.
 912         */
 913        if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
 914                pf->queues_left -=
 915                        total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
 916
 917        if (vf->trusted)
 918                set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 919        else
 920                clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 921
 922        /* store the total qps number for the runtime
 923         * VF req validation
 924         */
 925        vf->num_queue_pairs = total_queue_pairs;
 926
 927        /* VF is now completely initialized */
 928        set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
 929
 930error_alloc:
 931        if (ret)
 932                i40e_free_vf_res(vf);
 933
 934        return ret;
 935}
 936
 937#define VF_DEVICE_STATUS 0xAA
 938#define VF_TRANS_PENDING_MASK 0x20
 939/**
 940 * i40e_quiesce_vf_pci
 941 * @vf: pointer to the VF structure
 942 *
 943 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 944 * if the transactions never clear.
 945 **/
 946static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
 947{
 948        struct i40e_pf *pf = vf->pf;
 949        struct i40e_hw *hw = &pf->hw;
 950        int vf_abs_id, i;
 951        u32 reg;
 952
 953        vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
 954
 955        wr32(hw, I40E_PF_PCI_CIAA,
 956             VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
 957        for (i = 0; i < 100; i++) {
 958                reg = rd32(hw, I40E_PF_PCI_CIAD);
 959                if ((reg & VF_TRANS_PENDING_MASK) == 0)
 960                        return 0;
 961                udelay(1);
 962        }
 963        return -EIO;
 964}
 965
/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT; each register covers 32 VFs,
	 * so locate the word and bit via the VF's absolute (device-wide) id
	 */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}
1012
/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		/* clients are addressed by absolute VF id, not per-PF index */
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
1063
1064/**
1065 * i40e_reset_vf
1066 * @vf: pointer to the VF structure
1067 * @flr: VFLR was issued or not
1068 *
1069 * Returns true if the VF is reset, false otherwise.
1070 **/
1071bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1072{
1073        struct i40e_pf *pf = vf->pf;
1074        struct i40e_hw *hw = &pf->hw;
1075        bool rsd = false;
1076        u32 reg;
1077        int i;
1078
1079        /* If the VFs have been disabled, this means something else is
1080         * resetting the VF, so we shouldn't continue.
1081         */
1082        if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1083                return false;
1084
1085        i40e_trigger_vf_reset(vf, flr);
1086
1087        /* poll VPGEN_VFRSTAT reg to make sure
1088         * that reset is complete
1089         */
1090        for (i = 0; i < 10; i++) {
1091                /* VF reset requires driver to first reset the VF and then
1092                 * poll the status register to make sure that the reset
1093                 * completed successfully. Due to internal HW FIFO flushes,
1094                 * we must wait 10ms before the register will be valid.
1095                 */
1096                usleep_range(10000, 20000);
1097                reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1098                if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1099                        rsd = true;
1100                        break;
1101                }
1102        }
1103
1104        if (flr)
1105                usleep_range(10000, 20000);
1106
1107        if (!rsd)
1108                dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1109                        vf->vf_id);
1110        usleep_range(10000, 20000);
1111
1112        /* On initial reset, we don't have any queues to disable */
1113        if (vf->lan_vsi_idx != 0)
1114                i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1115
1116        i40e_cleanup_reset_vf(vf);
1117
1118        i40e_flush(hw);
1119        clear_bit(__I40E_VF_DISABLE, pf->state);
1120
1121        return true;
1122}
1123
/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting. Total wait is bounded at 10 sleep periods
	 * regardless of how many VFs there are.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF to fail
		 * the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
1227
1228/**
1229 * i40e_free_vfs
1230 * @pf: pointer to the PF structure
1231 *
1232 * free VF resources
1233 **/
1234void i40e_free_vfs(struct i40e_pf *pf)
1235{
1236        struct i40e_hw *hw = &pf->hw;
1237        u32 reg_idx, bit_idx;
1238        int i, tmp, vf_id;
1239
1240        if (!pf->vf)
1241                return;
1242        while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1243                usleep_range(1000, 2000);
1244
1245        i40e_notify_client_of_vf_enable(pf, 0);
1246
1247        /* Amortize wait time by stopping all VFs at the same time */
1248        for (i = 0; i < pf->num_alloc_vfs; i++) {
1249                if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1250                        continue;
1251
1252                i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1253        }
1254
1255        for (i = 0; i < pf->num_alloc_vfs; i++) {
1256                if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1257                        continue;
1258
1259                i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1260        }
1261
1262        /* Disable IOV before freeing resources. This lets any VF drivers
1263         * running in the host get themselves cleaned up before we yank
1264         * the carpet out from underneath their feet.
1265         */
1266        if (!pci_vfs_assigned(pf->pdev))
1267                pci_disable_sriov(pf->pdev);
1268        else
1269                dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1270
1271        /* free up VF resources */
1272        tmp = pf->num_alloc_vfs;
1273        pf->num_alloc_vfs = 0;
1274        for (i = 0; i < tmp; i++) {
1275                if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1276                        i40e_free_vf_res(&pf->vf[i]);
1277                /* disable qp mappings */
1278                i40e_disable_vf_mappings(&pf->vf[i]);
1279        }
1280
1281        kfree(pf->vf);
1282        pf->vf = NULL;
1283
1284        /* This check is for when the driver is unloaded while VFs are
1285         * assigned. Setting the number of VFs to 0 through sysfs is caught
1286         * before this function ever gets called.
1287         */
1288        if (!pci_vfs_assigned(pf->pdev)) {
1289                /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1290                 * work correctly when SR-IOV gets re-enabled.
1291                 */
1292                for (vf_id = 0; vf_id < tmp; vf_id++) {
1293                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1294                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1295                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1296                }
1297        }
1298        clear_bit(__I40E_VF_DISABLE, pf->state);
1299}
1300
1301#ifdef CONFIG_PCI_IOV
1302/**
1303 * i40e_alloc_vfs
1304 * @pf: pointer to the PF structure
1305 * @num_alloc_vfs: number of VFs to allocate
1306 *
1307 * allocate VF resources
1308 **/
1309int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1310{
1311        struct i40e_vf *vfs;
1312        int i, ret = 0;
1313
1314        /* Disable interrupt 0 so we don't try to handle the VFLR. */
1315        i40e_irq_dynamic_disable_icr0(pf);
1316
1317        /* Check to see if we're just allocating resources for extant VFs */
1318        if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1319                ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1320                if (ret) {
1321                        pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1322                        pf->num_alloc_vfs = 0;
1323                        goto err_iov;
1324                }
1325        }
1326        /* allocate memory */
1327        vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1328        if (!vfs) {
1329                ret = -ENOMEM;
1330                goto err_alloc;
1331        }
1332        pf->vf = vfs;
1333
1334        /* apply default profile */
1335        for (i = 0; i < num_alloc_vfs; i++) {
1336                vfs[i].pf = pf;
1337                vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1338                vfs[i].vf_id = i;
1339
1340                /* assign default capabilities */
1341                set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1342                vfs[i].spoofchk = true;
1343
1344                set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1345
1346        }
1347        pf->num_alloc_vfs = num_alloc_vfs;
1348
1349        /* VF resources get allocated during reset */
1350        i40e_reset_all_vfs(pf, false);
1351
1352        i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1353
1354err_alloc:
1355        if (ret)
1356                i40e_free_vfs(pf);
1357err_iov:
1358        /* Re-enable interrupt 0. */
1359        i40e_irq_dynamic_enable_icr0(pf);
1360        return ret;
1361}
1362
1363#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 *
 * Returns the number of VFs enabled on success, a negative errno on
 * failure, or 0 when built without CONFIG_PCI_IOV.
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	/* Changing the VF count requires freeing the old VFs first;
	 * requesting the count already configured is a no-op.
	 */
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	/* only reachable when CONFIG_PCI_IOV is not set */
	return 0;
}
1412
1413/**
1414 * i40e_pci_sriov_configure
1415 * @pdev: pointer to a pci_dev structure
1416 * @num_vfs: number of VFs to allocate
1417 *
1418 * Enable or change the number of VFs. Called when the user updates the number
1419 * of VFs in sysfs.
1420 **/
1421int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1422{
1423        struct i40e_pf *pf = pci_get_drvdata(pdev);
1424
1425        if (num_vfs) {
1426                if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1427                        pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1428                        i40e_do_reset_safe(pf,
1429                                           BIT_ULL(__I40E_PF_RESET_REQUESTED));
1430                }
1431                return i40e_pci_sriov_enable(pdev, num_vfs);
1432        }
1433
1434        if (!pci_vfs_assigned(pf->pdev)) {
1435                i40e_free_vfs(pf);
1436                pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1437                i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
1438        } else {
1439                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1440                return -EINVAL;
1441        }
1442        return 0;
1443}
1444
1445/***********************virtual channel routines******************/
1446
/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 *
 * Returns 0 on success, -EINVAL for a bad VF pointer/id, -EIO if the
 * admin queue send fails.
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	/* the admin queue addresses VFs by absolute id, not per-PF index */
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		/* a VF that keeps sending failing requests is quarantined
		 * until the administrator re-enables it
		 */
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,  v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
1503
1504/**
1505 * i40e_vc_send_resp_to_vf
1506 * @vf: pointer to the VF info
1507 * @opcode: operation code
1508 * @retval: return value
1509 *
1510 * send resp msg to VF
1511 **/
1512static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1513                                   enum virtchnl_ops opcode,
1514                                   i40e_status retval)
1515{
1516        return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1517}
1518
1519/**
1520 * i40e_vc_get_version_msg
1521 * @vf: pointer to the VF info
1522 *
1523 * called from the VF to request the API version used by the PF
1524 **/
1525static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
1526{
1527        struct virtchnl_version_info info = {
1528                VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1529        };
1530
1531        vf->vf_ver = *(struct virtchnl_version_info *)msg;
1532        /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1533        if (VF_IS_V10(&vf->vf_ver))
1534                info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1535        return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1536                                      I40E_SUCCESS, (u8 *)&info,
1537                                      sizeof(struct virtchnl_version_info));
1538}
1539
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 *
 * Negotiates capability flags against what the VF driver advertised and
 * replies with the VF's VSI, queue, vector and RSS parameters.
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	/* v1.1+ VFs send their capabilities in the message; older VFs get
	 * a fixed legacy set
	 */
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	/* VLAN offload is only offered when no port VLAN is configured */
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	/* pick the best RSS configuration method both sides support:
	 * PF-managed, AQ-based, or direct register access
	 */
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				 vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
1661
1662/**
1663 * i40e_vc_reset_vf_msg
1664 * @vf: pointer to the VF info
1665 * @msg: pointer to the msg buffer
1666 * @msglen: msg length
1667 *
1668 * called from the VF to reset itself,
1669 * unlike other virtchnl messages, PF driver
1670 * doesn't send the response back to the VF
1671 **/
1672static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1673{
1674        if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
1675                i40e_reset_vf(vf, false);
1676}
1677
1678/**
1679 * i40e_getnum_vf_vsi_vlan_filters
1680 * @vsi: pointer to the vsi
1681 *
1682 * called to get the number of VLANs offloaded on this VF
1683 **/
1684static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1685{
1686        struct i40e_mac_filter *f;
1687        int num_vlans = 0, bkt;
1688
1689        hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1690                if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1691                        num_vlans++;
1692        }
1693
1694        return num_vlans;
1695}
1696
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 *
 * Multicast and unicast promiscuous mode are each applied in one of three
 * ways, depending on the VSI configuration: on the port VLAN if one is set,
 * per offloaded VLAN filter otherwise, or VSI-wide if no VLAN filters exist.
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	struct i40e_vsi *vsi;
	bool alluni = false;
	int aq_err = 0;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    !vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose. */
		aq_ret = 0;
		goto error_param;
	}
	/* Multicast promiscuous handling*/
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (vf->port_vlan_id) {
		/* port VLAN configured: scope promiscuous to that VLAN only */
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		/* apply per offloaded VLAN; stop on the first AQ failure */
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret) {
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
				break;
			}
		}
	} else {
		/* no VLAN filters: set promiscuous on the whole VSI */
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
							       allmulti, NULL);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	/* track multicast promiscuous state only when the AQ update stuck */
	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set multicast promiscuous mode\n",
			 vf->vf_id);
		if (allmulti)
			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	}

	/* Unicast promiscuous handling: same three-way split as multicast.
	 * NOTE(review): unlike the multicast loop above, a per-VLAN unicast
	 * failure is only logged and the loop keeps going — presumably
	 * intentional best-effort, but confirm the asymmetry is wanted.
	 */
	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret)
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							     alluni, NULL,
							     true);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
				vf->vf_id, info->flags,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	/* track unicast promiscuous state only when the AQ update stuck */
	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set unicast promiscuous mode\n",
			 vf->vf_id);
		if (alluni)
			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
1843
1844/**
1845 * i40e_vc_config_queues_msg
1846 * @vf: pointer to the VF info
1847 * @msg: pointer to the msg buffer
1848 * @msglen: msg length
1849 *
1850 * called from the VF to configure the rx/tx
1851 * queues
1852 **/
1853static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1854{
1855        struct virtchnl_vsi_queue_config_info *qci =
1856            (struct virtchnl_vsi_queue_config_info *)msg;
1857        struct virtchnl_queue_pair_info *qpi;
1858        struct i40e_pf *pf = vf->pf;
1859        u16 vsi_id, vsi_queue_id;
1860        i40e_status aq_ret = 0;
1861        int i;
1862
1863        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
1864                aq_ret = I40E_ERR_PARAM;
1865                goto error_param;
1866        }
1867
1868        vsi_id = qci->vsi_id;
1869        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1870                aq_ret = I40E_ERR_PARAM;
1871                goto error_param;
1872        }
1873        for (i = 0; i < qci->num_queue_pairs; i++) {
1874                qpi = &qci->qpair[i];
1875                vsi_queue_id = qpi->txq.queue_id;
1876                if ((qpi->txq.vsi_id != vsi_id) ||
1877                    (qpi->rxq.vsi_id != vsi_id) ||
1878                    (qpi->rxq.queue_id != vsi_queue_id) ||
1879                    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
1880                        aq_ret = I40E_ERR_PARAM;
1881                        goto error_param;
1882                }
1883
1884                if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
1885                                             &qpi->rxq) ||
1886                    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
1887                                             &qpi->txq)) {
1888                        aq_ret = I40E_ERR_PARAM;
1889                        goto error_param;
1890                }
1891        }
1892        /* set vsi num_queue_pairs in use to num configured by VF */
1893        pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
1894
1895error_param:
1896        /* send the response to the VF */
1897        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1898                                       aq_ret);
1899}
1900
1901/**
1902 * i40e_vc_config_irq_map_msg
1903 * @vf: pointer to the VF info
1904 * @msg: pointer to the msg buffer
1905 * @msglen: msg length
1906 *
1907 * called from the VF to configure the irq to
1908 * queue map
1909 **/
1910static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1911{
1912        struct virtchnl_irq_map_info *irqmap_info =
1913            (struct virtchnl_irq_map_info *)msg;
1914        struct virtchnl_vector_map *map;
1915        u16 vsi_id, vsi_queue_id, vector_id;
1916        i40e_status aq_ret = 0;
1917        unsigned long tempmap;
1918        int i;
1919
1920        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
1921                aq_ret = I40E_ERR_PARAM;
1922                goto error_param;
1923        }
1924
1925        for (i = 0; i < irqmap_info->num_vectors; i++) {
1926                map = &irqmap_info->vecmap[i];
1927
1928                vector_id = map->vector_id;
1929                vsi_id = map->vsi_id;
1930                /* validate msg params */
1931                if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
1932                    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1933                        aq_ret = I40E_ERR_PARAM;
1934                        goto error_param;
1935                }
1936
1937                /* lookout for the invalid queue index */
1938                tempmap = map->rxq_map;
1939                for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1940                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1941                                                      vsi_queue_id)) {
1942                                aq_ret = I40E_ERR_PARAM;
1943                                goto error_param;
1944                        }
1945                }
1946
1947                tempmap = map->txq_map;
1948                for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1949                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1950                                                      vsi_queue_id)) {
1951                                aq_ret = I40E_ERR_PARAM;
1952                                goto error_param;
1953                        }
1954                }
1955
1956                i40e_config_irq_link_list(vf, vsi_id, map);
1957        }
1958error_param:
1959        /* send the response to the VF */
1960        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
1961                                       aq_ret);
1962}
1963
1964/**
1965 * i40e_vc_enable_queues_msg
1966 * @vf: pointer to the VF info
1967 * @msg: pointer to the msg buffer
1968 * @msglen: msg length
1969 *
1970 * called from the VF to enable all or specific queue(s)
1971 **/
1972static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1973{
1974        struct virtchnl_queue_select *vqs =
1975            (struct virtchnl_queue_select *)msg;
1976        struct i40e_pf *pf = vf->pf;
1977        u16 vsi_id = vqs->vsi_id;
1978        i40e_status aq_ret = 0;
1979
1980        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
1981                aq_ret = I40E_ERR_PARAM;
1982                goto error_param;
1983        }
1984
1985        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1986                aq_ret = I40E_ERR_PARAM;
1987                goto error_param;
1988        }
1989
1990        if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1991                aq_ret = I40E_ERR_PARAM;
1992                goto error_param;
1993        }
1994
1995        if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
1996                aq_ret = I40E_ERR_TIMEOUT;
1997error_param:
1998        /* send the response to the VF */
1999        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2000                                       aq_ret);
2001}
2002
2003/**
2004 * i40e_vc_disable_queues_msg
2005 * @vf: pointer to the VF info
2006 * @msg: pointer to the msg buffer
2007 * @msglen: msg length
2008 *
2009 * called from the VF to disable all or specific
2010 * queue(s)
2011 **/
2012static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2013{
2014        struct virtchnl_queue_select *vqs =
2015            (struct virtchnl_queue_select *)msg;
2016        struct i40e_pf *pf = vf->pf;
2017        i40e_status aq_ret = 0;
2018
2019        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2020                aq_ret = I40E_ERR_PARAM;
2021                goto error_param;
2022        }
2023
2024        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2025                aq_ret = I40E_ERR_PARAM;
2026                goto error_param;
2027        }
2028
2029        if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
2030                aq_ret = I40E_ERR_PARAM;
2031                goto error_param;
2032        }
2033
2034        i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
2035
2036error_param:
2037        /* send the response to the VF */
2038        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2039                                       aq_ret);
2040}
2041
2042/**
2043 * i40e_vc_request_queues_msg
2044 * @vf: pointer to the VF info
2045 * @msg: pointer to the msg buffer
2046 * @msglen: msg length
2047 *
2048 * VFs get a default number of queues but can use this message to request a
2049 * different number.  If the request is successful, PF will reset the VF and
2050 * return 0.  If unsuccessful, PF will send message informing VF of number of
2051 * available queues and return result of sending VF a message.
2052 **/
2053static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
2054{
2055        struct virtchnl_vf_res_request *vfres =
2056                (struct virtchnl_vf_res_request *)msg;
2057        int req_pairs = vfres->num_queue_pairs;
2058        int cur_pairs = vf->num_queue_pairs;
2059        struct i40e_pf *pf = vf->pf;
2060
2061        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2062                return -EINVAL;
2063
2064        if (req_pairs <= 0) {
2065                dev_err(&pf->pdev->dev,
2066                        "VF %d tried to request %d queues.  Ignoring.\n",
2067                        vf->vf_id, req_pairs);
2068        } else if (req_pairs > I40E_MAX_VF_QUEUES) {
2069                dev_err(&pf->pdev->dev,
2070                        "VF %d tried to request more than %d queues.\n",
2071                        vf->vf_id,
2072                        I40E_MAX_VF_QUEUES);
2073                vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2074        } else if (req_pairs - cur_pairs > pf->queues_left) {
2075                dev_warn(&pf->pdev->dev,
2076                         "VF %d requested %d more queues, but only %d left.\n",
2077                         vf->vf_id,
2078                         req_pairs - cur_pairs,
2079                         pf->queues_left);
2080                vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2081        } else {
2082                /* successful request */
2083                vf->num_req_queues = req_pairs;
2084                i40e_vc_notify_vf_reset(vf);
2085                i40e_reset_vf(vf, false);
2086                return 0;
2087        }
2088
2089        return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2090                                      (u8 *)vfres, sizeof(vfres));
2091}
2092
2093/**
2094 * i40e_vc_get_stats_msg
2095 * @vf: pointer to the VF info
2096 * @msg: pointer to the msg buffer
2097 * @msglen: msg length
2098 *
2099 * called from the VF to get vsi stats
2100 **/
2101static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2102{
2103        struct virtchnl_queue_select *vqs =
2104            (struct virtchnl_queue_select *)msg;
2105        struct i40e_pf *pf = vf->pf;
2106        struct i40e_eth_stats stats;
2107        i40e_status aq_ret = 0;
2108        struct i40e_vsi *vsi;
2109
2110        memset(&stats, 0, sizeof(struct i40e_eth_stats));
2111
2112        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2113                aq_ret = I40E_ERR_PARAM;
2114                goto error_param;
2115        }
2116
2117        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2118                aq_ret = I40E_ERR_PARAM;
2119                goto error_param;
2120        }
2121
2122        vsi = pf->vsi[vf->lan_vsi_idx];
2123        if (!vsi) {
2124                aq_ret = I40E_ERR_PARAM;
2125                goto error_param;
2126        }
2127        i40e_update_eth_stats(vsi);
2128        stats = vsi->eth_stats;
2129
2130error_param:
2131        /* send the response back to the VF */
2132        return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2133                                      (u8 *)&stats, sizeof(stats));
2134}
2135
2136/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
2137#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
2138#define I40E_VC_MAX_VLAN_PER_VF 8
2139
2140/**
2141 * i40e_check_vf_permission
2142 * @vf: pointer to the VF info
2143 * @macaddr: pointer to the MAC Address being checked
2144 *
2145 * Check if the VF has permission to add or delete unicast MAC address
2146 * filters and return error code -EPERM if not.  Then check if the
2147 * address filter requested is broadcast or zero and if so return
2148 * an invalid MAC address error code.
2149 **/
2150static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
2151{
2152        struct i40e_pf *pf = vf->pf;
2153        int ret = 0;
2154
2155        if (is_broadcast_ether_addr(macaddr) ||
2156                   is_zero_ether_addr(macaddr)) {
2157                dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
2158                ret = I40E_ERR_INVALID_MAC_ADDR;
2159        } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
2160                   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2161                   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
2162                /* If the host VMM administrator has set the VF MAC address
2163                 * administratively via the ndo_set_vf_mac command then deny
2164                 * permission to the VF to add or delete unicast MAC addresses.
2165                 * Unless the VF is privileged and then it can do whatever.
2166                 * The VF may request to set the MAC address filter already
2167                 * assigned to it so do not return an error in that case.
2168                 */
2169                dev_err(&pf->pdev->dev,
2170                        "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
2171                ret = -EPERM;
2172        } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
2173                   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2174                dev_err(&pf->pdev->dev,
2175                        "VF is not trusted, switch the VF to trusted to add more functionality\n");
2176                ret = -EPERM;
2177        }
2178        return ret;
2179}
2180
/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 *
 * All requested addresses are permission-checked up front, so the request
 * is rejected as a whole before any filter is added.
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* validate every address before modifying the filter list */
	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because all function inside for loop accesses VSI's
	 * MAC filter list which needs to be protected using same lock.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		/* skip addresses already present; only count new filters */
		f = i40e_find_mac(vsi, al->list[i].addr);
		if (!f) {
			f = i40e_add_mac_filter(vsi, al->list[i].addr);

			if (!f) {
				/* must drop the lock before bailing out;
				 * earlier additions in this request remain
				 */
				dev_err(&pf->pdev->dev,
					"Unable to add MAC filter %pM for VF %d\n",
					al->list[i].addr, vf->vf_id);
				ret = I40E_ERR_PARAM;
				spin_unlock_bh(&vsi->mac_filter_hash_lock);
				goto error_param;
			} else {
				vf->num_mac++;
			}
		}
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list (may sleep, so outside the lock) */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				       ret);
}
2250
/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* reject the whole request if any address is obviously invalid */
	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* delete addresses from the list; a nonzero return from
	 * i40e_del_mac_filter means the filter was not found, in which
	 * case we unlock and bail (earlier deletions are not rolled back)
	 */
	for (i = 0; i < al->num_elements; i++)
		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		} else {
			vf->num_mac--;
		}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list (may sleep, so outside the lock) */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
				       ret);
}
2310
/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	/* NOTE(review): this path leaves aq_ret at 0, so an untrusted VF
	 * over the limit is told the add succeeded — presumably a deliberate
	 * lie like the promiscuous-mode handler's; confirm it is intended.
	 */
	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
		goto error_param;
	}
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* validate all requested VLAN ids before programming any of them */
	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	/* VF VLAN filtering is not allowed when a port VLAN is configured */
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		if (!ret)
			vf->num_vlan++;

		/* keep any active promiscuous mode in sync with the new VLAN */
		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);

		/* a per-VLAN failure is logged but does not abort the loop
		 * and is not reflected in aq_ret
		 */
		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VLAN filter %d for VF %d, error %d\n",
				vfl->vlan_id[i], vf->vf_id, ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
2383
/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* validate all requested VLAN ids before removing any of them */
	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	/* VF VLAN filtering is not allowed when a port VLAN is configured */
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		/* NOTE(review): i40e_vsi_kill_vlan returns void here, so
		 * num_vlan is decremented even if the VLAN was never
		 * programmed — could drift below the real count; verify.
		 */
		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		vf->num_vlan--;

		/* drop any per-VLAN promiscuous entries for the removed id */
		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
2441
2442/**
2443 * i40e_vc_iwarp_msg
2444 * @vf: pointer to the VF info
2445 * @msg: pointer to the msg buffer
2446 * @msglen: msg length
2447 *
2448 * called from the VF for the iwarp msgs
2449 **/
2450static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2451{
2452        struct i40e_pf *pf = vf->pf;
2453        int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2454        i40e_status aq_ret = 0;
2455
2456        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2457            !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2458                aq_ret = I40E_ERR_PARAM;
2459                goto error_param;
2460        }
2461
2462        i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2463                                     msg, msglen);
2464
2465error_param:
2466        /* send the response to the VF */
2467        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2468                                       aq_ret);
2469}
2470
2471/**
2472 * i40e_vc_iwarp_qvmap_msg
2473 * @vf: pointer to the VF info
2474 * @msg: pointer to the msg buffer
2475 * @msglen: msg length
2476 * @config: config qvmap or release it
2477 *
2478 * called from the VF for the iwarp msgs
2479 **/
2480static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
2481                                   bool config)
2482{
2483        struct virtchnl_iwarp_qvlist_info *qvlist_info =
2484                                (struct virtchnl_iwarp_qvlist_info *)msg;
2485        i40e_status aq_ret = 0;
2486
2487        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2488            !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2489                aq_ret = I40E_ERR_PARAM;
2490                goto error_param;
2491        }
2492
2493        if (config) {
2494                if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2495                        aq_ret = I40E_ERR_PARAM;
2496        } else {
2497                i40e_release_iwarp_qvlist(vf);
2498        }
2499
2500error_param:
2501        /* send the response to the VF */
2502        return i40e_vc_send_resp_to_vf(vf,
2503                               config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2504                               VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2505                               aq_ret);
2506}
2507
2508/**
2509 * i40e_vc_config_rss_key
2510 * @vf: pointer to the VF info
2511 * @msg: pointer to the msg buffer
2512 * @msglen: msg length
2513 *
2514 * Configure the VF's RSS key
2515 **/
2516static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
2517{
2518        struct virtchnl_rss_key *vrk =
2519                (struct virtchnl_rss_key *)msg;
2520        struct i40e_pf *pf = vf->pf;
2521        struct i40e_vsi *vsi = NULL;
2522        u16 vsi_id = vrk->vsi_id;
2523        i40e_status aq_ret = 0;
2524
2525        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2526            !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2527            (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2528                aq_ret = I40E_ERR_PARAM;
2529                goto err;
2530        }
2531
2532        vsi = pf->vsi[vf->lan_vsi_idx];
2533        aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2534err:
2535        /* send the response to the VF */
2536        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
2537                                       aq_ret);
2538}
2539
2540/**
2541 * i40e_vc_config_rss_lut
2542 * @vf: pointer to the VF info
2543 * @msg: pointer to the msg buffer
2544 * @msglen: msg length
2545 *
2546 * Configure the VF's RSS LUT
2547 **/
2548static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
2549{
2550        struct virtchnl_rss_lut *vrl =
2551                (struct virtchnl_rss_lut *)msg;
2552        struct i40e_pf *pf = vf->pf;
2553        struct i40e_vsi *vsi = NULL;
2554        u16 vsi_id = vrl->vsi_id;
2555        i40e_status aq_ret = 0;
2556
2557        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2558            !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2559            (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2560                aq_ret = I40E_ERR_PARAM;
2561                goto err;
2562        }
2563
2564        vsi = pf->vsi[vf->lan_vsi_idx];
2565        aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
2566        /* send the response to the VF */
2567err:
2568        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
2569                                       aq_ret);
2570}
2571
2572/**
2573 * i40e_vc_get_rss_hena
2574 * @vf: pointer to the VF info
2575 * @msg: pointer to the msg buffer
2576 * @msglen: msg length
2577 *
2578 * Return the RSS HENA bits allowed by the hardware
2579 **/
2580static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2581{
2582        struct virtchnl_rss_hena *vrh = NULL;
2583        struct i40e_pf *pf = vf->pf;
2584        i40e_status aq_ret = 0;
2585        int len = 0;
2586
2587        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2588                aq_ret = I40E_ERR_PARAM;
2589                goto err;
2590        }
2591        len = sizeof(struct virtchnl_rss_hena);
2592
2593        vrh = kzalloc(len, GFP_KERNEL);
2594        if (!vrh) {
2595                aq_ret = I40E_ERR_NO_MEMORY;
2596                len = 0;
2597                goto err;
2598        }
2599        vrh->hena = i40e_pf_get_default_rss_hena(pf);
2600err:
2601        /* send the response back to the VF */
2602        aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
2603                                        aq_ret, (u8 *)vrh, len);
2604        kfree(vrh);
2605        return aq_ret;
2606}
2607
2608/**
2609 * i40e_vc_set_rss_hena
2610 * @vf: pointer to the VF info
2611 * @msg: pointer to the msg buffer
2612 * @msglen: msg length
2613 *
2614 * Set the RSS HENA bits for the VF
2615 **/
2616static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2617{
2618        struct virtchnl_rss_hena *vrh =
2619                (struct virtchnl_rss_hena *)msg;
2620        struct i40e_pf *pf = vf->pf;
2621        struct i40e_hw *hw = &pf->hw;
2622        i40e_status aq_ret = 0;
2623
2624        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2625                aq_ret = I40E_ERR_PARAM;
2626                goto err;
2627        }
2628        i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
2629        i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
2630                          (u32)(vrh->hena >> 32));
2631
2632        /* send the response to the VF */
2633err:
2634        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
2635}
2636
2637/**
2638 * i40e_vc_enable_vlan_stripping
2639 * @vf: pointer to the VF info
2640 * @msg: pointer to the msg buffer
2641 * @msglen: msg length
2642 *
2643 * Enable vlan header stripping for the VF
2644 **/
2645static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2646                                         u16 msglen)
2647{
2648        struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2649        i40e_status aq_ret = 0;
2650
2651        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2652                aq_ret = I40E_ERR_PARAM;
2653                goto err;
2654        }
2655
2656        i40e_vlan_stripping_enable(vsi);
2657
2658        /* send the response to the VF */
2659err:
2660        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2661                                       aq_ret);
2662}
2663
2664/**
2665 * i40e_vc_disable_vlan_stripping
2666 * @vf: pointer to the VF info
2667 * @msg: pointer to the msg buffer
2668 * @msglen: msg length
2669 *
2670 * Disable vlan header stripping for the VF
2671 **/
2672static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2673                                          u16 msglen)
2674{
2675        struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2676        i40e_status aq_ret = 0;
2677
2678        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2679                aq_ret = I40E_ERR_PARAM;
2680                goto err;
2681        }
2682
2683        i40e_vlan_stripping_disable(vsi);
2684
2685        /* send the response to the VF */
2686err:
2687        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2688                                       aq_ret);
2689}
2690
2691/**
2692 * i40e_vc_process_vf_msg
2693 * @pf: pointer to the PF structure
2694 * @vf_id: source VF id
2695 * @msg: pointer to the msg buffer
2696 * @msglen: msg length
2697 * @msghndl: msg handle
2698 *
2699 * called from the common aeq/arq handler to
2700 * process request from VF
2701 **/
2702int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
2703                           u32 v_retval, u8 *msg, u16 msglen)
2704{
2705        struct i40e_hw *hw = &pf->hw;
2706        int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
2707        struct i40e_vf *vf;
2708        int ret;
2709
2710        pf->vf_aq_requests++;
2711        if (local_vf_id >= pf->num_alloc_vfs)
2712                return -EINVAL;
2713        vf = &(pf->vf[local_vf_id]);
2714
2715        /* Check if VF is disabled. */
2716        if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
2717                return I40E_ERR_PARAM;
2718
2719        /* perform basic checks on the msg */
2720        ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
2721
2722        /* perform additional checks specific to this driver */
2723        if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
2724                struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
2725
2726                if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
2727                        ret = -EINVAL;
2728        } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
2729                struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2730
2731                if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
2732                        ret = -EINVAL;
2733        }
2734
2735        if (ret) {
2736                i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
2737                dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
2738                        local_vf_id, v_opcode, msglen);
2739                switch (ret) {
2740                case VIRTCHNL_ERR_PARAM:
2741                        return -EPERM;
2742                default:
2743                        return -EINVAL;
2744                }
2745        }
2746
2747        switch (v_opcode) {
2748        case VIRTCHNL_OP_VERSION:
2749                ret = i40e_vc_get_version_msg(vf, msg);
2750                break;
2751        case VIRTCHNL_OP_GET_VF_RESOURCES:
2752                ret = i40e_vc_get_vf_resources_msg(vf, msg);
2753                break;
2754        case VIRTCHNL_OP_RESET_VF:
2755                i40e_vc_reset_vf_msg(vf);
2756                ret = 0;
2757                break;
2758        case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
2759                ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
2760                break;
2761        case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2762                ret = i40e_vc_config_queues_msg(vf, msg, msglen);
2763                break;
2764        case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2765                ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
2766                break;
2767        case VIRTCHNL_OP_ENABLE_QUEUES:
2768                ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
2769                i40e_vc_notify_vf_link_state(vf);
2770                break;
2771        case VIRTCHNL_OP_DISABLE_QUEUES:
2772                ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
2773                break;
2774        case VIRTCHNL_OP_ADD_ETH_ADDR:
2775                ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
2776                break;
2777        case VIRTCHNL_OP_DEL_ETH_ADDR:
2778                ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
2779                break;
2780        case VIRTCHNL_OP_ADD_VLAN:
2781                ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
2782                break;
2783        case VIRTCHNL_OP_DEL_VLAN:
2784                ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
2785                break;
2786        case VIRTCHNL_OP_GET_STATS:
2787                ret = i40e_vc_get_stats_msg(vf, msg, msglen);
2788                break;
2789        case VIRTCHNL_OP_IWARP:
2790                ret = i40e_vc_iwarp_msg(vf, msg, msglen);
2791                break;
2792        case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
2793                ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
2794                break;
2795        case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
2796                ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
2797                break;
2798        case VIRTCHNL_OP_CONFIG_RSS_KEY:
2799                ret = i40e_vc_config_rss_key(vf, msg, msglen);
2800                break;
2801        case VIRTCHNL_OP_CONFIG_RSS_LUT:
2802                ret = i40e_vc_config_rss_lut(vf, msg, msglen);
2803                break;
2804        case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
2805                ret = i40e_vc_get_rss_hena(vf, msg, msglen);
2806                break;
2807        case VIRTCHNL_OP_SET_RSS_HENA:
2808                ret = i40e_vc_set_rss_hena(vf, msg, msglen);
2809                break;
2810        case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2811                ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
2812                break;
2813        case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2814                ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
2815                break;
2816        case VIRTCHNL_OP_REQUEST_QUEUES:
2817                ret = i40e_vc_request_queues_msg(vf, msg, msglen);
2818                break;
2819
2820        case VIRTCHNL_OP_UNKNOWN:
2821        default:
2822                dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
2823                        v_opcode, local_vf_id);
2824                ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
2825                                              I40E_ERR_NOT_IMPLEMENTED);
2826                break;
2827        }
2828
2829        return ret;
2830}
2831
/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the vlfr irq handler to
 * free up VF resources and state variables
 *
 * Scans GLGEN_VFLRSTAT for VFs that went through a Function Level Reset
 * and runs the software reset flow for each. Always returns 0.
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	struct i40e_vf *vf;
	int vf_id;

	/* nothing to do unless the interrupt handler flagged a VFLR */
	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		return 0;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		/* GLGEN_VFLRSTAT is an array of 32-bit registers indexed by
		 * the device-absolute VF number, so split that number into
		 * a register index and a bit position.
		 */
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
			i40e_reset_vf(vf, true);
	}

	return 0;
}
2873
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 *
 * Returns 0 on success, -EINVAL on a bad VF id or multicast address,
 * -EAGAIN while the VF is still resetting, -EIO on a sync failure.
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;
	u8 i;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds,
	 * but wait for up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		/* INIT set means the VF has finished its reset sequence */
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			break;
		msleep(20);
	}
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	/* multicast addresses are never valid as a unicast station address;
	 * an all-zero mac is allowed and means "remove the admin mac"
	 */
	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because below invoked function add/del_filter requires
	 * mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	/* pf_set_mac records whether the admin pinned a MAC, which blocks
	 * the VF from changing it on its own later
	 */
	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF driver stop so it has to reload with new MAC address */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");

error_param:
	return ret;
}
2969
2970/**
2971 * i40e_vsi_has_vlans - True if VSI has configured VLANs
2972 * @vsi: pointer to the vsi
2973 *
2974 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
2975 * we have no configured VLANs. Do not call while holding the
2976 * mac_filter_hash_lock.
2977 */
2978static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
2979{
2980        bool have_vlans;
2981
2982        /* If we have a port VLAN, then the VSI cannot have any VLANs
2983         * configured, as all MAC/VLAN filters will be assigned to the PVID.
2984         */
2985        if (vsi->info.pvid)
2986                return false;
2987
2988        /* Since we don't have a PVID, we know that if the device is in VLAN
2989         * mode it must be because of a VLAN filter configured on this VSI.
2990         */
2991        spin_lock_bh(&vsi->mac_filter_hash_lock);
2992        have_vlans = i40e_is_vsi_in_vlan(vsi);
2993        spin_unlock_bh(&vsi->mac_filter_hash_lock);
2994
2995        return have_vlans;
2996}
2997
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN identifier
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 *
 * Returns 0 on success or a negative errno; a vlan_id/qos of 0/0 removes
 * any existing port VLAN.
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	/* VLAN IDs are 12 bits, priority is 3 bits */
	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	/* only 802.1Q port VLANs are supported by this hardware path */
	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	if (i40e_vsi_has_vlans(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator Error - knock the VF offline until he does
		 * the right thing by reconfiguring his network correctly
		 * and then reloading the VF driver.
		 */
		i40e_vc_disable_vf(vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}

	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	    vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	/* drop the lock across the VSI context update, then retake it for
	 * the filter migration below
	 */
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}
3137
3138#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */
3139#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */
3140/**
3141 * i40e_ndo_set_vf_bw
3142 * @netdev: network interface device structure
3143 * @vf_id: VF identifier
3144 * @tx_rate: Tx rate
3145 *
3146 * configure VF Tx rate
3147 **/
3148int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
3149                       int max_tx_rate)
3150{
3151        struct i40e_netdev_priv *np = netdev_priv(netdev);
3152        struct i40e_pf *pf = np->vsi->back;
3153        struct i40e_vsi *vsi;
3154        struct i40e_vf *vf;
3155        int speed = 0;
3156        int ret = 0;
3157
3158        /* validate the request */
3159        if (vf_id >= pf->num_alloc_vfs) {
3160                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
3161                ret = -EINVAL;
3162                goto error;
3163        }
3164
3165        if (min_tx_rate) {
3166                dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
3167                        min_tx_rate, vf_id);
3168                return -EINVAL;
3169        }
3170
3171        vf = &(pf->vf[vf_id]);
3172        vsi = pf->vsi[vf->lan_vsi_idx];
3173        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3174                dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3175                        vf_id);
3176                ret = -EAGAIN;
3177                goto error;
3178        }
3179
3180        switch (pf->hw.phy.link_info.link_speed) {
3181        case I40E_LINK_SPEED_40GB:
3182                speed = 40000;
3183                break;
3184        case I40E_LINK_SPEED_25GB:
3185                speed = 25000;
3186                break;
3187        case I40E_LINK_SPEED_20GB:
3188                speed = 20000;
3189                break;
3190        case I40E_LINK_SPEED_10GB:
3191                speed = 10000;
3192                break;
3193        case I40E_LINK_SPEED_1GB:
3194                speed = 1000;
3195                break;
3196        default:
3197                break;
3198        }
3199
3200        if (max_tx_rate > speed) {
3201                dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
3202                        max_tx_rate, vf->vf_id);
3203                ret = -EINVAL;
3204                goto error;
3205        }
3206
3207        if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
3208                dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
3209                max_tx_rate = 50;
3210        }
3211
3212        /* Tx rate credits are in values of 50Mbps, 0 is disabled*/
3213        ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
3214                                          max_tx_rate / I40E_BW_CREDIT_DIVISOR,
3215                                          I40E_MAX_BW_INACTIVE_ACCUM, NULL);
3216        if (ret) {
3217                dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
3218                        ret);
3219                ret = -EIO;
3220                goto error;
3221        }
3222        vf->tx_rate = max_tx_rate;
3223error:
3224        return ret;
3225}
3226
3227/**
3228 * i40e_ndo_get_vf_config
3229 * @netdev: network interface device structure
3230 * @vf_id: VF identifier
3231 * @ivi: VF configuration structure
3232 *
3233 * return VF configuration
3234 **/
3235int i40e_ndo_get_vf_config(struct net_device *netdev,
3236                           int vf_id, struct ifla_vf_info *ivi)
3237{
3238        struct i40e_netdev_priv *np = netdev_priv(netdev);
3239        struct i40e_vsi *vsi = np->vsi;
3240        struct i40e_pf *pf = vsi->back;
3241        struct i40e_vf *vf;
3242        int ret = 0;
3243
3244        /* validate the request */
3245        if (vf_id >= pf->num_alloc_vfs) {
3246                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3247                ret = -EINVAL;
3248                goto error_param;
3249        }
3250
3251        vf = &(pf->vf[vf_id]);
3252        /* first vsi is always the LAN vsi */
3253        vsi = pf->vsi[vf->lan_vsi_idx];
3254        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3255                dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3256                        vf_id);
3257                ret = -EAGAIN;
3258                goto error_param;
3259        }
3260
3261        ivi->vf = vf_id;
3262
3263        ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
3264
3265        ivi->max_tx_rate = vf->tx_rate;
3266        ivi->min_tx_rate = 0;
3267        ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
3268        ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
3269                   I40E_VLAN_PRIORITY_SHIFT;
3270        if (vf->link_forced == false)
3271                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3272        else if (vf->link_up == true)
3273                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3274        else
3275                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3276        ivi->spoofchk = vf->spoofchk;
3277        ivi->trusted = vf->trusted;
3278        ret = 0;
3279
3280error_param:
3281        return ret;
3282}
3283
3284/**
3285 * i40e_ndo_set_vf_link_state
3286 * @netdev: network interface device structure
3287 * @vf_id: VF identifier
3288 * @link: required link state
3289 *
3290 * Set the link state of a specified VF, regardless of physical link state
3291 **/
3292int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
3293{
3294        struct i40e_netdev_priv *np = netdev_priv(netdev);
3295        struct i40e_pf *pf = np->vsi->back;
3296        struct virtchnl_pf_event pfe;
3297        struct i40e_hw *hw = &pf->hw;
3298        struct i40e_vf *vf;
3299        int abs_vf_id;
3300        int ret = 0;
3301
3302        /* validate the request */
3303        if (vf_id >= pf->num_alloc_vfs) {
3304                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3305                ret = -EINVAL;
3306                goto error_out;
3307        }
3308
3309        vf = &pf->vf[vf_id];
3310        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
3311
3312        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
3313        pfe.severity = PF_EVENT_SEVERITY_INFO;
3314
3315        switch (link) {
3316        case IFLA_VF_LINK_STATE_AUTO:
3317                vf->link_forced = false;
3318                pfe.event_data.link_event.link_status =
3319                        pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
3320                pfe.event_data.link_event.link_speed =
3321                        (enum virtchnl_link_speed)
3322                        pf->hw.phy.link_info.link_speed;
3323                break;
3324        case IFLA_VF_LINK_STATE_ENABLE:
3325                vf->link_forced = true;
3326                vf->link_up = true;
3327                pfe.event_data.link_event.link_status = true;
3328                pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
3329                break;
3330        case IFLA_VF_LINK_STATE_DISABLE:
3331                vf->link_forced = true;
3332                vf->link_up = false;
3333                pfe.event_data.link_event.link_status = false;
3334                pfe.event_data.link_event.link_speed = 0;
3335                break;
3336        default:
3337                ret = -EINVAL;
3338                goto error_out;
3339        }
3340        /* Notify the VF of its new link state */
3341        i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
3342                               0, (u8 *)&pfe, sizeof(pfe), NULL);
3343
3344error_out:
3345        return ret;
3346}
3347
3348/**
3349 * i40e_ndo_set_vf_spoofchk
3350 * @netdev: network interface device structure
3351 * @vf_id: VF identifier
3352 * @enable: flag to enable or disable feature
3353 *
3354 * Enable or disable VF spoof checking
3355 **/
3356int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
3357{
3358        struct i40e_netdev_priv *np = netdev_priv(netdev);
3359        struct i40e_vsi *vsi = np->vsi;
3360        struct i40e_pf *pf = vsi->back;
3361        struct i40e_vsi_context ctxt;
3362        struct i40e_hw *hw = &pf->hw;
3363        struct i40e_vf *vf;
3364        int ret = 0;
3365
3366        /* validate the request */
3367        if (vf_id >= pf->num_alloc_vfs) {
3368                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3369                ret = -EINVAL;
3370                goto out;
3371        }
3372
3373        vf = &(pf->vf[vf_id]);
3374        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3375                dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3376                        vf_id);
3377                ret = -EAGAIN;
3378                goto out;
3379        }
3380
3381        if (enable == vf->spoofchk)
3382                goto out;
3383
3384        vf->spoofchk = enable;
3385        memset(&ctxt, 0, sizeof(ctxt));
3386        ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
3387        ctxt.pf_num = pf->hw.pf_id;
3388        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
3389        if (enable)
3390                ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
3391                                        I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
3392        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3393        if (ret) {
3394                dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
3395                        ret);
3396                ret = -EIO;
3397        }
3398out:
3399        return ret;
3400}
3401
3402/**
3403 * i40e_ndo_set_vf_trust
3404 * @netdev: network interface device structure of the pf
3405 * @vf_id: VF identifier
3406 * @setting: trust setting
3407 *
3408 * Enable or disable VF trust setting
3409 **/
3410int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
3411{
3412        struct i40e_netdev_priv *np = netdev_priv(netdev);
3413        struct i40e_pf *pf = np->vsi->back;
3414        struct i40e_vf *vf;
3415        int ret = 0;
3416
3417        /* validate the request */
3418        if (vf_id >= pf->num_alloc_vfs) {
3419                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3420                return -EINVAL;
3421        }
3422
3423        if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3424                dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
3425                return -EINVAL;
3426        }
3427
3428        vf = &pf->vf[vf_id];
3429
3430        if (setting == vf->trusted)
3431                goto out;
3432
3433        vf->trusted = setting;
3434        i40e_vc_disable_vf(vf);
3435        dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
3436                 vf_id, setting ? "" : "un");
3437out:
3438        return ret;
3439}
3440