linux/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
                                 enum virtchnl_ops v_opcode,
                                 i40e_status v_retval, u8 *msg,
                                 u16 msglen)
{
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf = pf->vf;
        int i;

        for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
                int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
                /* Not all VFs are enabled, so skip the ones that are not */
                if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
                    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
                        continue;

                /* Ignore return value on purpose - a given VF may fail, but
                 * we need to keep going and send to all of them
                 */
                i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
                                       msg, msglen, NULL);
        }
}
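
/* For illustration (example values, not from this driver): the AQ addresses
 * VFs by absolute id, i.e. the PF-relative vf_id offset by
 * func_caps.vf_base_id. With a hypothetical vf_base_id of 64, PF-relative
 * VF 3 would be addressed as absolute VF 67.
 */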

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
        struct virtchnl_pf_event pfe;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_link_status *ls = &pf->hw.phy.link_info;
        int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = PF_EVENT_SEVERITY_INFO;

        /* Always report link is down if the VF queues aren't enabled */
        if (!vf->queues_enabled) {
                pfe.event_data.link_event.link_status = false;
                pfe.event_data.link_event.link_speed = 0;
        } else if (vf->link_forced) {
                pfe.event_data.link_event.link_status = vf->link_up;
                pfe.event_data.link_event.link_speed =
                        (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
        } else {
                pfe.event_data.link_event.link_status =
                        ls->link_info & I40E_AQ_LINK_UP;
                pfe.event_data.link_event.link_speed =
                        i40e_virtchnl_link_speed(ls->link_speed);
        }

        i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
                               0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
        int i;

        for (i = 0; i < pf->num_alloc_vfs; i++)
                i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
        struct virtchnl_pf_event pfe;

        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
        i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
                             (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
        struct virtchnl_pf_event pfe;
        int abs_vf_id;

        /* validate the request */
        if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
                return;

        /* verify if the VF is in either init or active before proceeding */
        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
            !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
                return;

        abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
        i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
                               0, (u8 *)&pfe,
                               sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
        int i;

        i40e_vc_notify_vf_reset(vf);

        /* We want to ensure that an actual reset is initiated after this
         * function is called. However, we do not want to wait forever, so
         * we'll give it a reasonable time (20 tries of 10-20 ms each, i.e.
         * 200-400 ms in total) and print a message if we fail to ensure
         * a reset.
         */
        for (i = 0; i < 20; i++) {
                if (i40e_reset_vf(vf, false))
                        return;
                usleep_range(10000, 20000);
        }

        dev_warn(&vf->pf->pdev->dev,
                 "Failed to initiate reset for VF %d after 200 milliseconds\n",
                 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

        return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
                                            u16 qid)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

        return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
        struct i40e_pf *pf = vf->pf;

        return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
                                   u8 vsi_queue_id)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
        u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

        if (!vsi)
                return pf_queue_id;

        if (le16_to_cpu(vsi->info.mapping_flags) &
            I40E_AQ_VSI_QUE_MAP_NONCONTIG)
                pf_queue_id =
                        le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
        else
                pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
                              vsi_queue_id;

        return pf_queue_id;
}
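
/* A sketch of the two mapping modes above, with made-up numbers: in the
 * contiguous case VSI queue N maps to queue_mapping[0] + N (e.g. a
 * hypothetical base of 192 puts VSI queue 3 at PF queue 195); in the
 * noncontiguous case queue_mapping[N] is looked up directly.
 */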

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
        int i;

        if (vf->adq_enabled) {
                /* Although the VF considers all the queues (1 to 16) as its
                 * own, they may actually belong to different VSIs (up to 4).
                 * We need to find out which queues belong to which VSI.
                 */
                for (i = 0; i < vf->num_tc; i++) {
                        if (queue_id < vf->ch[i].num_qps) {
                                vsi_id = vf->ch[i].vsi_id;
                                break;
                        }
                        /* find the right queue id, which is relative to a
                         * given VSI.
                         */
                        queue_id -= vf->ch[i].num_qps;
                }
        }

        return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}
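
/* Worked example for the ADq translation above (hypothetical values): with
 * two TCs of 4 queue pairs each, a VF-relative queue_id of 5 is past ch[0]
 * (5 >= 4), so it becomes queue 1 of ch[1]'s VSI before the PF queue id
 * lookup.
 */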

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
                                      struct virtchnl_vector_map *vecmap)
{
        unsigned long linklistmap = 0, tempmap;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        u16 vsi_queue_id, pf_queue_id;
        enum i40e_queue_type qtype;
        u16 next_q, vector_id, size;
        u32 reg, reg_idx;
        u16 itr_idx = 0;

        vector_id = vecmap->vector_id;
        /* setup the head */
        if (vector_id == 0)
                reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
        else
                reg_idx = I40E_VPINT_LNKLSTN(
                     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
                     (vector_id - 1));

        if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
                /* Special case - No queues mapped on this vector */
                wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
                goto irq_list_done;
        }
        tempmap = vecmap->rxq_map;
        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
                linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
                                    vsi_queue_id));
        }

        tempmap = vecmap->txq_map;
        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
                linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
                                    vsi_queue_id + 1));
        }

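        /* linklistmap interleaves both queue types per VSI queue: assuming
         * I40E_VIRTCHNL_SUPPORTED_QTYPES is 2 (RX and TX), bit 2*q stands
         * for RX queue q and bit 2*q + 1 for TX queue q, so walking the set
         * bits below yields the RX/TX chain in queue order.
         */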
        size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
        next_q = find_first_bit(&linklistmap, size);
        if (unlikely(next_q == size))
                goto irq_list_done;

        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
        pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
        reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

        wr32(hw, reg_idx, reg);

        while (next_q < size) {
                switch (qtype) {
                case I40E_QUEUE_TYPE_RX:
                        reg_idx = I40E_QINT_RQCTL(pf_queue_id);
                        itr_idx = vecmap->rxitr_idx;
                        break;
                case I40E_QUEUE_TYPE_TX:
                        reg_idx = I40E_QINT_TQCTL(pf_queue_id);
                        itr_idx = vecmap->txitr_idx;
                        break;
                default:
                        break;
                }

                next_q = find_next_bit(&linklistmap, size, next_q + 1);
                if (next_q < size) {
                        vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
                        qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
                        pf_queue_id = i40e_get_real_pf_qid(vf,
                                                           vsi_id,
                                                           vsi_queue_id);
                } else {
                        pf_queue_id = I40E_QUEUE_END_OF_LIST;
                        qtype = 0;
                }

                /* the format for the RQCTL & TQCTL regs is the same */
                reg = (vector_id) |
                    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
                    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
                    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
                wr32(hw, reg_idx, reg);
        }

        /* If the VF is running in polling mode and using interrupt zero,
         * we need to disable auto-mask on enabling zero interrupt for VFs.
         */
        if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
            (vector_id == 0)) {
                reg = rd32(hw, I40E_GLINT_CTL);
                if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
                        reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
                        wr32(hw, I40E_GLINT_CTL, reg);
                }
        }

irq_list_done:
        i40e_flush(hw);
}

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
        u32 msix_vf;
        u32 i;

        if (!vf->qvlist_info)
                return;

        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
        for (i = 0; i < qvlist_info->num_vectors; i++) {
                struct virtchnl_iwarp_qv_info *qv_info;
                u32 next_q_index, next_q_type;
                struct i40e_hw *hw = &pf->hw;
                u32 v_idx, reg_idx, reg;

                qv_info = &qvlist_info->qv_info[i];
                if (!qv_info)
                        continue;
                v_idx = qv_info->v_idx;
                if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
                        /* Figure out the queue after CEQ and make that the
                         * first queue.
                         */
                        reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
                        reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
                        next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
                                        >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
                        next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
                                        >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

                        reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
                        reg = (next_q_index &
                               I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
                               (next_q_type <<
                               I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

                        wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
                }
        }
        kfree(vf->qvlist_info);
        vf->qvlist_info = NULL;
}
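
/* On the indexing above (a reading of the register layout, assuming each
 * VF owns msix_vf - 1 LNKLSTN entries since vector 0 uses the separate
 * LNKLST0 register): the flat index for VF vector v_idx is
 * (msix_vf - 1) * vf_id + (v_idx - 1); e.g. with a hypothetical msix_vf of
 * 5, vector 1 of VF 2 maps to LNKLSTN entry 8.
 */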

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
                                    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct virtchnl_iwarp_qv_info *qv_info;
        u32 v_idx, i, reg_idx, reg;
        u32 next_q_idx, next_q_type;
        u32 msix_vf;
        int ret = 0;

        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

        if (qvlist_info->num_vectors > msix_vf) {
                dev_warn(&pf->pdev->dev,
                         "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
                         qvlist_info->num_vectors,
                         msix_vf);
                ret = -EINVAL;
                goto err_out;
        }

        kfree(vf->qvlist_info);
        vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
                                              qvlist_info->num_vectors - 1),
                                  GFP_KERNEL);
        if (!vf->qvlist_info) {
                ret = -ENOMEM;
                goto err_out;
        }
        vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
        for (i = 0; i < qvlist_info->num_vectors; i++) {
                qv_info = &qvlist_info->qv_info[i];
                if (!qv_info)
                        continue;

                /* Validate vector id belongs to this vf */
                if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
                        ret = -EINVAL;
                        goto err_free;
                }

                v_idx = qv_info->v_idx;

                vf->qvlist_info->qv_info[i] = *qv_info;

                reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
                /* We might be sharing the interrupt, so get the first queue
                 * index and type, push it down the list by adding the new
                 * queue on top. Also link it with the new queue in CEQCTL.
                 */
                reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
                next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
                                I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
                next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
                                I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

                if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
                        reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
                        reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
                        (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
                        (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
                        (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
                        (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
                        wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

                        reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
                        reg = (qv_info->ceq_idx &
                               I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
                               (I40E_QUEUE_TYPE_PE_CEQ <<
                               I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
                        wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
                }

                if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
                        reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
                        (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
                        (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

                        wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
                }
        }

        return 0;
err_free:
        kfree(vf->qvlist_info);
        vf->qvlist_info = NULL;
err_out:
        return ret;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
                                    u16 vsi_queue_id,
                                    struct virtchnl_txq_info *info)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hmc_obj_txq tx_ctx;
        struct i40e_vsi *vsi;
        u16 pf_queue_id;
        u32 qtx_ctl;
        int ret = 0;

        if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
                ret = -ENOENT;
                goto error_context;
        }
        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
        vsi = i40e_find_vsi_from_id(pf, vsi_id);
        if (!vsi) {
                ret = -ENOENT;
                goto error_context;
        }

        /* clear the context structure first */
        memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

        /* only set the required fields */
        tx_ctx.base = info->dma_ring_addr / 128;
        tx_ctx.qlen = info->ring_len;
        tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
        tx_ctx.rdylist_act = 0;
        tx_ctx.head_wb_ena = info->headwb_enabled;
        tx_ctx.head_wb_addr = info->dma_headwb_addr;

        /* clear the context in the HMC */
        ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "Failed to clear VF LAN Tx queue context %d, error: %d\n",
                        pf_queue_id, ret);
                ret = -ENOENT;
                goto error_context;
        }

        /* set the context in the HMC */
        ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "Failed to set VF LAN Tx queue context %d, error: %d\n",
                        pf_queue_id, ret);
                ret = -ENOENT;
                goto error_context;
        }

        /* associate this queue with the PCI VF function */
        qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
        qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
                    & I40E_QTX_CTL_PF_INDX_MASK);
        qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
                     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
                    & I40E_QTX_CTL_VFVM_INDX_MASK);
        wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
        i40e_flush(hw);

error_context:
        return ret;
}
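
/* Note on tx_ctx.base above: the HMC context stores the ring base in
 * 128-byte units, hence the division by 128. As an illustration with a
 * made-up address, a ring at DMA address 0x2000 is programmed as base 0x40.
 */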

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
                                    u16 vsi_queue_id,
                                    struct virtchnl_rxq_info *info)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_hmc_obj_rxq rx_ctx;
        u16 pf_queue_id;
        int ret = 0;

        pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

        /* clear the context structure first */
        memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

        /* only set the required fields */
        rx_ctx.base = info->dma_ring_addr / 128;
        rx_ctx.qlen = info->ring_len;

        if (info->splithdr_enabled) {
                rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
                                  I40E_RX_SPLIT_IP      |
                                  I40E_RX_SPLIT_TCP_UDP |
                                  I40E_RX_SPLIT_SCTP;
                /* header length validation */
                if (info->hdr_size > ((2 * 1024) - 64)) {
                        ret = -EINVAL;
                        goto error_param;
                }
                rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

                /* set split mode 10b */
                rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
        }

        /* databuffer length validation */
        if (info->databuffer_size > ((16 * 1024) - 128)) {
                ret = -EINVAL;
                goto error_param;
        }
        rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

        /* max pkt. length validation */
        if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
                ret = -EINVAL;
                goto error_param;
        }
        rx_ctx.rxmax = info->max_pkt_size;

        /* always enable 32-byte descriptors */
        rx_ctx.dsize = 1;

        /* default values */
        rx_ctx.lrxqthresh = 1;
        rx_ctx.crcstrip = 1;
        rx_ctx.prefena = 1;
        rx_ctx.l2tsel = 1;

        /* clear the context in the HMC */
        ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "Failed to clear VF LAN Rx queue context %d, error: %d\n",
                        pf_queue_id, ret);
                ret = -ENOENT;
                goto error_param;
        }

        /* set the context in the HMC */
        ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
        if (ret) {
                dev_err(&pf->pdev->dev,
                        "Failed to set VF LAN Rx queue context %d, error: %d\n",
                        pf_queue_id, ret);
                ret = -ENOENT;
                goto error_param;
        }

error_param:
        return ret;
}
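
/* Note on the shifts above: they convert byte sizes into the units the Rx
 * queue context expects; assuming the driver's DBUFF shift of 7 and HBUFF
 * shift of 6, data buffers are programmed in 128-byte units and header
 * buffers in 64-byte units (e.g. a 2048-byte data buffer becomes 16).
 */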

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
        struct i40e_mac_filter *f = NULL;
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi;
        u64 max_tx_rate = 0;
        int ret = 0;

        vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
                             vf->vf_id);

        if (!vsi) {
                dev_err(&pf->pdev->dev,
                        "add vsi failed for VF %d, aq_err %d\n",
                        vf->vf_id, pf->hw.aq.asq_last_status);
                ret = -ENOENT;
                goto error_alloc_vsi_res;
        }

        if (!idx) {
                u64 hena = i40e_pf_get_default_rss_hena(pf);
                u8 broadcast[ETH_ALEN];

                vf->lan_vsi_idx = vsi->idx;
                vf->lan_vsi_id = vsi->id;
                /* If the port VLAN has been configured and then the
                 * VF driver was removed then the VSI port VLAN
                 * configuration was destroyed.  Check if there is
                 * a port VLAN and restore the VSI configuration if
                 * needed.
                 */
                if (vf->port_vlan_id)
                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

                spin_lock_bh(&vsi->mac_filter_hash_lock);
                if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
                        f = i40e_add_mac_filter(vsi,
                                                vf->default_lan_addr.addr);
                        if (!f)
                                dev_info(&pf->pdev->dev,
                                         "Could not add MAC filter %pM for VF %d\n",
                                         vf->default_lan_addr.addr, vf->vf_id);
                }
                eth_broadcast_addr(broadcast);
                f = i40e_add_mac_filter(vsi, broadcast);
                if (!f)
                        dev_info(&pf->pdev->dev,
                                 "Could not allocate VF broadcast filter\n");
                spin_unlock_bh(&vsi->mac_filter_hash_lock);
                wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
                wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
                /* program the MAC filter only for the VF VSI */
                ret = i40e_sync_vsi_filters(vsi);
                if (ret)
                        dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
        }

        /* store the VSI index and id for ADq; don't apply the MAC filter */
        if (vf->adq_enabled) {
                vf->ch[idx].vsi_idx = vsi->idx;
                vf->ch[idx].vsi_id = vsi->id;
        }

        /* Set VF bandwidth if specified */
        if (vf->tx_rate)
                max_tx_rate = vf->tx_rate;
        else if (vf->ch[idx].max_tx_rate)
                max_tx_rate = vf->ch[idx].max_tx_rate;

        if (max_tx_rate) {
                max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
                ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
                                                  max_tx_rate, 0, NULL);
                if (ret)
                        dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
                                vf->vf_id, ret);
        }

error_alloc_vsi_res:
        return ret;
}
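
/* Note on the rate conversion above: i40e_aq_config_vsi_bw_limit() takes
 * bandwidth credits, and assuming I40E_BW_CREDIT_DIVISOR is 50 (i.e.
 * 50 Mbps per credit), a requested tx_rate of 300 Mbps is programmed as
 * 6 credits by the div_u64() call.
 */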

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues
 * to the VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        u32 reg, num_tc = 1; /* VF has at least one traffic class */
        u16 vsi_id, qps;
        int i, j;

        if (vf->adq_enabled)
                num_tc = vf->num_tc;

        for (i = 0; i < num_tc; i++) {
                if (vf->adq_enabled) {
                        qps = vf->ch[i].num_qps;
                        vsi_id = vf->ch[i].vsi_id;
                } else {
                        qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
                        vsi_id = vf->lan_vsi_id;
                }

                for (j = 0; j < 7; j++) {
                        if (j * 2 >= qps) {
                                /* end of list */
                                reg = 0x07FF07FF;
                        } else {
                                u16 qid = i40e_vc_get_pf_queue_id(vf,
                                                                  vsi_id,
                                                                  j * 2);
                                reg = qid;
                                qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
                                                              (j * 2) + 1);
                                reg |= qid << 16;
                        }
                        i40e_write_rx_ctl(hw,
                                          I40E_VSILAN_QTABLE(j, vsi_id),
                                          reg);
                }
        }
}
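
/* Note on the register packing above: each 32-bit VSILAN_QTABLE entry
 * holds two PF queue ids (low and high halves), which is why the loop
 * programs queues j * 2 and j * 2 + 1 together; 0x07FF07FF is two
 * end-of-list markers (I40E_QUEUE_END_OF_LIST, 0x7FF) packed the same way.
 */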

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes the
 * VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        u32 reg, total_qps = 0;
        u32 qps, num_tc = 1; /* VF has at least one traffic class */
        u16 vsi_id, qid;
        int i, j;

        if (vf->adq_enabled)
                num_tc = vf->num_tc;

        for (i = 0; i < num_tc; i++) {
                if (vf->adq_enabled) {
                        qps = vf->ch[i].num_qps;
                        vsi_id = vf->ch[i].vsi_id;
                } else {
                        qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
                        vsi_id = vf->lan_vsi_id;
                }

                for (j = 0; j < qps; j++) {
                        qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

                        reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
                        wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
                             reg);
                        total_qps++;
                }
        }
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        u32 reg;

        /* Tell the hardware we're using noncontiguous mapping. HW requires
         * that VF queues be mapped using this method, even when they are
         * contiguous in real life
         */
        i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
                          I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

        /* enable VF vplan_qtable mappings */
        reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

        i40e_map_pf_to_vf_queues(vf);
        i40e_map_pf_queues_to_vsi(vf);

        i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        int i;

        /* disable qp mappings */
        wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
        for (i = 0; i < I40E_MAX_VSI_QP; i++)
                wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
                     I40E_QUEUE_END_OF_LIST);
        i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        u32 reg_idx, reg;
        int i, j, msix_vf;

        /* Start by disabling VF's configuration API to prevent the OS from
         * accessing the VF's VSI after it's freed / invalidated.
         */
        clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

        /* It's possible the VF had requested more queues than the default so
         * do the accounting here when we're about to free them.
         */
        if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
                pf->queues_left += vf->num_queue_pairs -
                                   I40E_DEFAULT_QUEUES_PER_VF;
        }

        /* free vsi & disconnect it from the parent uplink */
        if (vf->lan_vsi_idx) {
                i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
                vf->lan_vsi_idx = 0;
                vf->lan_vsi_id = 0;
        }

        /* do the accounting and remove additional ADq VSIs */
        if (vf->adq_enabled && vf->ch[0].vsi_idx) {
                for (j = 0; j < vf->num_tc; j++) {
                        /* At this point VSI0 is already released, so don't
                         * release it again; only clear the values in the
                         * structure variables.
                         */
                        if (j)
                                i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
                        vf->ch[j].vsi_idx = 0;
                        vf->ch[j].vsi_id = 0;
                }
        }
        msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

        /* disable interrupts so the VF starts in a known state */
        for (i = 0; i < msix_vf; i++) {
                /* format is same for both registers */
                if (i == 0)
                        reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
                else
                        reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
                                                      (vf->vf_id))
                                                     + (i - 1));
                wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
                i40e_flush(hw);
        }

        /* clear the irq settings */
        for (i = 0; i < msix_vf; i++) {
                /* format is same for both registers */
                if (i == 0)
                        reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
                else
                        reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
                                                      (vf->vf_id))
                                                     + (i - 1));
                reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
                       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
                wr32(hw, reg_idx, reg);
                i40e_flush(hw);
        }
        /* reset some of the state variables keeping track of the resources */
        vf->num_queue_pairs = 0;
        clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
        clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        int total_queue_pairs = 0;
        int ret, idx;

        if (vf->num_req_queues &&
            vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
                pf->num_vf_qps = vf->num_req_queues;
        else
                pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

        /* allocate hw vsi context & associated resources */
        ret = i40e_alloc_vsi_res(vf, 0);
        if (ret)
                goto error_alloc;
        total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

        /* allocate additional VSIs based on tc information for ADq */
        if (vf->adq_enabled) {
                if (pf->queues_left >=
                    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
                        /* TC 0 always belongs to VF VSI */
                        for (idx = 1; idx < vf->num_tc; idx++) {
                                ret = i40e_alloc_vsi_res(vf, idx);
                                if (ret)
                                        goto error_alloc;
                        }
                        /* send correct number of queues */
                        total_queue_pairs = I40E_MAX_VF_QUEUES;
                } else {
                        dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
                                 vf->vf_id);
                        vf->adq_enabled = false;
                }
        }

        /* We account for each VF to get a default number of queue pairs.  If
         * the VF has now requested more, we need to account for that to make
         * certain we never request more queues than we actually have left in
         * HW.
         */
        if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
                pf->queues_left -=
                        total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

        if (vf->trusted)
                set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
        else
                clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

        /* store the total qps number for the runtime
         * VF req validation
         */
        vf->num_queue_pairs = total_queue_pairs;

        /* VF is now completely initialized */
        set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
        if (ret)
                i40e_free_vf_res(vf);

        return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        int vf_abs_id, i;
        u32 reg;

        vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

        wr32(hw, I40E_PF_PCI_CIAA,
             VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
        for (i = 0; i < 100; i++) {
                reg = rd32(hw, I40E_PF_PCI_CIAD);
                if ((reg & VF_TRANS_PENDING_MASK) == 0)
                        return 0;
                udelay(1);
        }
        return -EIO;
}
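
/* Note on the polling above: VF_TRANS_PENDING_MASK (0x20) corresponds to
 * the Transactions Pending bit of the PCIe Device Status register, read
 * back through the CIAA/CIAD indirect interface, and the loop gives
 * pending transactions roughly 100 usecs (100 x udelay(1)) to drain.
 */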

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
        struct i40e_mac_filter *f;
        u16 num_vlans = 0, bkt;

        hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
                if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
                        num_vlans++;
        }

        return num_vlans;
}

/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but must be freed by the caller.
 *
 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
 **/
static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
                                    s16 **vlan_list)
{
        struct i40e_mac_filter *f;
        int i = 0;
        int bkt;

        spin_lock_bh(&vsi->mac_filter_hash_lock);
        *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
        *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
        if (!(*vlan_list))
                goto err;

        hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
                if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
                        continue;
                (*vlan_list)[i++] = f->vlan;
        }
err:
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
}

/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *                for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *                  for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/
static i40e_status
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
                     bool unicast_enable, s16 *vl, u16 num_vlans)
{
        i40e_status aq_ret, aq_tmp = 0;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        int i;

        /* No VLAN to set promisc on, set on VSI */
        if (!num_vlans || !vl) {
                aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
                                                               multi_enable,
                                                               NULL);
                if (aq_ret) {
                        int aq_err = pf->hw.aq.asq_last_status;

                        dev_err(&pf->pdev->dev,
                                "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
                                vf->vf_id,
                                i40e_stat_str(&pf->hw, aq_ret),
                                i40e_aq_str(&pf->hw, aq_err));

                        return aq_ret;
                }

                aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
                                                             unicast_enable,
                                                             NULL, true);

                if (aq_ret) {
                        int aq_err = pf->hw.aq.asq_last_status;

                        dev_err(&pf->pdev->dev,
                                "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
                                vf->vf_id,
                                i40e_stat_str(&pf->hw, aq_ret),
                                i40e_aq_str(&pf->hw, aq_err));
                }

                return aq_ret;
        }

        for (i = 0; i < num_vlans; i++) {
                aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
                                                            multi_enable,
                                                            vl[i], NULL);
                if (aq_ret) {
                        int aq_err = pf->hw.aq.asq_last_status;

                        dev_err(&pf->pdev->dev,
                                "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
                                vf->vf_id,
                                i40e_stat_str(&pf->hw, aq_ret),
                                i40e_aq_str(&pf->hw, aq_err));

                        if (!aq_tmp)
                                aq_tmp = aq_ret;
                }

                aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
                                                            unicast_enable,
                                                            vl[i], NULL);
                if (aq_ret) {
                        int aq_err = pf->hw.aq.asq_last_status;

                        dev_err(&pf->pdev->dev,
                                "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
                                vf->vf_id,
                                i40e_stat_str(&pf->hw, aq_ret),
                                i40e_aq_str(&pf->hw, aq_err));

                        if (!aq_tmp)
                                aq_tmp = aq_ret;
                }
        }

        if (aq_tmp)
                aq_ret = aq_tmp;

        return aq_ret;
}

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF VSIs and from the VF reset path to reset promiscuous mode.
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
                                                   u16 vsi_id,
                                                   bool allmulti,
                                                   bool alluni)
{
        i40e_status aq_ret = I40E_SUCCESS;
        struct i40e_pf *pf = vf->pf;
        struct i40e_vsi *vsi;
        u16 num_vlans;
        s16 *vl;

        vsi = i40e_find_vsi_from_id(pf, vsi_id);
        if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
                return I40E_ERR_PARAM;

        if (vf->port_vlan_id) {
                aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
                                              alluni, &vf->port_vlan_id, 1);
                return aq_ret;
        } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
                i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);

                if (!vl)
                        return I40E_ERR_NO_MEMORY;

                aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
                                              vl, num_vlans);
                kfree(vl);
                return aq_ret;
        }

        /* no VLANs to set on, set on VSI */
        aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
                                      NULL, 0);
        return aq_ret;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        u32 reg, reg_idx, bit_idx;

        /* warn the VF */
        clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

        /* Disable VF's configuration API during reset. The flag is re-enabled
         * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
         * It's normally disabled in i40e_free_vf_res(), but it's safer
         * to do it earlier to give any VF config functions that may still
         * be running at this point some time to finish.
         */
        clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

        /* In the case of a VFLR, the HW has already reset the VF and we
         * just need to clean up, so don't hit the VFRTRIG register.
         */
        if (!flr) {
                /* reset VF using VPGEN_VFRTRIG reg */
                reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
                reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
                wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
                i40e_flush(hw);
        }
        /* clear the VFLR bit in GLGEN_VFLRSTAT */
        reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
        bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
        i40e_flush(hw);

        if (i40e_quiesce_vf_pci(vf))
                dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
                        vf->vf_id);
}
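
/* Note on the VFLR bookkeeping above: GLGEN_VFLRSTAT is a bank of 32-bit
 * registers indexed by absolute VF id, hence the / 32 and % 32. For
 * example, a hypothetical absolute VF id of 70 clears bit 6 of VFLRSTAT
 * register 2.
 */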

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
        u32 reg;

        /* disable promisc modes in case they were enabled */
        i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

        /* free VF resources to begin resetting the VSI state */
        i40e_free_vf_res(vf);

        /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
         * By doing this we allow HW to access VF memory at any point. If we
         * did it any sooner, HW could access memory while it was being freed
         * in i40e_free_vf_res(), causing an IOMMU fault.
         *
         * On the other hand, this needs to be done ASAP, because the VF driver
         * is waiting for this to happen and may report a timeout. It's
         * harmless, but it gets logged into Guest OS kernel log, so best avoid
         * it.
         */
        reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
        reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
        wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

        /* reallocate VF resources to finish resetting the VSI state */
        if (!i40e_alloc_vf_res(vf)) {
                int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

                i40e_enable_vf_mappings(vf);
                set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
                clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
                /* Do not notify the client during VF init */
                if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
                                        &vf->vf_states))
                        i40e_notify_client_of_vf_reset(pf, abs_vf_id);
                vf->num_vlan = 0;
        }

        /* Tell the VF driver the reset is done. This needs to be done only
         * after VF has been fully initialized, because the VF driver may
         * request resources immediately after setting this flag.
         */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
1400
1401/**
1402 * i40e_reset_vf
1403 * @vf: pointer to the VF structure
1404 * @flr: VFLR was issued or not
1405 *
1406 * Returns true if the VF is in reset, resets successfully, or resets
1407 * are disabled and false otherwise.
1408 **/
1409bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1410{
1411        struct i40e_pf *pf = vf->pf;
1412        struct i40e_hw *hw = &pf->hw;
1413        bool rsd = false;
1414        u32 reg;
1415        int i;
1416
1417        if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
1418                return true;
1419
1420        /* If the VFs have been disabled, this means something else is
1421         * resetting the VF, so we shouldn't continue.
1422         */
1423        if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1424                return true;
1425
1426        i40e_trigger_vf_reset(vf, flr);
1427
1428        /* poll VPGEN_VFRSTAT reg to make sure
1429         * that reset is complete
1430         */
1431        for (i = 0; i < 10; i++) {
1432                /* VF reset requires driver to first reset the VF and then
1433                 * poll the status register to make sure that the reset
1434                 * completed successfully. Due to internal HW FIFO flushes,
1435                 * we must wait 10ms before the register will be valid.
1436                 */
1437                usleep_range(10000, 20000);
1438                reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1439                if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1440                        rsd = true;
1441                        break;
1442                }
1443        }
1444
1445        if (flr)
1446                usleep_range(10000, 20000);
1447
1448        if (!rsd)
1449                dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1450                        vf->vf_id);
1451        usleep_range(10000, 20000);
1452
1453        /* On initial reset, we don't have any queues to disable */
1454        if (vf->lan_vsi_idx != 0)
1455                i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1456
1457        i40e_cleanup_reset_vf(vf);
1458
1459        i40e_flush(hw);
1460        clear_bit(__I40E_VF_DISABLE, pf->state);
1461
1462        return true;
1463}
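
/* Usage note: callers that want the VF driver to quiesce cleanly notify it
 * before resetting, as i40e_vc_request_queues_msg() later in this file does:
 *
 *	i40e_vc_notify_vf_reset(vf);
 *	i40e_reset_vf(vf, false);
 */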
1464
1465/**
1466 * i40e_reset_all_vfs
1467 * @pf: pointer to the PF structure
1468 * @flr: VFLR was issued or not
1469 *
1470 * Reset all allocated VFs in one go. First, tell the hardware to reset each
1471 * VF, then do all the waiting in one chunk, and finally finish restoring each
1472 * VF after the wait. This is useful during PF routines which need to reset
1473 * all VFs, as otherwise it must perform these resets in a serialized fashion.
1474 *
1475 * Returns true if any VFs were reset, and false otherwise.
1476 **/
1477bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1478{
1479        struct i40e_hw *hw = &pf->hw;
1480        struct i40e_vf *vf;
1481        int i, v;
1482        u32 reg;
1483
1484        /* If we don't have any VFs, then there is nothing to reset */
1485        if (!pf->num_alloc_vfs)
1486                return false;
1487
1488        /* If VFs have been disabled, there is no need to reset */
1489        if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1490                return false;
1491
1492        /* Begin reset on all VFs at once */
1493        for (v = 0; v < pf->num_alloc_vfs; v++)
1494                i40e_trigger_vf_reset(&pf->vf[v], flr);
1495
1496        /* HW requires some time to make sure it can flush the FIFO for a VF
1497         * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1498         * sequence to make sure that it has completed. We'll keep track of
1499         * the VFs using a simple iterator that increments once that VF has
1500         * finished resetting.
1501         */
1502        for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1503                usleep_range(10000, 20000);
1504
1505                /* Check each VF in sequence, beginning with the VF that
1506                 * failed the previous check.
1507                 */
1508                while (v < pf->num_alloc_vfs) {
1509                        vf = &pf->vf[v];
1510                        reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1511                        if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1512                                break;
1513
1514                        /* If the current VF has finished resetting, move on
1515                         * to the next VF in sequence.
1516                         */
1517                        v++;
1518                }
1519        }
1520
1521        if (flr)
1522                usleep_range(10000, 20000);
1523
1524        /* Display a warning if at least one VF didn't manage to reset in
1525         * time, but continue on with the operation.
1526         */
1527        if (v < pf->num_alloc_vfs)
1528                dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1529                        pf->vf[v].vf_id);
1530        usleep_range(10000, 20000);
1531
1532        /* Begin disabling all the rings associated with VFs, but do not wait
1533         * between each VF.
1534         */
1535        for (v = 0; v < pf->num_alloc_vfs; v++) {
1536                /* On initial reset, we don't have any queues to disable */
1537                if (pf->vf[v].lan_vsi_idx == 0)
1538                        continue;
1539
1540                i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1541        }
1542
1543        /* Now that we've notified HW to disable all of the VF rings, wait
1544         * until they finish.
1545         */
1546        for (v = 0; v < pf->num_alloc_vfs; v++) {
1547                /* On initial reset, we don't have any queues to disable */
1548                if (pf->vf[v].lan_vsi_idx == 0)
1549                        continue;
1550
1551                i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1552        }
1553
1554        /* HW may need up to 50ms to finish disabling the RX queues. We
1555         * minimize the wait by delaying only once for all VFs.
1556         */
1557        mdelay(50);
1558
1559        /* Finish the reset on each VF */
1560        for (v = 0; v < pf->num_alloc_vfs; v++)
1561                i40e_cleanup_reset_vf(&pf->vf[v]);
1562
1563        i40e_flush(hw);
1564        clear_bit(__I40E_VF_DISABLE, pf->state);
1565
1566        return true;
1567}
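
/* Rough cost comparison, illustrative numbers only: resetting N VFs one at a
 * time via i40e_reset_vf() pays the 10-20 ms VPGEN_VFRSTAT poll and the
 * ring-disable wait once per VF, while this routine triggers all N resets up
 * front and pays the 50 ms Rx-disable delay exactly once. For 32 VFs that is
 * roughly the difference between 32 * 50 ms and a single 50 ms wait for that
 * step alone.
 */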
1568
1569/**
1570 * i40e_free_vfs
1571 * @pf: pointer to the PF structure
1572 *
1573 * free VF resources
1574 **/
1575void i40e_free_vfs(struct i40e_pf *pf)
1576{
1577        struct i40e_hw *hw = &pf->hw;
1578        u32 reg_idx, bit_idx;
1579        int i, tmp, vf_id;
1580
1581        if (!pf->vf)
1582                return;
1583        while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1584                usleep_range(1000, 2000);
1585
1586        i40e_notify_client_of_vf_enable(pf, 0);
1587
1588        /* Disable IOV before freeing resources. This lets any VF drivers
1589         * running in the host get themselves cleaned up before we yank
1590         * the carpet out from underneath their feet.
1591         */
1592        if (!pci_vfs_assigned(pf->pdev))
1593                pci_disable_sriov(pf->pdev);
1594        else
1595                dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1596
1597        /* Amortize wait time by stopping all VFs at the same time */
1598        for (i = 0; i < pf->num_alloc_vfs; i++) {
1599                if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1600                        continue;
1601
1602                i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1603        }
1604
1605        for (i = 0; i < pf->num_alloc_vfs; i++) {
1606                if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1607                        continue;
1608
1609                i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1610        }
1611
1612        /* free up VF resources */
1613        tmp = pf->num_alloc_vfs;
1614        pf->num_alloc_vfs = 0;
1615        for (i = 0; i < tmp; i++) {
1616                if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1617                        i40e_free_vf_res(&pf->vf[i]);
1618                /* disable qp mappings */
1619                i40e_disable_vf_mappings(&pf->vf[i]);
1620        }
1621
1622        kfree(pf->vf);
1623        pf->vf = NULL;
1624
1625        /* This check is for when the driver is unloaded while VFs are
1626         * assigned. Setting the number of VFs to 0 through sysfs is caught
1627         * before this function ever gets called.
1628         */
1629        if (!pci_vfs_assigned(pf->pdev)) {
1630                /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1631                 * work correctly when SR-IOV gets re-enabled.
1632                 */
1633                for (vf_id = 0; vf_id < tmp; vf_id++) {
1634                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1635                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1636                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1637                }
1638        }
1639        clear_bit(__I40E_VF_DISABLE, pf->state);
1640}
1641
1642#ifdef CONFIG_PCI_IOV
1643/**
1644 * i40e_alloc_vfs
1645 * @pf: pointer to the PF structure
1646 * @num_alloc_vfs: number of VFs to allocate
1647 *
1648 * allocate VF resources
1649 **/
1650int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1651{
1652        struct i40e_vf *vfs;
1653        int i, ret = 0;
1654
1655        /* Disable interrupt 0 so we don't try to handle the VFLR. */
1656        i40e_irq_dynamic_disable_icr0(pf);
1657
1658        /* Check to see if we're just allocating resources for extant VFs */
1659        if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1660                ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1661                if (ret) {
1662                        pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1663                        pf->num_alloc_vfs = 0;
1664                        goto err_iov;
1665                }
1666        }
1667        /* allocate memory */
1668        vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1669        if (!vfs) {
1670                ret = -ENOMEM;
1671                goto err_alloc;
1672        }
1673        pf->vf = vfs;
1674
1675        /* apply default profile */
1676        for (i = 0; i < num_alloc_vfs; i++) {
1677                vfs[i].pf = pf;
1678                vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1679                vfs[i].vf_id = i;
1680
1681                /* assign default capabilities */
1682                set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1683                vfs[i].spoofchk = true;
1684
1685                set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1687        }
1688        pf->num_alloc_vfs = num_alloc_vfs;
1689
1690        /* VF resources get allocated during reset */
1691        i40e_reset_all_vfs(pf, false);
1692
1693        i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1694
1695err_alloc:
1696        if (ret)
1697                i40e_free_vfs(pf);
1698err_iov:
1699        /* Re-enable interrupt 0. */
1700        i40e_irq_dynamic_enable_icr0(pf);
1701        return ret;
1702}
1703
1704#endif
1705/**
1706 * i40e_pci_sriov_enable
1707 * @pdev: pointer to a pci_dev structure
1708 * @num_vfs: number of VFs to allocate
1709 *
1710 * Enable or change the number of VFs
1711 **/
1712static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1713{
1714#ifdef CONFIG_PCI_IOV
1715        struct i40e_pf *pf = pci_get_drvdata(pdev);
1716        int pre_existing_vfs = pci_num_vf(pdev);
1717        int err = 0;
1718
1719        if (test_bit(__I40E_TESTING, pf->state)) {
1720                dev_warn(&pdev->dev,
1721                         "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1722                err = -EPERM;
1723                goto err_out;
1724        }
1725
1726        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1727                i40e_free_vfs(pf);
1728        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1729                goto out;
1730
1731        if (num_vfs > pf->num_req_vfs) {
1732                dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1733                         num_vfs, pf->num_req_vfs);
1734                err = -EPERM;
1735                goto err_out;
1736        }
1737
1738        dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1739        err = i40e_alloc_vfs(pf, num_vfs);
1740        if (err) {
1741                dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1742                goto err_out;
1743        }
1744
1745out:
1746        return num_vfs;
1747
1748err_out:
1749        return err;
1750#endif
1751        return 0;
1752}
1753
1754/**
1755 * i40e_pci_sriov_configure
1756 * @pdev: pointer to a pci_dev structure
1757 * @num_vfs: number of VFs to allocate
1758 *
1759 * Enable or change the number of VFs. Called when the user updates the number
1760 * of VFs in sysfs.
1761 **/
1762int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1763{
1764        struct i40e_pf *pf = pci_get_drvdata(pdev);
1765        int ret = 0;
1766
1767        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1768                dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1769                return -EAGAIN;
1770        }
1771
1772        if (num_vfs) {
1773                if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1774                        pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1775                        i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
1776                }
1777                ret = i40e_pci_sriov_enable(pdev, num_vfs);
1778                goto sriov_configure_out;
1779        }
1780
1781        if (!pci_vfs_assigned(pf->pdev)) {
1782                i40e_free_vfs(pf);
1783                pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1784                i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
1785        } else {
1786                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1787                ret = -EINVAL;
1788                goto sriov_configure_out;
1789        }
1790sriov_configure_out:
1791        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1792        return ret;
1793}
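
/* This is the handler behind the standard PCI sysfs attribute; from the
 * host, for example:
 *
 *	echo 4 > /sys/class/net/<pf-netdev>/device/sriov_numvfs
 *	echo 0 > /sys/class/net/<pf-netdev>/device/sriov_numvfs
 *
 * where the first command creates four VFs and the second frees them again
 * (refused with -EINVAL above if any VF is still assigned to a VM).
 */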
1794
1795/***********************virtual channel routines******************/
1796
1797/**
1798 * i40e_vc_send_msg_to_vf
1799 * @vf: pointer to the VF info
1800 * @v_opcode: virtual channel opcode
1801 * @v_retval: virtual channel return value
1802 * @msg: pointer to the msg buffer
1803 * @msglen: msg length
1804 *
1805 * send msg to VF
1806 **/
1807static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1808                                  u32 v_retval, u8 *msg, u16 msglen)
1809{
1810        struct i40e_pf *pf;
1811        struct i40e_hw *hw;
1812        int abs_vf_id;
1813        i40e_status aq_ret;
1814
1815        /* validate the request */
1816        if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1817                return -EINVAL;
1818
1819        pf = vf->pf;
1820        hw = &pf->hw;
1821        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1822
1823        /* single place to detect unsuccessful return values */
1824        if (v_retval) {
1825                vf->num_invalid_msgs++;
1826                dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1827                         vf->vf_id, v_opcode, v_retval);
1828                if (vf->num_invalid_msgs >
1829                    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1830                        dev_err(&pf->pdev->dev,
1831                                "Number of invalid messages exceeded for VF %d\n",
1832                                vf->vf_id);
1833                        dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1834                        set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1835                }
1836        } else {
1837                vf->num_valid_msgs++;
1838                /* reset the invalid counter if a valid message is received */
1839                vf->num_invalid_msgs = 0;
1840        }
1841
1842        aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1843                                        msg, msglen, NULL);
1844        if (aq_ret) {
1845                dev_info(&pf->pdev->dev,
1846                         "Unable to send the message to VF %d aq_err %d\n",
1847                         vf->vf_id, pf->hw.aq.asq_last_status);
1848                return -EIO;
1849        }
1850
1851        return 0;
1852}
1853
1854/**
1855 * i40e_vc_send_resp_to_vf
1856 * @vf: pointer to the VF info
1857 * @opcode: operation code
1858 * @retval: return value
1859 *
1860 * send resp msg to VF
1861 **/
1862static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1863                                   enum virtchnl_ops opcode,
1864                                   i40e_status retval)
1865{
1866        return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1867}
1868
1869/**
1870 * i40e_vc_get_version_msg
1871 * @vf: pointer to the VF info
1872 * @msg: pointer to the msg buffer
1873 *
1874 * called from the VF to request the API version used by the PF
1875 **/
1876static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
1877{
1878        struct virtchnl_version_info info = {
1879                VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1880        };
1881
1882        vf->vf_ver = *(struct virtchnl_version_info *)msg;
1883        /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1884        if (VF_IS_V10(&vf->vf_ver))
1885                info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1886        return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1887                                      I40E_SUCCESS, (u8 *)&info,
1888                                      sizeof(struct virtchnl_version_info));
1889}
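
/* Negotiation example (constants from virtchnl.h): a VF advertising API 1.1
 * gets VIRTCHNL_VERSION_MAJOR/MINOR (1.1) back, while a VF running the 1.0
 * API gets minor VIRTCHNL_VERSION_MINOR_NO_VF_CAPS instead. This matters for
 * VIRTCHNL_OP_GET_VF_RESOURCES below, where only VF_IS_V11() VFs pass their
 * driver_caps in the message body.
 */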
1890
1891/**
1892 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
1893 * @vf: pointer to VF structure
1894 **/
1895static void i40e_del_qch(struct i40e_vf *vf)
1896{
1897        struct i40e_pf *pf = vf->pf;
1898        int i;
1899
1900        /* The first element in the array belongs to the primary VF VSI and we
1901         * shouldn't delete it; we should, however, delete the remaining VSIs.
1902         */
1903        for (i = 1; i < vf->num_tc; i++) {
1904                if (vf->ch[i].vsi_idx) {
1905                        i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
1906                        vf->ch[i].vsi_idx = 0;
1907                        vf->ch[i].vsi_id = 0;
1908                }
1909        }
1910}
1911
1912/**
1913 * i40e_vc_get_vf_resources_msg
1914 * @vf: pointer to the VF info
1915 * @msg: pointer to the msg buffer
1916 *
1917 * called from the VF to request its resources
1918 **/
1919static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1920{
1921        struct virtchnl_vf_resource *vfres = NULL;
1922        struct i40e_pf *pf = vf->pf;
1923        i40e_status aq_ret = 0;
1924        struct i40e_vsi *vsi;
1925        int num_vsis = 1;
1926        size_t len = 0;
1927        int ret;
1928
1929        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
1930                aq_ret = I40E_ERR_PARAM;
1931                goto err;
1932        }
1933
1934        len = struct_size(vfres, vsi_res, num_vsis);
1935        vfres = kzalloc(len, GFP_KERNEL);
1936        if (!vfres) {
1937                aq_ret = I40E_ERR_NO_MEMORY;
1938                len = 0;
1939                goto err;
1940        }
1941        if (VF_IS_V11(&vf->vf_ver))
1942                vf->driver_caps = *(u32 *)msg;
1943        else
1944                vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1945                                  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1946                                  VIRTCHNL_VF_OFFLOAD_VLAN;
1947
1948        vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1949        vsi = pf->vsi[vf->lan_vsi_idx];
1950        if (!vsi->info.pvid)
1951                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1952
1953        if (i40e_vf_client_capable(pf, vf->vf_id) &&
1954            (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
1955                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
1956                set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1957        } else {
1958                clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1959        }
1960
1961        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1962                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1963        } else {
1964                if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
1965                    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
1966                        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1967                else
1968                        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1969        }
1970
1971        if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
1972                if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1973                        vfres->vf_cap_flags |=
1974                                VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1975        }
1976
1977        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1978                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1979
1980        if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
1981            (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
1982                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1983
1984        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
1985                if (pf->flags & I40E_FLAG_MFP_ENABLED) {
1986                        dev_err(&pf->pdev->dev,
1987                                "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
1988                                 vf->vf_id);
1989                        aq_ret = I40E_ERR_PARAM;
1990                        goto err;
1991                }
1992                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1993        }
1994
1995        if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
1996                if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1997                        vfres->vf_cap_flags |=
1998                                        VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1999        }
2000
2001        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2002                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2003
2004        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2005                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2006
2007        vfres->num_vsis = num_vsis;
2008        vfres->num_queue_pairs = vf->num_queue_pairs;
2009        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2010        vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2011        vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2012
2013        if (vf->lan_vsi_idx) {
2014                vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2015                vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2016                vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2017                /* VFs only use TC 0 */
2018                vfres->vsi_res[0].qset_handle
2019                                          = le16_to_cpu(vsi->info.qs_handle[0]);
2020                ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2021                                vf->default_lan_addr.addr);
2022        }
2023        set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2024
2025err:
2026        /* send the response back to the VF */
2027        ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2028                                     aq_ret, (u8 *)vfres, len);
2029
2030        kfree(vfres);
2031        return ret;
2032}
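
/* Note on the struct_size() call above: it evaluates to
 *
 *	sizeof(*vfres) + num_vsis * sizeof(vfres->vsi_res[0])
 *
 * for the trailing vsi_res[] array, and saturates instead of wrapping on
 * arithmetic overflow, which is why it is preferred over open-coded sizeof
 * math when sizing such buffers.
 */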
2033
2034/**
2035 * i40e_vc_reset_vf_msg
2036 * @vf: pointer to the VF info
2037 *
2038 * called from the VF to reset itself; unlike other virtchnl messages, the
2039 * PF driver doesn't send a response back to the VF
2041 **/
2042static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
2043{
2044        if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2045                i40e_reset_vf(vf, false);
2046}
2047
2048/**
2049 * i40e_vc_config_promiscuous_mode_msg
2050 * @vf: pointer to the VF info
2051 * @msg: pointer to the msg buffer
2052 *
2053 * called from the VF to configure the promiscuous mode of
2054 * VF vsis
2055 **/
2056static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2057{
2058        struct virtchnl_promisc_info *info =
2059            (struct virtchnl_promisc_info *)msg;
2060        struct i40e_pf *pf = vf->pf;
2061        i40e_status aq_ret = 0;
2062        bool allmulti = false;
2063        bool alluni = false;
2064
2065        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2066                aq_ret = I40E_ERR_PARAM;
2067                goto err_out;
2068        }
2069        if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2070                dev_err(&pf->pdev->dev,
2071                        "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2072                        vf->vf_id);
2073
2074                /* Lie to the VF on purpose, because this is an error we can
2075                 * ignore. Unprivileged VF is not a virtual channel error.
2076                 */
2077                aq_ret = 0;
2078                goto err_out;
2079        }
2080
2081        if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2082                aq_ret = I40E_ERR_PARAM;
2083                goto err_out;
2084        }
2085
2086        if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2087                aq_ret = I40E_ERR_PARAM;
2088                goto err_out;
2089        }
2090
2091        /* Multicast promiscuous handling */
2092        if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2093                allmulti = true;
2094
2095        if (info->flags & FLAG_VF_UNICAST_PROMISC)
2096                alluni = true;
2097        aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2098                                                 alluni);
2099        if (aq_ret)
2100                goto err_out;
2101
2102        if (allmulti) {
2103                if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2104                                      &vf->vf_states))
2105                        dev_info(&pf->pdev->dev,
2106                                 "VF %d successfully set multicast promiscuous mode\n",
2107                                 vf->vf_id);
2108        } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2109                                      &vf->vf_states))
2110                dev_info(&pf->pdev->dev,
2111                         "VF %d successfully unset multicast promiscuous mode\n",
2112                         vf->vf_id);
2113
2114        if (alluni) {
2115                if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2116                                      &vf->vf_states))
2117                        dev_info(&pf->pdev->dev,
2118                                 "VF %d successfully set unicast promiscuous mode\n",
2119                                 vf->vf_id);
2120        } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2121                                      &vf->vf_states))
2122                dev_info(&pf->pdev->dev,
2123                         "VF %d successfully unset unicast promiscuous mode\n",
2124                         vf->vf_id);
2125
2126err_out:
2127        /* send the response to the VF */
2128        return i40e_vc_send_resp_to_vf(vf,
2129                                       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2130                                       aq_ret);
2131}
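
/* Message sketch: the VF packs its request into info->flags using the
 * single-bit virtchnl flags, e.g. a VF asking for both modes on VSI 5 sends
 * roughly:
 *
 *	struct virtchnl_promisc_info info = {
 *		.vsi_id = 5,
 *		.flags  = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC,
 *	};
 *
 * The I40E_MAX_VF_PROMISC_FLAGS check above rejects any flags value with
 * bits outside that defined set.
 */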
2132
2133/**
2134 * i40e_vc_config_queues_msg
2135 * @vf: pointer to the VF info
2136 * @msg: pointer to the msg buffer
2137 *
2138 * called from the VF to configure the rx/tx
2139 * queues
2140 **/
2141static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2142{
2143        struct virtchnl_vsi_queue_config_info *qci =
2144            (struct virtchnl_vsi_queue_config_info *)msg;
2145        struct virtchnl_queue_pair_info *qpi;
2146        struct i40e_pf *pf = vf->pf;
2147        u16 vsi_id, vsi_queue_id = 0;
2148        u16 num_qps_all = 0;
2149        i40e_status aq_ret = 0;
2150        int i, j = 0, idx = 0;
2151
2152        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2153                aq_ret = I40E_ERR_PARAM;
2154                goto error_param;
2155        }
2156
2157        if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2158                aq_ret = I40E_ERR_PARAM;
2159                goto error_param;
2160        }
2161
2162        if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2163                aq_ret = I40E_ERR_PARAM;
2164                goto error_param;
2165        }
2166
2167        if (vf->adq_enabled) {
2168                for (i = 0; i < I40E_MAX_VF_VSI; i++)
2169                        num_qps_all += vf->ch[i].num_qps;
2170                if (num_qps_all != qci->num_queue_pairs) {
2171                        aq_ret = I40E_ERR_PARAM;
2172                        goto error_param;
2173                }
2174        }
2175
2176        vsi_id = qci->vsi_id;
2177
2178        for (i = 0; i < qci->num_queue_pairs; i++) {
2179                qpi = &qci->qpair[i];
2180
2181                if (!vf->adq_enabled) {
2182                        if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2183                                                      qpi->txq.queue_id)) {
2184                                aq_ret = I40E_ERR_PARAM;
2185                                goto error_param;
2186                        }
2187
2188                        vsi_queue_id = qpi->txq.queue_id;
2189
2190                        if (qpi->txq.vsi_id != qci->vsi_id ||
2191                            qpi->rxq.vsi_id != qci->vsi_id ||
2192                            qpi->rxq.queue_id != vsi_queue_id) {
2193                                aq_ret = I40E_ERR_PARAM;
2194                                goto error_param;
2195                        }
2196                }
2197
2198                if (vf->adq_enabled) {
2199                        if (idx >= ARRAY_SIZE(vf->ch)) {
2200                                aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2201                                goto error_param;
2202                        }
2203                        vsi_id = vf->ch[idx].vsi_id;
2204                }
2205
2206                if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2207                                             &qpi->rxq) ||
2208                    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2209                                             &qpi->txq)) {
2210                        aq_ret = I40E_ERR_PARAM;
2211                        goto error_param;
2212                }
2213
2214                /* For ADq there can be up to 4 VSIs with max 4 queues each.
2215                 * The VF does not know about these additional VSIs and all
2216                 * it cares about is its own queues. The PF configures these
2217                 * queues to the appropriate VSIs based on the TC mapping.
2218                 */
2219                if (vf->adq_enabled) {
2220                        if (idx >= ARRAY_SIZE(vf->ch)) {
2221                                aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2222                                goto error_param;
2223                        }
2224                        if (j == (vf->ch[idx].num_qps - 1)) {
2225                                idx++;
2226                                j = 0; /* resetting the queue count */
2227                                vsi_queue_id = 0;
2228                        } else {
2229                                j++;
2230                                vsi_queue_id++;
2231                        }
2232                }
2233        }
2234        /* set vsi num_queue_pairs in use to num configured by VF */
2235        if (!vf->adq_enabled) {
2236                pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2237                        qci->num_queue_pairs;
2238        } else {
2239                for (i = 0; i < vf->num_tc; i++)
2240                        pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
2241                               vf->ch[i].num_qps;
2242        }
2243
2244error_param:
2245        /* send the response to the VF */
2246        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2247                                       aq_ret);
2248}
2249
2250/**
2251 * i40e_validate_queue_map - check queue map is valid
2252 * @vf: the VF structure pointer
2253 * @vsi_id: vsi id
2254 * @queuemap: Tx or Rx queue map
2255 *
2256 * check if Tx or Rx queue map is valid
2257 **/
2258static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2259                                   unsigned long queuemap)
2260{
2261        u16 vsi_queue_id, queue_id;
2262
2263        for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2264                if (vf->adq_enabled) {
2265                        vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2266                        queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2267                } else {
2268                        queue_id = vsi_queue_id;
2269                }
2270
2271                if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2272                        return -EINVAL;
2273        }
2274
2275        return 0;
2276}
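
/* Worked example for the ADq branch above, assuming this driver's usual
 * sizing of I40E_MAX_VF_VSI == 4 and I40E_DEFAULT_QUEUES_PER_VF == 4: a set
 * bit at vsi_queue_id == 5 resolves to
 *
 *	vsi_id   = vf->ch[5 / 4].vsi_id;	(channel 1)
 *	queue_id = 5 % 4;			(queue 1 within that VSI)
 *
 * i.e. the VF's flat queue numbering maps onto the per-TC channel VSIs in
 * contiguous blocks of four queues.
 */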
2277
2278/**
2279 * i40e_vc_config_irq_map_msg
2280 * @vf: pointer to the VF info
2281 * @msg: pointer to the msg buffer
2282 *
2283 * called from the VF to configure the irq to
2284 * queue map
2285 **/
2286static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2287{
2288        struct virtchnl_irq_map_info *irqmap_info =
2289            (struct virtchnl_irq_map_info *)msg;
2290        struct virtchnl_vector_map *map;
2291        u16 vsi_id;
2292        i40e_status aq_ret = 0;
2293        int i;
2294
2295        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2296                aq_ret = I40E_ERR_PARAM;
2297                goto error_param;
2298        }
2299
2300        if (irqmap_info->num_vectors >
2301            vf->pf->hw.func_caps.num_msix_vectors_vf) {
2302                aq_ret = I40E_ERR_PARAM;
2303                goto error_param;
2304        }
2305
2306        for (i = 0; i < irqmap_info->num_vectors; i++) {
2307                map = &irqmap_info->vecmap[i];
2308                /* validate msg params */
2309                if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2310                    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2311                        aq_ret = I40E_ERR_PARAM;
2312                        goto error_param;
2313                }
2314                vsi_id = map->vsi_id;
2315
2316                if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2317                        aq_ret = I40E_ERR_PARAM;
2318                        goto error_param;
2319                }
2320
2321                if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2322                        aq_ret = I40E_ERR_PARAM;
2323                        goto error_param;
2324                }
2325
2326                i40e_config_irq_link_list(vf, vsi_id, map);
2327        }
2328error_param:
2329        /* send the response to the VF */
2330        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2331                                       aq_ret);
2332}
2333
2334/**
2335 * i40e_ctrl_vf_tx_rings
2336 * @vsi: the SRIOV VSI being configured
2337 * @q_map: bit map of the queues to be enabled
2338 * @enable: start or stop the queue
2339 **/
2340static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2341                                 bool enable)
2342{
2343        struct i40e_pf *pf = vsi->back;
2344        int ret = 0;
2345        u16 q_id;
2346
2347        for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2348                ret = i40e_control_wait_tx_q(vsi->seid, pf,
2349                                             vsi->base_queue + q_id,
2350                                             false /*is xdp*/, enable);
2351                if (ret)
2352                        break;
2353        }
2354        return ret;
2355}
2356
2357/**
2358 * i40e_ctrl_vf_rx_rings
2359 * @vsi: the SRIOV VSI being configured
2360 * @q_map: bit map of the queues to be enabled
2361 * @enable: start or stop the queue
2362 **/
2363static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2364                                 bool enable)
2365{
2366        struct i40e_pf *pf = vsi->back;
2367        int ret = 0;
2368        u16 q_id;
2369
2370        for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2371                ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2372                                             enable);
2373                if (ret)
2374                        break;
2375        }
2376        return ret;
2377}
2378
2379/**
2380 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2381 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2382 *
2383 * Returns true if validation was successful, else false.
2384 **/
2385static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2386{
2387        if ((!vqs->rx_queues && !vqs->tx_queues) ||
2388            vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2389            vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2390                return false;
2391
2392        return true;
2393}
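
/* Example: with I40E_MAX_VF_QUEUES == 16 (per this driver's headers), a
 * request with rx_queues == 0x3 and tx_queues == 0 is valid and enables Rx
 * queues 0 and 1 only, while rx_queues == BIT(16) fails the range check and
 * an all-zero rx/tx pair is rejected outright.
 */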
2394
2395/**
2396 * i40e_vc_enable_queues_msg
2397 * @vf: pointer to the VF info
2398 * @msg: pointer to the msg buffer
2399 *
2400 * called from the VF to enable all or specific queue(s)
2401 **/
2402static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2403{
2404        struct virtchnl_queue_select *vqs =
2405            (struct virtchnl_queue_select *)msg;
2406        struct i40e_pf *pf = vf->pf;
2407        i40e_status aq_ret = 0;
2408        int i;
2409
2410        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2411                aq_ret = I40E_ERR_PARAM;
2412                goto error_param;
2413        }
2414
2415        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2416                aq_ret = I40E_ERR_PARAM;
2417                goto error_param;
2418        }
2419
2420        if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2421                aq_ret = I40E_ERR_PARAM;
2422                goto error_param;
2423        }
2424
2425        /* Use the queue bit map sent by the VF */
2426        if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2427                                  true)) {
2428                aq_ret = I40E_ERR_TIMEOUT;
2429                goto error_param;
2430        }
2431        if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2432                                  true)) {
2433                aq_ret = I40E_ERR_TIMEOUT;
2434                goto error_param;
2435        }
2436
2437        /* need to start the rings for additional ADq VSIs as well */
2438        if (vf->adq_enabled) {
2439                /* zero belongs to LAN VSI */
2440                for (i = 1; i < vf->num_tc; i++) {
2441                        if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2442                                aq_ret = I40E_ERR_TIMEOUT;
2443                }
2444        }
2445
2446        vf->queues_enabled = true;
2447
2448error_param:
2449        /* send the response to the VF */
2450        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2451                                       aq_ret);
2452}
2453
2454/**
2455 * i40e_vc_disable_queues_msg
2456 * @vf: pointer to the VF info
2457 * @msg: pointer to the msg buffer
2458 *
2459 * called from the VF to disable all or specific
2460 * queue(s)
2461 **/
2462static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2463{
2464        struct virtchnl_queue_select *vqs =
2465            (struct virtchnl_queue_select *)msg;
2466        struct i40e_pf *pf = vf->pf;
2467        i40e_status aq_ret = 0;
2468
2469        /* Immediately mark queues as disabled */
2470        vf->queues_enabled = false;
2471
2472        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2473                aq_ret = I40E_ERR_PARAM;
2474                goto error_param;
2475        }
2476
2477        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2478                aq_ret = I40E_ERR_PARAM;
2479                goto error_param;
2480        }
2481
2482        if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2483                aq_ret = I40E_ERR_PARAM;
2484                goto error_param;
2485        }
2486
2487        /* Use the queue bit map sent by the VF */
2488        if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2489                                  false)) {
2490                aq_ret = I40E_ERR_TIMEOUT;
2491                goto error_param;
2492        }
2493        if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2494                                  false)) {
2495                aq_ret = I40E_ERR_TIMEOUT;
2496                goto error_param;
2497        }
2498error_param:
2499        /* send the response to the VF */
2500        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2501                                       aq_ret);
2502}
2503
2504/**
2505 * i40e_vc_request_queues_msg
2506 * @vf: pointer to the VF info
2507 * @msg: pointer to the msg buffer
2508 *
2509 * VFs get a default number of queues but can use this message to request a
2510 * different number.  If the request is successful, PF will reset the VF and
2511 * return 0.  If unsuccessful, PF will send a message informing the VF of the
2512 * number of available queues, and return the result of sending that message.
2513 **/
2514static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2515{
2516        struct virtchnl_vf_res_request *vfres =
2517                (struct virtchnl_vf_res_request *)msg;
2518        u16 req_pairs = vfres->num_queue_pairs;
2519        u8 cur_pairs = vf->num_queue_pairs;
2520        struct i40e_pf *pf = vf->pf;
2521
2522        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2523                return -EINVAL;
2524
2525        if (req_pairs > I40E_MAX_VF_QUEUES) {
2526                dev_err(&pf->pdev->dev,
2527                        "VF %d tried to request more than %d queues.\n",
2528                        vf->vf_id,
2529                        I40E_MAX_VF_QUEUES);
2530                vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2531        } else if (req_pairs - cur_pairs > pf->queues_left) {
2532                dev_warn(&pf->pdev->dev,
2533                         "VF %d requested %d more queues, but only %d left.\n",
2534                         vf->vf_id,
2535                         req_pairs - cur_pairs,
2536                         pf->queues_left);
2537                vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2538        } else {
2539                /* successful request */
2540                vf->num_req_queues = req_pairs;
2541                i40e_vc_notify_vf_reset(vf);
2542                i40e_reset_vf(vf, false);
2543                return 0;
2544        }
2545
2546        return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2547                                      (u8 *)vfres, sizeof(*vfres));
2548}
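
/* Worked example: a VF with cur_pairs == 4 requesting req_pairs == 8 needs
 * four more queues. If pf->queues_left >= 4, the request succeeds and the VF
 * is reset so the new count takes effect on re-init; if only two are left,
 * the reply advertises 4 + 2 == 6 pairs; a request above I40E_MAX_VF_QUEUES
 * is capped to that maximum in the reply.
 */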
2549
2550/**
2551 * i40e_vc_get_stats_msg
2552 * @vf: pointer to the VF info
2553 * @msg: pointer to the msg buffer
2554 *
2555 * called from the VF to get vsi stats
2556 **/
2557static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2558{
2559        struct virtchnl_queue_select *vqs =
2560            (struct virtchnl_queue_select *)msg;
2561        struct i40e_pf *pf = vf->pf;
2562        struct i40e_eth_stats stats;
2563        i40e_status aq_ret = 0;
2564        struct i40e_vsi *vsi;
2565
2566        memset(&stats, 0, sizeof(struct i40e_eth_stats));
2567
2568        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2569                aq_ret = I40E_ERR_PARAM;
2570                goto error_param;
2571        }
2572
2573        if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2574                aq_ret = I40E_ERR_PARAM;
2575                goto error_param;
2576        }
2577
2578        vsi = pf->vsi[vf->lan_vsi_idx];
2579        if (!vsi) {
2580                aq_ret = I40E_ERR_PARAM;
2581                goto error_param;
2582        }
2583        i40e_update_eth_stats(vsi);
2584        stats = vsi->eth_stats;
2585
2586error_param:
2587        /* send the response back to the VF */
2588        return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2589                                      (u8 *)&stats, sizeof(stats));
2590}
2591
2592/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2593 * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast.
2594 */
2595#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2596#define I40E_VC_MAX_VLAN_PER_VF 16
2597
2598/**
2599 * i40e_check_vf_permission
2600 * @vf: pointer to the VF info
2601 * @al: MAC address list from virtchnl
2602 *
2603 * Check that the given list of MAC addresses is allowed. Will return -EPERM
2604 * if any address in the list is not valid. Checks the following conditions:
2605 *
2606 * 1) broadcast and zero addresses are never valid
2607 * 2) unicast addresses are not allowed if the VMM has administratively set
2608 *    the VF MAC address, unless the VF is marked as privileged.
2609 * 3) There is enough space to add all the addresses.
2610 *
2611 * Note that to guarantee consistency, it is expected this function be called
2612 * while holding the mac_filter_hash_lock, as otherwise the current number of
2613 * addresses might not be accurate.
2614 **/
2615static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2616                                           struct virtchnl_ether_addr_list *al)
2617{
2618        struct i40e_pf *pf = vf->pf;
2619        struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2620        int mac2add_cnt = 0;
2621        int i;
2622
2623        for (i = 0; i < al->num_elements; i++) {
2624                struct i40e_mac_filter *f;
2625                u8 *addr = al->list[i].addr;
2626
2627                if (is_broadcast_ether_addr(addr) ||
2628                    is_zero_ether_addr(addr)) {
2629                        dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2630                                addr);
2631                        return I40E_ERR_INVALID_MAC_ADDR;
2632                }
2633
2634                /* If the host VMM administrator has set the VF MAC address
2635                 * administratively via the ndo_set_vf_mac command then deny
2636                 * permission to the VF to add or delete unicast MAC addresses.
2637                 * Unless the VF is privileged and then it can do whatever.
2638                 * The VF may request to set the MAC address filter already
2639                 * assigned to it so do not return an error in that case.
2640                 */
2641                if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2642                    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2643                    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2644                        dev_err(&pf->pdev->dev,
2645                                "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2646                        return -EPERM;
2647                }
2648
2649                /* count filters that really will be added */
2650                f = i40e_find_mac(vsi, addr);
2651                if (!f)
2652                        ++mac2add_cnt;
2653        }
2654
2655        /* If this VF is not privileged, then we can't add more than a limited
2656         * number of addresses. Check to make sure that the additions do not
2657         * push us over the limit.
2658         */
2659        if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2660            (i40e_count_filters(vsi) + mac2add_cnt) >
2661                    I40E_VC_MAX_MAC_ADDR_PER_VF) {
2662                dev_err(&pf->pdev->dev,
2663                        "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2664                return -EPERM;
2665        }
2666        return 0;
2667}
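
/* The untrusted-VF budget above works out to 16 + 1 + 1 == 18 filters in
 * total (multicast + primary MAC + broadcast). As the kernel-doc notes, the
 * check is only meaningful under the filter lock, mirroring what
 * i40e_vc_add_mac_addr_msg() below does:
 *
 *	spin_lock_bh(&vsi->mac_filter_hash_lock);
 *	ret = i40e_check_vf_permission(vf, al);
 *	...add the filters while still holding the lock...
 *	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 */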
2668
2669/**
2670 * i40e_vc_add_mac_addr_msg
2671 * @vf: pointer to the VF info
2672 * @msg: pointer to the msg buffer
2673 *
2674 * add guest mac address filter
2675 **/
2676static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2677{
2678        struct virtchnl_ether_addr_list *al =
2679            (struct virtchnl_ether_addr_list *)msg;
2680        struct i40e_pf *pf = vf->pf;
2681        struct i40e_vsi *vsi = NULL;
2682        i40e_status ret = 0;
2683        int i;
2684
2685        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2686            !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2687                ret = I40E_ERR_PARAM;
2688                goto error_param;
2689        }
2690
2691        vsi = pf->vsi[vf->lan_vsi_idx];
2692
2693        /* Lock once, because all functions inside the for loop access the VSI's
2694         * MAC filter list, which needs to be protected using the same lock.
2695         */
2696        spin_lock_bh(&vsi->mac_filter_hash_lock);
2697
2698        ret = i40e_check_vf_permission(vf, al);
2699        if (ret) {
2700                spin_unlock_bh(&vsi->mac_filter_hash_lock);
2701                goto error_param;
2702        }
2703
2704        /* add new addresses to the list */
2705        for (i = 0; i < al->num_elements; i++) {
2706                struct i40e_mac_filter *f;
2707
2708                f = i40e_find_mac(vsi, al->list[i].addr);
2709                if (!f) {
2710                        f = i40e_add_mac_filter(vsi, al->list[i].addr);
2711
2712                        if (!f) {
2713                                dev_err(&pf->pdev->dev,
2714                                        "Unable to add MAC filter %pM for VF %d\n",
2715                                        al->list[i].addr, vf->vf_id);
2716                                ret = I40E_ERR_PARAM;
2717                                spin_unlock_bh(&vsi->mac_filter_hash_lock);
2718                                goto error_param;
2719                        }
2720                        if (is_valid_ether_addr(al->list[i].addr) &&
2721                            is_zero_ether_addr(vf->default_lan_addr.addr))
2722                                ether_addr_copy(vf->default_lan_addr.addr,
2723                                                al->list[i].addr);
2724                }
2725        }
2726        spin_unlock_bh(&vsi->mac_filter_hash_lock);
2727
2728        /* program the updated filter list */
2729        ret = i40e_sync_vsi_filters(vsi);
2730        if (ret)
2731                dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2732                        vf->vf_id, ret);
2733
2734error_param:
2735        /* send the response to the VF */
2736        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2737                                       ret);
2738}
2739
2740/**
2741 * i40e_vc_del_mac_addr_msg
2742 * @vf: pointer to the VF info
2743 * @msg: pointer to the msg buffer
2744 *
2745 * remove guest mac address filter
2746 **/
2747static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2748{
2749        struct virtchnl_ether_addr_list *al =
2750            (struct virtchnl_ether_addr_list *)msg;
2751        bool was_unimac_deleted = false;
2752        struct i40e_pf *pf = vf->pf;
2753        struct i40e_vsi *vsi = NULL;
2754        i40e_status ret = 0;
2755        int i;
2756
2757        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2758            !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2759                ret = I40E_ERR_PARAM;
2760                goto error_param;
2761        }
2762
2763        for (i = 0; i < al->num_elements; i++) {
2764                if (is_broadcast_ether_addr(al->list[i].addr) ||
2765                    is_zero_ether_addr(al->list[i].addr)) {
2766                        dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2767                                al->list[i].addr, vf->vf_id);
2768                        ret = I40E_ERR_INVALID_MAC_ADDR;
2769                        goto error_param;
2770                }
2771                if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
2772                        was_unimac_deleted = true;
2773        }
2774        vsi = pf->vsi[vf->lan_vsi_idx];
2775
2776        spin_lock_bh(&vsi->mac_filter_hash_lock);
2777        /* delete addresses from the list */
2778        for (i = 0; i < al->num_elements; i++)
2779                if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2780                        ret = I40E_ERR_INVALID_MAC_ADDR;
2781                        spin_unlock_bh(&vsi->mac_filter_hash_lock);
2782                        goto error_param;
2783                }
2784
2785        spin_unlock_bh(&vsi->mac_filter_hash_lock);
2786
2787        /* program the updated filter list */
2788        ret = i40e_sync_vsi_filters(vsi);
2789        if (ret)
2790                dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2791                        vf->vf_id, ret);
2792
2793        if (vf->trusted && was_unimac_deleted) {
2794                struct i40e_mac_filter *f;
2795                struct hlist_node *h;
2796                u8 *macaddr = NULL;
2797                int bkt;
2798
2799                /* set last unicast mac address as default */
2800                spin_lock_bh(&vsi->mac_filter_hash_lock);
2801                hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2802                        if (is_valid_ether_addr(f->macaddr))
2803                                macaddr = f->macaddr;
2804                }
2805                if (macaddr)
2806                        ether_addr_copy(vf->default_lan_addr.addr, macaddr);
2807                spin_unlock_bh(&vsi->mac_filter_hash_lock);
2808        }
2809error_param:
2810        /* send the response to the VF */
2811        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
2812}
2813
2814/**
2815 * i40e_vc_add_vlan_msg
2816 * @vf: pointer to the VF info
2817 * @msg: pointer to the msg buffer
2818 *
2819 * program guest vlan id
2820 **/
2821static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
2822{
2823        struct virtchnl_vlan_filter_list *vfl =
2824            (struct virtchnl_vlan_filter_list *)msg;
2825        struct i40e_pf *pf = vf->pf;
2826        struct i40e_vsi *vsi = NULL;
2827        i40e_status aq_ret = 0;
2828        int i;
2829
2830        if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2831            !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2832                dev_err(&pf->pdev->dev,
2833                        "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2834                goto error_param;
2835        }
2836        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2837            !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2838                aq_ret = I40E_ERR_PARAM;
2839                goto error_param;
2840        }
2841
2842        for (i = 0; i < vfl->num_elements; i++) {
2843                if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2844                        aq_ret = I40E_ERR_PARAM;
2845                        dev_err(&pf->pdev->dev,
2846                                "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2847                        goto error_param;
2848                }
2849        }
2850        vsi = pf->vsi[vf->lan_vsi_idx];
2851        if (vsi->info.pvid) {
2852                aq_ret = I40E_ERR_PARAM;
2853                goto error_param;
2854        }
2855
2856        i40e_vlan_stripping_enable(vsi);
2857        for (i = 0; i < vfl->num_elements; i++) {
2858                /* add new VLAN filter */
2859                int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2860                if (!ret)
2861                        vf->num_vlan++;
2862
2863                if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2864                        i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2865                                                           true,
2866                                                           vfl->vlan_id[i],
2867                                                           NULL);
2868                if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2869                        i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2870                                                           true,
2871                                                           vfl->vlan_id[i],
2872                                                           NULL);
2873
2874                if (ret)
2875                        dev_err(&pf->pdev->dev,
2876                                "Unable to add VLAN filter %d for VF %d, error %d\n",
2877                                vfl->vlan_id[i], vf->vf_id, ret);
2878        }
2879
2880error_param:
2881        /* send the response to the VF */
2882        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2883}
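
/* Example (illustrative sketch, not part of this driver): the request
 * layout parsed by i40e_vc_add_vlan_msg(). A VF driver fills a
 * virtchnl_vlan_filter_list and sends it as VIRTCHNL_OP_ADD_VLAN; the
 * helper name below and the assumption that the buffer is large enough
 * for num_elements entries are illustrative only.
 */
#if 0
static void example_fill_add_vlan(struct virtchnl_vlan_filter_list *vfl,
                                  u16 vsi_id)
{
        vfl->vsi_id = vsi_id;           /* must be the VF's LAN VSI id */
        vfl->num_elements = 1;
        vfl->vlan_id[0] = 100;          /* must not exceed I40E_MAX_VLANID */
}
#endif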
2884
2885/**
2886 * i40e_vc_remove_vlan_msg
2887 * @vf: pointer to the VF info
2888 * @msg: pointer to the msg buffer
2889 *
2890 * remove programmed guest vlan id
2891 **/
2892static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
2893{
2894        struct virtchnl_vlan_filter_list *vfl =
2895            (struct virtchnl_vlan_filter_list *)msg;
2896        struct i40e_pf *pf = vf->pf;
2897        struct i40e_vsi *vsi = NULL;
2898        i40e_status aq_ret = 0;
2899        int i;
2900
2901        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2902            !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2903                aq_ret = I40E_ERR_PARAM;
2904                goto error_param;
2905        }
2906
2907        for (i = 0; i < vfl->num_elements; i++) {
2908                if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2909                        aq_ret = I40E_ERR_PARAM;
2910                        goto error_param;
2911                }
2912        }
2913
2914        vsi = pf->vsi[vf->lan_vsi_idx];
2915        if (vsi->info.pvid) {
2916                if (vfl->num_elements > 1 || vfl->vlan_id[0])
2917                        aq_ret = I40E_ERR_PARAM;
2918                goto error_param;
2919        }
2920
2921        for (i = 0; i < vfl->num_elements; i++) {
2922                i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2923                vf->num_vlan--;
2924
2925                if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2926                        i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2927                                                           false,
2928                                                           vfl->vlan_id[i],
2929                                                           NULL);
2930                if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2931                        i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2932                                                           false,
2933                                                           vfl->vlan_id[i],
2934                                                           NULL);
2935        }
2936
2937error_param:
2938        /* send the response to the VF */
2939        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2940}
2941
2942/**
2943 * i40e_vc_iwarp_msg
2944 * @vf: pointer to the VF info
2945 * @msg: pointer to the msg buffer
2946 * @msglen: msg length
2947 *
2948 * called from the VF for the iwarp msgs
2949 **/
2950static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2951{
2952        struct i40e_pf *pf = vf->pf;
2953        int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2954        i40e_status aq_ret = 0;
2955
2956        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2957            !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2958                aq_ret = I40E_ERR_PARAM;
2959                goto error_param;
2960        }
2961
2962        i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2963                                     msg, msglen);
2964
2965error_param:
2966        /* send the response to the VF */
2967        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2968                                       aq_ret);
2969}
2970
2971/**
2972 * i40e_vc_iwarp_qvmap_msg
2973 * @vf: pointer to the VF info
2974 * @msg: pointer to the msg buffer
2975 * @config: config qvmap or release it
2976 *
2977 * called from the VF for the iwarp msgs
2978 **/
2979static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
2980{
2981        struct virtchnl_iwarp_qvlist_info *qvlist_info =
2982                                (struct virtchnl_iwarp_qvlist_info *)msg;
2983        i40e_status aq_ret = 0;
2984
2985        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2986            !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2987                aq_ret = I40E_ERR_PARAM;
2988                goto error_param;
2989        }
2990
2991        if (config) {
2992                if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2993                        aq_ret = I40E_ERR_PARAM;
2994        } else {
2995                i40e_release_iwarp_qvlist(vf);
2996        }
2997
2998error_param:
2999        /* send the response to the VF */
3000        return i40e_vc_send_resp_to_vf(vf,
3001                               config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
3002                               VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
3003                               aq_ret);
3004}
3005
3006/**
3007 * i40e_vc_config_rss_key
3008 * @vf: pointer to the VF info
3009 * @msg: pointer to the msg buffer
3010 *
3011 * Configure the VF's RSS key
3012 **/
3013static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3014{
3015        struct virtchnl_rss_key *vrk =
3016                (struct virtchnl_rss_key *)msg;
3017        struct i40e_pf *pf = vf->pf;
3018        struct i40e_vsi *vsi = NULL;
3019        i40e_status aq_ret = 0;
3020
3021        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3022            !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3023            (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
3024                aq_ret = I40E_ERR_PARAM;
3025                goto err;
3026        }
3027
3028        vsi = pf->vsi[vf->lan_vsi_idx];
3029        aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3030err:
3031        /* send the response to the VF */
3032        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3033                                       aq_ret);
3034}
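
/* Note: the handler above rejects any request whose key_len differs from
 * I40E_HKEY_ARRAY_SIZE, so a VF must always send a full-size key. A sketch
 * of the sender side (hypothetical helper; assumes the flexible key[]
 * array was allocated with key_len bytes):
 */
#if 0
static void example_fill_rss_key(struct virtchnl_rss_key *vrk, u16 vsi_id)
{
        vrk->vsi_id = vsi_id;
        vrk->key_len = I40E_HKEY_ARRAY_SIZE;    /* anything else is rejected */
        netdev_rss_key_fill(vrk->key, vrk->key_len);
}
#endif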
3035
3036/**
3037 * i40e_vc_config_rss_lut
3038 * @vf: pointer to the VF info
3039 * @msg: pointer to the msg buffer
3040 *
3041 * Configure the VF's RSS LUT
3042 **/
3043static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3044{
3045        struct virtchnl_rss_lut *vrl =
3046                (struct virtchnl_rss_lut *)msg;
3047        struct i40e_pf *pf = vf->pf;
3048        struct i40e_vsi *vsi = NULL;
3049        i40e_status aq_ret = 0;
3050        u16 i;
3051
3052        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3053            !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3054            (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
3055                aq_ret = I40E_ERR_PARAM;
3056                goto err;
3057        }
3058
3059        for (i = 0; i < vrl->lut_entries; i++)
3060                if (vrl->lut[i] >= vf->num_queue_pairs) {
3061                        aq_ret = I40E_ERR_PARAM;
3062                        goto err;
3063                }
3064
3065        vsi = pf->vsi[vf->lan_vsi_idx];
3066        aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3067        /* send the response to the VF */
3068err:
3069        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3070                                       aq_ret);
3071}
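
/* Note: each LUT entry is an index into the VF's queue pairs, which is why
 * the loop above rejects lut[i] >= vf->num_queue_pairs. A sketch of a valid
 * round-robin LUT (hypothetical helper; assumes lut[] was allocated with
 * I40E_VF_HLUT_ARRAY_SIZE entries):
 */
#if 0
static void example_fill_rss_lut(struct virtchnl_rss_lut *vrl, u16 vsi_id,
                                 u16 num_qps)
{
        u16 i;

        vrl->vsi_id = vsi_id;
        vrl->lut_entries = I40E_VF_HLUT_ARRAY_SIZE;     /* must match exactly */
        for (i = 0; i < vrl->lut_entries; i++)
                vrl->lut[i] = i % num_qps;      /* always < num_queue_pairs */
}
#endif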
3072
3073/**
3074 * i40e_vc_get_rss_hena
3075 * @vf: pointer to the VF info
3076 * @msg: pointer to the msg buffer
3077 *
3078 * Return the RSS HENA bits allowed by the hardware
3079 **/
3080static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3081{
3082        struct virtchnl_rss_hena *vrh = NULL;
3083        struct i40e_pf *pf = vf->pf;
3084        i40e_status aq_ret = 0;
3085        int len = 0;
3086
3087        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3088                aq_ret = I40E_ERR_PARAM;
3089                goto err;
3090        }
3091        len = sizeof(struct virtchnl_rss_hena);
3092
3093        vrh = kzalloc(len, GFP_KERNEL);
3094        if (!vrh) {
3095                aq_ret = I40E_ERR_NO_MEMORY;
3096                len = 0;
3097                goto err;
3098        }
3099        vrh->hena = i40e_pf_get_default_rss_hena(pf);
3100err:
3101        /* send the response back to the VF */
3102        aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3103                                        aq_ret, (u8 *)vrh, len);
3104        kfree(vrh);
3105        return aq_ret;
3106}
3107
3108/**
3109 * i40e_vc_set_rss_hena
3110 * @vf: pointer to the VF info
3111 * @msg: pointer to the msg buffer
3112 *
3113 * Set the RSS HENA bits for the VF
3114 **/
3115static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3116{
3117        struct virtchnl_rss_hena *vrh =
3118                (struct virtchnl_rss_hena *)msg;
3119        struct i40e_pf *pf = vf->pf;
3120        struct i40e_hw *hw = &pf->hw;
3121        i40e_status aq_ret = 0;
3122
3123        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3124                aq_ret = I40E_ERR_PARAM;
3125                goto err;
3126        }
3127        i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3128        i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3129                          (u32)(vrh->hena >> 32));
3130
3131        /* send the response to the VF */
3132err:
3133        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3134}
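
/* Note: HENA is a 64-bit mask of enabled RSS flow types, but the
 * VFQF_HENA1 registers are 32 bits wide, so the handler above splits it.
 * For example, hena = 0x0000000100000002ULL is written as 0x00000002 to
 * I40E_VFQF_HENA1(0, vf_id) and 0x00000001 to I40E_VFQF_HENA1(1, vf_id).
 */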
3135
3136/**
3137 * i40e_vc_enable_vlan_stripping
3138 * @vf: pointer to the VF info
3139 * @msg: pointer to the msg buffer
3140 *
3141 * Enable vlan header stripping for the VF
3142 **/
3143static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3144{
3145        i40e_status aq_ret = 0;
3146        struct i40e_vsi *vsi;
3147
3148        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3149                aq_ret = I40E_ERR_PARAM;
3150                goto err;
3151        }
3152
3153        vsi = vf->pf->vsi[vf->lan_vsi_idx];
3154        i40e_vlan_stripping_enable(vsi);
3155
3156        /* send the response to the VF */
3157err:
3158        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3159                                       aq_ret);
3160}
3161
3162/**
3163 * i40e_vc_disable_vlan_stripping
3164 * @vf: pointer to the VF info
3165 * @msg: pointer to the msg buffer
3166 *
3167 * Disable vlan header stripping for the VF
3168 **/
3169static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3170{
3171        i40e_status aq_ret = 0;
3172        struct i40e_vsi *vsi;
3173
3174        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3175                aq_ret = I40E_ERR_PARAM;
3176                goto err;
3177        }
3178
3179        vsi = vf->pf->vsi[vf->lan_vsi_idx];
3180        i40e_vlan_stripping_disable(vsi);
3181
3182        /* send the response to the VF */
3183err:
3184        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3185                                       aq_ret);
3186}
3187
3188/**
3189 * i40e_validate_cloud_filter
3190 * @vf: pointer to VF structure
3191 * @tc_filter: pointer to filter requested
3192 *
3193 * This function validates cloud filter programmed as TC filter for ADq
3194 **/
3195static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3196                                      struct virtchnl_filter *tc_filter)
3197{
3198        struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3199        struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3200        struct i40e_pf *pf = vf->pf;
3201        struct i40e_vsi *vsi = NULL;
3202        struct i40e_mac_filter *f;
3203        struct hlist_node *h;
3204        bool found = false;
3205        int bkt;
3206
3207        if (!tc_filter->action) {
3208                dev_info(&pf->pdev->dev,
3209                         "VF %d: Currently ADq doesn't support Drop Action\n",
3210                         vf->vf_id);
3211                goto err;
3212        }
3213
3214        /* action_meta is the TC number to which the filter is applied */
3215        if (!tc_filter->action_meta ||
3216            tc_filter->action_meta > I40E_MAX_VF_VSI) {
3217                dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3218                         vf->vf_id, tc_filter->action_meta);
3219                goto err;
3220        }
3221
3222        /* Check whether the filter is programmed for advanced or basic mode.
3223         * There are two ADq modes (for VF only):
3224         * 1. Basic mode: intended to allow as many filter options as possible
3225         *                to be added to a VF in non-trusted mode. The main goal
3226         *                is to add filters to the VF's own MAC and VLAN id.
3227         * 2. Advanced mode: allows filters to be applied other than to the
3228         *                VF's own MAC or VLAN. This mode requires the VF to be
3229         *                trusted.
3230         */
3231        if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3232                vsi = pf->vsi[vf->lan_vsi_idx];
3233                f = i40e_find_mac(vsi, data.dst_mac);
3234
3235                if (!f) {
3236                        dev_info(&pf->pdev->dev,
3237                                 "Destination MAC %pM doesn't belong to VF %d\n",
3238                                 data.dst_mac, vf->vf_id);
3239                        goto err;
3240                }
3241
3242                if (mask.vlan_id) {
3243                        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3244                                           hlist) {
3245                                if (f->vlan == ntohs(data.vlan_id)) {
3246                                        found = true;
3247                                        break;
3248                                }
3249                        }
3250                        if (!found) {
3251                                dev_info(&pf->pdev->dev,
3252                                         "VF %d doesn't have any VLAN id %u\n",
3253                                         vf->vf_id, ntohs(data.vlan_id));
3254                                goto err;
3255                        }
3256                }
3257        } else {
3258                /* Check if VF is trusted */
3259                if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3260                        dev_err(&pf->pdev->dev,
3261                                "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3262                                vf->vf_id);
3263                        return I40E_ERR_CONFIG;
3264                }
3265        }
3266
3267        if (mask.dst_mac[0] & data.dst_mac[0]) {
3268                if (is_broadcast_ether_addr(data.dst_mac) ||
3269                    is_zero_ether_addr(data.dst_mac)) {
3270                        dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3271                                 vf->vf_id, data.dst_mac);
3272                        goto err;
3273                }
3274        }
3275
3276        if (mask.src_mac[0] & data.src_mac[0]) {
3277                if (is_broadcast_ether_addr(data.src_mac) ||
3278                    is_zero_ether_addr(data.src_mac)) {
3279                        dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3280                                 vf->vf_id, data.src_mac);
3281                        goto err;
3282                }
3283        }
3284
3285        if (mask.dst_port & data.dst_port) {
3286                if (!data.dst_port) {
3287                        dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3288                                 vf->vf_id);
3289                        goto err;
3290                }
3291        }
3292
3293        if (mask.src_port & data.src_port) {
3294                if (!data.src_port) {
3295                        dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3296                                 vf->vf_id);
3297                        goto err;
3298                }
3299        }
3300
3301        if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3302            tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3303                dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3304                         vf->vf_id);
3305                goto err;
3306        }
3307
3308        if (mask.vlan_id & data.vlan_id) {
3309                if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3310                        dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3311                                 vf->vf_id);
3312                        goto err;
3313                }
3314        }
3315
3316        return I40E_SUCCESS;
3317err:
3318        return I40E_ERR_CONFIG;
3319}
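
/* Example (illustrative sketch, not part of this driver): a basic-mode
 * filter that passes the validation above for a non-trusted VF - TCP/IPv4,
 * matching only a destination MAC the VF already owns, redirected to TC 1.
 * The helper name and vf_mac argument are illustrative only.
 */
#if 0
static void example_fill_basic_filter(struct virtchnl_filter *vcf,
                                      const u8 *vf_mac)
{
        memset(vcf, 0, sizeof(*vcf));
        vcf->flow_type = VIRTCHNL_TCP_V4_FLOW;
        vcf->action = VIRTCHNL_ACTION_TC_REDIRECT;      /* drop (0) is rejected */
        vcf->action_meta = 1;                           /* target TC number */
        eth_broadcast_addr(vcf->mask.tcp_spec.dst_mac); /* match all 6 bytes */
        ether_addr_copy(vcf->data.tcp_spec.dst_mac, vf_mac);
}
#endif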
3320
3321/**
3322 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3323 * @vf: pointer to the VF info
3324 * @seid: seid of the vsi it is searching for
3325 **/
3326static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3327{
3328        struct i40e_pf *pf = vf->pf;
3329        struct i40e_vsi *vsi = NULL;
3330        int i;
3331
3332        for (i = 0; i < vf->num_tc; i++) {
3333                vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3334                if (vsi && vsi->seid == seid)
3335                        return vsi;
3336        }
3337        return NULL;
3338}
3339
3340/**
3341 * i40e_del_all_cloud_filters
3342 * @vf: pointer to the VF info
3343 *
3344 * This function deletes all cloud filters
3345 **/
3346static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3347{
3348        struct i40e_cloud_filter *cfilter = NULL;
3349        struct i40e_pf *pf = vf->pf;
3350        struct i40e_vsi *vsi = NULL;
3351        struct hlist_node *node;
3352        int ret;
3353
3354        hlist_for_each_entry_safe(cfilter, node,
3355                                  &vf->cloud_filter_list, cloud_node) {
3356                vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3357
3358                if (!vsi) {
3359                        dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3360                                vf->vf_id, cfilter->seid);
3361                        continue;
3362                }
3363
3364                if (cfilter->dst_port)
3365                        ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3366                                                                false);
3367                else
3368                        ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3369                if (ret)
3370                        dev_err(&pf->pdev->dev,
3371                                "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3372                                vf->vf_id, i40e_stat_str(&pf->hw, ret),
3373                                i40e_aq_str(&pf->hw,
3374                                            pf->hw.aq.asq_last_status));
3375
3376                hlist_del(&cfilter->cloud_node);
3377                kfree(cfilter);
3378                vf->num_cloud_filters--;
3379        }
3380}
3381
3382/**
3383 * i40e_vc_del_cloud_filter
3384 * @vf: pointer to the VF info
3385 * @msg: pointer to the msg buffer
3386 *
3387 * This function deletes a cloud filter programmed as TC filter for ADq
3388 **/
3389static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3390{
3391        struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3392        struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3393        struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3394        struct i40e_cloud_filter cfilter, *cf = NULL;
3395        struct i40e_pf *pf = vf->pf;
3396        struct i40e_vsi *vsi = NULL;
3397        struct hlist_node *node;
3398        i40e_status aq_ret = 0;
3399        int i, ret;
3400
3401        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3402                aq_ret = I40E_ERR_PARAM;
3403                goto err;
3404        }
3405
3406        if (!vf->adq_enabled) {
3407                dev_info(&pf->pdev->dev,
3408                         "VF %d: ADq not enabled, can't apply cloud filter\n",
3409                         vf->vf_id);
3410                aq_ret = I40E_ERR_PARAM;
3411                goto err;
3412        }
3413
3414        if (i40e_validate_cloud_filter(vf, vcf)) {
3415                dev_info(&pf->pdev->dev,
3416                         "VF %d: Invalid input, can't apply cloud filter\n",
3417                         vf->vf_id);
3418                aq_ret = I40E_ERR_PARAM;
3419                goto err;
3420        }
3421
3422        memset(&cfilter, 0, sizeof(cfilter));
3423        /* parse destination mac address */
3424        for (i = 0; i < ETH_ALEN; i++)
3425                cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3426
3427        /* parse source mac address */
3428        for (i = 0; i < ETH_ALEN; i++)
3429                cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3430
3431        cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3432        cfilter.dst_port = mask.dst_port & tcf.dst_port;
3433        cfilter.src_port = mask.src_port & tcf.src_port;
3434
3435        switch (vcf->flow_type) {
3436        case VIRTCHNL_TCP_V4_FLOW:
3437                cfilter.n_proto = ETH_P_IP;
3438                if (mask.dst_ip[0] & tcf.dst_ip[0])
3439                        memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3440                               sizeof(cfilter.ip.v4.dst_ip));
3441                else if (mask.src_ip[0] & tcf.src_ip[0])
3442                        memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3443                               sizeof(cfilter.ip.v4.src_ip));
3444                break;
3445        case VIRTCHNL_TCP_V6_FLOW:
3446                cfilter.n_proto = ETH_P_IPV6;
3447                if (mask.dst_ip[3] & tcf.dst_ip[3])
3448                        memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3449                               sizeof(cfilter.ip.v6.dst_ip6));
3450                if (mask.src_ip[3] & tcf.src_ip[3])
3451                        memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3452                               sizeof(cfilter.ip.v6.src_ip6));
3453                break;
3454        default:
3455                /* TC filter can be configured based on different combinations
3456                 * and in this case IP is not a part of filter config
3457                 */
3458                dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3459                         vf->vf_id);
3460        }
3461
3462        /* get the VSI to which the TC belongs */
3463        vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3464        cfilter.seid = vsi->seid;
3465        cfilter.flags = vcf->field_flags;
3466
3467        /* Deleting TC filter */
3468        if (tcf.dst_port)
3469                ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3470        else
3471                ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3472        if (ret) {
3473                dev_err(&pf->pdev->dev,
3474                        "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3475                        vf->vf_id, i40e_stat_str(&pf->hw, ret),
3476                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3477                goto err;
3478        }
3479
3480        hlist_for_each_entry_safe(cf, node,
3481                                  &vf->cloud_filter_list, cloud_node) {
3482                if (cf->seid != cfilter.seid)
3483                        continue;
3484                if (mask.dst_port)
3485                        if (cfilter.dst_port != cf->dst_port)
3486                                continue;
3487                if (mask.dst_mac[0])
3488                        if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3489                                continue;
3490                /* for ipv4 data to be valid, only first 32-bit word of mask is set */
3491                if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3492                        if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3493                                   sizeof(cfilter.ip.v4.dst_ip)))
3494                                continue;
3495                /* for ipv6, mask is set for all sixteen bytes (4 words) */
3496                if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3497                        if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3498                                   sizeof(cfilter.ip.v6.dst_ip6)))
3499                                continue;
3500                if (mask.vlan_id)
3501                        if (cfilter.vlan_id != cf->vlan_id)
3502                                continue;
3503
3504                hlist_del(&cf->cloud_node);
3505                kfree(cf);
3506                vf->num_cloud_filters--;
3507        }
3508
3509err:
3510        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3511                                       aq_ret);
3512}
3513
3514/**
3515 * i40e_vc_add_cloud_filter
3516 * @vf: pointer to the VF info
3517 * @msg: pointer to the msg buffer
3518 *
3519 * This function adds a cloud filter programmed as TC filter for ADq
3520 **/
3521static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3522{
3523        struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3524        struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3525        struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3526        struct i40e_cloud_filter *cfilter = NULL;
3527        struct i40e_pf *pf = vf->pf;
3528        struct i40e_vsi *vsi = NULL;
3529        i40e_status aq_ret = 0;
3530        int i, ret;
3531
3532        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3533                aq_ret = I40E_ERR_PARAM;
3534                goto err_out;
3535        }
3536
3537        if (!vf->adq_enabled) {
3538                dev_info(&pf->pdev->dev,
3539                         "VF %d: ADq is not enabled, can't apply cloud filter\n",
3540                         vf->vf_id);
3541                aq_ret = I40E_ERR_PARAM;
3542                goto err_out;
3543        }
3544
3545        if (i40e_validate_cloud_filter(vf, vcf)) {
3546                dev_info(&pf->pdev->dev,
3547                         "VF %d: Invalid input/s, can't apply cloud filter\n",
3548                         vf->vf_id);
3549                aq_ret = I40E_ERR_PARAM;
3550                goto err_out;
3551        }
3552
3553        cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3554        if (!cfilter)
3555                return -ENOMEM;
3556
3557        /* parse destination mac address */
3558        for (i = 0; i < ETH_ALEN; i++)
3559                cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3560
3561        /* parse source mac address */
3562        for (i = 0; i < ETH_ALEN; i++)
3563                cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3564
3565        cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3566        cfilter->dst_port = mask.dst_port & tcf.dst_port;
3567        cfilter->src_port = mask.src_port & tcf.src_port;
3568
3569        switch (vcf->flow_type) {
3570        case VIRTCHNL_TCP_V4_FLOW:
3571                cfilter->n_proto = ETH_P_IP;
3572                if (mask.dst_ip[0] & tcf.dst_ip[0])
3573                        memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3574                               sizeof(cfilter->ip.v4.dst_ip));
3575                else if (mask.src_ip[0] & tcf.src_ip[0])
3576                        memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3577                               sizeof(cfilter->ip.v4.src_ip));
3578                break;
3579        case VIRTCHNL_TCP_V6_FLOW:
3580                cfilter->n_proto = ETH_P_IPV6;
3581                if (mask.dst_ip[3] & tcf.dst_ip[3])
3582                        memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3583                               sizeof(cfilter->ip.v6.dst_ip6));
3584                if (mask.src_ip[3] & tcf.src_ip[3])
3585                        memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3586                               sizeof(cfilter->ip.v6.src_ip6));
3587                break;
3588        default:
3589                /* TC filter can be configured based on different combinations
3590                 * and in this case IP is not a part of filter config
3591                 */
3592                dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3593                         vf->vf_id);
3594        }
3595
3596        /* get the VSI to which the TC belongs */
3597        vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3598        cfilter->seid = vsi->seid;
3599        cfilter->flags = vcf->field_flags;
3600
3601        /* Adding cloud filter programmed as TC filter */
3602        if (tcf.dst_port)
3603                ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3604        else
3605                ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3606        if (ret) {
3607                dev_err(&pf->pdev->dev,
3608                        "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3609                        vf->vf_id, i40e_stat_str(&pf->hw, ret),
3610                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3611                goto err_free;
3612        }
3613
3614        INIT_HLIST_NODE(&cfilter->cloud_node);
3615        hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3616        /* ownership passed to the list; don't free the filter in err_free */
3617        cfilter = NULL;
3618        vf->num_cloud_filters++;
3619err_free:
3620        kfree(cfilter);
3621err_out:
3622        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3623                                       aq_ret);
3624}
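
/* Note: in both the add and del paths above, every filter field is derived
 * as (mask & data), so only bytes the VF set in the mask survive. With
 * mask.dst_port = 0xffff and data.dst_port = htons(443), the programmed
 * dst_port is htons(443); with a zero mask the field becomes 0, i.e.
 * "don't care".
 */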
3625
3626/**
3627 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3628 * @vf: pointer to the VF info
3629 * @msg: pointer to the msg buffer
3630 **/
3631static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3632{
3633        struct virtchnl_tc_info *tci =
3634                (struct virtchnl_tc_info *)msg;
3635        struct i40e_pf *pf = vf->pf;
3636        struct i40e_link_status *ls = &pf->hw.phy.link_info;
3637        int i, adq_request_qps = 0;
3638        i40e_status aq_ret = 0;
3639        u64 speed = 0;
3640
3641        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3642                aq_ret = I40E_ERR_PARAM;
3643                goto err;
3644        }
3645
3646        /* ADq cannot be applied if spoof check is ON */
3647        if (vf->spoofchk) {
3648                dev_err(&pf->pdev->dev,
3649                        "Spoof check is ON, turn it OFF to enable ADq\n");
3650                aq_ret = I40E_ERR_PARAM;
3651                goto err;
3652        }
3653
3654        if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3655                dev_err(&pf->pdev->dev,
3656                        "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3657                        vf->vf_id);
3658                aq_ret = I40E_ERR_PARAM;
3659                goto err;
3660        }
3661
3662        /* max number of traffic classes for VF currently capped at 4 */
3663        if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3664                dev_err(&pf->pdev->dev,
3665                        "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3666                        vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3667                aq_ret = I40E_ERR_PARAM;
3668                goto err;
3669        }
3670
3671        /* validate queues for each TC */
3672        for (i = 0; i < tci->num_tc; i++)
3673                if (!tci->list[i].count ||
3674                    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3675                        dev_err(&pf->pdev->dev,
3676                                "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3677                                vf->vf_id, i, tci->list[i].count,
3678                                I40E_DEFAULT_QUEUES_PER_VF);
3679                        aq_ret = I40E_ERR_PARAM;
3680                        goto err;
3681                }
3682
3683        /* need max VF queues, but the default number is already allocated */
3684        adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3685
3686        if (pf->queues_left < adq_request_qps) {
3687                dev_err(&pf->pdev->dev,
3688                        "No queues left to allocate to VF %d\n",
3689                        vf->vf_id);
3690                aq_ret = I40E_ERR_PARAM;
3691                goto err;
3692        } else {
3693                /* we need to allocate max VF queues to enable ADq so as to
3694                 * make sure ADq enabled VF always gets back queues when it
3695                 * goes through a reset.
3696                 */
3697                vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3698        }
3699
3700        /* get link speed in Mbps to validate the rate limit */
3701        switch (ls->link_speed) {
3702        case VIRTCHNL_LINK_SPEED_100MB:
3703                speed = SPEED_100;
3704                break;
3705        case VIRTCHNL_LINK_SPEED_1GB:
3706                speed = SPEED_1000;
3707                break;
3708        case VIRTCHNL_LINK_SPEED_10GB:
3709                speed = SPEED_10000;
3710                break;
3711        case VIRTCHNL_LINK_SPEED_20GB:
3712                speed = SPEED_20000;
3713                break;
3714        case VIRTCHNL_LINK_SPEED_25GB:
3715                speed = SPEED_25000;
3716                break;
3717        case VIRTCHNL_LINK_SPEED_40GB:
3718                speed = SPEED_40000;
3719                break;
3720        default:
3721                dev_err(&pf->pdev->dev,
3722                        "Cannot detect link speed\n");
3723                aq_ret = I40E_ERR_PARAM;
3724                goto err;
3725        }
3726
3727        /* parse data from the queue channel info */
3728        vf->num_tc = tci->num_tc;
3729        for (i = 0; i < vf->num_tc; i++) {
3730                if (tci->list[i].max_tx_rate) {
3731                        if (tci->list[i].max_tx_rate > speed) {
3732                                dev_err(&pf->pdev->dev,
3733                                        "Invalid max tx rate %llu specified for VF %d.\n",
3734                                        tci->list[i].max_tx_rate,
3735                                        vf->vf_id);
3736                                aq_ret = I40E_ERR_PARAM;
3737                                goto err;
3738                        } else {
3739                                vf->ch[i].max_tx_rate =
3740                                        tci->list[i].max_tx_rate;
3741                        }
3742                }
3743                vf->ch[i].num_qps = tci->list[i].count;
3744        }
3745
3746        /* set this flag only after making sure all inputs are sane */
3747        vf->adq_enabled = true;
3748        /* num_req_queues is set when the user changes the number of queues
3749         * via ethtool, which causes an issue for the default VSI (which
3750         * depends on this variable) when ADq is enabled; hence reset it.
3751         */
3752        vf->num_req_queues = 0;
3753
3754        /* reset the VF in order to allocate resources */
3755        i40e_vc_notify_vf_reset(vf);
3756        i40e_reset_vf(vf, false);
3757
3758        return I40E_SUCCESS;
3759
3760        /* send the response to the VF */
3761err:
3762        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3763                                       aq_ret);
3764}
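
/* Example (illustrative sketch, not part of this driver): a
 * VIRTCHNL_OP_ENABLE_CHANNELS request accepted by the handler above - two
 * TCs, each within 1..I40E_DEFAULT_QUEUES_PER_VF queues, with an optional
 * per-TC rate cap that must not exceed the link speed in Mbps. The helper
 * name and the assumption that list[] holds num_tc entries are
 * illustrative only.
 */
#if 0
static void example_fill_tc_info(struct virtchnl_tc_info *tci)
{
        tci->num_tc = 2;                /* 1..I40E_MAX_VF_VSI */
        tci->list[0].count = 2;         /* queue pairs for TC 0 */
        tci->list[0].max_tx_rate = 0;   /* 0 = no rate limit */
        tci->list[1].count = 2;
        tci->list[1].max_tx_rate = 100; /* in Mbps, <= link speed */
}
#endif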
3765
3766/**
3767 * i40e_vc_del_qch_msg
3768 * @vf: pointer to the VF info
3769 * @msg: pointer to the msg buffer
3770 **/
3771static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3772{
3773        struct i40e_pf *pf = vf->pf;
3774        i40e_status aq_ret = 0;
3775
3776        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3777                aq_ret = I40E_ERR_PARAM;
3778                goto err;
3779        }
3780
3781        if (vf->adq_enabled) {
3782                i40e_del_all_cloud_filters(vf);
3783                i40e_del_qch(vf);
3784                vf->adq_enabled = false;
3785                vf->num_tc = 0;
3786                dev_info(&pf->pdev->dev,
3787                         "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3788                         vf->vf_id);
3789        } else {
3790                dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3791                         vf->vf_id);
3792                aq_ret = I40E_ERR_PARAM;
3793        }
3794
3795        /* reset the VF in order to allocate resources */
3796        i40e_vc_notify_vf_reset(vf);
3797        i40e_reset_vf(vf, false);
3798
3799        return I40E_SUCCESS;
3800
3801err:
3802        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3803                                       aq_ret);
3804}
3805
3806/**
3807 * i40e_vc_process_vf_msg
3808 * @pf: pointer to the PF structure
3809 * @vf_id: source VF id
3810 * @v_opcode: operation code
3811 * @v_retval: unused return value code
3812 * @msg: pointer to the msg buffer
3813 * @msglen: msg length
3814 *
3815 * called from the common aeq/arq handler to
3816 * process request from VF
3817 **/
3818int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3819                           u32 __always_unused v_retval, u8 *msg, u16 msglen)
3820{
3821        struct i40e_hw *hw = &pf->hw;
3822        int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3823        struct i40e_vf *vf;
3824        int ret;
3825
3826        pf->vf_aq_requests++;
3827        if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
3828                return -EINVAL;
3829        vf = &(pf->vf[local_vf_id]);
3830
3831        /* Check if VF is disabled. */
3832        if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3833                return I40E_ERR_PARAM;
3834
3835        /* perform basic checks on the msg */
3836        ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3837
3838        if (ret) {
3839                i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3840                dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3841                        local_vf_id, v_opcode, msglen);
3842                switch (ret) {
3843                case VIRTCHNL_STATUS_ERR_PARAM:
3844                        return -EPERM;
3845                default:
3846                        return -EINVAL;
3847                }
3848        }
3849
3850        switch (v_opcode) {
3851        case VIRTCHNL_OP_VERSION:
3852                ret = i40e_vc_get_version_msg(vf, msg);
3853                break;
3854        case VIRTCHNL_OP_GET_VF_RESOURCES:
3855                ret = i40e_vc_get_vf_resources_msg(vf, msg);
3856                i40e_vc_notify_vf_link_state(vf);
3857                break;
3858        case VIRTCHNL_OP_RESET_VF:
3859                i40e_vc_reset_vf_msg(vf);
3860                ret = 0;
3861                break;
3862        case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3863                ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
3864                break;
3865        case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3866                ret = i40e_vc_config_queues_msg(vf, msg);
3867                break;
3868        case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3869                ret = i40e_vc_config_irq_map_msg(vf, msg);
3870                break;
3871        case VIRTCHNL_OP_ENABLE_QUEUES:
3872                ret = i40e_vc_enable_queues_msg(vf, msg);
3873                i40e_vc_notify_vf_link_state(vf);
3874                break;
3875        case VIRTCHNL_OP_DISABLE_QUEUES:
3876                ret = i40e_vc_disable_queues_msg(vf, msg);
3877                break;
3878        case VIRTCHNL_OP_ADD_ETH_ADDR:
3879                ret = i40e_vc_add_mac_addr_msg(vf, msg);
3880                break;
3881        case VIRTCHNL_OP_DEL_ETH_ADDR:
3882                ret = i40e_vc_del_mac_addr_msg(vf, msg);
3883                break;
3884        case VIRTCHNL_OP_ADD_VLAN:
3885                ret = i40e_vc_add_vlan_msg(vf, msg);
3886                break;
3887        case VIRTCHNL_OP_DEL_VLAN:
3888                ret = i40e_vc_remove_vlan_msg(vf, msg);
3889                break;
3890        case VIRTCHNL_OP_GET_STATS:
3891                ret = i40e_vc_get_stats_msg(vf, msg);
3892                break;
3893        case VIRTCHNL_OP_IWARP:
3894                ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3895                break;
3896        case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3897                ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
3898                break;
3899        case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3900                ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
3901                break;
3902        case VIRTCHNL_OP_CONFIG_RSS_KEY:
3903                ret = i40e_vc_config_rss_key(vf, msg);
3904                break;
3905        case VIRTCHNL_OP_CONFIG_RSS_LUT:
3906                ret = i40e_vc_config_rss_lut(vf, msg);
3907                break;
3908        case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3909                ret = i40e_vc_get_rss_hena(vf, msg);
3910                break;
3911        case VIRTCHNL_OP_SET_RSS_HENA:
3912                ret = i40e_vc_set_rss_hena(vf, msg);
3913                break;
3914        case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3915                ret = i40e_vc_enable_vlan_stripping(vf, msg);
3916                break;
3917        case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3918                ret = i40e_vc_disable_vlan_stripping(vf, msg);
3919                break;
3920        case VIRTCHNL_OP_REQUEST_QUEUES:
3921                ret = i40e_vc_request_queues_msg(vf, msg);
3922                break;
3923        case VIRTCHNL_OP_ENABLE_CHANNELS:
3924                ret = i40e_vc_add_qch_msg(vf, msg);
3925                break;
3926        case VIRTCHNL_OP_DISABLE_CHANNELS:
3927                ret = i40e_vc_del_qch_msg(vf, msg);
3928                break;
3929        case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3930                ret = i40e_vc_add_cloud_filter(vf, msg);
3931                break;
3932        case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3933                ret = i40e_vc_del_cloud_filter(vf, msg);
3934                break;
3935        case VIRTCHNL_OP_UNKNOWN:
3936        default:
3937                dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3938                        v_opcode, local_vf_id);
3939                ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3940                                              I40E_ERR_NOT_IMPLEMENTED);
3941                break;
3942        }
3943
3944        return ret;
3945}
3946
3947/**
3948 * i40e_vc_process_vflr_event
3949 * @pf: pointer to the PF structure
3950 *
3951 * called from the VFLR irq handler to
3952 * free up VF resources and state variables
3953 **/
3954int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3955{
3956        struct i40e_hw *hw = &pf->hw;
3957        u32 reg, reg_idx, bit_idx;
3958        struct i40e_vf *vf;
3959        int vf_id;
3960
3961        if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3962                return 0;
3963
3964        /* Re-enable the VFLR interrupt cause here, before looking for which
3965         * VF got reset. Otherwise, if another VF gets a reset while the
3966         * first one is being processed, that interrupt will be lost, and
3967         * that VF will be stuck in reset forever.
3968         */
3969        reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3970        reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3971        wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3972        i40e_flush(hw);
3973
3974        clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3975        for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
3976                reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3977                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
3978                /* read GLGEN_VFLRSTAT register to find out the flr VFs */
3979                vf = &pf->vf[vf_id];
3980                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3981                if (reg & BIT(bit_idx))
3982                        /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
3983                        i40e_reset_vf(vf, true);
3984        }
3985
3986        return 0;
3987}
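
/* Note: a worked example of the GLGEN_VFLRSTAT lookup above, with assumed
 * values. If hw->func_caps.vf_base_id is 64 and vf_id is 5, the absolute
 * VF id is 69, so reg_idx = 69 / 32 = 2 and bit_idx = 69 % 32 = 5: the
 * VFLR status for this VF is bit 5 of GLGEN_VFLRSTAT(2).
 */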
3988
3989/**
3990 * i40e_validate_vf
3991 * @pf: the physical function
3992 * @vf_id: VF identifier
3993 *
3994 * Check that the VF is enabled and the VSI exists.
3995 *
3996 * Returns 0 on success, negative on failure
3997 **/
3998static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
3999{
4000        struct i40e_vsi *vsi;
4001        struct i40e_vf *vf;
4002        int ret = 0;
4003
4004        if (vf_id >= pf->num_alloc_vfs) {
4005                dev_err(&pf->pdev->dev,
4006                        "Invalid VF Identifier %d\n", vf_id);
4007                ret = -EINVAL;
4008                goto err_out;
4009        }
4010        vf = &pf->vf[vf_id];
4011        vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4012        if (!vsi)
4013                ret = -EINVAL;
4014err_out:
4015        return ret;
4016}
4017
4018/**
4019 * i40e_ndo_set_vf_mac
4020 * @netdev: network interface device structure
4021 * @vf_id: VF identifier
4022 * @mac: mac address
4023 *
4024 * program VF mac address
4025 **/
4026int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4027{
4028        struct i40e_netdev_priv *np = netdev_priv(netdev);
4029        struct i40e_vsi *vsi = np->vsi;
4030        struct i40e_pf *pf = vsi->back;
4031        struct i40e_mac_filter *f;
4032        struct i40e_vf *vf;
4033        int ret = 0;
4034        struct hlist_node *h;
4035        int bkt;
4036        u8 i;
4037
4038        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4039                dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4040                return -EAGAIN;
4041        }
4042
4043        /* validate the request */
4044        ret = i40e_validate_vf(pf, vf_id);
4045        if (ret)
4046                goto error_param;
4047
4048        vf = &pf->vf[vf_id];
4049        vsi = pf->vsi[vf->lan_vsi_idx];
4050
4051        /* When the VF is resetting, wait until it is done.
4052         * A reset can take up to 200 milliseconds,
4053         * but wait for up to 300 milliseconds to be safe.
4054         * If the VF was indeed in reset, the vsi pointer must be
4055         * refreshed to the newly loaded vsi under pf->vsi[id].
4056         */
4057        for (i = 0; i < 15; i++) {
4058                if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4059                        if (i > 0)
4060                                vsi = pf->vsi[vf->lan_vsi_idx];
4061                        break;
4062                }
4063                msleep(20);
4064        }
4065        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4066                dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4067                        vf_id);
4068                ret = -EAGAIN;
4069                goto error_param;
4070        }
4071
4072        if (is_multicast_ether_addr(mac)) {
4073                dev_err(&pf->pdev->dev,
4074                        "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4075                ret = -EINVAL;
4076                goto error_param;
4077        }
4078
4079        /* Lock once here because the add/del_filter functions invoked
4080         * below require mac_filter_hash_lock to be held
4081         */
4082        spin_lock_bh(&vsi->mac_filter_hash_lock);
4083
4084        /* delete the temporary mac address */
4085        if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4086                i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4087
4088        /* Delete all the filters for this VSI - we're going to kill it
4089         * anyway.
4090         */
4091        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4092                __i40e_del_filter(vsi, f);
4093
4094        spin_unlock_bh(&vsi->mac_filter_hash_lock);
4095
4096        /* program mac filter */
4097        if (i40e_sync_vsi_filters(vsi)) {
4098                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4099                ret = -EIO;
4100                goto error_param;
4101        }
4102        ether_addr_copy(vf->default_lan_addr.addr, mac);
4103
4104        if (is_zero_ether_addr(mac)) {
4105                vf->pf_set_mac = false;
4106                dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4107        } else {
4108                vf->pf_set_mac = true;
4109                dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4110                         mac, vf_id);
4111        }
4112
4113        /* Force the VF interface down so it has to bring up with new MAC
4114         * address
4115         */
4116        i40e_vc_disable_vf(vf);
4117        dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4118
4119error_param:
4120        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4121        return ret;
4122}
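
/* Note: this ndo hook is typically reached from userspace via iproute2,
 * e.g. (interface name and VF number are examples only):
 *
 *   ip link set dev eth0 vf 3 mac 02:01:02:03:04:05
 *
 * Setting an all-zero MAC clears pf_set_mac, allowing the VF to choose
 * its own address again.
 */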
4123
4124/**
4125 * i40e_vsi_has_vlans - True if VSI has configured VLANs
4126 * @vsi: pointer to the vsi
4127 *
4128 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
4129 * we have no configured VLANs. Do not call while holding the
4130 * mac_filter_hash_lock.
4131 */
4132static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
4133{
4134        bool have_vlans;
4135
4136        /* If we have a port VLAN, then the VSI cannot have any VLANs
4137         * configured, as all MAC/VLAN filters will be assigned to the PVID.
4138         */
4139        if (vsi->info.pvid)
4140                return false;
4141
4142        /* Since we don't have a PVID, we know that if the device is in VLAN
4143         * mode it must be because of a VLAN filter configured on this VSI.
4144         */
4145        spin_lock_bh(&vsi->mac_filter_hash_lock);
4146        have_vlans = i40e_is_vsi_in_vlan(vsi);
4147        spin_unlock_bh(&vsi->mac_filter_hash_lock);
4148
4149        return have_vlans;
4150}
4151
4152/**
4153 * i40e_ndo_set_vf_port_vlan
4154 * @netdev: network interface device structure
4155 * @vf_id: VF identifier
4156 * @vlan_id: VLAN identifier
4157 * @qos: priority setting
4158 * @vlan_proto: vlan protocol
4159 *
4160 * program VF vlan id and/or qos
4161 **/
4162int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4163                              u16 vlan_id, u8 qos, __be16 vlan_proto)
4164{
4165        u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4166        struct i40e_netdev_priv *np = netdev_priv(netdev);
4167        bool allmulti = false, alluni = false;
4168        struct i40e_pf *pf = np->vsi->back;
4169        struct i40e_vsi *vsi;
4170        struct i40e_vf *vf;
4171        int ret = 0;
4172
4173        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4174                dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4175                return -EAGAIN;
4176        }
4177
4178        /* validate the request */
4179        ret = i40e_validate_vf(pf, vf_id);
4180        if (ret)
4181                goto error_pvid;
4182
4183        if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4184                dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4185                ret = -EINVAL;
4186                goto error_pvid;
4187        }
4188
4189        if (vlan_proto != htons(ETH_P_8021Q)) {
4190                dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4191                ret = -EPROTONOSUPPORT;
4192                goto error_pvid;
4193        }
4194
4195        vf = &pf->vf[vf_id];
4196        vsi = pf->vsi[vf->lan_vsi_idx];
4197        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4198                dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4199                        vf_id);
4200                ret = -EAGAIN;
4201                goto error_pvid;
4202        }
4203
4204        if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4205                /* duplicate request, so just return success */
4206                goto error_pvid;
4207
4208        if (i40e_vsi_has_vlans(vsi)) {
4209                dev_err(&pf->pdev->dev,
4210                        "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
4211                        vf_id);
4212                /* Administrator error - take the VF offline until the
4213                 * network has been reconfigured correctly and the VF
4214                 * driver has been reloaded.
4215                 */
4216                i40e_vc_disable_vf(vf);
4217                /* During reset the VF got a new VSI, so refresh the pointer. */
4218                vsi = pf->vsi[vf->lan_vsi_idx];
4219        }
4220
4221        /* Locked once because multiple functions below iterate list */
4222        spin_lock_bh(&vsi->mac_filter_hash_lock);
4223
4224        /* Check for condition where there was already a port VLAN ID
4225         * filter set and now it is being deleted by setting it to zero.
4226         * Additionally check for the condition where there was a port
4227         * VLAN but now there is a new and different port VLAN being set.
4228         * Before deleting all the old VLAN filters we must add new ones
4229         * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4230         * MAC addresses deleted.
4231         */
4232        if ((!(vlan_id || qos) ||
4233            vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4234            vsi->info.pvid) {
4235                ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4236                if (ret) {
4237                        dev_info(&vsi->back->pdev->dev,
4238                                 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4239                                 vsi->back->hw.aq.asq_last_status);
4240                        spin_unlock_bh(&vsi->mac_filter_hash_lock);
4241                        goto error_pvid;
4242                }
4243        }
4244
4245        if (vsi->info.pvid) {
4246                /* remove all filters on the old VLAN */
4247                i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4248                                           VLAN_VID_MASK));
4249        }
4250
4251        spin_unlock_bh(&vsi->mac_filter_hash_lock);
4252
4253        /* disable promisc modes in case they were enabled */
4254        ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4255                                              allmulti, alluni);
4256        if (ret) {
4257                dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4258                goto error_pvid;
4259        }
4260
4261        if (vlan_id || qos)
4262                ret = i40e_vsi_add_pvid(vsi, vlanprio);
4263        else
4264                i40e_vsi_remove_pvid(vsi);
4265        spin_lock_bh(&vsi->mac_filter_hash_lock);
4266
4267        if (vlan_id) {
4268                dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4269                         vlan_id, qos, vf_id);
4270
4271                /* add new VLAN filter for each MAC */
4272                ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4273                if (ret) {
4274                        dev_info(&vsi->back->pdev->dev,
4275                                 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4276                                 vsi->back->hw.aq.asq_last_status);
4277                        spin_unlock_bh(&vsi->mac_filter_hash_lock);
4278                        goto error_pvid;
4279                }
4280
4281                /* remove the previously added non-VLAN MAC filters */
4282                i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4283        }
4284
4285        spin_unlock_bh(&vsi->mac_filter_hash_lock);
4286
4287        if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4288                alluni = true;
4289
4290        if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4291                allmulti = true;
4292
4293        /* Schedule the worker thread to take care of applying changes */
4294        i40e_service_event_schedule(vsi->back);
4295
4296        if (ret) {
4297                dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4298                goto error_pvid;
4299        }
4300
4301        /* The port VLAN needs to be saved across resets, the same as the
4302         * default LAN MAC address.
4303         */
4304        vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4305
4306        ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4307        if (ret) {
4308                dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4309                goto error_pvid;
4310        }
4311
4312        ret = 0;
4313
4314error_pvid:
4315        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4316        return ret;
4317}
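
    /* Illustrative sketch (hypothetical helper, not part of this driver):
     * how the VLAN ID and QoS priority set above combine into the 16-bit
     * pvid value, assuming the standard 802.1Q tag layout from
     * <linux/if_vlan.h> (VLAN_PRIO_SHIFT == 13). This hook is reached via,
     * e.g., "ip link set <pf> vf <n> vlan <id> qos <prio>".
     */
    static inline u16 example_pack_pvid(u16 vlan_id, u8 qos)
    {
            /* low 12 bits carry the VLAN ID, top 3 bits the priority */
            return (vlan_id & VLAN_VID_MASK) | ((u16)qos << VLAN_PRIO_SHIFT);
    }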
4318
4319/**
4320 * i40e_ndo_set_vf_bw
4321 * @netdev: network interface device structure
4322 * @vf_id: VF identifier
4323 * @min_tx_rate: Minimum Tx rate
4324 * @max_tx_rate: Maximum Tx rate
4325 *
4326 * configure VF Tx rate
4327 **/
4328int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4329                       int max_tx_rate)
4330{
4331        struct i40e_netdev_priv *np = netdev_priv(netdev);
4332        struct i40e_pf *pf = np->vsi->back;
4333        struct i40e_vsi *vsi;
4334        struct i40e_vf *vf;
4335        int ret = 0;
4336
4337        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4338                dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4339                return -EAGAIN;
4340        }
4341
4342        /* validate the request */
4343        ret = i40e_validate_vf(pf, vf_id);
4344        if (ret)
4345                goto error;
4346
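            /* This driver offers no minimum-bandwidth guarantee, so any
             * nonzero min_tx_rate request is rejected up front.
             */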
4347        if (min_tx_rate) {
4348                dev_err(&pf->pdev->dev, "Invalid min Tx rate (%d) for VF %d; the minimum Tx rate must be 0.\n",
4349                        min_tx_rate, vf_id);
4350                ret = -EINVAL;
4351                goto error;
4352        }
4353
4354        vf = &pf->vf[vf_id];
4355        vsi = pf->vsi[vf->lan_vsi_idx];
4356        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4357                dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4358                        vf_id);
4359                ret = -EAGAIN;
4360                goto error;
4361        }
4362
4363        ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4364        if (ret)
4365                goto error;
4366
4367        vf->tx_rate = max_tx_rate;
4368error:
4369        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4370        return ret;
4371}
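
    /* For reference: this hook is wired to .ndo_set_vf_rate and is reached
     * from userspace via, e.g., "ip link set <pf> vf <n> max_tx_rate <rate>";
     * rates are in Mbps, and min_tx_rate must be 0 on this driver.
     */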
4372
4373/**
4374 * i40e_ndo_get_vf_config
4375 * @netdev: network interface device structure
4376 * @vf_id: VF identifier
4377 * @ivi: VF configuration structure
4378 *
4379 * return VF configuration
4380 **/
4381int i40e_ndo_get_vf_config(struct net_device *netdev,
4382                           int vf_id, struct ifla_vf_info *ivi)
4383{
4384        struct i40e_netdev_priv *np = netdev_priv(netdev);
4385        struct i40e_vsi *vsi = np->vsi;
4386        struct i40e_pf *pf = vsi->back;
4387        struct i40e_vf *vf;
4388        int ret = 0;
4389
4390        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4391                dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4392                return -EAGAIN;
4393        }
4394
4395        /* validate the request */
4396        ret = i40e_validate_vf(pf, vf_id);
4397        if (ret)
4398                goto error_param;
4399
4400        vf = &pf->vf[vf_id];
4401        /* the first VSI is always the LAN VSI */
4402        vsi = pf->vsi[vf->lan_vsi_idx];
4403        if (!vsi) {
4404                ret = -ENOENT;
4405                goto error_param;
4406        }
4407
4408        ivi->vf = vf_id;
4409
4410        ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4411
4412        ivi->max_tx_rate = vf->tx_rate;
4413        ivi->min_tx_rate = 0;
4414        ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4415        ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4416                   I40E_VLAN_PRIORITY_SHIFT;
4417        if (!vf->link_forced)
4418                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4419        else if (vf->link_up)
4420                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4421        else
4422                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4423        ivi->spoofchk = vf->spoofchk;
4424        ivi->trusted = vf->trusted;
4425        ret = 0;
4426
4427error_param:
4428        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4429        return ret;
4430}
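
    /* Companion sketch (hypothetical helper): recover the VLAN ID and QoS
     * from a pvid value, mirroring the I40E_VLAN_MASK / I40E_PRIORITY_MASK
     * extraction in i40e_ndo_get_vf_config() above.
     */
    static inline void example_unpack_pvid(u16 pvid, u16 *vlan_id, u8 *qos)
    {
            *vlan_id = pvid & VLAN_VID_MASK;        /* bits 11:0 */
            *qos = (pvid >> VLAN_PRIO_SHIFT) & 0x7; /* bits 15:13 */
    }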
4431
4432/**
4433 * i40e_ndo_set_vf_link_state
4434 * @netdev: network interface device structure
4435 * @vf_id: VF identifier
4436 * @link: required link state
4437 *
4438 * Set the link state of a specified VF, regardless of physical link state
4439 **/
4440int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4441{
4442        struct i40e_netdev_priv *np = netdev_priv(netdev);
4443        struct i40e_pf *pf = np->vsi->back;
4444        struct virtchnl_pf_event pfe;
4445        struct i40e_hw *hw = &pf->hw;
4446        struct i40e_vf *vf;
4447        int abs_vf_id;
4448        int ret = 0;
4449
4450        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4451                dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4452                return -EAGAIN;
4453        }
4454
4455        /* validate the request */
4456        if (vf_id >= pf->num_alloc_vfs) {
4457                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4458                ret = -EINVAL;
4459                goto error_out;
4460        }
4461
4462        vf = &pf->vf[vf_id];
4463        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4464
4465        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4466        pfe.severity = PF_EVENT_SEVERITY_INFO;
4467
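            /* Note: forcing the link up always reports 40GB to the VF
             * regardless of the PF's actual PHY speed; only the AUTO case
             * reflects the real link status and speed.
             */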
4468        switch (link) {
4469        case IFLA_VF_LINK_STATE_AUTO:
4470                vf->link_forced = false;
4471                pfe.event_data.link_event.link_status =
4472                        pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
4473                pfe.event_data.link_event.link_speed =
4474                        (enum virtchnl_link_speed)
4475                        pf->hw.phy.link_info.link_speed;
4476                break;
4477        case IFLA_VF_LINK_STATE_ENABLE:
4478                vf->link_forced = true;
4479                vf->link_up = true;
4480                pfe.event_data.link_event.link_status = true;
4481                pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
4482                break;
4483        case IFLA_VF_LINK_STATE_DISABLE:
4484                vf->link_forced = true;
4485                vf->link_up = false;
4486                pfe.event_data.link_event.link_status = false;
4487                pfe.event_data.link_event.link_speed = 0;
4488                break;
4489        default:
4490                ret = -EINVAL;
4491                goto error_out;
4492        }
4493        /* Notify the VF of its new link state */
4494        i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4495                               0, (u8 *)&pfe, sizeof(pfe), NULL);
4496
4497error_out:
4498        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4499        return ret;
4500}
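
    /* For reference: this hook is wired to .ndo_set_vf_link_state and is
     * reached via "ip link set <pf> vf <n> state auto|enable|disable".
     */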
4501
4502/**
4503 * i40e_ndo_set_vf_spoofchk
4504 * @netdev: network interface device structure
4505 * @vf_id: VF identifier
4506 * @enable: flag to enable or disable feature
4507 *
4508 * Enable or disable VF spoof checking
4509 **/
4510int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4511{
4512        struct i40e_netdev_priv *np = netdev_priv(netdev);
4513        struct i40e_vsi *vsi = np->vsi;
4514        struct i40e_pf *pf = vsi->back;
4515        struct i40e_vsi_context ctxt;
4516        struct i40e_hw *hw = &pf->hw;
4517        struct i40e_vf *vf;
4518        int ret = 0;
4519
4520        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4521                dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4522                return -EAGAIN;
4523        }
4524
4525        /* validate the request */
4526        if (vf_id >= pf->num_alloc_vfs) {
4527                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4528                ret = -EINVAL;
4529                goto out;
4530        }
4531
4532        vf = &pf->vf[vf_id];
4533        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4534                dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4535                        vf_id);
4536                ret = -EAGAIN;
4537                goto out;
4538        }
4539
4540        if (enable == vf->spoofchk)
4541                goto out;
4542
4543        vf->spoofchk = enable;
4544        memset(&ctxt, 0, sizeof(ctxt));
4545        ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4546        ctxt.pf_num = pf->hw.pf_id;
4547        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
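            /* When disabling, sec_flags stays zeroed from the memset above,
             * which clears both the MAC and VLAN anti-spoof check bits.
             */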
4548        if (enable)
4549                ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4550                                        I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4551        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4552        if (ret) {
4553                dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4554                        ret);
4555                ret = -EIO;
4556        }
4557out:
4558        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4559        return ret;
4560}
4561
4562/**
4563 * i40e_ndo_set_vf_trust
4564 * @netdev: network interface device structure of the pf
4565 * @vf_id: VF identifier
4566 * @setting: trust setting
4567 *
4568 * Enable or disable VF trust setting
4569 **/
4570int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4571{
4572        struct i40e_netdev_priv *np = netdev_priv(netdev);
4573        struct i40e_pf *pf = np->vsi->back;
4574        struct i40e_vf *vf;
4575        int ret = 0;
4576
4577        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4578                dev_warn(&pf->pdev->dev, "Unable to configure VFs, another operation is pending.\n");
4579                return -EAGAIN;
4580        }
4581
4582        /* validate the request */
4583        if (vf_id >= pf->num_alloc_vfs) {
4584                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4585                ret = -EINVAL;
4586                goto out;
4587        }
4588
4589        if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4590                dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4591                ret = -EINVAL;
4592                goto out;
4593        }
4594
4595        vf = &pf->vf[vf_id];
4596
4597        if (setting == vf->trusted)
4598                goto out;
4599
4600        vf->trusted = setting;
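            /* Reset the VF so it renegotiates its resources and capabilities
             * under the new trust level.
             */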
4601        i40e_vc_disable_vf(vf);
4602        dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4603                 vf_id, setting ? "" : "un");
4604
4605        if (vf->adq_enabled) {
4606                if (!vf->trusted) {
4607                        dev_info(&pf->pdev->dev,
4608                                 "VF %u no longer trusted, deleting all cloud filters\n",
4609                                 vf_id);
4610                        i40e_del_all_cloud_filters(vf);
4611                }
4612        }
4613
4614out:
4615        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4616        return ret;
4617}
4618
4619/**
4620 * i40e_get_vf_stats - populate some stats for the VF
4621 * @netdev: the netdev of the PF
4622 * @vf_id: the host OS identifier (0-127)
4623 * @vf_stats: pointer to the OS memory to be initialized
4624 **/
4625int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4626                      struct ifla_vf_stats *vf_stats)
4627{
4628        struct i40e_netdev_priv *np = netdev_priv(netdev);
4629        struct i40e_pf *pf = np->vsi->back;
4630        struct i40e_eth_stats *stats;
4631        struct i40e_vsi *vsi;
4632        struct i40e_vf *vf;
4633
4634        /* validate the request */
4635        if (i40e_validate_vf(pf, vf_id))
4636                return -EINVAL;
4637
4638        vf = &pf->vf[vf_id];
4639        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4640                dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4641                return -EBUSY;
4642        }
4643
4644        vsi = pf->vsi[vf->lan_vsi_idx];
4645        if (!vsi)
4646                return -EINVAL;
4647
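            /* Refresh the VSI counters from hardware so the snapshot below
             * is current.
             */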
4648        i40e_update_eth_stats(vsi);
4649        stats = &vsi->eth_stats;
4650
4651        memset(vf_stats, 0, sizeof(*vf_stats));
4652
4653        vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4654                stats->rx_multicast;
4655        vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4656                stats->tx_multicast;
4657        vf_stats->rx_bytes   = stats->rx_bytes;
4658        vf_stats->tx_bytes   = stats->tx_bytes;
4659        vf_stats->broadcast  = stats->rx_broadcast;
4660        vf_stats->multicast  = stats->rx_multicast;
4661        vf_stats->rx_dropped = stats->rx_discards;
4662        vf_stats->tx_dropped = stats->tx_discards;
4663
4664        return 0;
4665}
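
    /* For reference: these per-VF counters are exposed to userspace via the
     * IFLA_VF_STATS netlink attribute (.ndo_get_vf_stats).
     */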
4666