linux/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
        /* vf_id range is only valid for 0-255, and should always be unsigned */
        if (vf_id >= pf->num_alloc_vfs) {
                dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
                return -EINVAL;
        }
        return 0;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
                dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
                        vf->vf_id);
                return -EBUSY;
        }
        return 0;
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @ice_err: error return code
 */
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
        switch (ice_err) {
        case ICE_SUCCESS:
                return VIRTCHNL_STATUS_SUCCESS;
        case ICE_ERR_BAD_PTR:
        case ICE_ERR_INVAL_SIZE:
        case ICE_ERR_DEVICE_NOT_SUPPORTED:
        case ICE_ERR_PARAM:
        case ICE_ERR_CFG:
                return VIRTCHNL_STATUS_ERR_PARAM;
        case ICE_ERR_NO_MEMORY:
                return VIRTCHNL_STATUS_ERR_NO_MEMORY;
        case ICE_ERR_NOT_READY:
        case ICE_ERR_RESET_FAILED:
        case ICE_ERR_FW_API_VER:
        case ICE_ERR_AQ_ERROR:
        case ICE_ERR_AQ_TIMEOUT:
        case ICE_ERR_AQ_FULL:
        case ICE_ERR_AQ_NO_WORK:
        case ICE_ERR_AQ_EMPTY:
                return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
        default:
                return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
        }
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
                    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
        struct ice_hw *hw = &pf->hw;
        unsigned int i;

        ice_for_each_vf(pf, i) {
                struct ice_vf *vf = &pf->vf[i];

                /* Not all VFs are enabled so skip the ones that are not */
                if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
                    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
                        continue;

                /* Ignore return value on purpose - a given VF may fail, but
                 * we need to keep going and send to all of them
                 */
                ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
                                      msglen, NULL);
        }
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
                 int ice_link_speed, bool link_up)
{
        if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
                pfe->event_data.link_event_adv.link_status = link_up;
                /* Speed in Mbps */
                pfe->event_data.link_event_adv.link_speed =
                        ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
        } else {
                pfe->event_data.link_event.link_status = link_up;
                /* Legacy method for virtchnl link speeds */
                pfe->event_data.link_event.link_speed =
                        (enum virtchnl_link_speed)
                        ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
        }
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
        return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
                !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;

        if (ice_check_vf_init(pf, vf))
                return false;

        if (ice_vf_has_no_qs_ena(vf))
                return false;
        else if (vf->link_forced)
                return vf->link_up;
        else
                return pf->hw.port_info->phy.link_info.link_info &
                        ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
        struct virtchnl_pf_event pfe = { 0 };
        struct ice_hw *hw = &vf->pf->hw;

        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = PF_EVENT_SEVERITY_INFO;

        if (ice_is_vf_link_up(vf))
                ice_set_pfe_link(vf, &pfe,
                                 hw->port_info->phy.link_info.link_speed, true);
        else
                ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

        ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
                              VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
                              sizeof(pfe), NULL);
}

/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 */
static void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
        vf->lan_vsi_idx = ICE_NO_VSI;
        vf->lan_vsi_num = ICE_NO_VSI;
}

/**
 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 * @vf: invalidate this VF's VSI after freeing it
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
        ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
        ice_vf_invalidate_vsi(vf);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        int i, last_vector_idx;

        /* First, disable VF's configuration API to prevent OS from
         * accessing the VF's VSI after it's freed or invalidated.
         */
        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

        /* free VSI and disconnect it from the parent uplink */
        if (vf->lan_vsi_idx != ICE_NO_VSI) {
                ice_vf_vsi_release(vf);
                vf->num_mac = 0;
        }

        last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;

        /* clear VF MDD event information */
        memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
        memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

        /* Disable interrupts so that VF starts in a known state */
        for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
                wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
                ice_flush(&pf->hw);
        }
        /* reset some of the state variables keeping track of the resources */
        clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
        clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable VF MSIX and queue mappings in hardware
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;
        struct device *dev;
        int first, last, v;
        struct ice_hw *hw;

        hw = &pf->hw;
        vsi = pf->vsi[vf->lan_vsi_idx];

        dev = ice_pf_to_dev(pf);
        wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
        wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

        first = vf->first_vector_idx;
        last = first + pf->num_msix_per_vf - 1;
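        /* Hand each of the VF's vectors back to the PF: setting the IS_PF
         * bit and the PF number in GLINT_VECT2FUNC removes the VF mapping.
         */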
        for (v = first; v <= last; v++) {
                u32 reg;

                reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
                        GLINT_VECT2FUNC_IS_PF_M) |
                       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
                        GLINT_VECT2FUNC_PF_NUM_M));
                wr32(hw, GLINT_VECT2FUNC(v), reg);
        }

        if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
                wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
        else
                dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

        if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
                wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
        else
                dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
        struct ice_res_tracker *res;

        if (!pf)
                return -EINVAL;

        res = pf->irq_tracker;
        if (!res)
                return -EINVAL;

        /* give back irq_tracker resources used */
        WARN_ON(pf->sriov_base_vector < res->num_entries);

        pf->sriov_base_vector = 0;

        return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
        /* Clear Rx/Tx enabled queues flag */
        bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
        bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
        clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;

        vsi = pf->vsi[vf->lan_vsi_idx];

        ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
        ice_vsi_stop_all_rx_rings(vsi);
        ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
        unsigned int tmp, i;

        if (!pf->vf)
                return;

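        /* serialize with any other flow that is disabling or resetting VFs */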
        while (test_and_set_bit(__ICE_VF_DIS, pf->state))
                usleep_range(1000, 2000);

        /* Disable IOV before freeing resources. This lets any VF drivers
         * running in the host get themselves cleaned up before we yank
         * the carpet out from underneath their feet.
         */
        if (!pci_vfs_assigned(pf->pdev))
                pci_disable_sriov(pf->pdev);
        else
                dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

        /* Avoid wait time by stopping all VFs at the same time */
        ice_for_each_vf(pf, i)
                if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
                        ice_dis_vf_qs(&pf->vf[i]);

        tmp = pf->num_alloc_vfs;
        pf->num_qps_per_vf = 0;
        pf->num_alloc_vfs = 0;
        for (i = 0; i < tmp; i++) {
                if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
                        /* disable VF qp mappings and set VF disable state */
                        ice_dis_vf_mappings(&pf->vf[i]);
                        set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
                        ice_free_vf_res(&pf->vf[i]);
                }
        }

        if (ice_sriov_free_msix_res(pf))
                dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

        devm_kfree(dev, pf->vf);
        pf->vf = NULL;

        /* This check is for when the driver is unloaded while VFs are
         * assigned. Setting the number of VFs to 0 through sysfs is caught
         * before this function ever gets called.
         */
        if (!pci_vfs_assigned(pf->pdev)) {
                unsigned int vf_id;

                /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
                 * work correctly when SR-IOV gets re-enabled.
                 */
                for (vf_id = 0; vf_id < tmp; vf_id++) {
                        u32 reg_idx, bit_idx;

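                        /* GLGEN_VFLRSTAT tracks 32 VFs per register; locate
                         * the register and bit for this absolute VF ID
                         */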
                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
                        wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
                }
        }
        clear_bit(__ICE_VF_DIS, pf->state);
        clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
        struct ice_pf *pf = vf->pf;
        u32 reg, reg_idx, bit_idx;
        unsigned int vf_abs_id, i;
        struct device *dev;
        struct ice_hw *hw;

        dev = ice_pf_to_dev(pf);
        hw = &pf->hw;
        vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

        /* Inform VF that it is no longer active, as a warning */
        clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

        /* Disable VF's configuration API during reset. The flag is re-enabled
         * when it's safe again to access VF's VSI.
         */
        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

        /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
         * in the case of VFR. If this is done for PFR, it can mess up VF
         * resets because the VF driver may already have started cleanup
         * by the time we get here.
         */
        if (!is_pfr)
                wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);

        /* In the case of a VFLR, the HW has already reset the VF and we
         * just need to clean up, so don't hit the VFRTRIG register.
         */
        if (!is_vflr) {
                /* reset VF using VPGEN_VFRTRIG reg */
                reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
                reg |= VPGEN_VFRTRIG_VFSWR_M;
                wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
        }
        /* clear the VFLR bit in GLGEN_VFLRSTAT */
        reg_idx = (vf_abs_id) / 32;
        bit_idx = (vf_abs_id) % 32;
        wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
        ice_flush(hw);

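        /* Select the VF's PCI device status register via PF_PCI_CIAA, then
         * poll PF_PCI_CIAD until the VF reports no pending PCIe transactions
         * or the wait count is exhausted.
         */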
        wr32(hw, PF_PCI_CIAA,
             VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
        for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
                reg = rd32(hw, PF_PCI_CIAD);
                /* no transactions pending so stop polling */
                if ((reg & VF_TRANS_PENDING_M) == 0)
                        break;

                dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
                udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
        }
}

/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true to enable the PVID, false to disable it
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
        struct ice_hw *hw = &vsi->back->hw;
        struct ice_aqc_vsi_props *info;
        struct ice_vsi_ctx *ctxt;
        enum ice_status status;
        int ret = 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (!ctxt)
                return -ENOMEM;

        ctxt->info = vsi->info;
        info = &ctxt->info;
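        /* Enabling forces untagged-only traffic from the VF, inserts the
         * PVID on transmit, strips the tag on receive, and turns on Rx VLAN
         * pruning; disabling restores tag-agnostic behavior.
         */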
        if (enable) {
                info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
                        ICE_AQ_VSI_PVLAN_INSERT_PVID |
                        ICE_AQ_VSI_VLAN_EMOD_STR;
                info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
        } else {
                info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
                        ICE_AQ_VSI_VLAN_MODE_ALL;
                info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
        }

        info->pvid = cpu_to_le16(pvid_info);
        info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
                                           ICE_AQ_VSI_PROP_SW_VALID);

        status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
        if (status) {
                dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
                         ice_stat_str(status),
                         ice_aq_str(hw->adminq.sq_last_status));
                ret = -EIO;
                goto out;
        }

        vsi->info.vlan_flags = info->vlan_flags;
        vsi->info.sw_flags2 = info->sw_flags2;
        vsi->info.pvid = info->pvid;
out:
        kfree(ctxt);
        return ret;
}

/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 */
static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
        return vf->pf->hw.port_info;
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
        struct ice_port_info *pi = ice_vf_get_port_info(vf);
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;

        vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);

        if (!vsi) {
                dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
                ice_vf_invalidate_vsi(vf);
                return NULL;
        }

        vf->lan_vsi_idx = vsi->idx;
        vf->lan_vsi_num = vsi->vsi_num;

        return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
        return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}

/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add VLAN filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
        struct device *dev = ice_pf_to_dev(vf->pf);
        u16 vlan_id = 0;
        int err;

        if (vf->port_vlan_info) {
                err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
                if (err) {
                        dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
                                vf->vf_id, err);
                        return err;
                }

                vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
        }

        /* vlan_id will either be 0 or the port VLAN number */
        err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
        if (err) {
                dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
                        vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
                        err);
                return err;
        }

        return 0;
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
        struct device *dev = ice_pf_to_dev(vf->pf);
        enum ice_status status;
        u8 broadcast[ETH_ALEN];

        eth_broadcast_addr(broadcast);
        status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
        if (status) {
                dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
                        vf->vf_id, ice_stat_str(status));
                return ice_status_to_errno(status);
        }

        vf->num_mac++;

        if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
                status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
                                          ICE_FWD_TO_VSI);
                if (status) {
                        dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
                                &vf->dflt_lan_addr.addr[0], vf->vf_id,
                                ice_stat_str(status));
                        return ice_status_to_errno(status);
                }
                vf->num_mac++;
        }

        return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
        if (vf->trusted)
                set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
        else
                clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
        int device_based_first_msix, device_based_last_msix;
        int pf_based_first_msix, pf_based_last_msix, v;
        struct ice_pf *pf = vf->pf;
        int device_based_vf_id;
        struct ice_hw *hw;
        u32 reg;

        hw = &pf->hw;
        pf_based_first_msix = vf->first_vector_idx;
        pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;

        device_based_first_msix = pf_based_first_msix +
                pf->hw.func_caps.common_cap.msix_vector_first_id;
        device_based_last_msix =
                (device_based_first_msix + pf->num_msix_per_vf) - 1;
        device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

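        /* advertise the device-relative first/last vector range owned by
         * this VF and mark the allocation valid
         */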
        reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
                VPINT_ALLOC_FIRST_M) |
               ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
                VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
        wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

        reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
                 & VPINT_ALLOC_PCI_FIRST_M) |
               ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
                VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
        wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

        /* map each interrupt vector to the VF's function */
        for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
                reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
                        GLINT_VECT2FUNC_VF_NUM_M) |
                       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
                        GLINT_VECT2FUNC_PF_NUM_M));
                wr32(hw, GLINT_VECT2FUNC(v), reg);
        }

        /* Map mailbox interrupt to VF MSI-X vector 0 */
        wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
        struct device *dev = ice_pf_to_dev(vf->pf);
        struct ice_hw *hw = &vf->pf->hw;
        u32 reg;

        /* set regardless of mapping mode */
        wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

        /* VF Tx queues allocation */
        if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
                /* set the VF PF Tx queue range
                 * VFNUMQ value should be set to (number of queues - 1). A value
                 * of 0 means 1 queue and a value of 255 means 256 queues
                 */
                reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
                        VPLAN_TX_QBASE_VFFIRSTQ_M) |
                       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
                        VPLAN_TX_QBASE_VFNUMQ_M));
                wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
        } else {
                dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
        }

        /* set regardless of mapping mode */
        wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

        /* VF Rx queues allocation */
        if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
                /* set the VF PF Rx queue range
                 * VFNUMQ value should be set to (number of queues - 1). A value
                 * of 0 means 1 queue and a value of 255 means 256 queues
                 */
                reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
                        VPLAN_RX_QBASE_VFFIRSTQ_M) |
                       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
                        VPLAN_RX_QBASE_VFNUMQ_M));
                wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
        } else {
                dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
        }
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

        ice_ena_vf_msix_mappings(vf);
        ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_determine_res - calculate how many resources each VF can be given
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns non-zero value if resources (queues/vectors) are available or
 * returns zero if PF cannot accommodate for all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
        bool checked_min_res = false;
        int res;

        /* start by checking if PF can assign max number of resources for
         * all num_alloc_vfs.
         * if yes, return number per VF
         * If no, divide by 2 and roundup, check again
         * repeat the loop till we reach a point where even minimum resources
         * are not available, in that case return 0
         */
        res = max_res;
        while ((res >= min_res) && !checked_min_res) {
                int num_all_res;

                num_all_res = pf->num_alloc_vfs * res;
                if (num_all_res <= avail_res)
                        return res;

                if (res == min_res)
                        checked_min_res = true;

                res = DIV_ROUND_UP(res, 2);
        }
        return 0;
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
        struct ice_pf *pf;

        if (!vf || !q_vector)
                return -EINVAL;

        pf = vf->pf;

        /* always add one to account for the OICR being the first MSIX */
        return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
                q_vector->v_idx + 1;
}

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
        int i;

        if (!res)
                return -EINVAL;

        for (i = res->num_entries - 1; i >= 0; i--)
                if (res->list[i] & ICE_RES_VALID_BIT)
                        return i;

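        /* no tracker entries are in use; report 0 as the max valid index */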
        return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * available in the PF's space for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
        u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
        int vectors_used = pf->irq_tracker->num_entries;
        int sriov_base_vector;

        sriov_base_vector = total_vectors - num_msix_needed;

        /* make sure we only grab irq_tracker entries from the list end and
         * that we have enough available MSIX vectors
         */
        if (sriov_base_vector < vectors_used)
                return -EINVAL;

        pf->sriov_base_vector = sriov_base_vector;

        return 0;
}

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf)
{
        int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
        int msix_avail_per_vf, msix_avail_for_sriov;
        struct device *dev = ice_pf_to_dev(pf);
        u16 num_msix_per_vf, num_txq, num_rxq;

        if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
                return -EINVAL;

        /* determine MSI-X resources per VF */
        msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
                pf->irq_tracker->num_entries;
        msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
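        /* pick the largest per-VF MSI-X tier that the available vectors can
         * support for every VF
         */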
        if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
                num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
        } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
                num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
        } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
                num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
        } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
                num_msix_per_vf = ICE_MIN_INTR_PER_VF;
        } else {
                dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
                        msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
                        pf->num_alloc_vfs);
                return -EIO;
        }

        /* determine queue resources per VF */
        num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
                                    min_t(u16,
                                          num_msix_per_vf - ICE_NONQ_VECS_VF,
                                          ICE_MAX_RSS_QS_PER_VF),
                                    ICE_MIN_QS_PER_VF);

        num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
                                    min_t(u16,
                                          num_msix_per_vf - ICE_NONQ_VECS_VF,
                                          ICE_MAX_RSS_QS_PER_VF),
                                    ICE_MIN_QS_PER_VF);

        if (!num_txq || !num_rxq) {
                dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
                        ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
                return -EIO;
        }

        if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
                dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
                        pf->num_alloc_vfs);
                return -EINVAL;
        }

        /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
        pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
        pf->num_msix_per_vf = num_msix_per_vf;
        dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
                 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);

        return 0;
}

/**
 * ice_clear_vf_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
        struct ice_hw *hw = &vf->pf->hw;
        u32 reg;

        reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
        reg &= ~VPGEN_VFRTRIG_VFSWR_M;
        wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
        ice_flush(hw);
}

/**
 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: promisc flag request from the VF to remove or add filter
 *
 * This function configures VF VSI promiscuous mode, based on the VF requests,
 * for Unicast, Multicast and VLAN
 */
static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
                       bool rm_promisc)
{
        struct ice_pf *pf = vf->pf;
        enum ice_status status = 0;
        struct ice_hw *hw;

        hw = &pf->hw;
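        /* If VLAN filters exist, toggle promiscuous mode per VLAN; with a
         * port VLAN, scope it to that VLAN ID; otherwise use VLAN 0.
         */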
        if (vsi->num_vlan) {
                status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
                                                  rm_promisc);
        } else if (vf->port_vlan_info) {
                if (rm_promisc)
                        status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
                                                       vf->port_vlan_info);
                else
                        status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
                                                     vf->port_vlan_info);
        } else {
                if (rm_promisc)
                        status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
                                                       0);
                else
                        status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
                                                     0);
        }

        return status;
}

static void ice_vf_clear_counters(struct ice_vf *vf)
{
        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

        vf->num_mac = 0;
        vsi->num_vlan = 0;
        memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
        memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}

/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
        ice_vf_clear_counters(vf);
        ice_clear_vf_reset_trigger(vf);
}

/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
        struct device *dev = ice_pf_to_dev(vf->pf);

        ice_vf_set_host_trust_cfg(vf);

        if (ice_vf_rebuild_host_mac_cfg(vf))
                dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
                        vf->vf_id);

        if (ice_vf_rebuild_host_vlan_cfg(vf))
                dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
                        vf->vf_id);
}

/**
 * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
 * @vf: VF to release and setup the VSI for
 *
 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
 * configuration change, etc.).
 */
static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
{
        ice_vf_vsi_release(vf);
        if (!ice_vf_vsi_setup(vf))
                return -ENOMEM;

        return 0;
}

/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;

        vsi = pf->vsi[vf->lan_vsi_idx];

        if (ice_vsi_rebuild(vsi, true)) {
                dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
                        vf->vf_id);
                return -EIO;
        }
        /* vsi->idx will remain the same in this case so don't update
         * vf->lan_vsi_idx
         */
        vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
        vf->lan_vsi_num = vsi->vsi_num;

        return 0;
}

/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
        ice_set_vf_state_qs_dis(vf);
        clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
        clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
        clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
        set_bit(ICE_VF_STATE_INIT, vf->vf_states);
}

/**
 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        struct ice_hw *hw;

        hw = &pf->hw;

        ice_vf_rebuild_host_cfg(vf);

        ice_vf_set_initialized(vf);
        ice_ena_vf_mappings(vf);
        wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
        struct ice_vf *vf;
        int v, i;

        /* If we don't have any VFs, then there is nothing to reset */
        if (!pf->num_alloc_vfs)
                return false;

        /* If VFs have been disabled, there is no need to reset */
        if (test_and_set_bit(__ICE_VF_DIS, pf->state))
                return false;

        /* Begin reset on all VFs at once */
        ice_for_each_vf(pf, v)
                ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

        /* HW requires some time to make sure it can flush the FIFO for a VF
         * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
         * sequence to make sure that it has completed. We'll keep track of
         * the VFs using a simple iterator that increments once that VF has
         * finished resetting.
         */
        for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
                /* Check each VF in sequence */
                while (v < pf->num_alloc_vfs) {
                        u32 reg;

                        vf = &pf->vf[v];
                        reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
                        if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
                                /* only delay if the check failed */
                                usleep_range(10, 20);
                                break;
                        }

                        /* If the current VF has finished resetting, move on
                         * to the next VF in sequence.
                         */
                        v++;
                }
        }

        /* Display a warning if at least one VF didn't manage to reset in
         * time, but continue on with the operation.
         */
        if (v < pf->num_alloc_vfs)
                dev_warn(dev, "VF reset check timeout\n");

        /* free VF resources to begin resetting the VSI state */
        ice_for_each_vf(pf, v) {
                vf = &pf->vf[v];

                ice_vf_pre_vsi_rebuild(vf);
                ice_vf_rebuild_vsi(vf);
                ice_vf_post_vsi_rebuild(vf);
        }

        ice_flush(hw);
        clear_bit(__ICE_VF_DIS, pf->state);

        return true;
}

/**
 * ice_is_vf_disabled - check if the PF or the VF is in a disabled state
 * @vf: pointer to the VF info
 *
 * Returns true if the PF or VF is disabled, false otherwise.
 */
static bool ice_is_vf_disabled(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;

        /* If the PF has been disabled, there is no need to reset the VF until
         * the PF is active again. Similarly, if the VF has been disabled, this
         * means something else is resetting the VF, so we shouldn't continue.
         * Otherwise, set disable VF state bit for actual reset, and continue.
         */
        return (test_bit(__ICE_VF_DIS, pf->state) ||
                test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is currently in reset, resets successfully, or resets
 * are disabled and false otherwise.
 */
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;
        struct device *dev;
        struct ice_hw *hw;
        bool rsd = false;
        u8 promisc_m;
        u32 reg;
        int i;

        dev = ice_pf_to_dev(pf);

        if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
                dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
                        vf->vf_id);
                return true;
        }

        if (ice_is_vf_disabled(vf)) {
                dev_dbg(dev, "VF %d is already disabled, there is no need to reset it; telling VM all is fine\n",
                        vf->vf_id);
                return true;
        }

        /* Set VF disable bit state here, before triggering reset */
        set_bit(ICE_VF_STATE_DIS, vf->vf_states);
        ice_trigger_vf_reset(vf, is_vflr, false);

        vsi = pf->vsi[vf->lan_vsi_idx];

        if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
                ice_dis_vf_qs(vf);

        /* Call Disable LAN Tx queue AQ whether or not queues are
         * enabled. This is needed for successful completion of VFR.
         */
        ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
                        NULL, ICE_VF_RESET, vf->vf_id, NULL);

        hw = &pf->hw;
        /* poll VPGEN_VFRSTAT reg to make sure
         * that reset is complete
         */
        for (i = 0; i < 10; i++) {
                /* VF reset requires driver to first reset the VF and then
                 * poll the status register to make sure that the reset
                 * completed successfully.
                 */
                reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
                if (reg & VPGEN_VFRSTAT_VFRD_M) {
                        rsd = true;
                        break;
                }

                /* only sleep if the reset is not done */
                usleep_range(10, 20);
        }

        /* Display a warning if the VF didn't manage to reset in time, but
         * continue on with the operation.
         */
        if (!rsd)
                dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

        /* Disable promiscuous modes in case they were enabled. Ignore any
         * errors from the disable, as it is best-effort.
         */
        if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
            test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
                if (vf->port_vlan_info || vsi->num_vlan)
                        promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
                else
                        promisc_m = ICE_UCAST_PROMISC_BITS;

                vsi = pf->vsi[vf->lan_vsi_idx];
                if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
                        dev_err(dev, "disabling promiscuous mode failed\n");
        }

        ice_vf_pre_vsi_rebuild(vf);
        ice_vf_rebuild_vsi_with_release(vf);
        ice_vf_post_vsi_rebuild(vf);

        return true;
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
        int i;

        ice_for_each_vf(pf, i)
                ice_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
        struct virtchnl_pf_event pfe;

        if (!pf->num_alloc_vfs)
                return;

        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
        ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
                            (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
        struct virtchnl_pf_event pfe;
        struct ice_pf *pf;

        if (!vf)
                return;

        pf = vf->pf;
        if (ice_validate_vf_id(pf, vf->vf_id))
                return;

        /* Bail out if the VF is disabled, or is neither in the initialized
         * nor the active state; otherwise proceed with the notification.
         */
        if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
             !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
            test_bit(ICE_VF_STATE_DIS, vf->vf_states))
                return;

        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
        ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
                              VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
                              NULL);
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        u8 broadcast[ETH_ALEN];
        enum ice_status status;
        struct ice_vsi *vsi;
        struct device *dev;
        int err;

        vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

        dev = ice_pf_to_dev(pf);
        vsi = ice_vf_vsi_setup(vf);
        if (!vsi)
                return -ENOMEM;

        err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
        if (err) {
                dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
                         vf->vf_id);
                goto release_vsi;
        }

        eth_broadcast_addr(broadcast);
        status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
        if (status) {
                dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
                        vf->vf_id, ice_stat_str(status));
                err = ice_status_to_errno(status);
                goto release_vsi;
        }

        vf->num_mac = 1;

        return 0;

release_vsi:
        ice_vf_vsi_release(vf);
        return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
        struct ice_hw *hw = &pf->hw;
        int retval, i;

        ice_for_each_vf(pf, i) {
                struct ice_vf *vf = &pf->vf[i];

                ice_clear_vf_reset_trigger(vf);

                retval = ice_init_vf_vsi_res(vf);
                if (retval) {
                        dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
                                vf->vf_id, retval);
                        goto teardown;
                }

                set_bit(ICE_VF_STATE_INIT, vf->vf_states);
                ice_ena_vf_mappings(vf);
                wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
        }

        ice_flush(hw);
        return 0;

teardown:
1488        for (i = i - 1; i >= 0; i--) {
1489                struct ice_vf *vf = &pf->vf[i];
1490
1491                ice_dis_vf_mappings(vf);
1492                ice_vf_vsi_release(vf);
1493        }
1494
1495        return retval;
1496}
1497
1498/**
1499 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
1500 * @pf: PF holding reference to all VFs for default configuration
1501 */
1502static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1503{
1504        int i;
1505
1506        ice_for_each_vf(pf, i) {
1507                struct ice_vf *vf = &pf->vf[i];
1508
1509                vf->pf = pf;
1510                vf->vf_id = i;
1511                vf->vf_sw_id = pf->first_sw;
1512                /* assign default capabilities */
1513                set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1514                vf->spoofchk = true;
1515                vf->num_vf_qs = pf->num_qps_per_vf;
1516        }
1517}
1518
1519/**
1520 * ice_alloc_vfs - allocate num_vfs in the PF structure
1521 * @pf: PF to store the allocated VFs in
1522 * @num_vfs: number of VFs to allocate
1523 */
1524static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1525{
1526        struct ice_vf *vfs;
1527
1528        vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1529                           GFP_KERNEL);
1530        if (!vfs)
1531                return -ENOMEM;
1532
1533        pf->vf = vfs;
1534        pf->num_alloc_vfs = num_vfs;
1535
1536        return 0;
1537}
1538
1539/**
1540 * ice_ena_vfs - enable VFs so they are ready to be used
1541 * @pf: pointer to the PF structure
1542 * @num_vfs: number of VFs to enable
1543 */
1544static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1545{
1546        struct device *dev = ice_pf_to_dev(pf);
1547        struct ice_hw *hw = &pf->hw;
1548        int ret;
1549
1550        /* Disable global interrupt 0 so we don't try to handle the VFLR. */
1551        wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1552             ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1553        set_bit(__ICE_OICR_INTR_DIS, pf->state);
1554        ice_flush(hw);
1555
1556        ret = pci_enable_sriov(pf->pdev, num_vfs);
1557        if (ret) {
1558                pf->num_alloc_vfs = 0;
1559                goto err_unroll_intr;
1560        }
1561
1562        ret = ice_alloc_vfs(pf, num_vfs);
1563        if (ret)
1564                goto err_pci_disable_sriov;
1565
1566        if (ice_set_per_vf_res(pf)) {
1567                dev_err(dev, "Not enough resources for %d VFs, try with fewer VFs\n",
1568                        num_vfs);
1569                ret = -ENOSPC;
1570                goto err_unroll_sriov;
1571        }
1572
1573        ice_set_dflt_settings_vfs(pf);
1574
1575        if (ice_start_vfs(pf)) {
1576                dev_err(dev, "Failed to start VF(s)\n");
1577                ret = -EAGAIN;
1578                goto err_unroll_sriov;
1579        }
1580
1581        clear_bit(__ICE_VF_DIS, pf->state);
1582        return 0;
1583
1584err_unroll_sriov:
1585        devm_kfree(dev, pf->vf);
1586        pf->vf = NULL;
1587        pf->num_alloc_vfs = 0;
1588err_pci_disable_sriov:
1589        pci_disable_sriov(pf->pdev);
1590err_unroll_intr:
1591        /* rearm interrupts here */
1592        ice_irq_dynamic_ena(hw, NULL, NULL);
1593        clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1594        return ret;
1595}
1596
1597/**
1598 * ice_pci_sriov_ena - Enable or change number of VFs
1599 * @pf: pointer to the PF structure
1600 * @num_vfs: number of VFs to allocate
1601 *
1602 * Returns 0 on success and negative on failure
1603 */
1604static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1605{
1606        int pre_existing_vfs = pci_num_vf(pf->pdev);
1607        struct device *dev = ice_pf_to_dev(pf);
1608        int err;
1609
1610        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1611                ice_free_vfs(pf);
1612        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1613                return 0;
1614
1615        if (num_vfs > pf->num_vfs_supported) {
1616                dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1617                        num_vfs, pf->num_vfs_supported);
1618                return -EOPNOTSUPP;
1619        }
1620
1621        dev_info(dev, "Enabling %d VFs\n", num_vfs);
1622        err = ice_ena_vfs(pf, num_vfs);
1623        if (err) {
1624                dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1625                return err;
1626        }
1627
1628        set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1629        return 0;
1630}
1631
1632/**
1633 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
1634 * @pf: PF to enable SR-IOV on
1635 */
1636static int ice_check_sriov_allowed(struct ice_pf *pf)
1637{
1638        struct device *dev = ice_pf_to_dev(pf);
1639
1640        if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1641                dev_err(dev, "This device is not capable of SR-IOV\n");
1642                return -EOPNOTSUPP;
1643        }
1644
1645        if (ice_is_safe_mode(pf)) {
1646                dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
1647                return -EOPNOTSUPP;
1648        }
1649
1650        if (!ice_pf_state_is_nominal(pf)) {
1651                dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1652                return -EBUSY;
1653        }
1654
1655        return 0;
1656}
1657
1658/**
1659 * ice_sriov_configure - Enable or change number of VFs via sysfs
1660 * @pdev: pointer to a pci_dev structure
1661 * @num_vfs: number of VFs to allocate or 0 to free VFs
1662 *
1663 * This function is called when the user updates the number of VFs in sysfs. On
1664 * success return whatever num_vfs was set to by the caller. Return negative on
1665 * failure.
1666 */
1667int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1668{
1669        struct ice_pf *pf = pci_get_drvdata(pdev);
1670        struct device *dev = ice_pf_to_dev(pf);
1671        int err;
1672
1673        err = ice_check_sriov_allowed(pf);
1674        if (err)
1675                return err;
1676
1677        if (!num_vfs) {
1678                if (!pci_vfs_assigned(pdev)) {
1679                        ice_free_vfs(pf);
1680                        return 0;
1681                }
1682
1683                dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1684                return -EBUSY;
1685        }
1686
1687        err = ice_pci_sriov_ena(pf, num_vfs);
1688        if (err)
1689                return err;
1690
1691        return num_vfs;
1692}
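/* Usage note (editor's addition, a sketch): ice_sriov_configure() is wired up
 * as the driver's sriov_configure callback, so it runs when an administrator
 * writes the standard PCI sysfs attribute, e.g.
 *
 *   echo 8 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 *
 * to create eight VFs, or writes 0 to free them; per the checks above,
 * freeing fails with -EBUSY while any VF is still assigned to a VM.
 */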
1693
1694/**
1695 * ice_process_vflr_event - Free VF resources via IRQ calls
1696 * @pf: pointer to the PF structure
1697 *
1698 * called from the VFLR IRQ handler to
1699 * free up VF resources and state variables
1700 */
1701void ice_process_vflr_event(struct ice_pf *pf)
1702{
1703        struct ice_hw *hw = &pf->hw;
1704        unsigned int vf_id;
1705        u32 reg;
1706
1707        if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1708            !pf->num_alloc_vfs)
1709                return;
1710
1711        ice_for_each_vf(pf, vf_id) {
1712                struct ice_vf *vf = &pf->vf[vf_id];
1713                u32 reg_idx, bit_idx;
1714
1715                reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1716                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1717                /* read GLGEN_VFLRSTAT register to find out the flr VFs */
1718                reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1719                if (reg & BIT(bit_idx))
1720                        /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1721                        ice_reset_vf(vf, true);
1722        }
1723}
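/* Worked example (editor's addition, hypothetical values): with
 * vf_base_id = 64 and vf_id = 5 the absolute VF index is 69, so
 * reg_idx = 69 / 32 = 2 and bit_idx = 69 % 32 = 5; this VF's VFLR status
 * is therefore bit 5 of GLGEN_VFLRSTAT(2).
 */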
1724
1725/**
1726 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
1727 * @vf: pointer to the VF info
1728 */
1729static void ice_vc_reset_vf(struct ice_vf *vf)
1730{
1731        ice_vc_notify_vf_reset(vf);
1732        ice_reset_vf(vf, false);
1733}
1734
1735/**
1736 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1737 * @pf: PF used to index all VFs
1738 * @pfq: queue index relative to the PF's function space
1739 *
1740 * If no VF is found that owns the pfq, return NULL; otherwise return a
1741 * pointer to the VF that owns the pfq
1742 */
1743static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1744{
1745        unsigned int vf_id;
1746
1747        ice_for_each_vf(pf, vf_id) {
1748                struct ice_vf *vf = &pf->vf[vf_id];
1749                struct ice_vsi *vsi;
1750                u16 rxq_idx;
1751
1752                vsi = pf->vsi[vf->lan_vsi_idx];
1753
1754                ice_for_each_rxq(vsi, rxq_idx)
1755                        if (vsi->rxq_map[rxq_idx] == pfq)
1756                                return vf;
1757        }
1758
1759        return NULL;
1760}
1761
1762/**
1763 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1764 * @pf: PF used for conversion
1765 * @globalq: global queue index used to convert to PF space queue index
1766 */
1767static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1768{
1769        return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1770}
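/* Worked example (editor's addition, hypothetical values): if the device
 * reports rxq_first_id = 2048 and an event names global Rx queue 2055,
 * ice_globalq_to_pfq() returns 2055 - 2048 = 7, i.e. PF-space queue 7,
 * which ice_get_vf_from_pfq() can then match against a VF's rxq_map.
 */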
1771
1772/**
1773 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1774 * @pf: PF that the LAN overflow event happened on
1775 * @event: structure holding the event information for the LAN overflow event
1776 *
1777 * Determine if the LAN overflow event was caused by a VF queue. If it was not
1778 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
1779 * reset on the offending VF.
1780 */
1781void
1782ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1783{
1784        u32 gldcb_rtctq, queue;
1785        struct ice_vf *vf;
1786
1787        gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1788        dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1789
1790        /* event returns device global Rx queue number */
1791        queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1792                GLDCB_RTCTQ_RXQNUM_S;
1793
1794        vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1795        if (!vf)
1796                return;
1797
1798        ice_vc_reset_vf(vf);
1799}
1800
1801/**
1802 * ice_vc_send_msg_to_vf - Send message to VF
1803 * @vf: pointer to the VF info
1804 * @v_opcode: virtual channel opcode
1805 * @v_retval: virtual channel return value
1806 * @msg: pointer to the msg buffer
1807 * @msglen: msg length
1808 *
1809 * send msg to VF
1810 */
1811static int
1812ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1813                      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1814{
1815        enum ice_status aq_ret;
1816        struct device *dev;
1817        struct ice_pf *pf;
1818
1819        if (!vf)
1820                return -EINVAL;
1821
1822        pf = vf->pf;
1823        if (ice_validate_vf_id(pf, vf->vf_id))
1824                return -EINVAL;
1825
1826        dev = ice_pf_to_dev(pf);
1827
1828        /* single place to detect unsuccessful return values */
1829        if (v_retval) {
1830                vf->num_inval_msgs++;
1831                dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1832                         v_opcode, v_retval);
1833                if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1834                        dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1835                                vf->vf_id);
1836                        dev_err(dev, "Use PF Control I/F to enable the VF\n");
1837                        set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1838                        return -EIO;
1839                }
1840        } else {
1841                vf->num_valid_msgs++;
1842                /* reset the invalid counter, if a valid message is received. */
1843                vf->num_inval_msgs = 0;
1844        }
1845
1846        aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1847                                       msg, msglen, NULL);
1848        if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
1849                dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1850                         vf->vf_id, ice_stat_str(aq_ret),
1851                         ice_aq_str(pf->hw.mailboxq.sq_last_status));
1852                return -EIO;
1853        }
1854
1855        return 0;
1856}
1857
1858/**
1859 * ice_vc_get_ver_msg
1860 * @vf: pointer to the VF info
1861 * @msg: pointer to the msg buffer
1862 *
1863 * called from the VF to request the API version used by the PF
1864 */
1865static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1866{
1867        struct virtchnl_version_info info = {
1868                VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1869        };
1870
1871        vf->vf_ver = *(struct virtchnl_version_info *)msg;
1872        /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1873        if (VF_IS_V10(&vf->vf_ver))
1874                info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1875
1876        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1877                                     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1878                                     sizeof(struct virtchnl_version_info));
1879}
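/* Example exchange (editor's sketch, assuming VIRTCHNL_VERSION_MAJOR = 1,
 * VIRTCHNL_VERSION_MINOR = 1 and VIRTCHNL_VERSION_MINOR_NO_VF_CAPS = 0):
 * a VF advertising 1.1 is answered with 1.1 and may negotiate capability
 * flags afterwards; a VF advertising 1.0 is answered with 1.0 so it is
 * never shown capability bits it cannot parse.
 */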
1880
1881/**
1882 * ice_vc_get_vf_res_msg
1883 * @vf: pointer to the VF info
1884 * @msg: pointer to the msg buffer
1885 *
1886 * called from the VF to request its resources
1887 */
1888static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1889{
1890        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1891        struct virtchnl_vf_resource *vfres = NULL;
1892        struct ice_pf *pf = vf->pf;
1893        struct ice_vsi *vsi;
1894        int len = 0;
1895        int ret;
1896
1897        if (ice_check_vf_init(pf, vf)) {
1898                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1899                goto err;
1900        }
1901
1902        len = sizeof(struct virtchnl_vf_resource);
1903
1904        vfres = kzalloc(len, GFP_KERNEL);
1905        if (!vfres) {
1906                v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1907                len = 0;
1908                goto err;
1909        }
1910        if (VF_IS_V11(&vf->vf_ver))
1911                vf->driver_caps = *(u32 *)msg;
1912        else
1913                vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1914                                  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1915                                  VIRTCHNL_VF_OFFLOAD_VLAN;
1916
1917        vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1918        vsi = pf->vsi[vf->lan_vsi_idx];
1919        if (!vsi) {
1920                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1921                goto err;
1922        }
1923
1924        if (!vsi->info.pvid)
1925                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1926
1927        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1928                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1929        } else {
1930                if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1931                        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1932                else
1933                        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1934        }
1935
1936        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1937                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1938
1939        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1940                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1941
1942        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1943                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1944
1945        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1946                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1947
1948        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1949                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1950
1951        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1952                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1953
1954        if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1955                vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1956
1957        vfres->num_vsis = 1;
1958        /* Tx and Rx queue counts are equal for a VF */
1959        vfres->num_queue_pairs = vsi->num_txq;
1960        vfres->max_vectors = pf->num_msix_per_vf;
1961        vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1962        vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1963
1964        vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1965        vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1966        vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1967        ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1968                        vf->dflt_lan_addr.addr);
1969
1970        /* match guest capabilities */
1971        vf->driver_caps = vfres->vf_cap_flags;
1972
1973        set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1974
1975err:
1976        /* send the response back to the VF */
1977        ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1978                                    (u8 *)vfres, len);
1979
1980        kfree(vfres);
1981        return ret;
1982}
1983
1984/**
1985 * ice_vc_reset_vf_msg
1986 * @vf: pointer to the VF info
1987 *
1988 * called from the VF to reset itself; unlike other virtchnl messages,
1989 * the PF driver doesn't send a response back to the VF
1991 */
1992static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1993{
1994        if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
1995                ice_reset_vf(vf, false);
1996}
1997
1998/**
1999 * ice_find_vsi_from_id
2000 * @pf: the PF structure to search for the VSI
2001 * @id: ID of the VSI it is searching for
2002 *
2003 * searches for the VSI with the given ID
2004 */
2005static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2006{
2007        int i;
2008
2009        ice_for_each_vsi(pf, i)
2010                if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2011                        return pf->vsi[i];
2012
2013        return NULL;
2014}
2015
2016/**
2017 * ice_vc_isvalid_vsi_id
2018 * @vf: pointer to the VF info
2019 * @vsi_id: VF relative VSI ID
2020 *
2021 * check for the valid VSI ID
2022 */
2023static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2024{
2025        struct ice_pf *pf = vf->pf;
2026        struct ice_vsi *vsi;
2027
2028        vsi = ice_find_vsi_from_id(pf, vsi_id);
2029
2030        return (vsi && (vsi->vf_id == vf->vf_id));
2031}
2032
2033/**
2034 * ice_vc_isvalid_q_id
2035 * @vf: pointer to the VF info
2036 * @vsi_id: VSI ID
2037 * @qid: VSI relative queue ID
2038 *
2039 * check for the valid queue ID
2040 */
2041static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2042{
2043        struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2044        /* allocated Tx and Rx queues should always be equal for a VF VSI */
2045        return (vsi && (qid < vsi->alloc_txq));
2046}
2047
2048/**
2049 * ice_vc_isvalid_ring_len
2050 * @ring_len: length of ring
2051 *
2052 * check for a valid ring count: it must be zero, or a multiple of
2053 * ICE_REQ_DESC_MULTIPLE within the supported descriptor range
2054 */
2055static bool ice_vc_isvalid_ring_len(u16 ring_len)
2056{
2057        return ring_len == 0 ||
2058               (ring_len >= ICE_MIN_NUM_DESC &&
2059                ring_len <= ICE_MAX_NUM_DESC &&
2060                !(ring_len % ICE_REQ_DESC_MULTIPLE));
2061}
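/* Worked examples (editor's addition, assuming ICE_MIN_NUM_DESC = 64,
 * ICE_MAX_NUM_DESC = 8160 and ICE_REQ_DESC_MULTIPLE = 32): ring_len values
 * 0, 64, 96 and 8160 are accepted, while 65 (not a multiple of 32),
 * 32 (below the minimum) and 8192 (above the maximum) are rejected.
 */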
2062
2063/**
2064 * ice_vc_config_rss_key
2065 * @vf: pointer to the VF info
2066 * @msg: pointer to the msg buffer
2067 *
2068 * Configure the VF's RSS key
2069 */
2070static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2071{
2072        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2073        struct virtchnl_rss_key *vrk =
2074                (struct virtchnl_rss_key *)msg;
2075        struct ice_pf *pf = vf->pf;
2076        struct ice_vsi *vsi;
2077
2078        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2079                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2080                goto error_param;
2081        }
2082
2083        if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2084                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2085                goto error_param;
2086        }
2087
2088        if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2089                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2090                goto error_param;
2091        }
2092
2093        if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2094                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2095                goto error_param;
2096        }
2097
2098        vsi = pf->vsi[vf->lan_vsi_idx];
2099        if (!vsi) {
2100                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2101                goto error_param;
2102        }
2103
2104        if (ice_set_rss(vsi, vrk->key, NULL, 0))
2105                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2106error_param:
2107        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2108                                     NULL, 0);
2109}
2110
2111/**
2112 * ice_vc_config_rss_lut
2113 * @vf: pointer to the VF info
2114 * @msg: pointer to the msg buffer
2115 *
2116 * Configure the VF's RSS LUT
2117 */
2118static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2119{
2120        struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2121        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2122        struct ice_pf *pf = vf->pf;
2123        struct ice_vsi *vsi;
2124
2125        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2126                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2127                goto error_param;
2128        }
2129
2130        if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2131                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2132                goto error_param;
2133        }
2134
2135        if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2136                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2137                goto error_param;
2138        }
2139
2140        if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2141                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2142                goto error_param;
2143        }
2144
2145        vsi = pf->vsi[vf->lan_vsi_idx];
2146        if (!vsi) {
2147                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2148                goto error_param;
2149        }
2150
2151        if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2152                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2153error_param:
2154        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2155                                     NULL, 0);
2156}
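/* Editor's note (sketch, assuming ICE_VSIQF_HKEY_ARRAY_SIZE = 52 and
 * ICE_VSIQF_HLUT_ARRAY_SIZE = 64): both RSS handlers reject buffers whose
 * size differs from the hardware tables, so a VF must always send a
 * 52-byte hash key and a 64-entry lookup table, independent of how many
 * queues it actually enables.
 */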
2157
2158/**
2159 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2160 * @vf: the VF being reset
2161 *
2162 * The max poll time is ~800ms, which is about the maximum time it takes
2163 * for a VF to be reset and/or a VF driver to be removed.
2164 */
2165static void ice_wait_on_vf_reset(struct ice_vf *vf)
2166{
2167        int i;
2168
2169        for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2170                if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2171                        break;
2172                msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2173        }
2174}
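/* Editor's note: the ~800ms bound above follows directly from the loop;
 * assuming ICE_MAX_VF_RESET_TRIES = 40 and ICE_MAX_VF_RESET_SLEEP_MS = 20,
 * the worst case is 40 * 20ms = 800ms of polling before giving up.
 */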
2175
2176/**
2177 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2178 * @vf: VF to check if it's ready to be configured/queried
2179 *
2180 * The purpose of this function is to make sure the VF is not in reset, not
2181 * disabled, and initialized so it can be configured and/or queried by a host
2182 * administrator.
2183 */
2184static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2185{
2186        struct ice_pf *pf;
2187
2188        ice_wait_on_vf_reset(vf);
2189
2190        if (ice_is_vf_disabled(vf))
2191                return -EINVAL;
2192
2193        pf = vf->pf;
2194        if (ice_check_vf_init(pf, vf))
2195                return -EBUSY;
2196
2197        return 0;
2198}
2199
2200/**
2201 * ice_set_vf_spoofchk
2202 * @netdev: network interface device structure
2203 * @vf_id: VF identifier
2204 * @ena: flag to enable or disable feature
2205 *
2206 * Enable or disable VF spoof checking
2207 */
2208int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2209{
2210        struct ice_netdev_priv *np = netdev_priv(netdev);
2211        struct ice_pf *pf = np->vsi->back;
2212        struct ice_vsi_ctx *ctx;
2213        struct ice_vsi *vf_vsi;
2214        enum ice_status status;
2215        struct device *dev;
2216        struct ice_vf *vf;
2217        int ret;
2218
2219        dev = ice_pf_to_dev(pf);
2220        if (ice_validate_vf_id(pf, vf_id))
2221                return -EINVAL;
2222
2223        vf = &pf->vf[vf_id];
2224        ret = ice_check_vf_ready_for_cfg(vf);
2225        if (ret)
2226                return ret;
2227
2228        vf_vsi = pf->vsi[vf->lan_vsi_idx];
2229        if (!vf_vsi) {
2230                netdev_err(netdev, "VSI %d for VF %d is null\n",
2231                           vf->lan_vsi_idx, vf->vf_id);
2232                return -EINVAL;
2233        }
2234
2235        if (vf_vsi->type != ICE_VSI_VF) {
2236                netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
2237                           vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2238                return -ENODEV;
2239        }
2240
2241        if (ena == vf->spoofchk) {
2242                dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2243                return 0;
2244        }
2245
2246        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2247        if (!ctx)
2248                return -ENOMEM;
2249
2250        ctx->info.sec_flags = vf_vsi->info.sec_flags;
2251        ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2252        if (ena) {
2253                ctx->info.sec_flags |=
2254                        ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2255                        (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2256                         ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2257        } else {
2258                ctx->info.sec_flags &=
2259                        ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2260                          (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2261                           ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2262        }
2263
2264        status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2265        if (status) {
2266                dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
2267                        ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2268                        ice_stat_str(status));
2269                ret = -EIO;
2270                goto out;
2271        }
2272
2273        /* only update spoofchk state and VSI context on success */
2274        vf_vsi->info.sec_flags = ctx->info.sec_flags;
2275        vf->spoofchk = ena;
2276
2277out:
2278        kfree(ctx);
2279        return ret;
2280}
2281
2282/**
2283 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2284 * @pf: PF structure for accessing VF(s)
2285 *
2286 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2287 * else return true
2288 */
2289bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2290{
2291        int vf_idx;
2292
2293        ice_for_each_vf(pf, vf_idx) {
2294                struct ice_vf *vf = &pf->vf[vf_idx];
2295
2296                /* found a VF that has promiscuous mode configured */
2297                if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2298                    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2299                        return true;
2300        }
2301
2302        return false;
2303}
2304
2305/**
2306 * ice_vc_cfg_promiscuous_mode_msg
2307 * @vf: pointer to the VF info
2308 * @msg: pointer to the msg buffer
2309 *
2310 * called from the VF to configure the VF VSI's promiscuous mode
2311 */
2312static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2313{
2314        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2315        struct virtchnl_promisc_info *info =
2316            (struct virtchnl_promisc_info *)msg;
2317        struct ice_pf *pf = vf->pf;
2318        struct ice_vsi *vsi;
2319        struct device *dev;
2320        bool rm_promisc;
2321        int ret = 0;
2322
2323        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2324                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2325                goto error_param;
2326        }
2327
2328        if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2329                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2330                goto error_param;
2331        }
2332
2333        vsi = pf->vsi[vf->lan_vsi_idx];
2334        if (!vsi) {
2335                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2336                goto error_param;
2337        }
2338
2339        dev = ice_pf_to_dev(pf);
2340        if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2341                dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2342                        vf->vf_id);
2343                /* Leave v_ret alone, lie to the VF on purpose. */
2344                goto error_param;
2345        }
2346
2347        rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2348                !(info->flags & FLAG_VF_MULTICAST_PROMISC);
2349
2350        if (vsi->num_vlan || vf->port_vlan_info) {
2351                struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2352                struct net_device *pf_netdev;
2353
2354                if (!pf_vsi) {
2355                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2356                        goto error_param;
2357                }
2358
2359                pf_netdev = pf_vsi->netdev;
2360
2361                ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2362                if (ret) {
2363                        dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2364                                rm_promisc ? "ON" : "OFF", vf->vf_id,
2365                                vsi->vsi_num);
2366                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2367                }
2368
2369                ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2370                if (ret) {
2371                        dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2372                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2373                        goto error_param;
2374                }
2375        }
2376
2377        if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2378                bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2379
2380                if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2381                        /* only attempt to set the default forwarding VSI if
2382                         * it's not currently set
2383                         */
2384                        ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2385                else if (!set_dflt_vsi &&
2386                         ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2387                        /* only attempt to free the default forwarding VSI if we
2388                         * are the owner
2389                         */
2390                        ret = ice_clear_dflt_vsi(pf->first_sw);
2391
2392                if (ret) {
2393                        dev_err(dev, "Failed to %sable VF %d as the default VSI, error %d\n",
2394                                set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2395                        v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2396                        goto error_param;
2397                }
2398        } else {
2399                enum ice_status status;
2400                u8 promisc_m;
2401
2402                if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2403                        if (vf->port_vlan_info || vsi->num_vlan)
2404                                promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2405                        else
2406                                promisc_m = ICE_UCAST_PROMISC_BITS;
2407                } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2408                        if (vf->port_vlan_info || vsi->num_vlan)
2409                                promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2410                        else
2411                                promisc_m = ICE_MCAST_PROMISC_BITS;
2412                } else {
2413                        if (vf->port_vlan_info || vsi->num_vlan)
2414                                promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2415                        else
2416                                promisc_m = ICE_UCAST_PROMISC_BITS;
2417                }
2418
2419                /* Configure multicast/unicast with or without VLAN promiscuous
2420                 * mode
2421                 */
2422                status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2423                if (status) {
2424                        dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2425                                rm_promisc ? "dis" : "en", vf->vf_id,
2426                                ice_stat_str(status));
2427                        v_ret = ice_err_to_virt_err(status);
2428                        goto error_param;
2429                } else {
2430                        dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2431                                rm_promisc ? "dis" : "en", vf->vf_id);
2432                }
2433        }
2434
2435        if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2436                set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2437        else
2438                clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2439
2440        if (info->flags & FLAG_VF_UNICAST_PROMISC)
2441                set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2442        else
2443                clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2444
2445error_param:
2446        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2447                                     v_ret, NULL, 0);
2448}
2449
2450/**
2451 * ice_vc_get_stats_msg
2452 * @vf: pointer to the VF info
2453 * @msg: pointer to the msg buffer
2454 *
2455 * called from the VF to get VSI stats
2456 */
2457static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2458{
2459        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2460        struct virtchnl_queue_select *vqs =
2461                (struct virtchnl_queue_select *)msg;
2462        struct ice_eth_stats stats = { 0 };
2463        struct ice_pf *pf = vf->pf;
2464        struct ice_vsi *vsi;
2465
2466        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2467                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2468                goto error_param;
2469        }
2470
2471        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2472                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2473                goto error_param;
2474        }
2475
2476        vsi = pf->vsi[vf->lan_vsi_idx];
2477        if (!vsi) {
2478                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2479                goto error_param;
2480        }
2481
2482        ice_update_eth_stats(vsi);
2483
2484        stats = vsi->eth_stats;
2485
2486error_param:
2487        /* send the response to the VF */
2488        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
2489                                     (u8 *)&stats, sizeof(stats));
2490}
2491
2492/**
2493 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2494 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2495 *
2496 * Return true on successful validation, else false
2497 */
2498static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2499{
2500        if ((!vqs->rx_queues && !vqs->tx_queues) ||
2501            vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2502            vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
2503                return false;
2504
2505        return true;
2506}
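/* Worked example (editor's addition, assuming ICE_MAX_RSS_QS_PER_VF = 16):
 * a minimal, compile-guarded sketch of how the bound check behaves; it is
 * not part of the driver and the helper name is invented for illustration.
 */
#if 0 /* illustration only */
static void ice_vqs_bitmap_examples(void)
{
        struct virtchnl_queue_select ok = { .rx_queues = 0x0009 };   /* queues 0 and 3 */
        struct virtchnl_queue_select bad = { .rx_queues = BIT(16) }; /* bit beyond per-VF max */

        WARN_ON(!ice_vc_validate_vqs_bitmaps(&ok));  /* valid: below BIT(16), not empty */
        WARN_ON(ice_vc_validate_vqs_bitmaps(&bad));  /* rejected by the >= BIT(16) bound */
}
#endif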
2507
2508/**
2509 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
2510 * @vsi: VSI of the VF to configure
2511 * @q_idx: VF queue index used to determine the queue in the PF's space
2512 */
2513static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2514{
2515        struct ice_hw *hw = &vsi->back->hw;
2516        u32 pfq = vsi->txq_map[q_idx];
2517        u32 reg;
2518
2519        reg = rd32(hw, QINT_TQCTL(pfq));
2520
2521        /* MSI-X index 0 in the VF's space is always for the OICR, which means
2522         * this is most likely a poll mode VF driver, so don't enable an
2523         * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2524         */
2525        if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2526                return;
2527
2528        wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2529}
2530
2531/**
2532 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
2533 * @vsi: VSI of the VF to configure
2534 * @q_idx: VF queue index used to determine the queue in the PF's space
2535 */
2536static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2537{
2538        struct ice_hw *hw = &vsi->back->hw;
2539        u32 pfq = vsi->rxq_map[q_idx];
2540        u32 reg;
2541
2542        reg = rd32(hw, QINT_RQCTL(pfq));
2543
2544        /* MSI-X index 0 in the VF's space is always for the OICR, which means
2545         * this is most likely a poll mode VF driver, so don't enable an
2546         * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2547         */
2548        if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2549                return;
2550
2551        wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2552}
2553
2554/**
2555 * ice_vc_ena_qs_msg
2556 * @vf: pointer to the VF info
2557 * @msg: pointer to the msg buffer
2558 *
2559 * called from the VF to enable all or specific queue(s)
2560 */
2561static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2562{
2563        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2564        struct virtchnl_queue_select *vqs =
2565            (struct virtchnl_queue_select *)msg;
2566        struct ice_pf *pf = vf->pf;
2567        struct ice_vsi *vsi;
2568        unsigned long q_map;
2569        u16 vf_q_id;
2570
2571        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2572                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2573                goto error_param;
2574        }
2575
2576        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2577                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2578                goto error_param;
2579        }
2580
2581        if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2582                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2583                goto error_param;
2584        }
2585
2586        vsi = pf->vsi[vf->lan_vsi_idx];
2587        if (!vsi) {
2588                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2589                goto error_param;
2590        }
2591
2592        /* Enable only Rx rings, Tx rings were enabled by the FW when the
2593         * Tx queue group list was configured and the context bits were
2594         * programmed using ice_vsi_cfg_txqs
2595         */
2596        q_map = vqs->rx_queues;
2597        for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2598                if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2599                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2600                        goto error_param;
2601                }
2602
2603                /* Skip queue if already enabled */
2604                if (test_bit(vf_q_id, vf->rxq_ena))
2605                        continue;
2606
2607                if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
2608                        dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
2609                                vf_q_id, vsi->vsi_num);
2610                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2611                        goto error_param;
2612                }
2613
2614                ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
2615                set_bit(vf_q_id, vf->rxq_ena);
2616        }
2617
2618        vsi = pf->vsi[vf->lan_vsi_idx];
2619        q_map = vqs->tx_queues;
2620        for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2621                if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2622                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2623                        goto error_param;
2624                }
2625
2626                /* Skip queue if already enabled */
2627                if (test_bit(vf_q_id, vf->txq_ena))
2628                        continue;
2629
2630                ice_vf_ena_txq_interrupt(vsi, vf_q_id);
2631                set_bit(vf_q_id, vf->txq_ena);
2632        }
2633
2634        /* Set flag to indicate that queues are enabled */
2635        if (v_ret == VIRTCHNL_STATUS_SUCCESS)
2636                set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2637
2638error_param:
2639        /* send the response to the VF */
2640        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2641                                     NULL, 0);
2642}
2643
2644/**
2645 * ice_vc_dis_qs_msg
2646 * @vf: pointer to the VF info
2647 * @msg: pointer to the msg buffer
2648 *
2649 * called from the VF to disable all or specific
2650 * queue(s)
2651 */
2652static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2653{
2654        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2655        struct virtchnl_queue_select *vqs =
2656            (struct virtchnl_queue_select *)msg;
2657        struct ice_pf *pf = vf->pf;
2658        struct ice_vsi *vsi;
2659        unsigned long q_map;
2660        u16 vf_q_id;
2661
2662        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2663            !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2664                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2665                goto error_param;
2666        }
2667
2668        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2669                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2670                goto error_param;
2671        }
2672
2673        if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2674                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2675                goto error_param;
2676        }
2677
2678        vsi = pf->vsi[vf->lan_vsi_idx];
2679        if (!vsi) {
2680                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2681                goto error_param;
2682        }
2683
2684        if (vqs->tx_queues) {
2685                q_map = vqs->tx_queues;
2686
2687                for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2688                        struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2689                        struct ice_txq_meta txq_meta = { 0 };
2690
2691                        if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2692                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2693                                goto error_param;
2694                        }
2695
2696                        /* Skip queue if not enabled */
2697                        if (!test_bit(vf_q_id, vf->txq_ena))
2698                                continue;
2699
2700                        ice_fill_txq_meta(vsi, ring, &txq_meta);
2701
2702                        if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2703                                                 ring, &txq_meta)) {
2704                                dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
2705                                        vf_q_id, vsi->vsi_num);
2706                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2707                                goto error_param;
2708                        }
2709
2710                        /* Clear enabled queues flag */
2711                        clear_bit(vf_q_id, vf->txq_ena);
2712                }
2713        }
2714
2715        q_map = vqs->rx_queues;
2716        /* speed up Rx queue disable by batching them if possible */
2717        if (q_map &&
2718            bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
2719                if (ice_vsi_stop_all_rx_rings(vsi)) {
2720                        dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2721                                vsi->vsi_num);
2722                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2723                        goto error_param;
2724                }
2725
2726                bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
2727        } else if (q_map) {
2728                for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2729                        if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2730                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2731                                goto error_param;
2732                        }
2733
2734                        /* Skip queue if not enabled */
2735                        if (!test_bit(vf_q_id, vf->rxq_ena))
2736                                continue;
2737
2738                        if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2739                                                     true)) {
2740                                dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
2741                                        vf_q_id, vsi->vsi_num);
2742                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2743                                goto error_param;
2744                        }
2745
2746                        /* Clear enabled queues flag */
2747                        clear_bit(vf_q_id, vf->rxq_ena);
2748                }
2749        }
2750
2751        /* Clear enabled queues flag */
2752        if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
2753                clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2754
2755error_param:
2756        /* send the response to the VF */
2757        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2758                                     NULL, 0);
2759}
2760
2761/**
2762 * ice_cfg_interrupt
2763 * @vf: pointer to the VF info
2764 * @vsi: the VSI being configured
2765 * @vector_id: vector ID
2766 * @map: vector map for mapping vectors to queues
2767 * @q_vector: structure for interrupt vector
2768 *
2769 * configure the IRQ to queue map
2769 */
2770static int
2771ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2772                  struct virtchnl_vector_map *map,
2773                  struct ice_q_vector *q_vector)
2774{
2775        u16 vsi_q_id, vsi_q_id_idx;
2776        unsigned long qmap;
2777
2778        q_vector->num_ring_rx = 0;
2779        q_vector->num_ring_tx = 0;
2780
2781        qmap = map->rxq_map;
2782        for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2783                vsi_q_id = vsi_q_id_idx;
2784
2785                if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2786                        return VIRTCHNL_STATUS_ERR_PARAM;
2787
2788                q_vector->num_ring_rx++;
2789                q_vector->rx.itr_idx = map->rxitr_idx;
2790                vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2791                ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2792                                      q_vector->rx.itr_idx);
2793        }
2794
2795        qmap = map->txq_map;
2796        for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2797                vsi_q_id = vsi_q_id_idx;
2798
2799                if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2800                        return VIRTCHNL_STATUS_ERR_PARAM;
2801
2802                q_vector->num_ring_tx++;
2803                q_vector->tx.itr_idx = map->txitr_idx;
2804                vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2805                ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2806                                      q_vector->tx.itr_idx);
2807        }
2808
2809        return VIRTCHNL_STATUS_SUCCESS;
2810}
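/* Worked example (editor's addition, hypothetical map): a vector map with
 * rxq_map = 0x3 and txq_map = 0x1 attaches Rx queues 0 and 1 plus Tx queue 0
 * to this vector; each ring's q_vector pointer is set and the queue's
 * interrupt cause is programmed to fire on @vector_id using the ITR index
 * the VF requested.
 */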
2811
2812/**
2813 * ice_vc_cfg_irq_map_msg
2814 * @vf: pointer to the VF info
2815 * @msg: pointer to the msg buffer
2816 *
2817 * called from the VF to configure the IRQ to queue map
2818 */
2819static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2820{
2821        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2822        u16 num_q_vectors_mapped, vsi_id, vector_id;
2823        struct virtchnl_irq_map_info *irqmap_info;
2824        struct virtchnl_vector_map *map;
2825        struct ice_pf *pf = vf->pf;
2826        struct ice_vsi *vsi;
2827        int i;
2828
2829        irqmap_info = (struct virtchnl_irq_map_info *)msg;
2830        num_q_vectors_mapped = irqmap_info->num_vectors;
2831
2832        /* Check to make sure number of VF vectors mapped is not greater than
2833         * number of VF vectors originally allocated, and check that
2834         * there is actually at least a single VF queue vector mapped
2835         */
2836        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2837            pf->num_msix_per_vf < num_q_vectors_mapped ||
2838            !num_q_vectors_mapped) {
2839                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2840                goto error_param;
2841        }
2842
2843        vsi = pf->vsi[vf->lan_vsi_idx];
2844        if (!vsi) {
2845                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2846                goto error_param;
2847        }
2848
2849        for (i = 0; i < num_q_vectors_mapped; i++) {
2850                struct ice_q_vector *q_vector;
2851
2852                map = &irqmap_info->vecmap[i];
2853
2854                vector_id = map->vector_id;
2855                vsi_id = map->vsi_id;
2856                /* vector_id is always 0-based for each VF, and can never be
2857                 * larger than or equal to the max allowed interrupts per VF
2858                 */
2859                if (!(vector_id < pf->num_msix_per_vf) ||
2860                    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2861                    (!vector_id && (map->rxq_map || map->txq_map))) {
2862                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2863                        goto error_param;
2864                }
2865
2866                /* No need to map VF miscellaneous or rogue vector */
2867                if (!vector_id)
2868                        continue;
2869
2870                /* Subtract the non-queue vectors from the vector_id passed
2871                 * by the VF to get the VSI queue vector array index
2872                 */
2873                q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2874                if (!q_vector) {
2875                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2876                        goto error_param;
2877                }
2878
2879                /* look out for invalid queue indexes */
2880                v_ret = (enum virtchnl_status_code)
2881                        ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2882                if (v_ret)
2883                        goto error_param;
2884        }
2885
2886error_param:
2887        /* send the response to the VF */
2888        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2889                                     NULL, 0);
2890}
2891
2892/**
2893 * ice_vc_cfg_qs_msg
2894 * @vf: pointer to the VF info
2895 * @msg: pointer to the msg buffer
2896 *
2897 * called from the VF to configure the Rx/Tx queues
2898 */
2899static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2900{
2901        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2902        struct virtchnl_vsi_queue_config_info *qci =
2903            (struct virtchnl_vsi_queue_config_info *)msg;
2904        struct virtchnl_queue_pair_info *qpi;
2905        u16 num_rxq = 0, num_txq = 0;
2906        struct ice_pf *pf = vf->pf;
2907        struct ice_vsi *vsi;
2908        int i;
2909
2910        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2911                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2912                goto error_param;
2913        }
2914
2915        if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2916                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2917                goto error_param;
2918        }
2919
2920        vsi = pf->vsi[vf->lan_vsi_idx];
2921        if (!vsi) {
2922                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2923                goto error_param;
2924        }
2925
2926        if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
2927            qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2928                dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than the supported number of queues: %d\n",
2929                        vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2930                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2931                goto error_param;
2932        }
2933
2934        for (i = 0; i < qci->num_queue_pairs; i++) {
2935                qpi = &qci->qpair[i];
2936                if (qpi->txq.vsi_id != qci->vsi_id ||
2937                    qpi->rxq.vsi_id != qci->vsi_id ||
2938                    qpi->rxq.queue_id != qpi->txq.queue_id ||
2939                    qpi->txq.headwb_enabled ||
2940                    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2941                    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2942                    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2943                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2944                        goto error_param;
2945                }
2946                /* copy Tx queue info from VF into VSI */
2947                if (qpi->txq.ring_len > 0) {
2948                        num_txq++;
2949                        vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2950                        vsi->tx_rings[i]->count = qpi->txq.ring_len;
2951                }
2952
2953                /* copy Rx queue info from VF into VSI */
2954                if (qpi->rxq.ring_len > 0) {
2955                        num_rxq++;
2956                        vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2957                        vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2958
2959                        if (qpi->rxq.databuffer_size != 0 &&
2960                            (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2961                             qpi->rxq.databuffer_size < 1024)) {
2962                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2963                                goto error_param;
2964                        }
2965                        vsi->rx_buf_len = qpi->rxq.databuffer_size;
2966                        vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2967                        if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2968                            qpi->rxq.max_pkt_size < 64) {
2969                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2970                                goto error_param;
2971                        }
2972                        /* only use max_pkt_size after it passed validation */
2973                        vsi->max_frame = qpi->rxq.max_pkt_size;
2974                }
2975        }
2976
2977        /* VF can request to configure fewer queues than were allocated by
2978         * default, so update the VSI with the new number
2979         */
2980        vsi->num_txq = num_txq;
2981        vsi->num_rxq = num_rxq;
2982        /* All queues of VF VSI are in TC 0 */
2983        vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2984        vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
2985
2986        if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2987                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2988
2989error_param:
2990        /* send the response to the VF */
2991        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2992                                     NULL, 0);
2993}
2994
2995/**
2996 * ice_is_vf_trusted
2997 * @vf: pointer to the VF info
2998 */
2999static bool ice_is_vf_trusted(struct ice_vf *vf)
3000{
3001        return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3002}
3003
3004/**
3005 * ice_can_vf_change_mac
3006 * @vf: pointer to the VF info
3007 *
3008 * Return true if the VF is allowed to change its MAC filters, false otherwise
3009 */
3010static bool ice_can_vf_change_mac(struct ice_vf *vf)
3011{
3012        /* If the VF MAC address has been set administratively (via the
3013         * ndo_set_vf_mac command), then deny permission to the VF to
3014         * add/delete unicast MAC addresses, unless the VF is trusted
3015         */
3016        if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3017                return false;
3018
3019        return true;
3020}
3021
3022/**
3023 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3024 * @vf: pointer to the VF info
3025 * @vsi: pointer to the VF's VSI
3026 * @mac_addr: MAC address to add
3027 */
3028static int
3029ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3030{
3031        struct device *dev = ice_pf_to_dev(vf->pf);
3032        enum ice_status status;
3033
3034        /* default unicast MAC already added */
3035        if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3036                return 0;
3037
3038        if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3039                dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3040                return -EPERM;
3041        }
3042
3043        status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3044        if (status == ICE_ERR_ALREADY_EXISTS) {
3045                dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3046                        vf->vf_id);
3047                return -EEXIST;
3048        } else if (status) {
3049                        dev_err(dev, "Failed to add MAC %pM for VF %d, error %s\n",
3050                        mac_addr, vf->vf_id, ice_stat_str(status));
3051                return -EIO;
3052        }
3053
3054        /* Set the default LAN address to the latest unicast MAC address added
3055         * by the VF. The default LAN address is reported by the PF via
3056         * ndo_get_vf_config.
3057         */
3058        if (is_unicast_ether_addr(mac_addr))
3059                ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3060
3061        vf->num_mac++;
3062
3063        return 0;
3064}
3065
3066/**
3067 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3068 * @vf: pointer to the VF info
3069 * @vsi: pointer to the VF's VSI
3070 * @mac_addr: MAC address to delete
3071 */
3072static int
3073ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3074{
3075        struct device *dev = ice_pf_to_dev(vf->pf);
3076        enum ice_status status;
3077
3078        if (!ice_can_vf_change_mac(vf) &&
3079            ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3080                return 0;
3081
3082        status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3083        if (status == ICE_ERR_DOES_NOT_EXIST) {
3084                dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3085                        vf->vf_id);
3086                return -ENOENT;
3087        } else if (status) {
3088                dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3089                        mac_addr, vf->vf_id, ice_stat_str(status));
3090                return -EIO;
3091        }
3092
3093        if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3094                eth_zero_addr(vf->dflt_lan_addr.addr);
3095
3096        vf->num_mac--;
3097
3098        return 0;
3099}
3100
3101/**
3102 * ice_vc_handle_mac_addr_msg
3103 * @vf: pointer to the VF info
3104 * @msg: pointer to the msg buffer
3105 * @set: true if MAC filters are being set, false otherwise
3106 *
3107 * add or remove guest MAC address filters
3108 */
3109static int
3110ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3111{
3112        int (*ice_vc_cfg_mac)
3113                (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
3114        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3115        struct virtchnl_ether_addr_list *al =
3116            (struct virtchnl_ether_addr_list *)msg;
3117        struct ice_pf *pf = vf->pf;
3118        enum virtchnl_ops vc_op;
3119        struct ice_vsi *vsi;
3120        int i;
3121
3122        if (set) {
3123                vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3124                ice_vc_cfg_mac = ice_vc_add_mac_addr;
3125        } else {
3126                vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3127                ice_vc_cfg_mac = ice_vc_del_mac_addr;
3128        }
3129
3130        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3131            !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3132                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3133                goto handle_mac_exit;
3134        }
3135
3136        /* If this VF is not privileged, then we can't add more than a
3137         * limited number of addresses. Check to make sure that the
3138         * additions do not push us over the limit.
3139         */
3140        if (set && !ice_is_vf_trusted(vf) &&
3141            (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3142                dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses: VF-%d is not trusted, switch the VF to trusted mode in order to add more MAC addresses\n",
3143                        vf->vf_id);
3144                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3145                goto handle_mac_exit;
3146        }
3147
3148        vsi = pf->vsi[vf->lan_vsi_idx];
3149        if (!vsi) {
3150                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3151                goto handle_mac_exit;
3152        }
3153
3154        for (i = 0; i < al->num_elements; i++) {
3155                u8 *mac_addr = al->list[i].addr;
3156                int result;
3157
3158                if (is_broadcast_ether_addr(mac_addr) ||
3159                    is_zero_ether_addr(mac_addr))
3160                        continue;
3161
3162                result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3163                if (result == -EEXIST || result == -ENOENT) {
3164                        continue;
3165                } else if (result) {
3166                        v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3167                        goto handle_mac_exit;
3168                }
3169        }
3170
3171handle_mac_exit:
3172        /* send the response to the VF */
3173        return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3174}
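
/* A minimal sketch of the message this handler parses, as a VF-side
 * driver might build it (not code from this file; vsi_id, new_mac and
 * the transport helper ice_send_to_pf() are hypothetical):
 *
 *	struct virtchnl_ether_addr_list al = {};
 *
 *	al.vsi_id = vsi_id;
 *	al.num_elements = 1;
 *	ether_addr_copy(al.list[0].addr, new_mac);
 *	ice_send_to_pf(VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)&al, sizeof(al));
 */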
3175
3176/**
3177 * ice_vc_add_mac_addr_msg
3178 * @vf: pointer to the VF info
3179 * @msg: pointer to the msg buffer
3180 *
3181 * add guest MAC address filter
3182 */
3183static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3184{
3185        return ice_vc_handle_mac_addr_msg(vf, msg, true);
3186}
3187
3188/**
3189 * ice_vc_del_mac_addr_msg
3190 * @vf: pointer to the VF info
3191 * @msg: pointer to the msg buffer
3192 *
3193 * remove guest MAC address filter
3194 */
3195static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3196{
3197        return ice_vc_handle_mac_addr_msg(vf, msg, false);
3198}
3199
3200/**
3201 * ice_vc_request_qs_msg
3202 * @vf: pointer to the VF info
3203 * @msg: pointer to the msg buffer
3204 *
3205 * VFs get a default number of queues but can use this message to request a
3206 * different number. If the request succeeds, the PF will reset the VF and
3207 * return 0. If it fails, the PF will inform the VF of the number of
3208 * available queue pairs via a virtchnl message response.
3209 */
3210static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3211{
3212        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3213        struct virtchnl_vf_res_request *vfres =
3214                (struct virtchnl_vf_res_request *)msg;
3215        u16 req_queues = vfres->num_queue_pairs;
3216        struct ice_pf *pf = vf->pf;
3217        u16 max_allowed_vf_queues;
3218        u16 tx_rx_queue_left;
3219        struct device *dev;
3220        u16 cur_queues;
3221
3222        dev = ice_pf_to_dev(pf);
3223        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3224                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3225                goto error_param;
3226        }
3227
3228        cur_queues = vf->num_vf_qs;
3229        tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3230                                 ice_get_avail_rxq_count(pf));
3231        max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
3232        if (!req_queues) {
3233                dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
3234                        vf->vf_id);
3235        } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
3236                dev_err(dev, "VF %d tried to request more than %d queues.\n",
3237                        vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3238                vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
3239        } else if (req_queues > cur_queues &&
3240                   req_queues - cur_queues > tx_rx_queue_left) {
3241                dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
3242                         vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
3243                vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
3244                                               ICE_MAX_RSS_QS_PER_VF);
3245        } else {
3246                /* request is successful, so reset the VF */
3247                vf->num_req_qs = req_queues;
3248                ice_vc_reset_vf(vf);
3249                dev_info(dev, "VF %d granted request of %u queues.\n",
3250                         vf->vf_id, req_queues);
3251                return 0;
3252        }
3253
3254error_param:
3255        /* send the response to the VF */
3256        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
3257                                     v_ret, (u8 *)vfres, sizeof(*vfres));
3258}
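
/* A minimal sketch of a queue request this handler grants or trims, as
 * a VF-side driver might send it (not code from this file; the
 * transport helper ice_send_to_pf() is hypothetical):
 *
 *	struct virtchnl_vf_res_request vfres = {};
 *
 *	vfres.num_queue_pairs = 8;  // 1..ICE_MAX_RSS_QS_PER_VF
 *	ice_send_to_pf(VIRTCHNL_OP_REQUEST_QUEUES, (u8 *)&vfres,
 *	               sizeof(vfres));
 */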
3259
3260/**
3261 * ice_set_vf_port_vlan
3262 * @netdev: network interface device structure
3263 * @vf_id: VF identifier
3264 * @vlan_id: VLAN ID being set
3265 * @qos: priority setting
3266 * @vlan_proto: VLAN protocol
3267 *
3268 * program VF Port VLAN ID and/or QoS
3269 */
3270int
3271ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3272                     __be16 vlan_proto)
3273{
3274        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3275        struct device *dev;
3276        struct ice_vf *vf;
3277        u16 vlanprio;
3278        int ret;
3279
3280        dev = ice_pf_to_dev(pf);
3281        if (ice_validate_vf_id(pf, vf_id))
3282                return -EINVAL;
3283
3284        if (vlan_id >= VLAN_N_VID || qos > 7) {
3285                dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3286                        vf_id, vlan_id, qos);
3287                return -EINVAL;
3288        }
3289
3290        if (vlan_proto != htons(ETH_P_8021Q)) {
3291                dev_err(dev, "VF VLAN protocol is not supported\n");
3292                return -EPROTONOSUPPORT;
3293        }
3294
3295        vf = &pf->vf[vf_id];
3296        ret = ice_check_vf_ready_for_cfg(vf);
3297        if (ret)
3298                return ret;
3299
3300        vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3301
3302        if (vf->port_vlan_info == vlanprio) {
3303                /* duplicate request, so just return success */
3304                dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
3305                return 0;
3306        }
3307
3308        vf->port_vlan_info = vlanprio;
3309
3310        if (vf->port_vlan_info)
3311                dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
3312                         vlan_id, qos, vf_id);
3313        else
3314                dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
3315
3316        ice_vc_reset_vf(vf);
3317
3318        return 0;
3319}
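
/* Worked example of the vlanprio encoding above, for an illustrative
 * "ip link set <pf> vf 0 vlan 100 qos 5" request: with VLAN_PRIO_SHIFT
 * equal to 13,
 *
 *	vlanprio = 100 | (5 << 13) = 0x0064 | 0xA000 = 0xA064
 *
 * so the 12-bit VLAN ID occupies bits 0-11 and the 3-bit QoS/PCP value
 * occupies bits 13-15, matching the 802.1Q TCI layout.
 */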
3320
3321/**
3322 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3323 * @caps: VF driver negotiated capabilities
3324 *
3325 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3326 */
3327static bool ice_vf_vlan_offload_ena(u32 caps)
3328{
3329        return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3330}
3331
3332/**
3333 * ice_vc_process_vlan_msg
3334 * @vf: pointer to the VF info
3335 * @msg: pointer to the msg buffer
3336 * @add_v: Add VLAN if true, otherwise delete VLAN
3337 *
3338 * Process virtchnl op to add or remove programmed guest VLAN ID
3339 */
3340static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3341{
3342        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3343        struct virtchnl_vlan_filter_list *vfl =
3344            (struct virtchnl_vlan_filter_list *)msg;
3345        struct ice_pf *pf = vf->pf;
3346        bool vlan_promisc = false;
3347        struct ice_vsi *vsi;
3348        struct device *dev;
3349        struct ice_hw *hw;
3350        int status = 0;
3351        u8 promisc_m;
3352        int i;
3353
3354        dev = ice_pf_to_dev(pf);
3355        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3356                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3357                goto error_param;
3358        }
3359
3360        if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3361                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3362                goto error_param;
3363        }
3364
3365        if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3366                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3367                goto error_param;
3368        }
3369
3370        for (i = 0; i < vfl->num_elements; i++) {
3371                if (vfl->vlan_id[i] >= VLAN_N_VID) {
3372                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3373                        dev_err(dev, "invalid VF VLAN id %d\n",
3374                                vfl->vlan_id[i]);
3375                        goto error_param;
3376                }
3377        }
3378
3379        hw = &pf->hw;
3380        vsi = pf->vsi[vf->lan_vsi_idx];
3381        if (!vsi) {
3382                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3383                goto error_param;
3384        }
3385
3386        if (add_v && !ice_is_vf_trusted(vf) &&
3387            vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3388                dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
3389                         vf->vf_id);
3390                /* There is no need to let VF know about being not trusted,
3391                 * so we can just return success message here
3392                 */
3393                goto error_param;
3394        }
3395
3396        if (vsi->info.pvid) {
3397                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3398                goto error_param;
3399        }
3400
3401        if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3402             test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3403            test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
3404                vlan_promisc = true;
3405
3406        if (add_v) {
3407                for (i = 0; i < vfl->num_elements; i++) {
3408                        u16 vid = vfl->vlan_id[i];
3409
3410                        if (!ice_is_vf_trusted(vf) &&
3411                            vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3412                                dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
3413                                         vf->vf_id);
3414                                /* There is no need to let VF know about being
3415                                 * not trusted, so we can just return success
3416                                 * message here as well.
3417                                 */
3418                                goto error_param;
3419                        }
3420
3421                        /* VLAN 0 is added by default for each VF so that Tx
3422                         * VLAN anti-spoof can be enabled without triggering
3423                         * MDD events; there is no need to add it again here
3424                         */
3425                        if (!vid)
3426                                continue;
3427
3428                        status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3429                        if (status) {
3430                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3431                                goto error_param;
3432                        }
3433
3434                        /* Enable VLAN pruning when non-zero VLAN is added */
3435                        if (!vlan_promisc && vid &&
3436                            !ice_vsi_is_vlan_pruning_ena(vsi)) {
3437                                status = ice_cfg_vlan_pruning(vsi, true, false);
3438                                if (status) {
3439                                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3440                                        dev_err(dev, "Failed to enable VLAN pruning for VLAN ID %d, error %d\n",
3441                                                vid, status);
3442                                        goto error_param;
3443                                }
3444                        } else if (vlan_promisc) {
3445                                /* Enable Ucast/Mcast VLAN promiscuous mode */
3446                                promisc_m = ICE_PROMISC_VLAN_TX |
3447                                            ICE_PROMISC_VLAN_RX;
3448
3449                                status = ice_set_vsi_promisc(hw, vsi->idx,
3450                                                             promisc_m, vid);
3451                                if (status) {
3452                                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3453                                        dev_err(dev, "Failed to enable unicast/multicast promiscuous mode on VLAN ID %d, error %d\n",
3454                                                vid, status);
3455                                }
3456                        }
3457                }
3458        } else {
3459                /* For an untrusted VF, the number of VLAN elements passed
3460                 * to the PF for removal might exceed the number of VLAN
3461                 * filters programmed for that VF, so cap the loop at the
3462                 * number of VLANs added earlier with the add VLAN opcode.
3463                 * This avoids removing a VLAN that doesn't exist, which
3464                 * would send an erroneous failure message back to the VF
3465                 */
3466                int num_vf_vlan;
3467
3468                num_vf_vlan = vsi->num_vlan;
3469                for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
3470                        u16 vid = vfl->vlan_id[i];
3471
3472                        /* VLAN 0 is added by default for each VF so that Tx
3473                         * VLAN anti-spoof can be enabled without triggering
3474                         * MDD events; a VIRTCHNL request must not remove it
3475                         */
3476                        if (!vid)
3477                                continue;
3478
3479                        /* Make sure ice_vsi_kill_vlan is successful before
3480                         * updating VLAN information
3481                         */
3482                        status = ice_vsi_kill_vlan(vsi, vid);
3483                        if (status) {
3484                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3485                                goto error_param;
3486                        }
3487
3488                        /* Disable VLAN pruning when only VLAN 0 is left */
3489                        if (vsi->num_vlan == 1 &&
3490                            ice_vsi_is_vlan_pruning_ena(vsi))
3491                                ice_cfg_vlan_pruning(vsi, false, false);
3492
3493                        /* Disable Unicast/Multicast VLAN promiscuous mode */
3494                        if (vlan_promisc) {
3495                                promisc_m = ICE_PROMISC_VLAN_TX |
3496                                            ICE_PROMISC_VLAN_RX;
3497
3498                                ice_clear_vsi_promisc(hw, vsi->idx,
3499                                                      promisc_m, vid);
3500                        }
3501                }
3502        }
3503
3504error_param:
3505        /* send the response to the VF */
3506        if (add_v)
3507                return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
3508                                             NULL, 0);
3509        else
3510                return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
3511                                             NULL, 0);
3512}
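
/* A minimal sketch of the VLAN list this handler processes, as a
 * VF-side driver might build it (not code from this file; vsi_id and
 * the transport helper ice_send_to_pf() are hypothetical):
 *
 *	struct virtchnl_vlan_filter_list vfl = {};
 *
 *	vfl.vsi_id = vsi_id;
 *	vfl.num_elements = 1;
 *	vfl.vlan_id[0] = 100;  // must be below VLAN_N_VID (4096)
 *	ice_send_to_pf(VIRTCHNL_OP_ADD_VLAN, (u8 *)&vfl, sizeof(vfl));
 */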
3513
3514/**
3515 * ice_vc_add_vlan_msg
3516 * @vf: pointer to the VF info
3517 * @msg: pointer to the msg buffer
3518 *
3519 * Add and program guest VLAN ID
3520 */
3521static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
3522{
3523        return ice_vc_process_vlan_msg(vf, msg, true);
3524}
3525
3526/**
3527 * ice_vc_remove_vlan_msg
3528 * @vf: pointer to the VF info
3529 * @msg: pointer to the msg buffer
3530 *
3531 * remove programmed guest VLAN ID
3532 */
3533static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
3534{
3535        return ice_vc_process_vlan_msg(vf, msg, false);
3536}
3537
3538/**
3539 * ice_vc_ena_vlan_stripping
3540 * @vf: pointer to the VF info
3541 *
3542 * Enable VLAN header stripping for a given VF
3543 */
3544static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3545{
3546        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3547        struct ice_pf *pf = vf->pf;
3548        struct ice_vsi *vsi;
3549
3550        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3551                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3552                goto error_param;
3553        }
3554
3555        if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3556                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3557                goto error_param;
3558        }
3559
3560        vsi = pf->vsi[vf->lan_vsi_idx];
3561        if (!vsi || ice_vsi_manage_vlan_stripping(vsi, true))
3562                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3563
3564error_param:
3565        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3566                                     v_ret, NULL, 0);
3567}
3568
3569/**
3570 * ice_vc_dis_vlan_stripping
3571 * @vf: pointer to the VF info
3572 *
3573 * Disable VLAN header stripping for a given VF
3574 */
3575static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3576{
3577        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3578        struct ice_pf *pf = vf->pf;
3579        struct ice_vsi *vsi;
3580
3581        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3582                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3583                goto error_param;
3584        }
3585
3586        if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3587                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3588                goto error_param;
3589        }
3590
3591        vsi = pf->vsi[vf->lan_vsi_idx];
3592        if (!vsi) {
3593                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3594                goto error_param;
3595        }
3596
3597        if (ice_vsi_manage_vlan_stripping(vsi, false))
3598                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3599
3600error_param:
3601        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3602                                     v_ret, NULL, 0);
3603}
3604
3605/**
3606 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3607 * @vf: VF to enable/disable VLAN stripping for on initialization
3608 *
3609 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
3610 * the flag is cleared then we want to disable stripping. For example, the flag
3611 * will be cleared when port VLANs are configured by the administrator before
3612 * passing the VF to the guest or if the AVF driver doesn't support VLAN
3613 * offloads.
3614 */
3615static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3616{
3617        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3618
3619        if (!vsi)
3620                return -EINVAL;
3621
3622        /* don't modify stripping if port VLAN is configured */
3623        if (vsi->info.pvid)
3624                return 0;
3625
3626        if (ice_vf_vlan_offload_ena(vf->driver_caps))
3627                return ice_vsi_manage_vlan_stripping(vsi, true);
3628        else
3629                return ice_vsi_manage_vlan_stripping(vsi, false);
3630}
3631
3632/**
3633 * ice_vc_process_vf_msg - Process request from VF
3634 * @pf: pointer to the PF structure
3635 * @event: pointer to the AQ event
3636 *
3637 * called from the common asq/arq handler to
3638 * process request from VF
3639 */
3640void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3641{
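        /* The mailbox ARQ descriptor reuses generic fields here: cookie_high
         * carries the virtchnl opcode and the descriptor's retval field
         * carries the sending VF's ID.
         */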
3642        u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3643        s16 vf_id = le16_to_cpu(event->desc.retval);
3644        u16 msglen = event->msg_len;
3645        u8 *msg = event->msg_buf;
3646        struct ice_vf *vf = NULL;
3647        struct device *dev;
3648        int err = 0;
3649
3650        dev = ice_pf_to_dev(pf);
3651        if (ice_validate_vf_id(pf, vf_id)) {
3652                err = -EINVAL;
3653                goto error_handler;
3654        }
3655
3656        vf = &pf->vf[vf_id];
3657
3658        /* Check if VF is disabled. */
3659        if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3660                err = -EPERM;
3661                goto error_handler;
3662        }
3663
3664        /* Perform basic checks on the msg */
3665        err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3666        if (err) {
3667                if (err == VIRTCHNL_STATUS_ERR_PARAM)
3668                        err = -EPERM;
3669                else
3670                        err = -EINVAL;
3671        }
3672
3673error_handler:
3674        if (err) {
3675                if (vf) /* NULL if the VF ID failed validation above */
3676                        ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
3677                dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3678                        vf_id, v_opcode, msglen, err);
3679                return;
3680        }
3681
3682        switch (v_opcode) {
3683        case VIRTCHNL_OP_VERSION:
3684                err = ice_vc_get_ver_msg(vf, msg);
3685                break;
3686        case VIRTCHNL_OP_GET_VF_RESOURCES:
3687                err = ice_vc_get_vf_res_msg(vf, msg);
3688                if (ice_vf_init_vlan_stripping(vf))
3689                        dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
3690                                vf->vf_id);
3691                ice_vc_notify_vf_link_state(vf);
3692                break;
3693        case VIRTCHNL_OP_RESET_VF:
3694                ice_vc_reset_vf_msg(vf);
3695                break;
3696        case VIRTCHNL_OP_ADD_ETH_ADDR:
3697                err = ice_vc_add_mac_addr_msg(vf, msg);
3698                break;
3699        case VIRTCHNL_OP_DEL_ETH_ADDR:
3700                err = ice_vc_del_mac_addr_msg(vf, msg);
3701                break;
3702        case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3703                err = ice_vc_cfg_qs_msg(vf, msg);
3704                break;
3705        case VIRTCHNL_OP_ENABLE_QUEUES:
3706                err = ice_vc_ena_qs_msg(vf, msg);
3707                ice_vc_notify_vf_link_state(vf);
3708                break;
3709        case VIRTCHNL_OP_DISABLE_QUEUES:
3710                err = ice_vc_dis_qs_msg(vf, msg);
3711                break;
3712        case VIRTCHNL_OP_REQUEST_QUEUES:
3713                err = ice_vc_request_qs_msg(vf, msg);
3714                break;
3715        case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3716                err = ice_vc_cfg_irq_map_msg(vf, msg);
3717                break;
3718        case VIRTCHNL_OP_CONFIG_RSS_KEY:
3719                err = ice_vc_config_rss_key(vf, msg);
3720                break;
3721        case VIRTCHNL_OP_CONFIG_RSS_LUT:
3722                err = ice_vc_config_rss_lut(vf, msg);
3723                break;
3724        case VIRTCHNL_OP_GET_STATS:
3725                err = ice_vc_get_stats_msg(vf, msg);
3726                break;
3727        case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3728                err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
3729                break;
3730        case VIRTCHNL_OP_ADD_VLAN:
3731                err = ice_vc_add_vlan_msg(vf, msg);
3732                break;
3733        case VIRTCHNL_OP_DEL_VLAN:
3734                err = ice_vc_remove_vlan_msg(vf, msg);
3735                break;
3736        case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3737                err = ice_vc_ena_vlan_stripping(vf);
3738                break;
3739        case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3740                err = ice_vc_dis_vlan_stripping(vf);
3741                break;
3742        case VIRTCHNL_OP_UNKNOWN:
3743        default:
3744                dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3745                        vf_id);
3746                err = ice_vc_send_msg_to_vf(vf, v_opcode,
3747                                            VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3748                                            NULL, 0);
3749                break;
3750        }
3751        if (err) {
3752                /* Nothing further can be done with an error return value
3753                 * here, so just log that the request was not honored.
3754                 */
3755                dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3756                         vf_id, v_opcode, err);
3757        }
3758}
3759
3760/**
3761 * ice_get_vf_cfg
3762 * @netdev: network interface device structure
3763 * @vf_id: VF identifier
3764 * @ivi: VF configuration structure
3765 *
3766 * return VF configuration
3767 */
3768int
3769ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3770{
3771        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3772        struct ice_vf *vf;
3773
3774        if (ice_validate_vf_id(pf, vf_id))
3775                return -EINVAL;
3776
3777        vf = &pf->vf[vf_id];
3778
3779        if (ice_check_vf_init(pf, vf))
3780                return -EBUSY;
3781
3782        ivi->vf = vf_id;
3783        ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3784
3785        /* VF configuration for VLAN and applicable QoS */
3786        ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3787        ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3788
3789        ivi->trusted = vf->trusted;
3790        ivi->spoofchk = vf->spoofchk;
3791        if (!vf->link_forced)
3792                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3793        else if (vf->link_up)
3794                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3795        else
3796                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3797        ivi->max_tx_rate = vf->tx_rate;
3798        ivi->min_tx_rate = 0;
3799        return 0;
3800}
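
/* This callback backs RTM_GETLINK; an illustrative "ip link show <pf>"
 * renders the fields filled in above roughly as:
 *
 *	vf 0 MAC 00:11:22:33:44:55, vlan 100, qos 5, spoof checking on,
 *	link-state auto, trust off
 */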
3801
3802/**
3803 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
3804 * @pf: PF used to reference the switch's rules
3805 * @umac: unicast MAC to compare against existing switch rules
3806 *
3807 * Return true on the first/any match, else return false
3808 */
3809static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
3810{
3811        struct ice_sw_recipe *mac_recipe_list =
3812                &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
3813        struct ice_fltr_mgmt_list_entry *list_itr;
3814        struct list_head *rule_head;
3815        struct mutex *rule_lock; /* protect MAC filter list access */
3816
3817        rule_head = &mac_recipe_list->filt_rules;
3818        rule_lock = &mac_recipe_list->filt_rule_lock;
3819
3820        mutex_lock(rule_lock);
3821        list_for_each_entry(list_itr, rule_head, list_entry) {
3822                u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3823
3824                if (ether_addr_equal(existing_mac, umac)) {
3825                        mutex_unlock(rule_lock);
3826                        return true;
3827                }
3828        }
3829
3830        mutex_unlock(rule_lock);
3831
3832        return false;
3833}
3834
3835/**
3836 * ice_set_vf_mac
3837 * @netdev: network interface device structure
3838 * @vf_id: VF identifier
3839 * @mac: MAC address
3840 *
3841 * program VF MAC address
3842 */
3843int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3844{
3845        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3846        struct ice_vf *vf;
3847        int ret;
3848
3849        if (ice_validate_vf_id(pf, vf_id))
3850                return -EINVAL;
3851
3852        if (is_multicast_ether_addr(mac)) {
3853                netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3854                return -EINVAL;
3855        }
3856
3857        vf = &pf->vf[vf_id];
3858        /* nothing left to do, unicast MAC already set */
3859        if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
3860                return 0;
3861
3862        ret = ice_check_vf_ready_for_cfg(vf);
3863        if (ret)
3864                return ret;
3865
3866        if (ice_unicast_mac_exists(pf, mac)) {
3867                netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Refusing to set VF %u unicast MAC address to %pM\n",
3868                           mac, vf_id, mac);
3869                return -EINVAL;
3870        }
3871
3872        /* VF is notified of its new MAC via the PF's response to the
3873         * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
3874         */
3875        ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3876        if (is_zero_ether_addr(mac)) {
3877                /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
3878                vf->pf_set_mac = false;
3879                netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
3880                            vf->vf_id);
3881        } else {
3882                /* PF will add MAC rule for the VF */
3883                vf->pf_set_mac = true;
3884                netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
3885                            mac, vf_id);
3886        }
3887
3888        ice_vc_reset_vf(vf);
3889        return 0;
3890}
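
/* This callback backs, e.g., "ip link set <pf> vf 0 mac
 * 00:11:22:33:44:55" (illustrative); passing an all-zero MAC clears the
 * administratively set address and returns MAC control to the VF.
 */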
3891
3892/**
3893 * ice_set_vf_trust
3894 * @netdev: network interface device structure
3895 * @vf_id: VF identifier
3896 * @trusted: Boolean value to enable/disable trusted VF
3897 *
3898 * Enable or disable a given VF as trusted
3899 */
3900int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3901{
3902        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3903        struct ice_vf *vf;
3904        int ret;
3905
3906        if (ice_validate_vf_id(pf, vf_id))
3907                return -EINVAL;
3908
3909        vf = &pf->vf[vf_id];
3910        ret = ice_check_vf_ready_for_cfg(vf);
3911        if (ret)
3912                return ret;
3913
3914        /* Check if already trusted */
3915        if (trusted == vf->trusted)
3916                return 0;
3917
3918        vf->trusted = trusted;
3919        ice_vc_reset_vf(vf);
3920        dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
3921                 vf_id, trusted ? "" : "un");
3922
3923        return 0;
3924}
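
/* This callback backs, e.g., "ip link set <pf> vf 0 trust on"
 * (illustrative); the VF is reset so the new trust policy takes effect
 * on reinitialization.
 */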
3925
3926/**
3927 * ice_set_vf_link_state
3928 * @netdev: network interface device structure
3929 * @vf_id: VF identifier
3930 * @link_state: required link state
3931 *
3932 * Set VF's link state, irrespective of physical link state status
3933 */
3934int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3935{
3936        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3937        struct ice_vf *vf;
3938        int ret;
3939
3940        if (ice_validate_vf_id(pf, vf_id))
3941                return -EINVAL;
3942
3943        vf = &pf->vf[vf_id];
3944        ret = ice_check_vf_ready_for_cfg(vf);
3945        if (ret)
3946                return ret;
3947
3948        switch (link_state) {
3949        case IFLA_VF_LINK_STATE_AUTO:
3950                vf->link_forced = false;
3951                break;
3952        case IFLA_VF_LINK_STATE_ENABLE:
3953                vf->link_forced = true;
3954                vf->link_up = true;
3955                break;
3956        case IFLA_VF_LINK_STATE_DISABLE:
3957                vf->link_forced = true;
3958                vf->link_up = false;
3959                break;
3960        default:
3961                return -EINVAL;
3962        }
3963
3964        ice_vc_notify_vf_link_state(vf);
3965
3966        return 0;
3967}
3968
3969/**
3970 * ice_get_vf_stats - populate some stats for the VF
3971 * @netdev: the netdev of the PF
3972 * @vf_id: the host OS identifier (0-255)
3973 * @vf_stats: pointer to the OS memory to be initialized
3974 */
3975int ice_get_vf_stats(struct net_device *netdev, int vf_id,
3976                     struct ifla_vf_stats *vf_stats)
3977{
3978        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3979        struct ice_eth_stats *stats;
3980        struct ice_vsi *vsi;
3981        struct ice_vf *vf;
3982        int ret;
3983
3984        if (ice_validate_vf_id(pf, vf_id))
3985                return -EINVAL;
3986
3987        vf = &pf->vf[vf_id];
3988        ret = ice_check_vf_ready_for_cfg(vf);
3989        if (ret)
3990                return ret;
3991
3992        vsi = pf->vsi[vf->lan_vsi_idx];
3993        if (!vsi)
3994                return -EINVAL;
3995
3996        ice_update_eth_stats(vsi);
3997        stats = &vsi->eth_stats;
3998
3999        memset(vf_stats, 0, sizeof(*vf_stats));
4000
4001        vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4002                stats->rx_multicast;
4003        vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4004                stats->tx_multicast;
4005        vf_stats->rx_bytes   = stats->rx_bytes;
4006        vf_stats->tx_bytes   = stats->tx_bytes;
4007        vf_stats->broadcast  = stats->rx_broadcast;
4008        vf_stats->multicast  = stats->rx_multicast;
4009        vf_stats->rx_dropped = stats->rx_discards;
4010        vf_stats->tx_dropped = stats->tx_discards;
4011
4012        return 0;
4013}
4014
4015/**
4016 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
4017 * @vf: pointer to the VF structure
4018 */
4019void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4020{
4021        struct ice_pf *pf = vf->pf;
4022        struct device *dev;
4023
4024        dev = ice_pf_to_dev(pf);
4025
4026        dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4027                 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4028                 vf->dflt_lan_addr.addr,
4029                 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4030                          ? "on" : "off");
4031}
4032
4033/**
4034 * ice_print_vfs_mdd_events - print VFs malicious driver detect events
4035 * @pf: pointer to the PF structure
4036 *
4037 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
4038 */
4039void ice_print_vfs_mdd_events(struct ice_pf *pf)
4040{
4041        struct device *dev = ice_pf_to_dev(pf);
4042        struct ice_hw *hw = &pf->hw;
4043        int i;
4044
4045        /* check that there are pending MDD events to print */
4046        if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
4047                return;
4048
4049        /* VF MDD event logs are rate limited to one second intervals */
4050        if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4051                return;
4052
4053        pf->last_printed_mdd_jiffies = jiffies;
4054
4055        ice_for_each_vf(pf, i) {
4056                struct ice_vf *vf = &pf->vf[i];
4057
4058                /* only print Rx MDD event message if there are new events */
4059                if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4060                        vf->mdd_rx_events.last_printed =
4061                                                        vf->mdd_rx_events.count;
4062                        ice_print_vf_rx_mdd_event(vf);
4063                }
4064
4065                /* only print Tx MDD event message if there are new events */
4066                if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4067                        vf->mdd_tx_events.last_printed =
4068                                                        vf->mdd_tx_events.count;
4069
4070                        dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4071                                 vf->mdd_tx_events.count, hw->pf_id, i,
4072                                 vf->dflt_lan_addr.addr);
4073                }
4074        }
4075}
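
/* The rate limit above relies on jiffies arithmetic:
 * time_is_after_jiffies(t) is true while t is still in the future, so
 * time_is_after_jiffies(last_printed_mdd_jiffies + HZ) holds for one
 * second after the last print and the function returns early without
 * logging.
 */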
4076
4077/**
4078 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
4079 * @pdev: pointer to a pci_dev structure
4080 *
4081 * Called when recovering from a PF FLR to restore interrupt capability to
4082 * the VFs.
4083 */
4084void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
4085{
4086        struct pci_dev *vfdev;
4087        u16 vf_id;
4088        int pos;
4089
4090        if (!pci_num_vf(pdev))
4091                return;
4092
4093        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4094        if (pos) {
4095                pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
4096                                     &vf_id);
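                /* pci_get_device() returns the next matching device with an
                 * elevated reference count and drops the reference on the
                 * device passed in, so the loop below visits every VF of this
                 * vendor/device ID without leaking references.
                 */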
4097                vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
4098                while (vfdev) {
4099                        if (vfdev->is_virtfn && vfdev->physfn == pdev)
4100                                pci_restore_msi_state(vfdev);
4101                        vfdev = pci_get_device(pdev->vendor, vf_id,
4102                                               vfdev);
4103                }
4104        }
4105}
4106