linux/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
        /* vf_id range is only valid for 0-255, and should always be unsigned */
        if (vf_id >= pf->num_alloc_vfs) {
                dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
                return -EINVAL;
        }
        return 0;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
                dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
                        vf->vf_id);
                return -EBUSY;
        }
        return 0;
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @ice_err: error return code
 */
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
        switch (ice_err) {
        case ICE_SUCCESS:
                return VIRTCHNL_STATUS_SUCCESS;
        case ICE_ERR_BAD_PTR:
        case ICE_ERR_INVAL_SIZE:
        case ICE_ERR_DEVICE_NOT_SUPPORTED:
        case ICE_ERR_PARAM:
        case ICE_ERR_CFG:
                return VIRTCHNL_STATUS_ERR_PARAM;
        case ICE_ERR_NO_MEMORY:
                return VIRTCHNL_STATUS_ERR_NO_MEMORY;
        case ICE_ERR_NOT_READY:
        case ICE_ERR_RESET_FAILED:
        case ICE_ERR_FW_API_VER:
        case ICE_ERR_AQ_ERROR:
        case ICE_ERR_AQ_TIMEOUT:
        case ICE_ERR_AQ_FULL:
        case ICE_ERR_AQ_NO_WORK:
        case ICE_ERR_AQ_EMPTY:
                return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
        default:
                return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
        }
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
                    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
        struct ice_hw *hw = &pf->hw;
        unsigned int i;

        ice_for_each_vf(pf, i) {
                struct ice_vf *vf = &pf->vf[i];

                /* Not all vfs are enabled so skip the ones that are not */
                if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
                    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
                        continue;

                /* Ignore return value on purpose - a given VF may fail, but
                 * we need to keep going and send to all of them
                 */
                ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
                                      msglen, NULL);
        }
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
                 int ice_link_speed, bool link_up)
{
        if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
                pfe->event_data.link_event_adv.link_status = link_up;
                /* Speed in Mbps */
                pfe->event_data.link_event_adv.link_speed =
                        ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
        } else {
                pfe->event_data.link_event.link_status = link_up;
                /* Legacy method for virtchnl link speeds */
                pfe->event_data.link_event.link_speed =
                        (enum virtchnl_link_speed)
                        ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
        }
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
        return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
                !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;

        if (ice_check_vf_init(pf, vf))
                return false;

        if (ice_vf_has_no_qs_ena(vf))
                return false;
        else if (vf->link_forced)
                return vf->link_up;
        else
                return pf->hw.port_info->phy.link_info.link_info &
                        ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
        struct virtchnl_pf_event pfe = { 0 };
        struct ice_hw *hw = &vf->pf->hw;

        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = PF_EVENT_SEVERITY_INFO;

        if (ice_is_vf_link_up(vf))
                ice_set_pfe_link(vf, &pfe,
                                 hw->port_info->phy.link_info.link_speed, true);
        else
                ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

        ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
                              VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
                              sizeof(pfe), NULL);
}

/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 */
static void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
        vf->lan_vsi_idx = ICE_NO_VSI;
        vf->lan_vsi_num = ICE_NO_VSI;
}

/**
 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 * @vf: invalidate this VF's VSI after freeing it
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
        ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
        ice_vf_invalidate_vsi(vf);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        int i, last_vector_idx;

        /* First, disable VF's configuration API to prevent OS from
         * accessing the VF's VSI after it's freed or invalidated.
         */
        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

        /* free VSI and disconnect it from the parent uplink */
        if (vf->lan_vsi_idx != ICE_NO_VSI) {
                ice_vf_vsi_release(vf);
                vf->num_mac = 0;
        }

        last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
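        /* e.g. with hypothetical values first_vector_idx = 251 and
         * num_msix_per_vf = 17, last_vector_idx = 251 + 17 - 1 = 267
         */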

        /* clear VF MDD event information */
        memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
        memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

        /* Disable interrupts so that VF starts in a known state */
        for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
                wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
                ice_flush(&pf->hw);
        }
        /* reset some of the state variables keeping track of the resources */
        clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
        clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;
        struct device *dev;
        int first, last, v;
        struct ice_hw *hw;

        hw = &pf->hw;
        vsi = pf->vsi[vf->lan_vsi_idx];

        dev = ice_pf_to_dev(pf);
        wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
        wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

        first = vf->first_vector_idx;
        last = first + pf->num_msix_per_vf - 1;
        for (v = first; v <= last; v++) {
                u32 reg;

                reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
                        GLINT_VECT2FUNC_IS_PF_M) |
                       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
                        GLINT_VECT2FUNC_PF_NUM_M));
                wr32(hw, GLINT_VECT2FUNC(v), reg);
        }

        if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
                wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
        else
                dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

        if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
                wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
        else
                dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}
/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
        struct ice_res_tracker *res;

        if (!pf)
                return -EINVAL;

        res = pf->irq_tracker;
        if (!res)
                return -EINVAL;

        /* give back irq_tracker resources used */
        WARN_ON(pf->sriov_base_vector < res->num_entries);

        pf->sriov_base_vector = 0;

        return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
        /* Clear Rx/Tx enabled queues flag */
        bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
        bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
        clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;

        vsi = pf->vsi[vf->lan_vsi_idx];

        ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
        ice_vsi_stop_all_rx_rings(vsi);
        ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
        unsigned int tmp, i;

        if (!pf->vf)
                return;

        while (test_and_set_bit(__ICE_VF_DIS, pf->state))
                usleep_range(1000, 2000);

        /* Disable IOV before freeing resources. This lets any VF drivers
         * running in the host get themselves cleaned up before we yank
         * the carpet out from underneath their feet.
         */
        if (!pci_vfs_assigned(pf->pdev))
                pci_disable_sriov(pf->pdev);
        else
                dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

        /* Avoid wait time by stopping all VFs at the same time */
        ice_for_each_vf(pf, i)
                if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
                        ice_dis_vf_qs(&pf->vf[i]);

        tmp = pf->num_alloc_vfs;
        pf->num_qps_per_vf = 0;
        pf->num_alloc_vfs = 0;
        for (i = 0; i < tmp; i++) {
                if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
                        /* disable VF qp mappings and set VF disable state */
                        ice_dis_vf_mappings(&pf->vf[i]);
                        set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
                        ice_free_vf_res(&pf->vf[i]);
                }
        }

        if (ice_sriov_free_msix_res(pf))
                dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

        devm_kfree(dev, pf->vf);
        pf->vf = NULL;

        /* This check is for when the driver is unloaded while VFs are
         * assigned. Setting the number of VFs to 0 through sysfs is caught
         * before this function ever gets called.
         */
        if (!pci_vfs_assigned(pf->pdev)) {
                unsigned int vf_id;

                /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
                 * work correctly when SR-IOV gets re-enabled.
                 */
                for (vf_id = 0; vf_id < tmp; vf_id++) {
                        u32 reg_idx, bit_idx;

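                        /* Each GLGEN_VFLRSTAT register covers 32 VFs: the
                         * absolute VF ID selects the register (/ 32) and the
                         * bit within it (% 32). For example (hypothetical),
                         * absolute VF 40 maps to register 1, bit 8.
                         */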
                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
                        wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
                }
        }
        clear_bit(__ICE_VF_DIS, pf->state);
        clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
        struct ice_pf *pf = vf->pf;
        u32 reg, reg_idx, bit_idx;
        unsigned int vf_abs_id, i;
        struct device *dev;
        struct ice_hw *hw;

        dev = ice_pf_to_dev(pf);
        hw = &pf->hw;
        vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

        /* Inform VF that it is no longer active, as a warning */
        clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

        /* Disable VF's configuration API during reset. The flag is re-enabled
         * when it's safe again to access VF's VSI.
         */
        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

        /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
         * in the case of VFR. If this is done for PFR, it can mess up VF
         * resets because the VF driver may already have started cleanup
         * by the time we get here.
         */
        if (!is_pfr)
                wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);

        /* In the case of a VFLR, the HW has already reset the VF and we
         * just need to clean up, so don't hit the VFRTRIG register.
         */
        if (!is_vflr) {
                /* reset VF using VPGEN_VFRTRIG reg */
                reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
                reg |= VPGEN_VFRTRIG_VFSWR_M;
                wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
        }
        /* clear the VFLR bit in GLGEN_VFLRSTAT */
        reg_idx = (vf_abs_id) / 32;
        bit_idx = (vf_abs_id) % 32;
        wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
        ice_flush(hw);

        wr32(hw, PF_PCI_CIAA,
             VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
        for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
                reg = rd32(hw, PF_PCI_CIAD);
                /* no transactions pending so stop polling */
                if ((reg & VF_TRANS_PENDING_M) == 0)
                        break;

                dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
                udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
        }
}

/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true to enable the PVID, false to disable it
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
        struct ice_hw *hw = &vsi->back->hw;
        struct ice_aqc_vsi_props *info;
        struct ice_vsi_ctx *ctxt;
        enum ice_status status;
        int ret = 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (!ctxt)
                return -ENOMEM;

        ctxt->info = vsi->info;
        info = &ctxt->info;
        if (enable) {
                info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
                        ICE_AQ_VSI_PVLAN_INSERT_PVID |
                        ICE_AQ_VSI_VLAN_EMOD_STR;
                info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
        } else {
                info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
                        ICE_AQ_VSI_VLAN_MODE_ALL;
                info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
        }

        info->pvid = cpu_to_le16(pvid_info);
        info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
                                           ICE_AQ_VSI_PROP_SW_VALID);

        status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
        if (status) {
                dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
                         ice_stat_str(status),
                         ice_aq_str(hw->adminq.sq_last_status));
                ret = -EIO;
                goto out;
        }

        vsi->info.vlan_flags = info->vlan_flags;
        vsi->info.sw_flags2 = info->sw_flags2;
        vsi->info.pvid = info->pvid;
out:
        kfree(ctxt);
        return ret;
}

/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 */
static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
        return vf->pf->hw.port_info;
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
        struct ice_port_info *pi = ice_vf_get_port_info(vf);
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;

        vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);

        if (!vsi) {
                dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
                ice_vf_invalidate_vsi(vf);
                return NULL;
        }

        vf->lan_vsi_idx = vsi->idx;
        vf->lan_vsi_num = vsi->vsi_num;

        return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
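        /* Worked example with hypothetical values: if sriov_base_vector is
         * 200 and num_msix_per_vf is 17, VF 3's first PF-space vector is
         * 200 + 3 * 17 = 251.
         */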
        return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}

/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add VLAN filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
        struct device *dev = ice_pf_to_dev(vf->pf);
        u16 vlan_id = 0;
        int err;

        if (vf->port_vlan_info) {
                err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
                if (err) {
                        dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
                                vf->vf_id, err);
                        return err;
                }

                vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
        }

        /* vlan_id will either be 0 or the port VLAN number */
        err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
        if (err) {
                dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
                        vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
                        err);
                return err;
        }

        return 0;
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
        struct device *dev = ice_pf_to_dev(vf->pf);
        enum ice_status status;
        u8 broadcast[ETH_ALEN];

        eth_broadcast_addr(broadcast);
        status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
        if (status) {
                dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
                        vf->vf_id, ice_stat_str(status));
                return ice_status_to_errno(status);
        }

        vf->num_mac++;

        if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
                status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
                                          ICE_FWD_TO_VSI);
                if (status) {
                        dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
                                &vf->dflt_lan_addr.addr[0], vf->vf_id,
                                ice_stat_str(status));
                        return ice_status_to_errno(status);
                }
                vf->num_mac++;
        }

        return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
        if (vf->trusted)
                set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
        else
                clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
        int device_based_first_msix, device_based_last_msix;
        int pf_based_first_msix, pf_based_last_msix, v;
        struct ice_pf *pf = vf->pf;
        int device_based_vf_id;
        struct ice_hw *hw;
        u32 reg;

        hw = &pf->hw;
        pf_based_first_msix = vf->first_vector_idx;
        pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;

        device_based_first_msix = pf_based_first_msix +
                pf->hw.func_caps.common_cap.msix_vector_first_id;
        device_based_last_msix =
                (device_based_first_msix + pf->num_msix_per_vf) - 1;
        device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

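        /* Three index spaces are in play here: the PF-relative MSIX range
         * (used below for GLINT_VECT2FUNC), the device-global MSIX range
         * obtained by adding msix_vector_first_id (used for VPINT_ALLOC and
         * VPINT_ALLOC_PCI), and the device-global VF number obtained by
         * adding vf_base_id (used for GLINT_VECT2FUNC and VPINT_MBX_CTL).
         */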
        reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
                VPINT_ALLOC_FIRST_M) |
               ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
                VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
        wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

        reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
                 & VPINT_ALLOC_PCI_FIRST_M) |
               ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
                VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
        wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

        /* map each interrupt vector to the VF's function */
        for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
                reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
                        GLINT_VECT2FUNC_VF_NUM_M) |
                       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
                        GLINT_VECT2FUNC_PF_NUM_M));
                wr32(hw, GLINT_VECT2FUNC(v), reg);
        }

        /* Map mailbox interrupt to VF MSI-X vector 0 */
        wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
        struct device *dev = ice_pf_to_dev(vf->pf);
        struct ice_hw *hw = &vf->pf->hw;
        u32 reg;

        /* set regardless of mapping mode */
        wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

        /* VF Tx queues allocation */
        if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
                /* set the VF PF Tx queue range
                 * VFNUMQ value should be set to (number of queues - 1). A value
                 * of 0 means 1 queue and a value of 255 means 256 queues
                 */
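                /* For example (hypothetical values): txq_map[0] = 64 and
                 * max_txq = 16 program VFFIRSTQ = 64 and VFNUMQ = 15.
                 */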
                reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
                        VPLAN_TX_QBASE_VFFIRSTQ_M) |
                       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
                        VPLAN_TX_QBASE_VFNUMQ_M));
                wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
        } else {
                dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
        }

        /* set regardless of mapping mode */
        wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

        /* VF Rx queues allocation */
        if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
                /* set the VF PF Rx queue range
                 * VFNUMQ value should be set to (number of queues - 1). A value
                 * of 0 means 1 queue and a value of 255 means 256 queues
                 */
                reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
                        VPLAN_RX_QBASE_VFFIRSTQ_M) |
                       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
                        VPLAN_RX_QBASE_VFNUMQ_M));
                wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
        } else {
                dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
        }
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

        ice_ena_vf_msix_mappings(vf);
        ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_determine_res
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns a non-zero value if resources (queues/vectors) are available, or
 * zero if the PF cannot accommodate all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
        bool checked_min_res = false;
        int res;

        /* start by checking if PF can assign max number of resources for
         * all num_alloc_vfs.
         * if yes, return number per VF
         * If no, divide by 2 and roundup, check again
         * repeat the loop till we reach a point where even minimum resources
         * are not available, in that case return 0
         */
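        /* Worked example with hypothetical numbers: 8 VFs, 40 available
         * queues, max_res = 16, min_res = 1. 8 * 16 = 128 > 40, so try 8;
         * 8 * 8 = 64 > 40, so try 4; 8 * 4 = 32 <= 40, so each VF gets 4.
         */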
        res = max_res;
        while ((res >= min_res) && !checked_min_res) {
                int num_all_res;

                num_all_res = pf->num_alloc_vfs * res;
                if (num_all_res <= avail_res)
                        return res;

                if (res == min_res)
                        checked_min_res = true;

                res = DIV_ROUND_UP(res, 2);
        }
        return 0;
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
        struct ice_pf *pf;

        if (!vf || !q_vector)
                return -EINVAL;

        pf = vf->pf;

        /* always add one to account for the OICR being the first MSIX */
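        /* Continuing the hypothetical example above: with sriov_base_vector
         * at 200 and 17 vectors per VF, VF 3's q_vector 0 maps to
         * 200 + 17 * 3 + 0 + 1 = 252, just after its OICR vector at 251.
         */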
        return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
                q_vector->v_idx + 1;
}

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
        int i;

        if (!res)
                return -EINVAL;

        for (i = res->num_entries - 1; i >= 0; i--)
                if (res->list[i] & ICE_RES_VALID_BIT)
                        return i;

        return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * available in the PF's space for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
        u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
        int vectors_used = pf->irq_tracker->num_entries;
        int sriov_base_vector;

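        /* Carve the SR-IOV block from the end of the MSIX space. For example
         * (hypothetical values), 2048 total vectors and 8 VFs needing
         * 17 vectors each (136 total) put sriov_base_vector at 1912.
         */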
        sriov_base_vector = total_vectors - num_msix_needed;

        /* make sure we only grab irq_tracker entries from the list end and
         * that we have enough available MSIX vectors
         */
        if (sriov_base_vector < vectors_used)
                return -EINVAL;

        pf->sriov_base_vector = sriov_base_vector;

        return 0;
}

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be
 * used by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf)
{
        int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
        int msix_avail_per_vf, msix_avail_for_sriov;
        struct device *dev = ice_pf_to_dev(pf);
        u16 num_msix_per_vf, num_txq, num_rxq;

        if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
                return -EINVAL;

        /* determine MSI-X resources per VF */
        msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
                pf->irq_tracker->num_entries;
        msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
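        /* For example (hypothetical values), 150 vectors left for 16 VFs
         * gives 9 per VF, which selects the small tier of 5 vectors each.
         */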
        if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
                num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
        } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
                num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
        } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
                num_msix_per_vf = ICE_MIN_INTR_PER_VF;
        } else {
                dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
                        msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
                        pf->num_alloc_vfs);
                return -EIO;
        }

        /* determine queue resources per VF */
        num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
                                    min_t(u16,
                                          num_msix_per_vf - ICE_NONQ_VECS_VF,
                                          ICE_MAX_RSS_QS_PER_VF),
                                    ICE_MIN_QS_PER_VF);

        num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
                                    min_t(u16,
                                          num_msix_per_vf - ICE_NONQ_VECS_VF,
                                          ICE_MAX_RSS_QS_PER_VF),
                                    ICE_MIN_QS_PER_VF);

        if (!num_txq || !num_rxq) {
                dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
                        ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
                return -EIO;
        }

        if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
                dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
                        pf->num_alloc_vfs);
                return -EINVAL;
        }

        /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
        pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
        pf->num_msix_per_vf = num_msix_per_vf;
        dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
                 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);

        return 0;
}

/**
 * ice_clear_vf_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
        struct ice_hw *hw = &vf->pf->hw;
        u32 reg;

        reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
        reg &= ~VPGEN_VFRTRIG_VFSWR_M;
        wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
        ice_flush(hw);
}

/**
 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: promisc flag request from the VF to remove or add filter
 *
 * This function configures VF VSI promiscuous mode, based on the VF requests,
 * for Unicast, Multicast and VLAN
 */
static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
                       bool rm_promisc)
{
        struct ice_pf *pf = vf->pf;
        enum ice_status status = 0;
        struct ice_hw *hw;

        hw = &pf->hw;
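        /* The promiscuous helpers are VLAN-aware: VSIs with VLAN filters
         * toggle promiscuous mode per VLAN, port VLAN VFs scope it to their
         * PVID, and untagged VFs pass VLAN ID 0.
         */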
        if (vsi->num_vlan) {
                status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
                                                  rm_promisc);
        } else if (vf->port_vlan_info) {
                if (rm_promisc)
                        status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
                                                       vf->port_vlan_info);
                else
                        status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
                                                     vf->port_vlan_info);
        } else {
                if (rm_promisc)
                        status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
                                                       0);
                else
                        status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
                                                     0);
        }

        return status;
}

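/**
 * ice_vf_clear_counters - reset the counters and events tracked for a VF
 * @vf: VF to clear counters for
 */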
static void ice_vf_clear_counters(struct ice_vf *vf)
{
        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

        vf->num_mac = 0;
        vsi->num_vlan = 0;
        memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
        memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}

/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
        ice_vf_clear_counters(vf);
        ice_clear_vf_reset_trigger(vf);
}

/**
 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
 * @vf: VF to rebuild host configuration on
 */
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
        struct device *dev = ice_pf_to_dev(vf->pf);

        ice_vf_set_host_trust_cfg(vf);

        if (ice_vf_rebuild_host_mac_cfg(vf))
                dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
                        vf->vf_id);

        if (ice_vf_rebuild_host_vlan_cfg(vf))
                dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
                        vf->vf_id);
}

/**
 * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
 * @vf: VF to release and setup the VSI for
 *
 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
 * configuration change, etc.).
 */
static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
{
        ice_vf_vsi_release(vf);
        if (!ice_vf_vsi_setup(vf))
                return -ENOMEM;

        return 0;
}

/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 */
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;

        vsi = pf->vsi[vf->lan_vsi_idx];

        if (ice_vsi_rebuild(vsi, true)) {
                dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
                        vf->vf_id);
                return -EIO;
        }
        /* vsi->idx will remain the same in this case so don't update
         * vf->lan_vsi_idx
         */
        vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
        vf->lan_vsi_num = vsi->vsi_num;

        return 0;
}

/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
static void ice_vf_set_initialized(struct ice_vf *vf)
{
        ice_set_vf_state_qs_dis(vf);
        clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
        clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
        clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
        set_bit(ICE_VF_STATE_INIT, vf->vf_states);
}

/**
 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        struct ice_hw *hw;

        hw = &pf->hw;

        ice_vf_rebuild_host_cfg(vf);

        ice_vf_set_initialized(vf);
        ice_ena_vf_mappings(vf);
        wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
        struct device *dev = ice_pf_to_dev(pf);
        struct ice_hw *hw = &pf->hw;
        struct ice_vf *vf;
        int v, i;

        /* If we don't have any VFs, then there is nothing to reset */
        if (!pf->num_alloc_vfs)
                return false;

        /* If VFs have been disabled, there is no need to reset */
        if (test_and_set_bit(__ICE_VF_DIS, pf->state))
                return false;

        /* Begin reset on all VFs at once */
        ice_for_each_vf(pf, v)
                ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

        /* HW requires some time to make sure it can flush the FIFO for a VF
         * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
         * sequence to make sure that it has completed. We'll keep track of
         * the VFs using a simple iterator that increments once that VF has
         * finished resetting.
         */
        for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
                /* Check each VF in sequence */
                while (v < pf->num_alloc_vfs) {
                        u32 reg;

                        vf = &pf->vf[v];
                        reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
                        if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
                                /* only delay if the check failed */
                                usleep_range(10, 20);
                                break;
                        }

                        /* If the current VF has finished resetting, move on
                         * to the next VF in sequence.
                         */
                        v++;
                }
        }

        /* Display a warning if at least one VF didn't manage to reset in
         * time, but continue on with the operation.
         */
        if (v < pf->num_alloc_vfs)
                dev_warn(dev, "VF reset check timeout\n");

        /* free VF resources to begin resetting the VSI state */
        ice_for_each_vf(pf, v) {
                vf = &pf->vf[v];

                ice_vf_pre_vsi_rebuild(vf);
                ice_vf_rebuild_vsi(vf);
                ice_vf_post_vsi_rebuild(vf);
        }

        ice_flush(hw);
        clear_bit(__ICE_VF_DIS, pf->state);

        return true;
}

/**
 * ice_is_vf_disabled
 * @vf: pointer to the VF info
 *
 * Returns true if the PF or VF is disabled, false otherwise.
 */
static bool ice_is_vf_disabled(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;

        /* If the PF has been disabled, there is no need to reset the VF
         * until the PF is active again. Similarly, if the VF has been
         * disabled, this means something else is resetting the VF, so we
         * shouldn't continue. Otherwise, set disable VF state bit for actual
         * reset, and continue.
         */
        return (test_bit(__ICE_VF_DIS, pf->state) ||
                test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is currently in reset, resets successfully, or resets
 * are disabled and false otherwise.
 */
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
        struct ice_pf *pf = vf->pf;
        struct ice_vsi *vsi;
        struct device *dev;
        struct ice_hw *hw;
        bool rsd = false;
        u8 promisc_m;
        u32 reg;
        int i;

        dev = ice_pf_to_dev(pf);

        if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
                dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
                        vf->vf_id);
                return true;
        }

        if (ice_is_vf_disabled(vf)) {
                dev_dbg(dev, "VF %d is already disabled, there is no need to reset it; telling VM all is fine\n",
                        vf->vf_id);
                return true;
        }

        /* Set VF disable bit state here, before triggering reset */
        set_bit(ICE_VF_STATE_DIS, vf->vf_states);
        ice_trigger_vf_reset(vf, is_vflr, false);

        vsi = pf->vsi[vf->lan_vsi_idx];

        if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
                ice_dis_vf_qs(vf);

        /* Call Disable LAN Tx queue AQ whether or not queues are
         * enabled. This is needed for successful completion of VFR.
         */
        ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
                        NULL, ICE_VF_RESET, vf->vf_id, NULL);

        hw = &pf->hw;
        /* poll VPGEN_VFRSTAT reg to make sure
         * that reset is complete
         */
        for (i = 0; i < 10; i++) {
                /* VF reset requires driver to first reset the VF and then
                 * poll the status register to make sure that the reset
                 * completed successfully.
                 */
                reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
                if (reg & VPGEN_VFRSTAT_VFRD_M) {
                        rsd = true;
                        break;
                }

                /* only sleep if the reset is not done */
                usleep_range(10, 20);
        }

        /* Display a warning if the VF didn't manage to reset in time, but
         * continue on with the operation anyway.
         */
        if (!rsd)
                dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

        /* disable promiscuous modes in case they were enabled,
         * ignoring any errors if the disable fails
         */
        if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
            test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
                if (vf->port_vlan_info || vsi->num_vlan)
                        promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
                else
                        promisc_m = ICE_UCAST_PROMISC_BITS;

                vsi = pf->vsi[vf->lan_vsi_idx];
                if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
                        dev_err(dev, "disabling promiscuous mode failed\n");
        }

        ice_vf_pre_vsi_rebuild(vf);
        ice_vf_rebuild_vsi_with_release(vf);
        ice_vf_post_vsi_rebuild(vf);

        return true;
}

/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
        int i;

        ice_for_each_vf(pf, i)
                ice_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
        struct virtchnl_pf_event pfe;

        if (!pf->num_alloc_vfs)
                return;

        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
        ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
                            (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
{
        struct virtchnl_pf_event pfe;
        struct ice_pf *pf;

        if (!vf)
                return;

        pf = vf->pf;
        if (ice_validate_vf_id(pf, vf->vf_id))
                return;

        /* Bail out if VF is in disabled state, neither initialized, nor active
         * state - otherwise proceed with notifications
         */
        if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
             !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
            test_bit(ICE_VF_STATE_DIS, vf->vf_states))
                return;

        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
        ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
                              VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
                              NULL);
}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
        struct ice_pf *pf = vf->pf;
        u8 broadcast[ETH_ALEN];
        enum ice_status status;
        struct ice_vsi *vsi;
        struct device *dev;
        int err;

        vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

        dev = ice_pf_to_dev(pf);
        vsi = ice_vf_vsi_setup(vf);
        if (!vsi)
                return -ENOMEM;

        err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
        if (err) {
                dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
                         vf->vf_id);
                goto release_vsi;
        }

        eth_broadcast_addr(broadcast);
        status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
        if (status) {
                dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
                        vf->vf_id, ice_stat_str(status));
                err = ice_status_to_errno(status);
                goto release_vsi;
        }

        vf->num_mac = 1;

        return 0;

release_vsi:
        ice_vf_vsi_release(vf);
        return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
        struct ice_hw *hw = &pf->hw;
        int retval, i;

        ice_for_each_vf(pf, i) {
                struct ice_vf *vf = &pf->vf[i];

                ice_clear_vf_reset_trigger(vf);

                retval = ice_init_vf_vsi_res(vf);
                if (retval) {
                        dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
                                vf->vf_id, retval);
                        goto teardown;
                }

                set_bit(ICE_VF_STATE_INIT, vf->vf_states);
                ice_ena_vf_mappings(vf);
                wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
        }

        ice_flush(hw);
        return 0;

teardown:
        for (i = i - 1; i >= 0; i--) {
                struct ice_vf *vf = &pf->vf[i];

                ice_dis_vf_mappings(vf);
                ice_vf_vsi_release(vf);
        }

        return retval;
}

1496/**
1497 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
1498 * @pf: PF holding reference to all VFs for default configuration
1499 */
1500static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1501{
1502        int i;
1503
1504        ice_for_each_vf(pf, i) {
1505                struct ice_vf *vf = &pf->vf[i];
1506
1507                vf->pf = pf;
1508                vf->vf_id = i;
1509                vf->vf_sw_id = pf->first_sw;
1510                /* assign default capabilities */
1511                set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1512                vf->spoofchk = true;
1513                vf->num_vf_qs = pf->num_qps_per_vf;
1514        }
1515}
1516
1517/**
1518 * ice_alloc_vfs - allocate num_vfs in the PF structure
1519 * @pf: PF to store the allocated VFs in
1520 * @num_vfs: number of VFs to allocate
1521 */
1522static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1523{
1524        struct ice_vf *vfs;
1525
1526        vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1527                           GFP_KERNEL);
1528        if (!vfs)
1529                return -ENOMEM;
1530
1531        pf->vf = vfs;
1532        pf->num_alloc_vfs = num_vfs;
1533
1534        return 0;
1535}
1536
1537/**
1538 * ice_ena_vfs - enable VFs so they are ready to be used
1539 * @pf: pointer to the PF structure
1540 * @num_vfs: number of VFs to enable
1541 */
1542static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1543{
1544        struct device *dev = ice_pf_to_dev(pf);
1545        struct ice_hw *hw = &pf->hw;
1546        int ret;
1547
1548        /* Disable global interrupt 0 so we don't try to handle the VFLR. */
1549        wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1550             ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1551        set_bit(__ICE_OICR_INTR_DIS, pf->state);
1552        ice_flush(hw);
1553
1554        ret = pci_enable_sriov(pf->pdev, num_vfs);
1555        if (ret) {
1556                pf->num_alloc_vfs = 0;
1557                goto err_unroll_intr;
1558        }
1559
1560        ret = ice_alloc_vfs(pf, num_vfs);
1561        if (ret)
1562                goto err_pci_disable_sriov;
1563
1564        if (ice_set_per_vf_res(pf)) {
1565                dev_err(dev, "Not enough resources for %d VFs, try with fewer VFs\n",
1566                        num_vfs);
1567                ret = -ENOSPC;
1568                goto err_unroll_sriov;
1569        }
1570
1571        ice_set_dflt_settings_vfs(pf);
1572
1573        if (ice_start_vfs(pf)) {
1574                dev_err(dev, "Failed to start VF(s)\n");
1575                ret = -EAGAIN;
1576                goto err_unroll_sriov;
1577        }
1578
1579        clear_bit(__ICE_VF_DIS, pf->state);
1580        return 0;
1581
1582err_unroll_sriov:
1583        devm_kfree(dev, pf->vf);
1584        pf->vf = NULL;
1585        pf->num_alloc_vfs = 0;
1586err_pci_disable_sriov:
1587        pci_disable_sriov(pf->pdev);
1588err_unroll_intr:
1589        /* rearm interrupts here */
1590        ice_irq_dynamic_ena(hw, NULL, NULL);
1591        clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1592        return ret;
1593}
1594
1595/**
1596 * ice_pf_state_is_nominal - checks the PF for nominal state
1597 * @pf: pointer to PF to check
1598 *
1599 * Check the PF's state for a collection of bits that would indicate
1600 * the PF is in a state that inhibits normal operation of driver
1601 * functionality.
1602 *
1603 * Returns true if PF is in a nominal state.
1604 * Returns false otherwise
1605 */
1606static bool ice_pf_state_is_nominal(struct ice_pf *pf)
1607{
1608        DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
1609
1610        if (!pf)
1611                return false;
1612
1613        bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
1614        if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1615                return false;
1616
1617        return true;
1618}
1619
1620/**
1621 * ice_pci_sriov_ena - Enable or change number of VFs
1622 * @pf: pointer to the PF structure
1623 * @num_vfs: number of VFs to allocate
1624 *
1625 * Returns 0 on success and negative on failure
1626 */
1627static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1628{
1629        int pre_existing_vfs = pci_num_vf(pf->pdev);
1630        struct device *dev = ice_pf_to_dev(pf);
1631        int err;
1632
1633        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1634                ice_free_vfs(pf);
1635        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1636                return 0;
1637
1638        if (num_vfs > pf->num_vfs_supported) {
1639                dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1640                        num_vfs, pf->num_vfs_supported);
1641                return -EOPNOTSUPP;
1642        }
1643
1644        dev_info(dev, "Enabling %d VFs\n", num_vfs);
1645        err = ice_ena_vfs(pf, num_vfs);
1646        if (err) {
1647                dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1648                return err;
1649        }
1650
1651        set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1652        return 0;
1653}
1654
1655/**
1656 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
1657 * @pf: PF to enable SR-IOV on
1658 */
1659static int ice_check_sriov_allowed(struct ice_pf *pf)
1660{
1661        struct device *dev = ice_pf_to_dev(pf);
1662
1663        if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1664                dev_err(dev, "This device is not capable of SR-IOV\n");
1665                return -EOPNOTSUPP;
1666        }
1667
1668        if (ice_is_safe_mode(pf)) {
1669                dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
1670                return -EOPNOTSUPP;
1671        }
1672
1673        if (!ice_pf_state_is_nominal(pf)) {
1674                dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1675                return -EBUSY;
1676        }
1677
1678        return 0;
1679}
1680
1681/**
1682 * ice_sriov_configure - Enable or change number of VFs via sysfs
1683 * @pdev: pointer to a pci_dev structure
1684 * @num_vfs: number of VFs to allocate or 0 to free VFs
1685 *
1686 * This function is called when the user updates the number of VFs in sysfs. On
1687 * success return whatever num_vfs was set to by the caller. Return negative on
1688 * failure.
1689 */
1690int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1691{
1692        struct ice_pf *pf = pci_get_drvdata(pdev);
1693        struct device *dev = ice_pf_to_dev(pf);
1694        int err;
1695
1696        err = ice_check_sriov_allowed(pf);
1697        if (err)
1698                return err;
1699
1700        if (!num_vfs) {
1701                if (!pci_vfs_assigned(pdev)) {
1702                        ice_free_vfs(pf);
1703                        return 0;
1704                }
1705
1706                dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1707                return -EBUSY;
1708        }
1709
1710        err = ice_pci_sriov_ena(pf, num_vfs);
1711        if (err)
1712                return err;
1713
1714        return num_vfs;
1715}
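    /* Illustrative usage of ice_sriov_configure() above via the standard
     * SR-IOV sysfs interface (the interface name here is an example):
     *
     *   echo 4 > /sys/class/net/eth0/device/sriov_numvfs   # create four VFs
     *   echo 0 > /sys/class/net/eth0/device/sriov_numvfs   # free them again
     */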
1716
1717/**
1718 * ice_process_vflr_event - Free VF resources via IRQ calls
1719 * @pf: pointer to the PF structure
1720 *
1721 * called from the VFLR IRQ handler to
1722 * free up VF resources and state variables
1723 */
1724void ice_process_vflr_event(struct ice_pf *pf)
1725{
1726        struct ice_hw *hw = &pf->hw;
1727        unsigned int vf_id;
1728        u32 reg;
1729
1730        if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1731            !pf->num_alloc_vfs)
1732                return;
1733
1734        ice_for_each_vf(pf, vf_id) {
1735                struct ice_vf *vf = &pf->vf[vf_id];
1736                u32 reg_idx, bit_idx;
1737
1738                reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1739                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
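                    /* e.g. with vf_base_id 0 and vf_id 40 this yields
                     * reg_idx 1 and bit_idx 8 (40 / 32 and 40 % 32
                     * respectively)
                     */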
1740                /* read GLGEN_VFLRSTAT register to find out which VFs triggered VFLR */
1741                reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1742                if (reg & BIT(bit_idx))
1743                        /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1744                        ice_reset_vf(vf, true);
1745        }
1746}
1747
1748/**
1749 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
1750 * @vf: pointer to the VF info
1751 */
1752static void ice_vc_reset_vf(struct ice_vf *vf)
1753{
1754        ice_vc_notify_vf_reset(vf);
1755        ice_reset_vf(vf, false);
1756}
1757
1758/**
1759 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1760 * @pf: PF used to index all VFs
1761 * @pfq: queue index relative to the PF's function space
1762 *
1763 * If no VF is found who owns the pfq then return NULL, otherwise return a
1764 * pointer to the VF who owns the pfq
1765 */
1766static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1767{
1768        unsigned int vf_id;
1769
1770        ice_for_each_vf(pf, vf_id) {
1771                struct ice_vf *vf = &pf->vf[vf_id];
1772                struct ice_vsi *vsi;
1773                u16 rxq_idx;
1774
1775                vsi = pf->vsi[vf->lan_vsi_idx];
1776
1777                ice_for_each_rxq(vsi, rxq_idx)
1778                        if (vsi->rxq_map[rxq_idx] == pfq)
1779                                return vf;
1780        }
1781
1782        return NULL;
1783}
1784
1785/**
1786 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1787 * @pf: PF used for conversion
1788 * @globalq: global queue index used to convert to PF space queue index
1789 */
1790static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1791{
1792        return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1793}
1794
1795/**
1796 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1797 * @pf: PF that the LAN overflow event happened on
1798 * @event: structure holding the event information for the LAN overflow event
1799 *
1800 * Determine if the LAN overflow event was caused by a VF queue. If it was not
1801 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
1802 * reset on the offending VF.
1803 */
1804void
1805ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1806{
1807        u32 gldcb_rtctq, queue;
1808        struct ice_vf *vf;
1809
1810        gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1811        dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1812
1813        /* event returns device global Rx queue number */
1814        queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1815                GLDCB_RTCTQ_RXQNUM_S;
1816
1817        vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1818        if (!vf)
1819                return;
1820
1821        ice_vc_reset_vf(vf);
1822}
1823
1824/**
1825 * ice_vc_send_msg_to_vf - Send message to VF
1826 * @vf: pointer to the VF info
1827 * @v_opcode: virtual channel opcode
1828 * @v_retval: virtual channel return value
1829 * @msg: pointer to the msg buffer
1830 * @msglen: msg length
1831 *
1832 * send msg to VF
1833 */
1834static int
1835ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1836                      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1837{
1838        enum ice_status aq_ret;
1839        struct device *dev;
1840        struct ice_pf *pf;
1841
1842        if (!vf)
1843                return -EINVAL;
1844
1845        pf = vf->pf;
1846        if (ice_validate_vf_id(pf, vf->vf_id))
1847                return -EINVAL;
1848
1849        dev = ice_pf_to_dev(pf);
1850
1851        /* single place to detect unsuccessful return values */
1852        if (v_retval) {
1853                vf->num_inval_msgs++;
1854                dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1855                         v_opcode, v_retval);
1856                if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1857                        dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1858                                vf->vf_id);
1859                        dev_err(dev, "Use PF Control I/F to enable the VF\n");
1860                        set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1861                        return -EIO;
1862                }
1863        } else {
1864                vf->num_valid_msgs++;
1865                /* reset the invalid message counter if a valid message is received */
1866                vf->num_inval_msgs = 0;
1867        }
1868
1869        aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1870                                       msg, msglen, NULL);
1871        if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
1872                dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1873                         vf->vf_id, ice_stat_str(aq_ret),
1874                         ice_aq_str(pf->hw.mailboxq.sq_last_status));
1875                return -EIO;
1876        }
1877
1878        return 0;
1879}
1880
1881/**
1882 * ice_vc_get_ver_msg
1883 * @vf: pointer to the VF info
1884 * @msg: pointer to the msg buffer
1885 *
1886 * called from the VF to request the API version used by the PF
1887 */
1888static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1889{
1890        struct virtchnl_version_info info = {
1891                VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1892        };
1893
1894        vf->vf_ver = *(struct virtchnl_version_info *)msg;
1895        /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1896        if (VF_IS_V10(&vf->vf_ver))
1897                info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1898
1899        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1900                                     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1901                                     sizeof(struct virtchnl_version_info));
1902}
1903
1904/**
1905 * ice_vc_get_vf_res_msg
1906 * @vf: pointer to the VF info
1907 * @msg: pointer to the msg buffer
1908 *
1909 * called from the VF to request its resources
1910 */
1911static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1912{
1913        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1914        struct virtchnl_vf_resource *vfres = NULL;
1915        struct ice_pf *pf = vf->pf;
1916        struct ice_vsi *vsi;
1917        int len = 0;
1918        int ret;
1919
1920        if (ice_check_vf_init(pf, vf)) {
1921                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1922                goto err;
1923        }
1924
1925        len = sizeof(struct virtchnl_vf_resource);
1926
1927        vfres = kzalloc(len, GFP_KERNEL);
1928        if (!vfres) {
1929                v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1930                len = 0;
1931                goto err;
1932        }
1933        if (VF_IS_V11(&vf->vf_ver))
1934                vf->driver_caps = *(u32 *)msg;
1935        else
1936                vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1937                                  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1938                                  VIRTCHNL_VF_OFFLOAD_VLAN;
1939
1940        vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1941        vsi = pf->vsi[vf->lan_vsi_idx];
1942        if (!vsi) {
1943                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1944                goto err;
1945        }
1946
1947        if (!vsi->info.pvid)
1948                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1949
1950        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1951                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1952        } else {
1953                if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1954                        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1955                else
1956                        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1957        }
1958
1959        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1960                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1961
1962        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1963                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1964
1965        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1966                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1967
1968        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1969                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1970
1971        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1972                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1973
1974        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1975                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1976
1977        if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1978                vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1979
1980        vfres->num_vsis = 1;
1981        /* Tx and Rx queue counts are equal for a VF */
1982        vfres->num_queue_pairs = vsi->num_txq;
1983        vfres->max_vectors = pf->num_msix_per_vf;
1984        vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1985        vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1986
1987        vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1988        vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1989        vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1990        ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1991                        vf->dflt_lan_addr.addr);
1992
1993        /* match guest capabilities */
1994        vf->driver_caps = vfres->vf_cap_flags;
1995
1996        set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1997
1998err:
1999        /* send the response back to the VF */
2000        ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
2001                                    (u8 *)vfres, len);
2002
2003        kfree(vfres);
2004        return ret;
2005}
2006
2007/**
2008 * ice_vc_reset_vf_msg
2009 * @vf: pointer to the VF info
2010 *
2011 * called from the VF to reset itself;
2012 * unlike other virtchnl messages, the PF driver
2013 * doesn't send a response back to the VF
2014 */
2015static void ice_vc_reset_vf_msg(struct ice_vf *vf)
2016{
2017        if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2018                ice_reset_vf(vf, false);
2019}
2020
2021/**
2022 * ice_find_vsi_from_id
2023 * @pf: the PF structure to search for the VSI
2024 * @id: ID of the VSI it is searching for
2025 *
2026 * searches for the VSI with the given ID
2027 */
2028static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2029{
2030        int i;
2031
2032        ice_for_each_vsi(pf, i)
2033                if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2034                        return pf->vsi[i];
2035
2036        return NULL;
2037}
2038
2039/**
2040 * ice_vc_isvalid_vsi_id
2041 * @vf: pointer to the VF info
2042 * @vsi_id: VF relative VSI ID
2043 *
2044 * check for the valid VSI ID
2045 */
2046static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2047{
2048        struct ice_pf *pf = vf->pf;
2049        struct ice_vsi *vsi;
2050
2051        vsi = ice_find_vsi_from_id(pf, vsi_id);
2052
2053        return (vsi && (vsi->vf_id == vf->vf_id));
2054}
2055
2056/**
2057 * ice_vc_isvalid_q_id
2058 * @vf: pointer to the VF info
2059 * @vsi_id: VSI ID
2060 * @qid: VSI relative queue ID
2061 *
2062 * check for the valid queue ID
2063 */
2064static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2065{
2066        struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2067        /* allocated Tx and Rx queues should always be equal for a VF VSI */
2068        return (vsi && (qid < vsi->alloc_txq));
2069}
2070
2071/**
2072 * ice_vc_isvalid_ring_len
2073 * @ring_len: length of ring
2074 *
2075 * check for a valid ring count: it must be zero, or within range and a
2076 * multiple of ICE_REQ_DESC_MULTIPLE
2077 */
2078static bool ice_vc_isvalid_ring_len(u16 ring_len)
2079{
2080        return ring_len == 0 ||
2081               (ring_len >= ICE_MIN_NUM_DESC &&
2082                ring_len <= ICE_MAX_NUM_DESC &&
2083                !(ring_len % ICE_REQ_DESC_MULTIPLE));
2084}
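    /* Worked example for ice_vc_isvalid_ring_len() above (illustrative,
     * assuming ICE_REQ_DESC_MULTIPLE is 32 as defined in the ice headers):
     * a request for 512 descriptors is accepted, while 500 is rejected
     * because it is not a multiple of 32.
     */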
2085
2086/**
2087 * ice_vc_config_rss_key
2088 * @vf: pointer to the VF info
2089 * @msg: pointer to the msg buffer
2090 *
2091 * Configure the VF's RSS key
2092 */
2093static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2094{
2095        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2096        struct virtchnl_rss_key *vrk =
2097                (struct virtchnl_rss_key *)msg;
2098        struct ice_pf *pf = vf->pf;
2099        struct ice_vsi *vsi;
2100
2101        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2102                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2103                goto error_param;
2104        }
2105
2106        if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2107                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2108                goto error_param;
2109        }
2110
2111        if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2112                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2113                goto error_param;
2114        }
2115
2116        if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2117                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2118                goto error_param;
2119        }
2120
2121        vsi = pf->vsi[vf->lan_vsi_idx];
2122        if (!vsi) {
2123                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2124                goto error_param;
2125        }
2126
2127        if (ice_set_rss(vsi, vrk->key, NULL, 0))
2128                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2129error_param:
2130        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2131                                     NULL, 0);
2132}
2133
2134/**
2135 * ice_vc_config_rss_lut
2136 * @vf: pointer to the VF info
2137 * @msg: pointer to the msg buffer
2138 *
2139 * Configure the VF's RSS LUT
2140 */
2141static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2142{
2143        struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2144        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2145        struct ice_pf *pf = vf->pf;
2146        struct ice_vsi *vsi;
2147
2148        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2149                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2150                goto error_param;
2151        }
2152
2153        if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2154                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2155                goto error_param;
2156        }
2157
2158        if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2159                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2160                goto error_param;
2161        }
2162
2163        if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2164                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2165                goto error_param;
2166        }
2167
2168        vsi = pf->vsi[vf->lan_vsi_idx];
2169        if (!vsi) {
2170                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2171                goto error_param;
2172        }
2173
2174        if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2175                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2176error_param:
2177        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2178                                     NULL, 0);
2179}
2180
2181/**
2182 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2183 * @vf: the VF being reset
2184 *
2185 * The max poll time is ~800ms, which is about the maximum time it takes
2186 * for a VF to be reset and/or a VF driver to be removed.
2187 */
2188static void ice_wait_on_vf_reset(struct ice_vf *vf)
2189{
2190        int i;
2191
2192        for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2193                if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2194                        break;
2195                msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2196        }
2197}
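    /* With the driver's defaults (assumed here: ICE_MAX_VF_RESET_TRIES == 40
     * and ICE_MAX_VF_RESET_SLEEP_MS == 20) the loop above polls for at most
     * 40 * 20 ms = 800 ms, matching the bound given in the kernel-doc.
     */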
2198
2199/**
2200 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2201 * @vf: VF to check if it's ready to be configured/queried
2202 *
2203 * The purpose of this function is to make sure the VF is not in reset, not
2204 * disabled, and initialized so it can be configured and/or queried by a host
2205 * administrator.
2206 */
2207static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2208{
2209        struct ice_pf *pf;
2210
2211        ice_wait_on_vf_reset(vf);
2212
2213        if (ice_is_vf_disabled(vf))
2214                return -EINVAL;
2215
2216        pf = vf->pf;
2217        if (ice_check_vf_init(pf, vf))
2218                return -EBUSY;
2219
2220        return 0;
2221}
2222
2223/**
2224 * ice_set_vf_spoofchk
2225 * @netdev: network interface device structure
2226 * @vf_id: VF identifier
2227 * @ena: flag to enable or disable feature
2228 *
2229 * Enable or disable VF spoof checking
2230 */
2231int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2232{
2233        struct ice_netdev_priv *np = netdev_priv(netdev);
2234        struct ice_pf *pf = np->vsi->back;
2235        struct ice_vsi_ctx *ctx;
2236        struct ice_vsi *vf_vsi;
2237        enum ice_status status;
2238        struct device *dev;
2239        struct ice_vf *vf;
2240        int ret;
2241
2242        dev = ice_pf_to_dev(pf);
2243        if (ice_validate_vf_id(pf, vf_id))
2244                return -EINVAL;
2245
2246        vf = &pf->vf[vf_id];
2247        ret = ice_check_vf_ready_for_cfg(vf);
2248        if (ret)
2249                return ret;
2250
2251        vf_vsi = pf->vsi[vf->lan_vsi_idx];
2252        if (!vf_vsi) {
2253                netdev_err(netdev, "VSI %d for VF %d is null\n",
2254                           vf->lan_vsi_idx, vf->vf_id);
2255                return -EINVAL;
2256        }
2257
2258        if (vf_vsi->type != ICE_VSI_VF) {
2259                netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
2260                           vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2261                return -ENODEV;
2262        }
2263
2264        if (ena == vf->spoofchk) {
2265                dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2266                return 0;
2267        }
2268
2269        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2270        if (!ctx)
2271                return -ENOMEM;
2272
2273        ctx->info.sec_flags = vf_vsi->info.sec_flags;
2274        ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2275        if (ena) {
2276                ctx->info.sec_flags |=
2277                        ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2278                        (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2279                         ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2280        } else {
2281                ctx->info.sec_flags &=
2282                        ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2283                          (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2284                           ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2285        }
2286
2287        status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2288        if (status) {
2289                dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
2290                        ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2291                        ice_stat_str(status));
2292                ret = -EIO;
2293                goto out;
2294        }
2295
2296        /* only update spoofchk state and VSI context on success */
2297        vf_vsi->info.sec_flags = ctx->info.sec_flags;
2298        vf->spoofchk = ena;
2299
2300out:
2301        kfree(ctx);
2302        return ret;
2303}
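    /* ice_set_vf_spoofchk() above is typically reached via the
     * ndo_set_vf_spoofchk callback, e.g. from iproute2 (interface name
     * illustrative):
     *
     *   ip link set dev eth0 vf 0 spoofchk on
     */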
2304
2305/**
2306 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2307 * @pf: PF structure for accessing VF(s)
2308 *
2309 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2310 * else return true
2311 */
2312bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2313{
2314        int vf_idx;
2315
2316        ice_for_each_vf(pf, vf_idx) {
2317                struct ice_vf *vf = &pf->vf[vf_idx];
2318
2319                /* found a VF that has promiscuous mode configured */
2320                if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2321                    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2322                        return true;
2323        }
2324
2325        return false;
2326}
2327
2328/**
2329 * ice_vc_cfg_promiscuous_mode_msg
2330 * @vf: pointer to the VF info
2331 * @msg: pointer to the msg buffer
2332 *
2333 * called from the VF to configure VF VSIs promiscuous mode
2334 */
2335static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2336{
2337        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2338        struct virtchnl_promisc_info *info =
2339            (struct virtchnl_promisc_info *)msg;
2340        struct ice_pf *pf = vf->pf;
2341        struct ice_vsi *vsi;
2342        struct device *dev;
2343        bool rm_promisc;
2344        int ret = 0;
2345
2346        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2347                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2348                goto error_param;
2349        }
2350
2351        if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2352                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2353                goto error_param;
2354        }
2355
2356        vsi = pf->vsi[vf->lan_vsi_idx];
2357        if (!vsi) {
2358                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2359                goto error_param;
2360        }
2361
2362        dev = ice_pf_to_dev(pf);
2363        if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2364                dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2365                        vf->vf_id);
2366                /* Leave v_ret alone, lie to the VF on purpose. */
2367                goto error_param;
2368        }
2369
2370        rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2371                !(info->flags & FLAG_VF_MULTICAST_PROMISC);
2372
2373        if (vsi->num_vlan || vf->port_vlan_info) {
2374                struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2375                struct net_device *pf_netdev;
2376
2377                if (!pf_vsi) {
2378                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2379                        goto error_param;
2380                }
2381
2382                pf_netdev = pf_vsi->netdev;
2383
2384                ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2385                if (ret) {
2386                        dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2387                                rm_promisc ? "ON" : "OFF", vf->vf_id,
2388                                vsi->vsi_num);
2389                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2390                }
2391
2392                ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2393                if (ret) {
2394                        dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2395                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2396                        goto error_param;
2397                }
2398        }
2399
2400        if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2401                bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2402
2403                if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2404                        /* only attempt to set the default forwarding VSI if
2405                         * it's not currently set
2406                         */
2407                        ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2408                else if (!set_dflt_vsi &&
2409                         ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2410                        /* only attempt to free the default forwarding VSI if we
2411                         * are the owner
2412                         */
2413                        ret = ice_clear_dflt_vsi(pf->first_sw);
2414
2415                if (ret) {
2416                        dev_err(dev, "Failed to %sable VF %d as the default VSI, error %d\n",
2417                                set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2418                        v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2419                        goto error_param;
2420                }
2421        } else {
2422                enum ice_status status;
2423                u8 promisc_m;
2424
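                    /* Select the promiscuous bit set to apply or remove
                     * below; the VLAN-aware variants are used whenever the
                     * VF has a port VLAN or any VLAN filters configured
                     */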
2425                if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2426                        if (vf->port_vlan_info || vsi->num_vlan)
2427                                promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2428                        else
2429                                promisc_m = ICE_UCAST_PROMISC_BITS;
2430                } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2431                        if (vf->port_vlan_info || vsi->num_vlan)
2432                                promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2433                        else
2434                                promisc_m = ICE_MCAST_PROMISC_BITS;
2435                } else {
2436                        if (vf->port_vlan_info || vsi->num_vlan)
2437                                promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2438                        else
2439                                promisc_m = ICE_UCAST_PROMISC_BITS;
2440                }
2441
2442                /* Configure multicast/unicast with or without VLAN promiscuous
2443                 * mode
2444                 */
2445                status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2446                if (status) {
2447                        dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2448                                rm_promisc ? "dis" : "en", vf->vf_id,
2449                                ice_stat_str(status));
2450                        v_ret = ice_err_to_virt_err(status);
2451                        goto error_param;
2452                } else {
2453                        dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2454                                rm_promisc ? "dis" : "en", vf->vf_id);
2455                }
2456        }
2457
2458        if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2459                set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2460        else
2461                clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2462
2463        if (info->flags & FLAG_VF_UNICAST_PROMISC)
2464                set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2465        else
2466                clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2467
2468error_param:
2469        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2470                                     v_ret, NULL, 0);
2471}
2472
2473/**
2474 * ice_vc_get_stats_msg
2475 * @vf: pointer to the VF info
2476 * @msg: pointer to the msg buffer
2477 *
2478 * called from the VF to get VSI stats
2479 */
2480static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2481{
2482        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2483        struct virtchnl_queue_select *vqs =
2484                (struct virtchnl_queue_select *)msg;
2485        struct ice_eth_stats stats = { 0 };
2486        struct ice_pf *pf = vf->pf;
2487        struct ice_vsi *vsi;
2488
2489        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2490                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2491                goto error_param;
2492        }
2493
2494        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2495                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2496                goto error_param;
2497        }
2498
2499        vsi = pf->vsi[vf->lan_vsi_idx];
2500        if (!vsi) {
2501                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2502                goto error_param;
2503        }
2504
2505        ice_update_eth_stats(vsi);
2506
2507        stats = vsi->eth_stats;
2508
2509error_param:
2510        /* send the response to the VF */
2511        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
2512                                     (u8 *)&stats, sizeof(stats));
2513}
2514
2515/**
2516 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2517 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2518 *
2519 * Return true on successful validation, else false
2520 */
2521static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2522{
2523        if ((!vqs->rx_queues && !vqs->tx_queues) ||
2524            vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2525            vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
2526                return false;
2527
2528        return true;
2529}
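    /* Worked example for ice_vc_validate_vqs_bitmaps() above (illustrative,
     * assuming ICE_MAX_RSS_QS_PER_VF is 16 as defined in the ice headers):
     *
     *   struct virtchnl_queue_select vqs = { .rx_queues = 0x0003 };
     *
     * passes (queues 0 and 1 selected), while an all-zero selection or
     * .rx_queues = 0x10000 (bit 16 set) is rejected.
     */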
2530
2531/**
2532 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
2533 * @vsi: VSI of the VF to configure
2534 * @q_idx: VF queue index used to determine the queue in the PF's space
2535 */
2536static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2537{
2538        struct ice_hw *hw = &vsi->back->hw;
2539        u32 pfq = vsi->txq_map[q_idx];
2540        u32 reg;
2541
2542        reg = rd32(hw, QINT_TQCTL(pfq));
2543
2544        /* MSI-X index 0 in the VF's space is always for the OICR, which means
2545         * this is most likely a poll mode VF driver, so don't enable an
2546         * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2547         */
2548        if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2549                return;
2550
2551        wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2552}
2553
2554/**
2555 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
2556 * @vsi: VSI of the VF to configure
2557 * @q_idx: VF queue index used to determine the queue in the PF's space
2558 */
2559static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2560{
2561        struct ice_hw *hw = &vsi->back->hw;
2562        u32 pfq = vsi->rxq_map[q_idx];
2563        u32 reg;
2564
2565        reg = rd32(hw, QINT_RQCTL(pfq));
2566
2567        /* MSI-X index 0 in the VF's space is always for the OICR, which means
2568         * this is most likely a poll mode VF driver, so don't enable an
2569         * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2570         */
2571        if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2572                return;
2573
2574        wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2575}
2576
2577/**
2578 * ice_vc_ena_qs_msg
2579 * @vf: pointer to the VF info
2580 * @msg: pointer to the msg buffer
2581 *
2582 * called from the VF to enable all or specific queue(s)
2583 */
2584static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2585{
2586        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2587        struct virtchnl_queue_select *vqs =
2588            (struct virtchnl_queue_select *)msg;
2589        struct ice_pf *pf = vf->pf;
2590        struct ice_vsi *vsi;
2591        unsigned long q_map;
2592        u16 vf_q_id;
2593
2594        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2595                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2596                goto error_param;
2597        }
2598
2599        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2600                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2601                goto error_param;
2602        }
2603
2604        if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2605                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2606                goto error_param;
2607        }
2608
2609        vsi = pf->vsi[vf->lan_vsi_idx];
2610        if (!vsi) {
2611                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2612                goto error_param;
2613        }
2614
2615        /* Enable only Rx rings, Tx rings were enabled by the FW when the
2616         * Tx queue group list was configured and the context bits were
2617         * programmed using ice_vsi_cfg_txqs
2618         */
2619        q_map = vqs->rx_queues;
2620        for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2621                if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2622                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2623                        goto error_param;
2624                }
2625
2626                /* Skip queue if enabled */
2627                if (test_bit(vf_q_id, vf->rxq_ena))
2628                        continue;
2629
2630                if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
2631                        dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
2632                                vf_q_id, vsi->vsi_num);
2633                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2634                        goto error_param;
2635                }
2636
2637                ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
2638                set_bit(vf_q_id, vf->rxq_ena);
2639        }
2640
2641        vsi = pf->vsi[vf->lan_vsi_idx];
2642        q_map = vqs->tx_queues;
2643        for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2644                if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2645                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2646                        goto error_param;
2647                }
2648
2649                /* Skip queue if enabled */
2650                if (test_bit(vf_q_id, vf->txq_ena))
2651                        continue;
2652
2653                ice_vf_ena_txq_interrupt(vsi, vf_q_id);
2654                set_bit(vf_q_id, vf->txq_ena);
2655        }
2656
2657        /* Set flag to indicate that queues are enabled */
2658        if (v_ret == VIRTCHNL_STATUS_SUCCESS)
2659                set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2660
2661error_param:
2662        /* send the response to the VF */
2663        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2664                                     NULL, 0);
2665}
2666
2667/**
2668 * ice_vc_dis_qs_msg
2669 * @vf: pointer to the VF info
2670 * @msg: pointer to the msg buffer
2671 *
2672 * called from the VF to disable all or specific
2673 * queue(s)
2674 */
2675static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2676{
2677        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2678        struct virtchnl_queue_select *vqs =
2679            (struct virtchnl_queue_select *)msg;
2680        struct ice_pf *pf = vf->pf;
2681        struct ice_vsi *vsi;
2682        unsigned long q_map;
2683        u16 vf_q_id;
2684
2685        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2686            !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2687                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2688                goto error_param;
2689        }
2690
2691        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2692                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2693                goto error_param;
2694        }
2695
2696        if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2697                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2698                goto error_param;
2699        }
2700
2701        vsi = pf->vsi[vf->lan_vsi_idx];
2702        if (!vsi) {
2703                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2704                goto error_param;
2705        }
2706
2707        if (vqs->tx_queues) {
2708                q_map = vqs->tx_queues;
2709
2710                for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2711                        struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2712                        struct ice_txq_meta txq_meta = { 0 };
2713
2714                        if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2715                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2716                                goto error_param;
2717                        }
2718
2719                        /* Skip queue if not enabled */
2720                        if (!test_bit(vf_q_id, vf->txq_ena))
2721                                continue;
2722
2723                        ice_fill_txq_meta(vsi, ring, &txq_meta);
2724
2725                        if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2726                                                 ring, &txq_meta)) {
2727                                dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
2728                                        vf_q_id, vsi->vsi_num);
2729                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2730                                goto error_param;
2731                        }
2732
2733                        /* Clear enabled queues flag */
2734                        clear_bit(vf_q_id, vf->txq_ena);
2735                }
2736        }
2737
2738        q_map = vqs->rx_queues;
2739        /* speed up Rx queue disable by batching them if possible */
2740        if (q_map &&
2741            bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
2742                if (ice_vsi_stop_all_rx_rings(vsi)) {
2743                        dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2744                                vsi->vsi_num);
2745                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2746                        goto error_param;
2747                }
2748
2749                bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
2750        } else if (q_map) {
2751                for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2752                        if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2753                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2754                                goto error_param;
2755                        }
2756
2757                        /* Skip queue if not enabled */
2758                        if (!test_bit(vf_q_id, vf->rxq_ena))
2759                                continue;
2760
2761                        if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2762                                                     true)) {
2763                                dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
2764                                        vf_q_id, vsi->vsi_num);
2765                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2766                                goto error_param;
2767                        }
2768
2769                        /* Clear enabled queues flag */
2770                        clear_bit(vf_q_id, vf->rxq_ena);
2771                }
2772        }
2773
2774        /* Clear enabled queues flag */
2775        if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
2776                clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2777
2778error_param:
2779        /* send the response to the VF */
2780        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2781                                     NULL, 0);
2782}
2783
2784/**
2785 * ice_cfg_interrupt
2786 * @vf: pointer to the VF info
2787 * @vsi: the VSI being configured
2788 * @vector_id: vector ID
2789 * @map: vector map for mapping vectors to queues
2790 * @q_vector: structure for interrupt vector
2791 * configure the IRQ to queue map
2792 */
2793static int
2794ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2795                  struct virtchnl_vector_map *map,
2796                  struct ice_q_vector *q_vector)
2797{
2798        u16 vsi_q_id, vsi_q_id_idx;
2799        unsigned long qmap;
2800
2801        q_vector->num_ring_rx = 0;
2802        q_vector->num_ring_tx = 0;
2803
2804        qmap = map->rxq_map;
2805        for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2806                vsi_q_id = vsi_q_id_idx;
2807
2808                if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2809                        return VIRTCHNL_STATUS_ERR_PARAM;
2810
2811                q_vector->num_ring_rx++;
2812                q_vector->rx.itr_idx = map->rxitr_idx;
2813                vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2814                ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2815                                      q_vector->rx.itr_idx);
2816        }
2817
2818        qmap = map->txq_map;
2819        for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2820                vsi_q_id = vsi_q_id_idx;
2821
2822                if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2823                        return VIRTCHNL_STATUS_ERR_PARAM;
2824
2825                q_vector->num_ring_tx++;
2826                q_vector->tx.itr_idx = map->txitr_idx;
2827                vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2828                ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2829                                      q_vector->tx.itr_idx);
2830        }
2831
2832        return VIRTCHNL_STATUS_SUCCESS;
2833}
2834
2835/**
2836 * ice_vc_cfg_irq_map_msg
2837 * @vf: pointer to the VF info
2838 * @msg: pointer to the msg buffer
2839 *
2840 * called from the VF to configure the IRQ to queue map
2841 */
2842static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2843{
2844        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2845        u16 num_q_vectors_mapped, vsi_id, vector_id;
2846        struct virtchnl_irq_map_info *irqmap_info;
2847        struct virtchnl_vector_map *map;
2848        struct ice_pf *pf = vf->pf;
2849        struct ice_vsi *vsi;
2850        int i;
2851
2852        irqmap_info = (struct virtchnl_irq_map_info *)msg;
2853        num_q_vectors_mapped = irqmap_info->num_vectors;
2854
2855        /* Check to make sure number of VF vectors mapped is not greater than
2856         * number of VF vectors originally allocated, and check that
2857         * there is actually at least a single VF queue vector mapped
2858         */
2859        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2860            pf->num_msix_per_vf < num_q_vectors_mapped ||
2861            !num_q_vectors_mapped) {
2862                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2863                goto error_param;
2864        }
2865
2866        vsi = pf->vsi[vf->lan_vsi_idx];
2867        if (!vsi) {
2868                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2869                goto error_param;
2870        }
2871
2872        for (i = 0; i < num_q_vectors_mapped; i++) {
2873                struct ice_q_vector *q_vector;
2874
2875                map = &irqmap_info->vecmap[i];
2876
2877                vector_id = map->vector_id;
2878                vsi_id = map->vsi_id;
2879                /* vector_id is always 0-based for each VF, and must be
2880                 * less than the max allowed interrupts per VF
2881                 */
2882                if (!(vector_id < pf->num_msix_per_vf) ||
2883                    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2884                    (!vector_id && (map->rxq_map || map->txq_map))) {
2885                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2886                        goto error_param;
2887                }
2888
2889                /* No need to map VF miscellaneous or rogue vector */
2890                if (!vector_id)
2891                        continue;
2892
2893                /* Subtract the non-queue vector from vector_id passed by
2894                 * the VF to get the VSI queue vector array index
2895                 */
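                    /* e.g. assuming ICE_NONQ_VECS_VF is 1 (vector 0 is the
                     * OICR/mailbox vector), VF vector_id 1 maps to
                     * vsi->q_vectors[0]
                     */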
2896                q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2897                if (!q_vector) {
2898                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2899                        goto error_param;
2900                }
2901
2902                /* look out for an invalid queue index */
2903                v_ret = (enum virtchnl_status_code)
2904                        ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2905                if (v_ret)
2906                        goto error_param;
2907        }
2908
2909error_param:
2910        /* send the response to the VF */
2911        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2912                                     NULL, 0);
2913}
2914
2915/**
2916 * ice_vc_cfg_qs_msg
2917 * @vf: pointer to the VF info
2918 * @msg: pointer to the msg buffer
2919 *
2920 * called from the VF to configure the Rx/Tx queues
2921 */
2922static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2923{
2924        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2925        struct virtchnl_vsi_queue_config_info *qci =
2926            (struct virtchnl_vsi_queue_config_info *)msg;
2927        struct virtchnl_queue_pair_info *qpi;
2928        u16 num_rxq = 0, num_txq = 0;
2929        struct ice_pf *pf = vf->pf;
2930        struct ice_vsi *vsi;
2931        int i;
2932
2933        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2934                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2935                goto error_param;
2936        }
2937
2938        if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2939                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2940                goto error_param;
2941        }
2942
2943        vsi = pf->vsi[vf->lan_vsi_idx];
2944        if (!vsi) {
2945                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2946                goto error_param;
2947        }
2948
2949        if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
2950            qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2951                dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
2952                        vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2953                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2954                goto error_param;
2955        }
2956
2957        for (i = 0; i < qci->num_queue_pairs; i++) {
2958                qpi = &qci->qpair[i];
2959                if (qpi->txq.vsi_id != qci->vsi_id ||
2960                    qpi->rxq.vsi_id != qci->vsi_id ||
2961                    qpi->rxq.queue_id != qpi->txq.queue_id ||
2962                    qpi->txq.headwb_enabled ||
2963                    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2964                    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2965                    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2966                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2967                        goto error_param;
2968                }
2969                /* copy Tx queue info from VF into VSI */
2970                if (qpi->txq.ring_len > 0) {
2971                        num_txq++;
2972                        vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2973                        vsi->tx_rings[i]->count = qpi->txq.ring_len;
2974                }
2975
2976                /* copy Rx queue info from VF into VSI */
2977                if (qpi->rxq.ring_len > 0) {
2978                        num_rxq++;
2979                        vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2980                        vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2981
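                            /* a non-zero Rx data buffer size must be between
                             * 1024 bytes and 16K - 128 bytes
                             */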
2982                        if (qpi->rxq.databuffer_size != 0 &&
2983                            (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2984                             qpi->rxq.databuffer_size < 1024)) {
2985                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2986                                goto error_param;
2987                        }
2988                        vsi->rx_buf_len = qpi->rxq.databuffer_size;
2989                        vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2990                        if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2991                            qpi->rxq.max_pkt_size < 64) {
2992                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2993                                goto error_param;
2994                        }
2995                }
2996
2997                vsi->max_frame = qpi->rxq.max_pkt_size;
2998        }
2999
3000        /* VF can request to configure fewer queues than were allocated by
3001         * default, so update the VSI with the new number
3002         */
3003        vsi->num_txq = num_txq;
3004        vsi->num_rxq = num_rxq;
3005        /* All queues of VF VSI are in TC 0 */
3006        vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
3007        vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
3008
3009        if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
3010                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3011
3012error_param:
3013        /* send the response to the VF */
3014        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
3015                                     NULL, 0);
3016}
3017
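/* Example (illustrative, not part of the driver): a VF might build the
 * VIRTCHNL_OP_CONFIG_VSI_QUEUES payload validated above roughly as follows.
 * All values are hypothetical; ring lengths must satisfy
 * ice_vc_isvalid_ring_len() and headwb_enabled must stay zero:
 *
 *	struct virtchnl_vsi_queue_config_info *qci;
 *
 *	qci->vsi_id = vsi_id;
 *	qci->num_queue_pairs = 1;
 *	qci->qpair[0].txq.vsi_id = vsi_id;
 *	qci->qpair[0].txq.queue_id = 0;
 *	qci->qpair[0].txq.ring_len = 512;
 *	qci->qpair[0].txq.dma_ring_addr = tx_dma;
 *	qci->qpair[0].rxq.vsi_id = vsi_id;
 *	qci->qpair[0].rxq.queue_id = 0;
 *	qci->qpair[0].rxq.ring_len = 512;
 *	qci->qpair[0].rxq.dma_ring_addr = rx_dma;
 *	qci->qpair[0].rxq.databuffer_size = 2048;
 *	qci->qpair[0].rxq.max_pkt_size = 1522;
 *
 * The handler rejects mismatched VSI/queue IDs, a databuffer_size outside
 * 1024..(16K - 128), and a max_pkt_size outside 64..(16K - 1).
 */
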
3018/**
3019 * ice_is_vf_trusted
3020 * @vf: pointer to the VF info
3021 */
3022static bool ice_is_vf_trusted(struct ice_vf *vf)
3023{
3024        return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3025}
3026
3027/**
3028 * ice_can_vf_change_mac
3029 * @vf: pointer to the VF info
3030 *
3031 * Return true if the VF is allowed to change its MAC filters, false otherwise
3032 */
3033static bool ice_can_vf_change_mac(struct ice_vf *vf)
3034{
3035        /* If the VF MAC address has been set administratively (via the
3036         * ndo_set_vf_mac command), then deny permission to the VF to
3037         * add/delete unicast MAC addresses, unless the VF is trusted
3038         */
3039        if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3040                return false;
3041
3042        return true;
3043}
3044
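/* Decision summary for ice_can_vf_change_mac(), derived from the logic
 * above (illustrative):
 *
 *	pf_set_mac	trusted		may change MAC filters
 *	false		any		yes
 *	true		yes		yes
 *	true		no		no
 */
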
3045/**
3046 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3047 * @vf: pointer to the VF info
3048 * @vsi: pointer to the VF's VSI
3049 * @mac_addr: MAC address to add
3050 */
3051static int
3052ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3053{
3054        struct device *dev = ice_pf_to_dev(vf->pf);
3055        enum ice_status status;
3056
3057        /* default unicast MAC already added */
3058        if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3059                return 0;
3060
3061        if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3062                dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3063                return -EPERM;
3064        }
3065
3066        status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3067        if (status == ICE_ERR_ALREADY_EXISTS) {
3068                dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3069                        vf->vf_id);
3070                return -EEXIST;
3071        } else if (status) {
3072                dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
3073                        mac_addr, vf->vf_id, ice_stat_str(status));
3074                return -EIO;
3075        }
3076
3077        /* Set the default LAN address to the latest unicast MAC address added
3078         * by the VF. The default LAN address is reported by the PF via
3079         * ndo_get_vf_config.
3080         */
3081        if (is_unicast_ether_addr(mac_addr))
3082                ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3083
3084        vf->num_mac++;
3085
3086        return 0;
3087}
3088
3089/**
3090 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3091 * @vf: pointer to the VF info
3092 * @vsi: pointer to the VF's VSI
3093 * @mac_addr: MAC address to delete
3094 */
3095static int
3096ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3097{
3098        struct device *dev = ice_pf_to_dev(vf->pf);
3099        enum ice_status status;
3100
3101        if (!ice_can_vf_change_mac(vf) &&
3102            ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3103                return 0;
3104
3105        status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3106        if (status == ICE_ERR_DOES_NOT_EXIST) {
3107                dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3108                        vf->vf_id);
3109                return -ENOENT;
3110        } else if (status) {
3111                dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3112                        mac_addr, vf->vf_id, ice_stat_str(status));
3113                return -EIO;
3114        }
3115
3116        if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3117                eth_zero_addr(vf->dflt_lan_addr.addr);
3118
3119        vf->num_mac--;
3120
3121        return 0;
3122}
3123
3124/**
3125 * ice_vc_handle_mac_addr_msg
3126 * @vf: pointer to the VF info
3127 * @msg: pointer to the msg buffer
3128 * @set: true if MAC filters are being set, false otherwise
3129 *
3130 * add or delete guest MAC address filters
3131 */
3132static int
3133ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3134{
3135        int (*ice_vc_cfg_mac)
3136                (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
3137        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3138        struct virtchnl_ether_addr_list *al =
3139            (struct virtchnl_ether_addr_list *)msg;
3140        struct ice_pf *pf = vf->pf;
3141        enum virtchnl_ops vc_op;
3142        struct ice_vsi *vsi;
3143        int i;
3144
3145        if (set) {
3146                vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3147                ice_vc_cfg_mac = ice_vc_add_mac_addr;
3148        } else {
3149                vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3150                ice_vc_cfg_mac = ice_vc_del_mac_addr;
3151        }
3152
3153        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3154            !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3155                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3156                goto handle_mac_exit;
3157        }
3158
3159        /* If this VF is not privileged, then we can't add more than a
3160         * limited number of addresses. Check to make sure that the
3161         * additions do not push us over the limit.
3162         */
3163        if (set && !ice_is_vf_trusted(vf) &&
3164            (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3165                dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more MAC addresses\n",
3166                        vf->vf_id);
3167                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3168                goto handle_mac_exit;
3169        }
3170
3171        vsi = pf->vsi[vf->lan_vsi_idx];
3172        if (!vsi) {
3173                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3174                goto handle_mac_exit;
3175        }
3176
3177        for (i = 0; i < al->num_elements; i++) {
3178                u8 *mac_addr = al->list[i].addr;
3179                int result;
3180
3181                if (is_broadcast_ether_addr(mac_addr) ||
3182                    is_zero_ether_addr(mac_addr))
3183                        continue;
3184
3185                result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3186                if (result == -EEXIST || result == -ENOENT) {
3187                        continue;
3188                } else if (result) {
3189                        v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3190                        goto handle_mac_exit;
3191                }
3192        }
3193
3194handle_mac_exit:
3195        /* send the response to the VF */
3196        return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3197}
3198
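/* Example (illustrative): the VIRTCHNL_OP_ADD_ETH_ADDR and
 * VIRTCHNL_OP_DEL_ETH_ADDR payload handled above is a variable-length
 * list; a VF adding two (hypothetical) addresses would send roughly:
 *
 *	struct virtchnl_ether_addr_list *al;
 *
 *	al->vsi_id = vsi_id;
 *	al->num_elements = 2;
 *	ether_addr_copy(al->list[0].addr, mac0);
 *	ether_addr_copy(al->list[1].addr, mac1);
 *
 * Broadcast and all-zero entries are skipped without error, and for an
 * untrusted VF the total may not exceed ICE_MAX_MACADDR_PER_VF.
 */
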
3199/**
3200 * ice_vc_add_mac_addr_msg
3201 * @vf: pointer to the VF info
3202 * @msg: pointer to the msg buffer
3203 *
3204 * add guest MAC address filter
3205 */
3206static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3207{
3208        return ice_vc_handle_mac_addr_msg(vf, msg, true);
3209}
3210
3211/**
3212 * ice_vc_del_mac_addr_msg
3213 * @vf: pointer to the VF info
3214 * @msg: pointer to the msg buffer
3215 *
3216 * remove guest MAC address filter
3217 */
3218static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3219{
3220        return ice_vc_handle_mac_addr_msg(vf, msg, false);
3221}
3222
3223/**
3224 * ice_vc_request_qs_msg
3225 * @vf: pointer to the VF info
3226 * @msg: pointer to the msg buffer
3227 *
3228 * VFs get a default number of queues but can use this message to request a
3229 * different number. If the request is successful, the PF will reset the VF
3230 * and return 0. If unsuccessful, the PF will respond with the number of
3231 * available queue pairs in its virtchnl reply to the VF.
3232 */
3233static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3234{
3235        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3236        struct virtchnl_vf_res_request *vfres =
3237                (struct virtchnl_vf_res_request *)msg;
3238        u16 req_queues = vfres->num_queue_pairs;
3239        struct ice_pf *pf = vf->pf;
3240        u16 max_allowed_vf_queues;
3241        u16 tx_rx_queue_left;
3242        struct device *dev;
3243        u16 cur_queues;
3244
3245        dev = ice_pf_to_dev(pf);
3246        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3247                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3248                goto error_param;
3249        }
3250
3251        cur_queues = vf->num_vf_qs;
3252        tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3253                                 ice_get_avail_rxq_count(pf));
3254        max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
3255        if (!req_queues) {
3256                dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
3257                        vf->vf_id);
3258        } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
3259                dev_err(dev, "VF %d tried to request more than %d queues.\n",
3260                        vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3261                vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
3262        } else if (req_queues > cur_queues &&
3263                   req_queues - cur_queues > tx_rx_queue_left) {
3264                dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
3265                         vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
3266                vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
3267                                               ICE_MAX_RSS_QS_PER_VF);
3268        } else {
3269                /* request is successful, so reset the VF */
3270                vf->num_req_qs = req_queues;
3271                ice_vc_reset_vf(vf);
3272                dev_info(dev, "VF %d granted request of %u queues.\n",
3273                         vf->vf_id, req_queues);
3274                return 0;
3275        }
3276
3277error_param:
3278        /* send the response to the VF */
3279        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
3280                                     v_ret, (u8 *)vfres, sizeof(*vfres));
3281}
3282
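/* Worked example for the checks above (all values hypothetical): with
 * cur_queues = 4 and tx_rx_queue_left = 8, max_allowed_vf_queues = 12.
 * A request for 16 queue pairs needs 12 more than the VF holds but only
 * 8 remain, so the PF replies with min(12, ICE_MAX_RSS_QS_PER_VF)
 * instead of resetting the VF. A request for 8 queue pairs succeeds,
 * stores num_req_qs and triggers ice_vc_reset_vf().
 */
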
3283/**
3284 * ice_set_vf_port_vlan
3285 * @netdev: network interface device structure
3286 * @vf_id: VF identifier
3287 * @vlan_id: VLAN ID being set
3288 * @qos: priority setting
3289 * @vlan_proto: VLAN protocol
3290 *
3291 * program VF Port VLAN ID and/or QoS
3292 */
3293int
3294ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3295                     __be16 vlan_proto)
3296{
3297        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3298        struct device *dev;
3299        struct ice_vf *vf;
3300        u16 vlanprio;
3301        int ret;
3302
3303        dev = ice_pf_to_dev(pf);
3304        if (ice_validate_vf_id(pf, vf_id))
3305                return -EINVAL;
3306
3307        if (vlan_id >= VLAN_N_VID || qos > 7) {
3308                dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3309                        vf_id, vlan_id, qos);
3310                return -EINVAL;
3311        }
3312
3313        if (vlan_proto != htons(ETH_P_8021Q)) {
3314                dev_err(dev, "VF VLAN protocol is not supported\n");
3315                return -EPROTONOSUPPORT;
3316        }
3317
3318        vf = &pf->vf[vf_id];
3319        ret = ice_check_vf_ready_for_cfg(vf);
3320        if (ret)
3321                return ret;
3322
3323        vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3324
3325        if (vf->port_vlan_info == vlanprio) {
3326                /* duplicate request, so just return success */
3327                dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
3328                return 0;
3329        }
3330
3331        vf->port_vlan_info = vlanprio;
3332
3333        if (vf->port_vlan_info)
3334                dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
3335                         vlan_id, qos, vf_id);
3336        else
3337                dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
3338
3339        ice_vc_reset_vf(vf);
3340
3341        return 0;
3342}
3343
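/* Example: "ip link set <pf> vf 0 vlan 100 qos 5" reaches this handler
 * through the .ndo_set_vf_vlan hook. With VLAN_PRIO_SHIFT = 13 the
 * stored vlanprio is 100 | (5 << 13) = 0xa064, i.e. the VID in bits
 * 0-11 and the priority in bits 13-15. "ip link set <pf> vf 0 vlan 0"
 * clears the port VLAN. Either change resets the VF.
 */
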
3344/**
3345 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3346 * @caps: VF driver negotiated capabilities
3347 *
3348 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3349 */
3350static bool ice_vf_vlan_offload_ena(u32 caps)
3351{
3352        return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3353}
3354
3355/**
3356 * ice_vc_process_vlan_msg
3357 * @vf: pointer to the VF info
3358 * @msg: pointer to the msg buffer
3359 * @add_v: Add VLAN if true, otherwise delete VLAN
3360 *
3361 * Process virtchnl op to add or remove programmed guest VLAN ID
3362 */
3363static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3364{
3365        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3366        struct virtchnl_vlan_filter_list *vfl =
3367            (struct virtchnl_vlan_filter_list *)msg;
3368        struct ice_pf *pf = vf->pf;
3369        bool vlan_promisc = false;
3370        struct ice_vsi *vsi;
3371        struct device *dev;
3372        struct ice_hw *hw;
3373        int status = 0;
3374        u8 promisc_m;
3375        int i;
3376
3377        dev = ice_pf_to_dev(pf);
3378        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3379                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3380                goto error_param;
3381        }
3382
3383        if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3384                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3385                goto error_param;
3386        }
3387
3388        if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3389                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3390                goto error_param;
3391        }
3392
3393        for (i = 0; i < vfl->num_elements; i++) {
3394                if (vfl->vlan_id[i] >= VLAN_N_VID) {
3395                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3396                        dev_err(dev, "invalid VF VLAN id %d\n",
3397                                vfl->vlan_id[i]);
3398                        goto error_param;
3399                }
3400        }
3401
3402        hw = &pf->hw;
3403        vsi = pf->vsi[vf->lan_vsi_idx];
3404        if (!vsi) {
3405                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3406                goto error_param;
3407        }
3408
3409        if (add_v && !ice_is_vf_trusted(vf) &&
3410            vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3411                dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLANs\n",
3412                         vf->vf_id);
3413                /* There is no need to let the VF know that it is not
3414                 * trusted, so we can just return a success message here
3415                 */
3416                goto error_param;
3417        }
3418
3419        if (vsi->info.pvid) {
3420                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3421                goto error_param;
3422        }
3423
3424        if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3425             test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3426            test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
3427                vlan_promisc = true;
3428
3429        if (add_v) {
3430                for (i = 0; i < vfl->num_elements; i++) {
3431                        u16 vid = vfl->vlan_id[i];
3432
3433                        if (!ice_is_vf_trusted(vf) &&
3434                            vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3435                                dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLANs\n",
3436                                         vf->vf_id);
3437                                /* There is no need to let the VF know that
3438                                 * it is not trusted, so we can just return
3439                                 * a success message here as well.
3440                                 */
3441                                goto error_param;
3442                        }
3443
3444                        /* we add VLAN 0 by default for each VF so we can enable
3445                         * Tx VLAN anti-spoof without triggering MDD events, so
3446                         * we don't need to add it again here
3447                         */
3448                        if (!vid)
3449                                continue;
3450
3451                        status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3452                        if (status) {
3453                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3454                                goto error_param;
3455                        }
3456
3457                        /* Enable VLAN pruning when non-zero VLAN is added */
3458                        if (!vlan_promisc && vid &&
3459                            !ice_vsi_is_vlan_pruning_ena(vsi)) {
3460                                status = ice_cfg_vlan_pruning(vsi, true, false);
3461                                if (status) {
3462                                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3463                                        dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
3464                                                vid, status);
3465                                        goto error_param;
3466                                }
3467                        } else if (vlan_promisc) {
3468                                /* Enable Ucast/Mcast VLAN promiscuous mode */
3469                                promisc_m = ICE_PROMISC_VLAN_TX |
3470                                            ICE_PROMISC_VLAN_RX;
3471
3472                                status = ice_set_vsi_promisc(hw, vsi->idx,
3473                                                             promisc_m, vid);
3474                                if (status) {
3475                                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3476                                        dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
3477                                                vid, status);
3478                                }
3479                        }
3480                }
3481        } else {
3482                /* For an untrusted VF, the number of VLAN elements passed
3483                 * to the PF for removal might be greater than the number of
3484                 * VLAN filters programmed for that VF, so use the actual
3485                 * number of VLANs added earlier with the add VLAN opcode.
3486                 * This avoids removing a VLAN that doesn't exist, which
3487                 * would send an erroneous failure message back to the VF
3488                 */
3489                int num_vf_vlan;
3490
3491                num_vf_vlan = vsi->num_vlan;
3492                for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
3493                        u16 vid = vfl->vlan_id[i];
3494
3495                        /* we add VLAN 0 by default for each VF so we can enable
3496                         * Tx VLAN anti-spoof without triggering MDD events, so
3497                         * we don't want a VIRTCHNL request to remove it
3498                         */
3499                        if (!vid)
3500                                continue;
3501
3502                        /* Make sure ice_vsi_kill_vlan is successful before
3503                         * updating VLAN information
3504                         */
3505                        status = ice_vsi_kill_vlan(vsi, vid);
3506                        if (status) {
3507                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3508                                goto error_param;
3509                        }
3510
3511                        /* Disable VLAN pruning when only VLAN 0 is left */
3512                        if (vsi->num_vlan == 1 &&
3513                            ice_vsi_is_vlan_pruning_ena(vsi))
3514                                ice_cfg_vlan_pruning(vsi, false, false);
3515
3516                        /* Disable Unicast/Multicast VLAN promiscuous mode */
3517                        if (vlan_promisc) {
3518                                promisc_m = ICE_PROMISC_VLAN_TX |
3519                                            ICE_PROMISC_VLAN_RX;
3520
3521                                ice_clear_vsi_promisc(hw, vsi->idx,
3522                                                      promisc_m, vid);
3523                        }
3524                }
3525        }
3526
3527error_param:
3528        /* send the response to the VF */
3529        if (add_v)
3530                return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
3531                                             NULL, 0);
3532        else
3533                return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
3534                                             NULL, 0);
3535}
3536
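/* Example (illustrative): the VIRTCHNL_OP_ADD_VLAN and
 * VIRTCHNL_OP_DEL_VLAN payload processed above:
 *
 *	struct virtchnl_vlan_filter_list *vfl;
 *
 *	vfl->vsi_id = vsi_id;
 *	vfl->num_elements = 1;
 *	vfl->vlan_id[0] = 100;
 *
 * Each vlan_id must be below VLAN_N_VID; VLAN 0 entries are skipped
 * because VLAN 0 is programmed for every VF by default.
 */
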
3537/**
3538 * ice_vc_add_vlan_msg
3539 * @vf: pointer to the VF info
3540 * @msg: pointer to the msg buffer
3541 *
3542 * Add and program guest VLAN ID
3543 */
3544static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
3545{
3546        return ice_vc_process_vlan_msg(vf, msg, true);
3547}
3548
3549/**
3550 * ice_vc_remove_vlan_msg
3551 * @vf: pointer to the VF info
3552 * @msg: pointer to the msg buffer
3553 *
3554 * remove programmed guest VLAN ID
3555 */
3556static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
3557{
3558        return ice_vc_process_vlan_msg(vf, msg, false);
3559}
3560
3561/**
3562 * ice_vc_ena_vlan_stripping
3563 * @vf: pointer to the VF info
3564 *
3565 * Enable VLAN header stripping for a given VF
3566 */
3567static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3568{
3569        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3570        struct ice_pf *pf = vf->pf;
3571        struct ice_vsi *vsi;
3572
3573        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3574                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3575                goto error_param;
3576        }
3577
3578        if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3579                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3580                goto error_param;
3581        }
3582
3583        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi) {
                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                goto error_param;
        }

3584        if (ice_vsi_manage_vlan_stripping(vsi, true))
3585                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3586
3587error_param:
3588        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3589                                     v_ret, NULL, 0);
3590}
3591
3592/**
3593 * ice_vc_dis_vlan_stripping
3594 * @vf: pointer to the VF info
3595 *
3596 * Disable VLAN header stripping for a given VF
3597 */
3598static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3599{
3600        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3601        struct ice_pf *pf = vf->pf;
3602        struct ice_vsi *vsi;
3603
3604        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3605                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3606                goto error_param;
3607        }
3608
3609        if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3610                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3611                goto error_param;
3612        }
3613
3614        vsi = pf->vsi[vf->lan_vsi_idx];
3615        if (!vsi) {
3616                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3617                goto error_param;
3618        }
3619
3620        if (ice_vsi_manage_vlan_stripping(vsi, false))
3621                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3622
3623error_param:
3624        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3625                                     v_ret, NULL, 0);
3626}
3627
3628/**
3629 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3630 * @vf: VF to enable/disable VLAN stripping for on initialization
3631 *
3632 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping; if
3633 * the flag is cleared, disable stripping. For example, the flag
3634 * will be cleared when port VLANs are configured by the administrator before
3635 * passing the VF to the guest or if the AVF driver doesn't support VLAN
3636 * offloads.
3637 */
3638static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3639{
3640        struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3641
3642        if (!vsi)
3643                return -EINVAL;
3644
3645        /* don't modify stripping if port VLAN is configured */
3646        if (vsi->info.pvid)
3647                return 0;
3648
3649        if (ice_vf_vlan_offload_ena(vf->driver_caps))
3650                return ice_vsi_manage_vlan_stripping(vsi, true);
3651        else
3652                return ice_vsi_manage_vlan_stripping(vsi, false);
3653}
3654
3655/**
3656 * ice_vc_process_vf_msg - Process request from VF
3657 * @pf: pointer to the PF structure
3658 * @event: pointer to the AQ event
3659 *
3660 * Called from the common asq/arq handler to
3661 * process a request from a VF
3662 */
3663void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3664{
3665        u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3666        s16 vf_id = le16_to_cpu(event->desc.retval);
3667        u16 msglen = event->msg_len;
3668        u8 *msg = event->msg_buf;
3669        struct ice_vf *vf = NULL;
3670        struct device *dev;
3671        int err = 0;
3672
3673        dev = ice_pf_to_dev(pf);
3674        if (ice_validate_vf_id(pf, vf_id)) {
3675                err = -EINVAL;
3676                goto error_handler;
3677        }
3678
3679        vf = &pf->vf[vf_id];
3680
3681        /* Check if VF is disabled. */
3682        if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3683                err = -EPERM;
3684                goto error_handler;
3685        }
3686
3687        /* Perform basic checks on the msg */
3688        err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3689        if (err) {
3690                if (err == VIRTCHNL_STATUS_ERR_PARAM)
3691                        err = -EPERM;
3692                else
3693                        err = -EINVAL;
3694        }
3695
3696error_handler:
3697        if (err) {
3698                ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3699                                      NULL, 0);
3700                dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3701                        vf_id, v_opcode, msglen, err);
3702                return;
3703        }
3704
3705        switch (v_opcode) {
3706        case VIRTCHNL_OP_VERSION:
3707                err = ice_vc_get_ver_msg(vf, msg);
3708                break;
3709        case VIRTCHNL_OP_GET_VF_RESOURCES:
3710                err = ice_vc_get_vf_res_msg(vf, msg);
3711                if (ice_vf_init_vlan_stripping(vf))
3712                        dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
3713                                vf->vf_id);
3714                ice_vc_notify_vf_link_state(vf);
3715                break;
3716        case VIRTCHNL_OP_RESET_VF:
3717                ice_vc_reset_vf_msg(vf);
3718                break;
3719        case VIRTCHNL_OP_ADD_ETH_ADDR:
3720                err = ice_vc_add_mac_addr_msg(vf, msg);
3721                break;
3722        case VIRTCHNL_OP_DEL_ETH_ADDR:
3723                err = ice_vc_del_mac_addr_msg(vf, msg);
3724                break;
3725        case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3726                err = ice_vc_cfg_qs_msg(vf, msg);
3727                break;
3728        case VIRTCHNL_OP_ENABLE_QUEUES:
3729                err = ice_vc_ena_qs_msg(vf, msg);
3730                ice_vc_notify_vf_link_state(vf);
3731                break;
3732        case VIRTCHNL_OP_DISABLE_QUEUES:
3733                err = ice_vc_dis_qs_msg(vf, msg);
3734                break;
3735        case VIRTCHNL_OP_REQUEST_QUEUES:
3736                err = ice_vc_request_qs_msg(vf, msg);
3737                break;
3738        case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3739                err = ice_vc_cfg_irq_map_msg(vf, msg);
3740                break;
3741        case VIRTCHNL_OP_CONFIG_RSS_KEY:
3742                err = ice_vc_config_rss_key(vf, msg);
3743                break;
3744        case VIRTCHNL_OP_CONFIG_RSS_LUT:
3745                err = ice_vc_config_rss_lut(vf, msg);
3746                break;
3747        case VIRTCHNL_OP_GET_STATS:
3748                err = ice_vc_get_stats_msg(vf, msg);
3749                break;
3750        case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3751                err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
3752                break;
3753        case VIRTCHNL_OP_ADD_VLAN:
3754                err = ice_vc_add_vlan_msg(vf, msg);
3755                break;
3756        case VIRTCHNL_OP_DEL_VLAN:
3757                err = ice_vc_remove_vlan_msg(vf, msg);
3758                break;
3759        case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3760                err = ice_vc_ena_vlan_stripping(vf);
3761                break;
3762        case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3763                err = ice_vc_dis_vlan_stripping(vf);
3764                break;
3765        case VIRTCHNL_OP_UNKNOWN:
3766        default:
3767                dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3768                        vf_id);
3769                err = ice_vc_send_msg_to_vf(vf, v_opcode,
3770                                            VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3771                                            NULL, 0);
3772                break;
3773        }
3774        if (err) {
3775                /* The handler's error return value is only logged here;
3776                 * the PF must keep servicing pending VF messages.
3777                 */
3778                dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3779                         vf_id, v_opcode, err);
3780        }
3781}
3782
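/* Illustrative shape of the admin queue event dispatched above: the
 * virtchnl opcode travels in desc.cookie_high and the source VF ID in
 * desc.retval, so a VIRTCHNL_OP_GET_VF_RESOURCES request from VF 3
 * arrives roughly as:
 *
 *	event->desc.cookie_high = cpu_to_le32(VIRTCHNL_OP_GET_VF_RESOURCES);
 *	event->desc.retval = cpu_to_le16(3);
 *	event->msg_buf and event->msg_len carry the request payload
 */
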
3783/**
3784 * ice_get_vf_cfg
3785 * @netdev: network interface device structure
3786 * @vf_id: VF identifier
3787 * @ivi: VF configuration structure
3788 *
3789 * return VF configuration
3790 */
3791int
3792ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3793{
3794        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3795        struct ice_vf *vf;
3796
3797        if (ice_validate_vf_id(pf, vf_id))
3798                return -EINVAL;
3799
3800        vf = &pf->vf[vf_id];
3801
3802        if (ice_check_vf_init(pf, vf))
3803                return -EBUSY;
3804
3805        ivi->vf = vf_id;
3806        ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3807
3808        /* VF configuration for VLAN and applicable QoS */
3809        ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3810        ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3811
3812        ivi->trusted = vf->trusted;
3813        ivi->spoofchk = vf->spoofchk;
3814        if (!vf->link_forced)
3815                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3816        else if (vf->link_up)
3817                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3818        else
3819                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3820        ivi->max_tx_rate = vf->tx_rate;
3821        ivi->min_tx_rate = 0;
3822        return 0;
3823}
3824
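/* Reached through the .ndo_get_vf_config hook; e.g. "ip link show <pf>"
 * reports the values filled in above (MAC, VLAN/QoS, trust, spoofchk,
 * link state) for each VF.
 */
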
3825/**
3826 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
3827 * @pf: PF used to reference the switch's rules
3828 * @umac: unicast MAC to compare against existing switch rules
3829 *
3830 * Return true on the first/any match, else return false
3831 */
3832static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
3833{
3834        struct ice_sw_recipe *mac_recipe_list =
3835                &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
3836        struct ice_fltr_mgmt_list_entry *list_itr;
3837        struct list_head *rule_head;
3838        struct mutex *rule_lock; /* protect MAC filter list access */
3839
3840        rule_head = &mac_recipe_list->filt_rules;
3841        rule_lock = &mac_recipe_list->filt_rule_lock;
3842
3843        mutex_lock(rule_lock);
3844        list_for_each_entry(list_itr, rule_head, list_entry) {
3845                u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3846
3847                if (ether_addr_equal(existing_mac, umac)) {
3848                        mutex_unlock(rule_lock);
3849                        return true;
3850                }
3851        }
3852
3853        mutex_unlock(rule_lock);
3854
3855        return false;
3856}
3857
3858/**
3859 * ice_set_vf_mac
3860 * @netdev: network interface device structure
3861 * @vf_id: VF identifier
3862 * @mac: MAC address
3863 *
3864 * program VF MAC address
3865 */
3866int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3867{
3868        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3869        struct ice_vf *vf;
3870        int ret;
3871
3872        if (ice_validate_vf_id(pf, vf_id))
3873                return -EINVAL;
3874
3875        if (is_multicast_ether_addr(mac)) {
3876                netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3877                return -EINVAL;
3878        }
3879
3880        vf = &pf->vf[vf_id];
3881        /* nothing left to do, unicast MAC already set */
3882        if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
3883                return 0;
3884
3885        ret = ice_check_vf_ready_for_cfg(vf);
3886        if (ret)
3887                return ret;
3888
3889        if (ice_unicast_mac_exists(pf, mac)) {
3890                netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
3891                           mac, vf_id, mac);
3892                return -EINVAL;
3893        }
3894
3895        /* VF is notified of its new MAC via the PF's response to the
3896         * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
3897         */
3898        ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3899        if (is_zero_ether_addr(mac)) {
3900                /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
3901                vf->pf_set_mac = false;
3902                netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
3903                            vf->vf_id);
3904        } else {
3905                /* PF will add MAC rule for the VF */
3906                vf->pf_set_mac = true;
3907                netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
3908                            mac, vf_id);
3909        }
3910
3911        ice_vc_reset_vf(vf);
3912        return 0;
3913}
3914
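/* Example: "ip link set <pf> vf 2 mac 02:00:00:00:00:02" invokes this
 * handler through the .ndo_set_vf_mac hook and pins the VF to that
 * address, while the all-zero address hands MAC control back to the VF.
 * Both paths end in ice_vc_reset_vf().
 */
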
3915/**
3916 * ice_set_vf_trust
3917 * @netdev: network interface device structure
3918 * @vf_id: VF identifier
3919 * @trusted: Boolean value to enable/disable trusted VF
3920 *
3921 * Enable or disable a given VF as trusted
3922 */
3923int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3924{
3925        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3926        struct ice_vf *vf;
3927        int ret;
3928
3929        if (ice_validate_vf_id(pf, vf_id))
3930                return -EINVAL;
3931
3932        vf = &pf->vf[vf_id];
3933        ret = ice_check_vf_ready_for_cfg(vf);
3934        if (ret)
3935                return ret;
3936
3937        /* Check if already trusted */
3938        if (trusted == vf->trusted)
3939                return 0;
3940
3941        vf->trusted = trusted;
3942        ice_vc_reset_vf(vf);
3943        dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
3944                 vf_id, trusted ? "" : "un");
3945
3946        return 0;
3947}
3948
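/* Example: "ip link set <pf> vf 0 trust on" reaches this handler
 * through the .ndo_set_vf_trust hook and resets the VF so the new
 * privilege level takes effect, e.g. lifting the MAC and VLAN filter
 * limits enforced elsewhere in this file.
 */
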
3949/**
3950 * ice_set_vf_link_state
3951 * @netdev: network interface device structure
3952 * @vf_id: VF identifier
3953 * @link_state: required link state
3954 *
3955 * Set VF's link state, irrespective of physical link state status
3956 */
3957int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3958{
3959        struct ice_pf *pf = ice_netdev_to_pf(netdev);
3960        struct ice_vf *vf;
3961        int ret;
3962
3963        if (ice_validate_vf_id(pf, vf_id))
3964                return -EINVAL;
3965
3966        vf = &pf->vf[vf_id];
3967        ret = ice_check_vf_ready_for_cfg(vf);
3968        if (ret)
3969                return ret;
3970
3971        switch (link_state) {
3972        case IFLA_VF_LINK_STATE_AUTO:
3973                vf->link_forced = false;
3974                break;
3975        case IFLA_VF_LINK_STATE_ENABLE:
3976                vf->link_forced = true;
3977                vf->link_up = true;
3978                break;
3979        case IFLA_VF_LINK_STATE_DISABLE:
3980                vf->link_forced = true;
3981                vf->link_up = false;
3982                break;
3983        default:
3984                return -EINVAL;
3985        }
3986
3987        ice_vc_notify_vf_link_state(vf);
3988
3989        return 0;
3990}
3991
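/* Example: "ip link set <pf> vf 0 state disable" forces the VF link
 * down regardless of physical link, "state enable" forces it up, and
 * "state auto" resumes tracking the PF link; each change is pushed to
 * the VF via ice_vc_notify_vf_link_state().
 */
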
3992/**
3993 * ice_get_vf_stats - populate some stats for the VF
3994 * @netdev: the netdev of the PF
3995 * @vf_id: the host OS identifier (0-255)
3996 * @vf_stats: pointer to the OS memory to be initialized
3997 */
3998int ice_get_vf_stats(struct net_device *netdev, int vf_id,
3999                     struct ifla_vf_stats *vf_stats)
4000{
4001        struct ice_pf *pf = ice_netdev_to_pf(netdev);
4002        struct ice_eth_stats *stats;
4003        struct ice_vsi *vsi;
4004        struct ice_vf *vf;
4005        int ret;
4006
4007        if (ice_validate_vf_id(pf, vf_id))
4008                return -EINVAL;
4009
4010        vf = &pf->vf[vf_id];
4011        ret = ice_check_vf_ready_for_cfg(vf);
4012        if (ret)
4013                return ret;
4014
4015        vsi = pf->vsi[vf->lan_vsi_idx];
4016        if (!vsi)
4017                return -EINVAL;
4018
4019        ice_update_eth_stats(vsi);
4020        stats = &vsi->eth_stats;
4021
4022        memset(vf_stats, 0, sizeof(*vf_stats));
4023
4024        vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4025                stats->rx_multicast;
4026        vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4027                stats->tx_multicast;
4028        vf_stats->rx_bytes   = stats->rx_bytes;
4029        vf_stats->tx_bytes   = stats->tx_bytes;
4030        vf_stats->broadcast  = stats->rx_broadcast;
4031        vf_stats->multicast  = stats->rx_multicast;
4032        vf_stats->rx_dropped = stats->rx_discards;
4033        vf_stats->tx_dropped = stats->tx_discards;
4034
4035        return 0;
4036}
4037
4038/**
4039 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
4040 * @vf: pointer to the VF structure
4041 */
4042void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4043{
4044        struct ice_pf *pf = vf->pf;
4045        struct device *dev;
4046
4047        dev = ice_pf_to_dev(pf);
4048
4049        dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4050                 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4051                 vf->dflt_lan_addr.addr,
4052                 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4053                          ? "on" : "off");
4054}
4055
4056/**
4057 * ice_print_vfs_mdd_events - print VFs malicious driver detect events
4058 * @pf: pointer to the PF structure
4059 *
4060 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
4061 */
4062void ice_print_vfs_mdd_events(struct ice_pf *pf)
4063{
4064        struct device *dev = ice_pf_to_dev(pf);
4065        struct ice_hw *hw = &pf->hw;
4066        int i;
4067
4068        /* check that there are pending MDD events to print */
4069        if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
4070                return;
4071
4072        /* VF MDD event logs are rate limited to one second intervals */
4073        if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4074                return;
4075
4076        pf->last_printed_mdd_jiffies = jiffies;
4077
4078        ice_for_each_vf(pf, i) {
4079                struct ice_vf *vf = &pf->vf[i];
4080
4081                /* only print Rx MDD event message if there are new events */
4082                if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4083                        vf->mdd_rx_events.last_printed =
4084                                                        vf->mdd_rx_events.count;
4085                        ice_print_vf_rx_mdd_event(vf);
4086                }
4087
4088                /* only print Tx MDD event message if there are new events */
4089                if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4090                        vf->mdd_tx_events.last_printed =
4091                                                        vf->mdd_tx_events.count;
4092
4093                        dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4094                                 vf->mdd_tx_events.count, hw->pf_id, i,
4095                                 vf->dflt_lan_addr.addr);
4096                }
4097        }
4098}
4099
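/* Note on the rate limiting above: time_is_after_jiffies(last + HZ) is
 * true for one second after the previous print, so a flood of MDD
 * events logs at most once per second, while the last_printed counters
 * suppress messages when no new events have arrived.
 */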