linux/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4#include "ice.h"
   5#include "ice_lib.h"
   6
   7/**
   8 * ice_err_to_virt_err - translate errors for VF return code
   9 * @ice_err: error return code
  10 */
  11static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
  12{
  13        switch (ice_err) {
  14        case ICE_SUCCESS:
  15                return VIRTCHNL_STATUS_SUCCESS;
  16        case ICE_ERR_BAD_PTR:
  17        case ICE_ERR_INVAL_SIZE:
  18        case ICE_ERR_DEVICE_NOT_SUPPORTED:
  19        case ICE_ERR_PARAM:
  20        case ICE_ERR_CFG:
  21                return VIRTCHNL_STATUS_ERR_PARAM;
  22        case ICE_ERR_NO_MEMORY:
  23                return VIRTCHNL_STATUS_ERR_NO_MEMORY;
  24        case ICE_ERR_NOT_READY:
  25        case ICE_ERR_RESET_FAILED:
  26        case ICE_ERR_FW_API_VER:
  27        case ICE_ERR_AQ_ERROR:
  28        case ICE_ERR_AQ_TIMEOUT:
  29        case ICE_ERR_AQ_FULL:
  30        case ICE_ERR_AQ_NO_WORK:
  31        case ICE_ERR_AQ_EMPTY:
  32                return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
  33        default:
  34                return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
  35        }
  36}
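/* Usage sketch (illustrative, not part of the original file): the intent is
 * to translate a low-level ice_status from an AQ-backed helper into the
 * virtchnl code reported back to the VF, e.g.:
 *
 *	enum ice_status err = ice_add_mac(&pf->hw, &tmp_add_list);
 *	enum virtchnl_status_code v_ret = ice_err_to_virt_err(err);
 *
 * ice_add_mac() is just an example caller here; any helper returning
 * enum ice_status can be mapped the same way before replying to the VF.
 */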
  37
  38/**
  39 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
  40 * @pf: pointer to the PF structure
  41 * @v_opcode: operation code
  42 * @v_retval: return value
  43 * @msg: pointer to the msg buffer
  44 * @msglen: msg length
  45 */
  46static void
  47ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
  48                    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
  49{
  50        struct ice_hw *hw = &pf->hw;
  51        struct ice_vf *vf = pf->vf;
  52        int i;
  53
  54        for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
   55                /* Not all VFs are enabled, so skip the ones that are not */
  56                if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
  57                    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
  58                        continue;
  59
  60                /* Ignore return value on purpose - a given VF may fail, but
  61                 * we need to keep going and send to all of them
  62                 */
  63                ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
  64                                      msglen, NULL);
  65        }
  66}
  67
  68/**
  69 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
  70 * @vf: pointer to the VF structure
  71 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
  72 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
  73 * @link_up: whether or not to set the link up/down
  74 */
  75static void
  76ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
  77                 int ice_link_speed, bool link_up)
  78{
  79        if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
  80                pfe->event_data.link_event_adv.link_status = link_up;
  81                /* Speed in Mbps */
  82                pfe->event_data.link_event_adv.link_speed =
  83                        ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
  84        } else {
  85                pfe->event_data.link_event.link_status = link_up;
  86                /* Legacy method for virtchnl link speeds */
  87                pfe->event_data.link_event.link_speed =
  88                        (enum virtchnl_link_speed)
  89                        ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
  90        }
  91}
  92
  93/**
  94 * ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
  95 * @vf: pointer to the VF structure
  96 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
  97 * @link_up: whether or not to set the link up/down
  98 */
  99static void
 100ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
 101                        bool link_up)
 102{
 103        u16 link_speed;
 104
 105        if (link_up)
 106                link_speed = ICE_AQ_LINK_SPEED_100GB;
 107        else
 108                link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
 109
 110        ice_set_pfe_link(vf, pfe, link_speed, link_up);
 111}
 112
 113/**
 114 * ice_vc_notify_vf_link_state - Inform a VF of link status
 115 * @vf: pointer to the VF structure
 116 *
 117 * send a link status message to a single VF
 118 */
 119static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
 120{
 121        struct virtchnl_pf_event pfe = { 0 };
 122        struct ice_link_status *ls;
 123        struct ice_pf *pf = vf->pf;
 124        struct ice_hw *hw;
 125
 126        hw = &pf->hw;
 127        ls = &hw->port_info->phy.link_info;
 128
 129        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
 130        pfe.severity = PF_EVENT_SEVERITY_INFO;
 131
 132        if (vf->link_forced)
 133                ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
 134        else
 135                ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
 136                                 ICE_AQ_LINK_UP);
 137
 138        ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
 139                              VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
 140                              sizeof(pfe), NULL);
 141}
 142
 143/**
 144 * ice_free_vf_res - Free a VF's resources
 145 * @vf: pointer to the VF info
 146 */
 147static void ice_free_vf_res(struct ice_vf *vf)
 148{
 149        struct ice_pf *pf = vf->pf;
 150        int i, last_vector_idx;
 151
 152        /* First, disable VF's configuration API to prevent OS from
 153         * accessing the VF's VSI after it's freed or invalidated.
 154         */
 155        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 156
 157        /* free VSI and disconnect it from the parent uplink */
 158        if (vf->lan_vsi_idx) {
 159                ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
 160                vf->lan_vsi_idx = 0;
 161                vf->lan_vsi_num = 0;
 162                vf->num_mac = 0;
 163        }
 164
 165        last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
 166        /* Disable interrupts so that VF starts in a known state */
 167        for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
 168                wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
 169                ice_flush(&pf->hw);
 170        }
 171        /* reset some of the state variables keeping track of the resources */
 172        clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
 173        clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 174}
 175
 176/**
 177 * ice_dis_vf_mappings
 178 * @vf: pointer to the VF structure
 179 */
 180static void ice_dis_vf_mappings(struct ice_vf *vf)
 181{
 182        struct ice_pf *pf = vf->pf;
 183        struct ice_vsi *vsi;
 184        int first, last, v;
 185        struct ice_hw *hw;
 186
 187        hw = &pf->hw;
 188        vsi = pf->vsi[vf->lan_vsi_idx];
 189
 190        wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
 191        wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
 192
 193        first = vf->first_vector_idx;
 194        last = first + pf->num_vf_msix - 1;
 195        for (v = first; v <= last; v++) {
 196                u32 reg;
 197
 198                reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
 199                        GLINT_VECT2FUNC_IS_PF_M) |
 200                       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
 201                        GLINT_VECT2FUNC_PF_NUM_M));
 202                wr32(hw, GLINT_VECT2FUNC(v), reg);
 203        }
 204
 205        if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
 206                wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
 207        else
 208                dev_err(&pf->pdev->dev,
 209                        "Scattered mode for VF Tx queues is not yet implemented\n");
 210
 211        if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
 212                wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
 213        else
 214                dev_err(&pf->pdev->dev,
 215                        "Scattered mode for VF Rx queues is not yet implemented\n");
 216}
 217
 218/**
 219 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 220 * @pf: pointer to the PF structure
 221 *
  222 * If MSIX entries from the pf->irq_tracker were used then we need to
  223 * reset the irq_tracker->end and give the entries we used back to
  224 * num_avail_sw_msix.
 225 *
 226 * If no MSIX entries were taken from the pf->irq_tracker then just clear
 227 * the pf->sriov_base_vector.
 228 *
 229 * Returns 0 on success, and -EINVAL on error.
 230 */
 231static int ice_sriov_free_msix_res(struct ice_pf *pf)
 232{
 233        struct ice_res_tracker *res;
 234
 235        if (!pf)
 236                return -EINVAL;
 237
 238        res = pf->irq_tracker;
 239        if (!res)
 240                return -EINVAL;
 241
 242        /* give back irq_tracker resources used */
 243        if (pf->sriov_base_vector < res->num_entries) {
 244                res->end = res->num_entries;
 245                pf->num_avail_sw_msix +=
 246                        res->num_entries - pf->sriov_base_vector;
 247        }
 248
 249        pf->sriov_base_vector = 0;
 250
 251        return 0;
 252}
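/* Worked example for ice_sriov_free_msix_res() (hypothetical numbers): with
 * res->num_entries = 96 and pf->sriov_base_vector = 64, SR-IOV had claimed
 * tracker entries 64..95. The function restores res->end = 96, returns
 * 96 - 64 = 32 vectors to pf->num_avail_sw_msix, and then clears
 * pf->sriov_base_vector.
 */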
 253
 254/**
 255 * ice_free_vfs - Free all VFs
 256 * @pf: pointer to the PF structure
 257 */
 258void ice_free_vfs(struct ice_pf *pf)
 259{
 260        struct ice_hw *hw = &pf->hw;
 261        int tmp, i;
 262
 263        if (!pf->vf)
 264                return;
 265
 266        while (test_and_set_bit(__ICE_VF_DIS, pf->state))
 267                usleep_range(1000, 2000);
 268
 269        /* Avoid wait time by stopping all VFs at the same time */
 270        for (i = 0; i < pf->num_alloc_vfs; i++) {
 271                struct ice_vsi *vsi;
 272
 273                if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
 274                        continue;
 275
 276                vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
 277                /* stop rings without wait time */
 278                ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i);
 279                ice_vsi_stop_rx_rings(vsi);
 280
 281                clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
 282        }
 283
 284        /* Disable IOV before freeing resources. This lets any VF drivers
 285         * running in the host get themselves cleaned up before we yank
 286         * the carpet out from underneath their feet.
 287         */
 288        if (!pci_vfs_assigned(pf->pdev))
 289                pci_disable_sriov(pf->pdev);
 290        else
 291                dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
 292
 293        tmp = pf->num_alloc_vfs;
 294        pf->num_vf_qps = 0;
 295        pf->num_alloc_vfs = 0;
 296        for (i = 0; i < tmp; i++) {
 297                if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
 298                        /* disable VF qp mappings */
 299                        ice_dis_vf_mappings(&pf->vf[i]);
 300
 301                        /* Set this state so that assigned VF vectors can be
 302                         * reclaimed by PF for reuse in ice_vsi_release(). No
 303                         * need to clear this bit since pf->vf array is being
  304                         * freed anyway after this for loop
 305                         */
 306                        set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
 307                        ice_free_vf_res(&pf->vf[i]);
 308                }
 309        }
 310
 311        if (ice_sriov_free_msix_res(pf))
 312                dev_err(&pf->pdev->dev,
 313                        "Failed to free MSIX resources used by SR-IOV\n");
 314
 315        devm_kfree(&pf->pdev->dev, pf->vf);
 316        pf->vf = NULL;
 317
 318        /* This check is for when the driver is unloaded while VFs are
 319         * assigned. Setting the number of VFs to 0 through sysfs is caught
 320         * before this function ever gets called.
 321         */
 322        if (!pci_vfs_assigned(pf->pdev)) {
 323                int vf_id;
 324
 325                /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
 326                 * work correctly when SR-IOV gets re-enabled.
 327                 */
 328                for (vf_id = 0; vf_id < tmp; vf_id++) {
 329                        u32 reg_idx, bit_idx;
 330
 331                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
 332                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
 333                        wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 334                }
 335        }
 336        clear_bit(__ICE_VF_DIS, pf->state);
 337        clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
 338}
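/* Worked example for the VFLR acknowledgment above (hypothetical numbers):
 * with hw->func_caps.vf_base_id = 64 and vf_id = 5, the absolute VF ID is
 * 69, giving reg_idx = 69 / 32 = 2 and bit_idx = 69 % 32 = 5, so the ack is
 * wr32(hw, GLGEN_VFLRSTAT(2), BIT(5)).
 */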
 339
 340/**
 341 * ice_trigger_vf_reset - Reset a VF on HW
 342 * @vf: pointer to the VF structure
 343 * @is_vflr: true if VFLR was issued, false if not
 344 *
 345 * Trigger hardware to start a reset for a particular VF. Expects the caller
 346 * to wait the proper amount of time to allow hardware to reset the VF before
 347 * it cleans up and restores VF functionality.
 348 */
 349static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
 350{
 351        struct ice_pf *pf = vf->pf;
 352        u32 reg, reg_idx, bit_idx;
 353        struct ice_hw *hw;
 354        int vf_abs_id, i;
 355
 356        hw = &pf->hw;
 357        vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
 358
 359        /* Inform VF that it is no longer active, as a warning */
 360        clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
 361
 362        /* Disable VF's configuration API during reset. The flag is re-enabled
 363         * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
  364         * It's normally disabled in ice_free_vf_res(), but it's safer to
  365         * do it earlier to give any VF config functions that may still be
  366         * running at this point a chance to finish.
 367         */
 368        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 369
 370        /* Clear the VF's ARQLEN register. This is how the VF detects reset,
 371         * since the VFGEN_RSTAT register doesn't stick at 0 after reset.
 372         */
 373        wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
 374
 375        /* In the case of a VFLR, the HW has already reset the VF and we
 376         * just need to clean up, so don't hit the VFRTRIG register.
 377         */
 378        if (!is_vflr) {
 379                /* reset VF using VPGEN_VFRTRIG reg */
 380                reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
 381                reg |= VPGEN_VFRTRIG_VFSWR_M;
 382                wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
 383        }
 384        /* clear the VFLR bit in GLGEN_VFLRSTAT */
 385        reg_idx = (vf_abs_id) / 32;
 386        bit_idx = (vf_abs_id) % 32;
 387        wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 388        ice_flush(hw);
 389
 390        wr32(hw, PF_PCI_CIAA,
 391             VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
  392        for (i = 0; i < 100; i++) {
  393                reg = rd32(hw, PF_PCI_CIAD);
  394                if (!(reg & VF_TRANS_PENDING_M))
  395                        break; /* no transactions pending */
  396                dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", vf->vf_id);
  397                udelay(1);
  398        }
 399}
 400
 401/**
 402 * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
 403 * @ctxt: the VSI ctxt to fill
 404 * @vid: the VLAN ID to set as a PVID
 405 */
 406static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
 407{
 408        ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
 409                                 ICE_AQ_VSI_PVLAN_INSERT_PVID |
 410                                 ICE_AQ_VSI_VLAN_EMOD_STR);
 411        ctxt->info.pvid = cpu_to_le16(vid);
 412        ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
 413        ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
 414                                                ICE_AQ_VSI_PROP_SW_VALID);
 415}
 416
 417/**
 418 * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
 419 * @ctxt: the VSI ctxt to fill
 420 */
 421static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
 422{
 423        ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
 424        ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
 425        ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
 426        ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
 427                                                ICE_AQ_VSI_PROP_SW_VALID);
 428}
 429
 430/**
 431 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 432 * @vsi: the VSI to update
 433 * @vid: the VLAN ID to set as a PVID
 434 * @enable: true for enable PVID false for disable
 435 */
 436static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
 437{
 438        struct device *dev = &vsi->back->pdev->dev;
 439        struct ice_hw *hw = &vsi->back->hw;
 440        struct ice_vsi_ctx *ctxt;
 441        enum ice_status status;
 442        int ret = 0;
 443
 444        ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
 445        if (!ctxt)
 446                return -ENOMEM;
 447
 448        ctxt->info = vsi->info;
 449        if (enable)
 450                ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
 451        else
 452                ice_vsi_kill_pvid_fill_ctxt(ctxt);
 453
 454        status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
 455        if (status) {
 456                dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
 457                         status, hw->adminq.sq_last_status);
 458                ret = -EIO;
 459                goto out;
 460        }
 461
 462        vsi->info = ctxt->info;
 463out:
 464        devm_kfree(dev, ctxt);
 465        return ret;
 466}
 467
 468/**
 469 * ice_vf_vsi_setup - Set up a VF VSI
 470 * @pf: board private structure
 471 * @pi: pointer to the port_info instance
 472 * @vf_id: defines VF ID to which this VSI connects.
 473 *
 474 * Returns pointer to the successfully allocated VSI struct on success,
 475 * otherwise returns NULL on failure.
 476 */
 477static struct ice_vsi *
 478ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
 479{
 480        return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
 481}
 482
 483/**
 484 * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW
 485 * @pf: pointer to PF structure
 486 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 487 *
 488 * This returns the first MSIX vector index in HW that is used by this VF and
 489 * this will always be the OICR index in the AVF driver so any functionality
 490 * using vf->first_vector_idx for queue configuration will have to increment by
 491 * 1 to avoid meddling with the OICR index.
 492 */
 493static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
 494{
 495        return pf->hw.func_caps.common_cap.msix_vector_first_id +
 496                pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
 497}
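/* Worked example (hypothetical numbers): with msix_vector_first_id = 0,
 * pf->sriov_base_vector = 160 and pf->num_vf_msix = 5, VF 2 gets
 * first_vector_idx = 0 + 160 + 2 * 5 = 170. That vector is the VF's OICR
 * (mailbox) interrupt, so queue vectors start at 171 -- hence the "+ 1"
 * that callers such as ice_calc_vf_reg_idx() below must apply.
 */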
 498
 499/**
 500 * ice_alloc_vsi_res - Setup VF VSI and its resources
 501 * @vf: pointer to the VF structure
 502 *
 503 * Returns 0 on success, negative value on failure
 504 */
 505static int ice_alloc_vsi_res(struct ice_vf *vf)
 506{
 507        struct ice_pf *pf = vf->pf;
 508        LIST_HEAD(tmp_add_list);
 509        u8 broadcast[ETH_ALEN];
 510        struct ice_vsi *vsi;
 511        int status = 0;
 512
 513        /* first vector index is the VFs OICR index */
 514        vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
 515
 516        vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
 517        if (!vsi) {
 518                dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
 519                return -ENOMEM;
 520        }
 521
 522        vf->lan_vsi_idx = vsi->idx;
 523        vf->lan_vsi_num = vsi->vsi_num;
 524
  525        /* Check if a port VLAN existed before, and restore it accordingly */
 526        if (vf->port_vlan_id) {
 527                ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
 528                ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
 529        }
 530
 531        eth_broadcast_addr(broadcast);
 532
 533        status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
 534        if (status)
 535                goto ice_alloc_vsi_res_exit;
 536
 537        if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
 538                status = ice_add_mac_to_list(vsi, &tmp_add_list,
 539                                             vf->dflt_lan_addr.addr);
 540                if (status)
 541                        goto ice_alloc_vsi_res_exit;
 542        }
 543
 544        status = ice_add_mac(&pf->hw, &tmp_add_list);
 545        if (status)
 546                dev_err(&pf->pdev->dev, "could not add mac filters\n");
 547
 548        /* Clear this bit after VF initialization since we shouldn't reclaim
 549         * and reassign interrupts for synchronous or asynchronous VFR events.
 550         * We don't want to reconfigure interrupts since AVF driver doesn't
 551         * expect vector assignment to be changed unless there is a request for
 552         * more vectors.
 553         */
 554        clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
 555ice_alloc_vsi_res_exit:
 556        ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
 557        return status;
 558}
 559
 560/**
 561 * ice_alloc_vf_res - Allocate VF resources
 562 * @vf: pointer to the VF structure
 563 */
 564static int ice_alloc_vf_res(struct ice_vf *vf)
 565{
 566        struct ice_pf *pf = vf->pf;
 567        int tx_rx_queue_left;
 568        int status;
 569
 570        /* setup VF VSI and necessary resources */
 571        status = ice_alloc_vsi_res(vf);
 572        if (status)
 573                goto ice_alloc_vf_res_exit;
 574
  575        /* Update the number of VF queues, in case the VF requested queue
  576         * changes
  577         */
 578        tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
 579        tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
 580        if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
 581            vf->num_req_qs != vf->num_vf_qs)
 582                vf->num_vf_qs = vf->num_req_qs;
 583
 584        if (vf->trusted)
 585                set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 586        else
 587                clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
 588
 589        /* VF is now completely initialized */
 590        set_bit(ICE_VF_STATE_INIT, vf->vf_states);
 591
 592        return status;
 593
 594ice_alloc_vf_res_exit:
 595        ice_free_vf_res(vf);
 596        return status;
 597}
 598
 599/**
 600 * ice_ena_vf_mappings
 601 * @vf: pointer to the VF structure
 602 *
 603 * Enable VF vectors and queues allocation by writing the details into
 604 * respective registers.
 605 */
 606static void ice_ena_vf_mappings(struct ice_vf *vf)
 607{
 608        struct ice_pf *pf = vf->pf;
 609        struct ice_vsi *vsi;
 610        int first, last, v;
 611        struct ice_hw *hw;
 612        int abs_vf_id;
 613        u32 reg;
 614
 615        hw = &pf->hw;
 616        vsi = pf->vsi[vf->lan_vsi_idx];
 617        first = vf->first_vector_idx;
 618        last = (first + pf->num_vf_msix) - 1;
 619        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 620
 621        /* VF Vector allocation */
 622        reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
 623               ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
 624               VPINT_ALLOC_VALID_M);
 625        wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
 626
 627        reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
 628               ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
 629               VPINT_ALLOC_PCI_VALID_M);
 630        wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
 631        /* map the interrupts to its functions */
 632        for (v = first; v <= last; v++) {
 633                reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
 634                        GLINT_VECT2FUNC_VF_NUM_M) |
 635                       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
 636                        GLINT_VECT2FUNC_PF_NUM_M));
 637                wr32(hw, GLINT_VECT2FUNC(v), reg);
 638        }
 639
 640        /* Map mailbox interrupt. We put an explicit 0 here to remind us that
 641         * VF admin queue interrupts will go to VF MSI-X vector 0.
 642         */
 643        wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
 644        /* set regardless of mapping mode */
 645        wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
 646
 647        /* VF Tx queues allocation */
 648        if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
 649                /* set the VF PF Tx queue range
 650                 * VFNUMQ value should be set to (number of queues - 1). A value
 651                 * of 0 means 1 queue and a value of 255 means 256 queues
 652                 */
 653                reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
 654                        VPLAN_TX_QBASE_VFFIRSTQ_M) |
 655                       (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
 656                        VPLAN_TX_QBASE_VFNUMQ_M));
 657                wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
 658        } else {
 659                dev_err(&pf->pdev->dev,
 660                        "Scattered mode for VF Tx queues is not yet implemented\n");
 661        }
 662
 663        /* set regardless of mapping mode */
 664        wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
 665
 666        /* VF Rx queues allocation */
 667        if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
 668                /* set the VF PF Rx queue range
 669                 * VFNUMQ value should be set to (number of queues - 1). A value
 670                 * of 0 means 1 queue and a value of 255 means 256 queues
 671                 */
 672                reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
 673                        VPLAN_RX_QBASE_VFFIRSTQ_M) |
  674                       (((vsi->alloc_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
 675                        VPLAN_RX_QBASE_VFNUMQ_M));
 676                wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
 677        } else {
 678                dev_err(&pf->pdev->dev,
 679                        "Scattered mode for VF Rx queues is not yet implemented\n");
 680        }
 681}
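/* Worked example for the mappings above (hypothetical numbers): with
 * first = 170 and last = 174, VPINT_ALLOC is packed roughly as
 * (170 << VPINT_ALLOC_FIRST_S) | (174 << VPINT_ALLOC_LAST_S) |
 * VPINT_ALLOC_VALID_M (masking elided). Likewise, a contiguous Tx range
 * with txq_map[0] = 48 and alloc_txq = 4 programs VFFIRSTQ = 48 and
 * VFNUMQ = 3, since VFNUMQ holds (number of queues - 1).
 */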
 682
 683/**
 684 * ice_determine_res
 685 * @pf: pointer to the PF structure
 686 * @avail_res: available resources in the PF structure
 687 * @max_res: maximum resources that can be given per VF
 688 * @min_res: minimum resources that can be given per VF
 689 *
  690 * Returns a non-zero value if resources (queues/vectors) are available, or
  691 * returns zero if the PF cannot accommodate all num_alloc_vfs.
 692 */
 693static int
 694ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
 695{
 696        bool checked_min_res = false;
 697        int res;
 698
  699        /* Start by checking if the PF can assign the max number of
  700         * resources for all num_alloc_vfs.
  701         * If yes, return that number per VF.
  702         * If no, halve the count (rounding up) and check again.
  703         * Repeat until even the minimum resources are not available; in
  704         * that case return 0.
  705         */
 706        res = max_res;
 707        while ((res >= min_res) && !checked_min_res) {
 708                int num_all_res;
 709
 710                num_all_res = pf->num_alloc_vfs * res;
 711                if (num_all_res <= avail_res)
 712                        return res;
 713
 714                if (res == min_res)
 715                        checked_min_res = true;
 716
 717                res = DIV_ROUND_UP(res, 2);
 718        }
 719        return 0;
 720}
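/* Worked example (hypothetical numbers): with pf->num_alloc_vfs = 32,
 * avail_res = 200, max_res = 65 and min_res = 5, the loop tries 65
 * (32 * 65 = 2080 > 200), then halves via DIV_ROUND_UP through 33, 17 and
 * 9 (all still too large), and finally 5 (32 * 5 = 160 <= 200), so it
 * returns 5 resources per VF. If even min_res did not fit, it would
 * return 0.
 */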
 721
 722/**
 723 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 724 * @vf: VF to calculate the register index for
 725 * @q_vector: a q_vector associated to the VF
 726 */
 727int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
 728{
 729        struct ice_pf *pf;
 730
 731        if (!vf || !q_vector)
 732                return -EINVAL;
 733
 734        pf = vf->pf;
 735
 736        /* always add one to account for the OICR being the first MSIX */
 737        return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
 738                q_vector->v_idx + 1;
 739}
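/* Worked example, continuing the sketch above (hypothetical numbers): with
 * pf->sriov_base_vector = 160, pf->num_vf_msix = 5 and vf_id = 2, the
 * q_vector with v_idx = 0 maps to register index 160 + 10 + 0 + 1 = 171,
 * leaving index 170 for the VF's OICR vector.
 */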
 740
 741/**
 742 * ice_get_max_valid_res_idx - Get the max valid resource index
 743 * @res: pointer to the resource to find the max valid index for
 744 *
 745 * Start from the end of the ice_res_tracker and return right when we find the
 746 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 747 * valid for SR-IOV because it is the only consumer that manipulates the
 748 * res->end and this is always called when res->end is set to res->num_entries.
 749 */
 750static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
 751{
 752        int i;
 753
 754        if (!res)
 755                return -EINVAL;
 756
 757        for (i = res->num_entries - 1; i >= 0; i--)
 758                if (res->list[i] & ICE_RES_VALID_BIT)
 759                        return i;
 760
 761        return 0;
 762}
 763
 764/**
 765 * ice_sriov_set_msix_res - Set any used MSIX resources
 766 * @pf: pointer to PF structure
 767 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 768 *
 769 * This function allows SR-IOV resources to be taken from the end of the PF's
 770 * allowed HW MSIX vectors so in many cases the irq_tracker will not
 771 * be needed. In these cases we just set the pf->sriov_base_vector and return
 772 * success.
 773 *
 774 * If SR-IOV needs to use any pf->irq_tracker entries it updates the
 775 * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
 776 * so any calls to ice_get_res() using the irq_tracker will not try to use
 777 * resources at or beyond the newly set value.
 778 *
  779 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
  780 * available in the PF's space for SR-IOV.
 781 */
 782static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
 783{
 784        int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
 785        u16 pf_total_msix_vectors =
 786                pf->hw.func_caps.common_cap.num_msix_vectors;
 787        struct ice_res_tracker *res = pf->irq_tracker;
 788        int sriov_base_vector;
 789
 790        if (max_valid_res_idx < 0)
 791                return max_valid_res_idx;
 792
 793        sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
 794
 795        /* make sure we only grab irq_tracker entries from the list end and
 796         * that we have enough available MSIX vectors
 797         */
 798        if (sriov_base_vector <= max_valid_res_idx)
 799                return -EINVAL;
 800
 801        pf->sriov_base_vector = sriov_base_vector;
 802
 803        /* dip into irq_tracker entries and update used resources */
 804        if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
 805                pf->num_avail_sw_msix -=
 806                        res->num_entries - pf->sriov_base_vector;
 807                res->end = pf->sriov_base_vector;
 808        }
 809
 810        return 0;
 811}
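/* Worked example (hypothetical numbers): with 256 total function vectors,
 * res->num_entries = 224 and num_msix_needed = 64, the base becomes
 * 256 - 64 = 192. Assuming the highest in-use tracker entry is below 192,
 * the check passes; since 64 > 256 - 224 = 32, SR-IOV must dip into the
 * tracker: num_avail_sw_msix shrinks by 224 - 192 = 32 and res->end is
 * lowered to 192 so ice_get_res() callers stay below the SR-IOV range.
 */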
 812
 813/**
 814 * ice_check_avail_res - check if vectors and queues are available
 815 * @pf: pointer to the PF structure
 816 *
  817 * This function is where we calculate the actual number of resources for
  818 * VF VSIs; we don't reserve ahead of time during probe. Returns success if
  819 * vector and queue resources are available, otherwise returns an error code
 820 */
 821static int ice_check_avail_res(struct ice_pf *pf)
 822{
 823        int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
 824        u16 num_msix, num_txq, num_rxq, num_avail_msix;
 825
 826        if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
 827                return -EINVAL;
 828
 829        /* add 1 to max_valid_res_idx to account for it being 0-based */
 830        num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
 831                (max_valid_res_idx + 1);
 832
  833        /* Grab from HW interrupts common pool.
  834         * Note: By the time the user decides it needs more vectors in a VF
  835         * it's already too late, since one must decide this prior to
  836         * creating the VF interface. So the best we can do is take a guess
  837         * as to what the user might want.
  838         *
  839         * We have two policies for vector allocation:
  840         * 1. If num_alloc_vfs is from 1 to 16, we consider this a small
  841         * number of VFs used for NFV appliances. Since this is a special
  842         * case, we try to assign the maximum vectors per VF (65) possible,
  843         * based on the determine_resources algorithm.
  844         * 2. If num_alloc_vfs is from 17 to 256, it's a large number of
  845         * regular VFs not used for any special purpose. Hence try to grab
  846         * the default interrupt vectors (5, as supported by the AVF driver).
  847         */
 848        if (pf->num_alloc_vfs <= 16) {
 849                num_msix = ice_determine_res(pf, num_avail_msix,
 850                                             ICE_MAX_INTR_PER_VF,
 851                                             ICE_MIN_INTR_PER_VF);
 852        } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
 853                num_msix = ice_determine_res(pf, num_avail_msix,
 854                                             ICE_DFLT_INTR_PER_VF,
 855                                             ICE_MIN_INTR_PER_VF);
 856        } else {
 857                dev_err(&pf->pdev->dev,
 858                        "Number of VFs %d exceeds max VF count %d\n",
 859                        pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
 860                return -EIO;
 861        }
 862
 863        if (!num_msix)
 864                return -EIO;
 865
  866        /* Grab from the common pool.
  867         * Start by requesting the default number of queues (4, as supported
  868         * by the AVF driver). Note that the main difference between queues
  869         * and vectors is that the latter can only be reserved at init time,
  870         * while queues can be requested by the VF at runtime through
  871         * virtchnl; that is why we start by reserving only a few queues.
  872         */
 873        num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
 874                                    ICE_MIN_QS_PER_VF);
 875
 876        num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
 877                                    ICE_MIN_QS_PER_VF);
 878
 879        if (!num_txq || !num_rxq)
 880                return -EIO;
 881
 882        if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
 883                return -EINVAL;
 884
  885        /* Since the AVF driver works only with queue pairs, it expects an
  886         * equal number of Rx and Tx queues; take the minimum of the
  887         * available Tx and Rx queues.
  888         */
 889        pf->num_vf_qps = min_t(int, num_txq, num_rxq);
 890        pf->num_vf_msix = num_msix;
 891
 892        return 0;
 893}
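/* Worked example (hypothetical numbers): with 8 VFs and
 * num_avail_msix = 300, policy 1 applies: 8 * 65 = 520 > 300, so
 * ice_determine_res() halves to 33, and 8 * 33 = 264 <= 300 yields
 * num_msix = 33. With q_left_tx = q_left_rx = 128, the default 4 queue
 * pairs fit directly (8 * 4 = 32), giving num_vf_qps = 4.
 */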
 894
 895/**
 896 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
 897 * @vf: pointer to the VF structure
 898 *
 899 * Cleanup a VF after the hardware reset is finished. Expects the caller to
  900 * have verified whether the reset finished properly, and to ensure the
  901 * minimum amount of wait time has passed. Reallocates VF resources to make
  902 * the VF state active.
 903 */
 904static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
 905{
 906        struct ice_pf *pf = vf->pf;
 907        struct ice_hw *hw;
 908        u32 reg;
 909
 910        hw = &pf->hw;
 911
 912        /* PF software completes the flow by notifying VF that reset flow is
 913         * completed. This is done by enabling hardware by clearing the reset
 914         * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
 915         * register to VFR completed (done at the end of this function)
 916         * By doing this we allow HW to access VF memory at any point. If we
 917         * did it any sooner, HW could access memory while it was being freed
 918         * in ice_free_vf_res(), causing an IOMMU fault.
 919         *
 920         * On the other hand, this needs to be done ASAP, because the VF driver
 921         * is waiting for this to happen and may report a timeout. It's
 922         * harmless, but it gets logged into Guest OS kernel log, so best avoid
 923         * it.
 924         */
 925        reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
 926        reg &= ~VPGEN_VFRTRIG_VFSWR_M;
 927        wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
 928
 929        /* reallocate VF resources to finish resetting the VSI state */
 930        if (!ice_alloc_vf_res(vf)) {
 931                ice_ena_vf_mappings(vf);
 932                set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
 933                clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
 934                vf->num_vlan = 0;
 935        }
 936
 937        /* Tell the VF driver the reset is done. This needs to be done only
 938         * after VF has been fully initialized, because the VF driver may
 939         * request resources immediately after setting this flag.
 940         */
 941        wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
 942}
 943
 944/**
 945 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 946 * @vf: pointer to the VF info
 947 * @vsi: the VSI being configured
 948 * @promisc_m: mask of promiscuous config bits
 949 * @rm_promisc: promisc flag request from the VF to remove or add filter
 950 *
 951 * This function configures VF VSI promiscuous mode, based on the VF requests,
 952 * for Unicast, Multicast and VLAN
 953 */
 954static enum ice_status
 955ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
 956                       bool rm_promisc)
 957{
 958        struct ice_pf *pf = vf->pf;
 959        enum ice_status status = 0;
 960        struct ice_hw *hw;
 961
 962        hw = &pf->hw;
 963        if (vf->num_vlan) {
 964                status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
 965                                                  rm_promisc);
 966        } else if (vf->port_vlan_id) {
 967                if (rm_promisc)
 968                        status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
 969                                                       vf->port_vlan_id);
 970                else
 971                        status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
 972                                                     vf->port_vlan_id);
 973        } else {
 974                if (rm_promisc)
 975                        status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
 976                                                       0);
 977                else
 978                        status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
 979                                                     0);
 980        }
 981
 982        return status;
 983}
 984
 985/**
 986 * ice_reset_all_vfs - reset all allocated VFs in one go
 987 * @pf: pointer to the PF structure
 988 * @is_vflr: true if VFLR was issued, false if not
 989 *
 990 * First, tell the hardware to reset each VF, then do all the waiting in one
 991 * chunk, and finally finish restoring each VF after the wait. This is useful
 992 * during PF routines which need to reset all VFs, as otherwise it must perform
 993 * these resets in a serialized fashion.
 994 *
 995 * Returns true if any VFs were reset, and false otherwise.
 996 */
 997bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
 998{
 999        struct ice_hw *hw = &pf->hw;
1000        struct ice_vf *vf;
1001        int v, i;
1002
1003        /* If we don't have any VFs, then there is nothing to reset */
1004        if (!pf->num_alloc_vfs)
1005                return false;
1006
1007        /* If VFs have been disabled, there is no need to reset */
1008        if (test_and_set_bit(__ICE_VF_DIS, pf->state))
1009                return false;
1010
1011        /* Begin reset on all VFs at once */
1012        for (v = 0; v < pf->num_alloc_vfs; v++)
1013                ice_trigger_vf_reset(&pf->vf[v], is_vflr);
1014
1015        for (v = 0; v < pf->num_alloc_vfs; v++) {
1016                struct ice_vsi *vsi;
1017
1018                vf = &pf->vf[v];
1019                vsi = pf->vsi[vf->lan_vsi_idx];
1020                if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
1021                        ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
1022                        ice_vsi_stop_rx_rings(vsi);
1023                        clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
1024                }
1025        }
1026
1027        /* HW requires some time to make sure it can flush the FIFO for a VF
1028         * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1029         * sequence to make sure that it has completed. We'll keep track of
1030         * the VFs using a simple iterator that increments once that VF has
1031         * finished resetting.
1032         */
1033        for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1034                usleep_range(10000, 20000);
1035
1036                /* Check each VF in sequence */
1037                while (v < pf->num_alloc_vfs) {
1038                        u32 reg;
1039
1040                        vf = &pf->vf[v];
1041                        reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1042                        if (!(reg & VPGEN_VFRSTAT_VFRD_M))
1043                                break;
1044
1045                        /* If the current VF has finished resetting, move on
1046                         * to the next VF in sequence.
1047                         */
1048                        v++;
1049                }
1050        }
1051
1052        /* Display a warning if at least one VF didn't manage to reset in
1053         * time, but continue on with the operation.
1054         */
1055        if (v < pf->num_alloc_vfs)
1056                dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
1057        usleep_range(10000, 20000);
1058
1059        /* free VF resources to begin resetting the VSI state */
1060        for (v = 0; v < pf->num_alloc_vfs; v++) {
1061                vf = &pf->vf[v];
1062
1063                ice_free_vf_res(vf);
1064
1065                /* Free VF queues as well, and reallocate later.
 1066                 * If a given VF has a different number of queues
1067                 * configured, the request for update will come
1068                 * via mailbox communication.
1069                 */
1070                vf->num_vf_qs = 0;
1071        }
1072
1073        if (ice_sriov_free_msix_res(pf))
1074                dev_err(&pf->pdev->dev,
1075                        "Failed to free MSIX resources used by SR-IOV\n");
1076
1077        if (ice_check_avail_res(pf)) {
1078                dev_err(&pf->pdev->dev,
 1079                        "Cannot allocate VF resources, try with fewer VFs\n");
1080                return false;
1081        }
1082
1083        /* Finish the reset on each VF */
1084        for (v = 0; v < pf->num_alloc_vfs; v++) {
1085                vf = &pf->vf[v];
1086
1087                vf->num_vf_qs = pf->num_vf_qps;
1088                dev_dbg(&pf->pdev->dev,
1089                        "VF-id %d has %d queues configured\n",
1090                        vf->vf_id, vf->num_vf_qs);
1091                ice_cleanup_and_realloc_vf(vf);
1092        }
1093
1094        ice_flush(hw);
1095        clear_bit(__ICE_VF_DIS, pf->state);
1096
1097        return true;
1098}
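/* Timing sketch for the reset poll above (illustrative only): at most 10
 * passes of usleep_range(10000, 20000) gives a worst-case budget of roughly
 * 100-200 ms shared across all VFs; any VF still not showing
 * VPGEN_VFRSTAT_VFRD_M after that triggers the "VF reset check timeout"
 * warning, and the flow continues regardless.
 */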
1099
1100/**
1101 * ice_reset_vf - Reset a particular VF
1102 * @vf: pointer to the VF structure
1103 * @is_vflr: true if VFLR was issued, false if not
1104 *
1105 * Returns true if the VF is reset, false otherwise.
1106 */
1107static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1108{
1109        struct ice_pf *pf = vf->pf;
1110        struct ice_vsi *vsi;
1111        struct ice_hw *hw;
1112        bool rsd = false;
1113        u8 promisc_m;
1114        u32 reg;
1115        int i;
1116
1117        /* If the VFs have been disabled, this means something else is
1118         * resetting the VF, so we shouldn't continue.
1119         */
1120        if (test_and_set_bit(__ICE_VF_DIS, pf->state))
1121                return false;
1122
1123        ice_trigger_vf_reset(vf, is_vflr);
1124
1125        vsi = pf->vsi[vf->lan_vsi_idx];
1126
1127        if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
1128                ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
1129                ice_vsi_stop_rx_rings(vsi);
1130                clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
1131        } else {
 1132                /* Make the Disable LAN Tx queue AQ call even when queues are
 1133                 * not enabled. This is needed for successful completion of VFR.
1134                 */
1135                ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1136                                NULL, ICE_VF_RESET, vf->vf_id, NULL);
1137        }
1138
1139        hw = &pf->hw;
1140        /* poll VPGEN_VFRSTAT reg to make sure
1141         * that reset is complete
1142         */
1143        for (i = 0; i < 10; i++) {
1144                /* VF reset requires driver to first reset the VF and then
1145                 * poll the status register to make sure that the reset
1146                 * completed successfully.
1147                 */
1148                usleep_range(10000, 20000);
1149                reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1150                if (reg & VPGEN_VFRSTAT_VFRD_M) {
1151                        rsd = true;
1152                        break;
1153                }
1154        }
1155
 1156        /* Display a warning if the VF didn't manage to reset in time, but
 1157         * continue on with the operation anyway.
 1158         */
1159        if (!rsd)
1160                dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1161                         vf->vf_id);
1162
1163        usleep_range(10000, 20000);
1164
1165        /* disable promiscuous modes in case they were enabled
1166         * ignore any error if disabling process failed
1167         */
1168        if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1169            test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
 1170                if (vf->port_vlan_id || vf->num_vlan)
1171                        promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1172                else
1173                        promisc_m = ICE_UCAST_PROMISC_BITS;
1174
1175                vsi = pf->vsi[vf->lan_vsi_idx];
1176                if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1177                        dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
1178        }
1179
1180        /* free VF resources to begin resetting the VSI state */
1181        ice_free_vf_res(vf);
1182
1183        ice_cleanup_and_realloc_vf(vf);
1184
1185        ice_flush(hw);
1186        clear_bit(__ICE_VF_DIS, pf->state);
1187
1188        return true;
1189}
1190
1191/**
1192 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1193 * @pf: pointer to the PF structure
1194 */
1195void ice_vc_notify_link_state(struct ice_pf *pf)
1196{
1197        int i;
1198
1199        for (i = 0; i < pf->num_alloc_vfs; i++)
1200                ice_vc_notify_vf_link_state(&pf->vf[i]);
1201}
1202
1203/**
1204 * ice_vc_notify_reset - Send pending reset message to all VFs
1205 * @pf: pointer to the PF structure
1206 *
1207 * indicate a pending reset to all VFs on a given PF
1208 */
1209void ice_vc_notify_reset(struct ice_pf *pf)
1210{
1211        struct virtchnl_pf_event pfe;
1212
1213        if (!pf->num_alloc_vfs)
1214                return;
1215
1216        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1217        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1218        ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1219                            (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1220}
1221
1222/**
1223 * ice_vc_notify_vf_reset - Notify VF of a reset event
1224 * @vf: pointer to the VF structure
1225 */
1226static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1227{
1228        struct virtchnl_pf_event pfe;
1229
1230        /* validate the request */
1231        if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1232                return;
1233
1234        /* verify if the VF is in either init or active before proceeding */
1235        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1236            !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1237                return;
1238
1239        pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1240        pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1241        ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1242                              VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1243                              NULL);
1244}
1245
1246/**
1247 * ice_alloc_vfs - Allocate and set up VFs resources
1248 * @pf: pointer to the PF structure
1249 * @num_alloc_vfs: number of VFs to allocate
1250 */
1251static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
1252{
1253        struct ice_hw *hw = &pf->hw;
1254        struct ice_vf *vfs;
1255        int i, ret;
1256
1257        /* Disable global interrupt 0 so we don't try to handle the VFLR. */
1258        wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1259             ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1260
1261        ice_flush(hw);
1262
1263        ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1264        if (ret) {
1265                pf->num_alloc_vfs = 0;
1266                goto err_unroll_intr;
1267        }
1268        /* allocate memory */
1269        vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
1270                           GFP_KERNEL);
1271        if (!vfs) {
1272                ret = -ENOMEM;
1273                goto err_pci_disable_sriov;
1274        }
1275        pf->vf = vfs;
1276
1277        /* apply default profile */
1278        for (i = 0; i < num_alloc_vfs; i++) {
1279                vfs[i].pf = pf;
1280                vfs[i].vf_sw_id = pf->first_sw;
1281                vfs[i].vf_id = i;
1282
1283                /* assign default capabilities */
1284                set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1285                vfs[i].spoofchk = true;
1286
1287                /* Set this state so that PF driver does VF vector assignment */
1288                set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
1289        }
1290        pf->num_alloc_vfs = num_alloc_vfs;
1291
1292        /* VF resources get allocated during reset */
1293        if (!ice_reset_all_vfs(pf, true)) {
1294                ret = -EIO;
1295                goto err_unroll_sriov;
1296        }
1297
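        /* Success path: reuse the unwind label below only to rearm
         * interrupt 0, which was disabled at the top of this function.
         */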
1298        goto err_unroll_intr;
1299
1300err_unroll_sriov:
1301        pf->vf = NULL;
1302        devm_kfree(&pf->pdev->dev, vfs);
1303        vfs = NULL;
1304        pf->num_alloc_vfs = 0;
1305err_pci_disable_sriov:
1306        pci_disable_sriov(pf->pdev);
1307err_unroll_intr:
1308        /* rearm interrupts here */
1309        ice_irq_dynamic_ena(hw, NULL, NULL);
1310        return ret;
1311}
1312
1313/**
1314 * ice_pf_state_is_nominal - checks the PF for nominal state
1315 * @pf: pointer to PF to check
1316 *
1317 * Check the PF's state for a collection of bits that would indicate
1318 * the PF is in a state that would inhibit normal operation for
1319 * driver functionality.
1320 *
1321 * Returns true if PF is in a nominal state.
1322 * Returns false otherwise
1323 */
1324static bool ice_pf_state_is_nominal(struct ice_pf *pf)
1325{
1326        DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
1327
1328        if (!pf)
1329                return false;
1330
1331        bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
1332        if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1333                return false;
1334
1335        return true;
1336}
1337
1338/**
1339 * ice_pci_sriov_ena - Enable or change number of VFs
1340 * @pf: pointer to the PF structure
1341 * @num_vfs: number of VFs to allocate
1342 */
1343static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1344{
1345        int pre_existing_vfs = pci_num_vf(pf->pdev);
1346        struct device *dev = &pf->pdev->dev;
1347        int err;
1348
1349        if (!ice_pf_state_is_nominal(pf)) {
1350                dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1351                return -EBUSY;
1352        }
1353
1354        if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1355                dev_err(dev, "This device is not capable of SR-IOV\n");
1356                return -ENODEV;
1357        }
1358
1359        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1360                ice_free_vfs(pf);
1361        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1362                return num_vfs;
1363
1364        if (num_vfs > pf->num_vfs_supported) {
1365                dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1366                        num_vfs, pf->num_vfs_supported);
1367                return -ENOTSUPP;
1368        }
1369
1370        dev_info(dev, "Allocating %d VFs\n", num_vfs);
1371        err = ice_alloc_vfs(pf, num_vfs);
1372        if (err) {
1373                dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1374                return err;
1375        }
1376
1377        set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1378        return num_vfs;
1379}
1380
1381/**
1382 * ice_sriov_configure - Enable or change number of VFs via sysfs
1383 * @pdev: pointer to a pci_dev structure
1384 * @num_vfs: number of VFs to allocate
1385 *
1386 * This function is called when the user updates the number of VFs in sysfs.
1387 */
1388int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1389{
1390        struct ice_pf *pf = pci_get_drvdata(pdev);
1391
1392        if (num_vfs)
1393                return ice_pci_sriov_ena(pf, num_vfs);
1394
1395        if (!pci_vfs_assigned(pdev)) {
1396                ice_free_vfs(pf);
1397        } else {
1398                dev_err(&pf->pdev->dev,
1399                        "can't free VFs because some are assigned to VMs.\n");
1400                return -EBUSY;
1401        }
1402
1403        return 0;
1404}
1405
1406/**
1407 * ice_process_vflr_event - Free VF resources via IRQ calls
1408 * @pf: pointer to the PF structure
1409 *
1410 * called from the VFLR IRQ handler to
1411 * free up VF resources and state variables
1412 */
1413void ice_process_vflr_event(struct ice_pf *pf)
1414{
1415        struct ice_hw *hw = &pf->hw;
1416        int vf_id;
1417        u32 reg;
1418
1419        if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1420            !pf->num_alloc_vfs)
1421                return;
1422
1423        for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
1424                struct ice_vf *vf = &pf->vf[vf_id];
1425                u32 reg_idx, bit_idx;
1426
1427                reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1428                bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
 1429                /* read GLGEN_VFLRSTAT register to find out which VFs got a VFLR */
1430                reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1431                if (reg & BIT(bit_idx))
1432                        /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1433                        ice_reset_vf(vf, true);
1434        }
1435}
1436
1437/**
1438 * ice_vc_dis_vf - Disable a given VF via SW reset
1439 * @vf: pointer to the VF info
1440 *
1441 * Disable the VF through a SW reset
1442 */
1443static void ice_vc_dis_vf(struct ice_vf *vf)
1444{
1445        ice_vc_notify_vf_reset(vf);
1446        ice_reset_vf(vf, false);
1447}
1448
1449/**
1450 * ice_vc_send_msg_to_vf - Send message to VF
1451 * @vf: pointer to the VF info
1452 * @v_opcode: virtual channel opcode
1453 * @v_retval: virtual channel return value
1454 * @msg: pointer to the msg buffer
1455 * @msglen: msg length
1456 *
1457 * send msg to VF
1458 */
1459static int
1460ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1461                      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1462{
1463        enum ice_status aq_ret;
1464        struct ice_pf *pf;
1465
1466        /* validate the request */
1467        if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1468                return -EINVAL;
1469
1470        pf = vf->pf;
1471
1472        /* single place to detect unsuccessful return values */
1473        if (v_retval) {
1474                vf->num_inval_msgs++;
1475                dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1476                         vf->vf_id, v_opcode, v_retval);
1477                if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1478                        dev_err(&pf->pdev->dev,
1479                                "Number of invalid messages exceeded for VF %d\n",
1480                                vf->vf_id);
1481                        dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1482                        set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1483                        return -EIO;
1484                }
1485        } else {
1486                vf->num_valid_msgs++;
1487                /* reset the invalid counter if a valid message is received */
1488                vf->num_inval_msgs = 0;
1489        }
1490
1491        aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1492                                       msg, msglen, NULL);
1493        if (aq_ret) {
1494                dev_info(&pf->pdev->dev,
1495                         "Unable to send the message to VF %d aq_err %d\n",
1496                         vf->vf_id, pf->hw.mailboxq.sq_last_status);
1497                return -EIO;
1498        }
1499
1500        return 0;
1501}
1502
1503/**
1504 * ice_vc_get_ver_msg - respond to the VF with the supported API version
1505 * @vf: pointer to the VF info
1506 * @msg: pointer to the msg buffer
1507 *
1508 * called from the VF to request the API version used by the PF
1509 */
1510static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1511{
1512        struct virtchnl_version_info info = {
1513                VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1514        };
1515
1516        vf->vf_ver = *(struct virtchnl_version_info *)msg;
1517        /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1518        if (VF_IS_V10(&vf->vf_ver))
1519                info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1520
1521        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1522                                     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1523                                     sizeof(struct virtchnl_version_info));
1524}
1525
1526/**
1527 * ice_vc_get_vf_res_msg - respond to the VF with its resources
1528 * @vf: pointer to the VF info
1529 * @msg: pointer to the msg buffer
1530 *
1531 * called from the VF to request its resources
1532 */
1533static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1534{
1535        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1536        struct virtchnl_vf_resource *vfres = NULL;
1537        struct ice_pf *pf = vf->pf;
1538        struct ice_vsi *vsi;
1539        int len = 0;
1540        int ret;
1541
1542        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
1543                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1544                goto err;
1545        }
1546
1547        len = sizeof(struct virtchnl_vf_resource);
1548
1549        vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
1550        if (!vfres) {
1551                v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1552                len = 0;
1553                goto err;
1554        }
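            /* A 1.1-or-later VF reports its capabilities in the request
             * message; a pre-1.1 VF is assumed to support only basic L2,
             * register-based RSS and VLAN offloads
             */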
1555        if (VF_IS_V11(&vf->vf_ver))
1556                vf->driver_caps = *(u32 *)msg;
1557        else
1558                vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1559                                  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1560                                  VIRTCHNL_VF_OFFLOAD_VLAN;
1561
1562        vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1563        vsi = pf->vsi[vf->lan_vsi_idx];
1564        if (!vsi) {
1565                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1566                goto err;
1567        }
1568
1569        if (!vsi->info.pvid)
1570                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1571
1572        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1573                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1574        } else {
1575                if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1576                        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1577                else
1578                        vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1579        }
1580
1581        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1582                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1583
1584        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1585                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1586
1587        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1588                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1589
1590        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1591                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1592
1593        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1594                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1595
1596        if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1597                vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1598
1599        if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1600                vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1601
1602        vfres->num_vsis = 1;
1603        /* Tx and Rx queue counts are equal for the VF */
1604        vfres->num_queue_pairs = vsi->num_txq;
1605        vfres->max_vectors = pf->num_vf_msix;
1606        vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1607        vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1608
1609        vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1610        vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1611        vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1612        ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1613                        vf->dflt_lan_addr.addr);
1614
1615        set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1616
1617err:
1618        /* send the response back to the VF */
1619        ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1620                                    (u8 *)vfres, len);
1621
1622        devm_kfree(&pf->pdev->dev, vfres);
1623        return ret;
1624}
1625
1626/**
1627 * ice_vc_reset_vf_msg - reset the VF
1628 * @vf: pointer to the VF info
1629 *
1630 * called from the VF to reset itself,
1631 * unlike other virtchnl messages, PF driver
1632 * doesn't send the response back to the VF
1633 */
1634static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1635{
1636        if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1637                ice_reset_vf(vf, false);
1638}
1639
1640/**
1641 * ice_find_vsi_from_id - find the VSI with the given ID
1642 * @pf: the PF structure to search for the VSI
1643 * @id: ID of the VSI it is searching for
1644 *
1645 * searches for the VSI with the given ID
1646 */
1647static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
1648{
1649        int i;
1650
1651        ice_for_each_vsi(pf, i)
1652                if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
1653                        return pf->vsi[i];
1654
1655        return NULL;
1656}
1657
1658/**
1659 * ice_vc_isvalid_vsi_id - check for the valid VSI ID
1660 * @vf: pointer to the VF info
1661 * @vsi_id: VF relative VSI ID
1662 *
1663 * check for the valid VSI ID
1664 */
1665static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1666{
1667        struct ice_pf *pf = vf->pf;
1668        struct ice_vsi *vsi;
1669
1670        vsi = ice_find_vsi_from_id(pf, vsi_id);
1671
1672        return (vsi && (vsi->vf_id == vf->vf_id));
1673}
1674
1675/**
1676 * ice_vc_isvalid_q_id - check for the valid queue ID
1677 * @vf: pointer to the VF info
1678 * @vsi_id: VSI ID
1679 * @qid: VSI relative queue ID
1680 *
1681 * check for the valid queue ID
1682 */
1683static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
1684{
1685        struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
1686        /* allocated Tx and Rx queues should always be equal for a VF VSI */
1687        return (vsi && (qid < vsi->alloc_txq));
1688}
1689
1690/**
1691 * ice_vc_config_rss_key - configure the VF's RSS key
1692 * @vf: pointer to the VF info
1693 * @msg: pointer to the msg buffer
1694 *
1695 * Configure the VF's RSS key
1696 */
1697static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
1698{
1699        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1700        struct virtchnl_rss_key *vrk =
1701                (struct virtchnl_rss_key *)msg;
1702        struct ice_pf *pf = vf->pf;
1703        struct ice_vsi *vsi = NULL;
1704
1705        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1706                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1707                goto error_param;
1708        }
1709
1710        if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
1711                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1712                goto error_param;
1713        }
1714
1715        vsi = pf->vsi[vf->lan_vsi_idx];
1716        if (!vsi) {
1717                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1718                goto error_param;
1719        }
1720
1721        if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
1722                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1723                goto error_param;
1724        }
1725
1726        if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1727                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1728                goto error_param;
1729        }
1730
1731        if (ice_set_rss(vsi, vrk->key, NULL, 0))
1732                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1733error_param:
1734        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1735                                     NULL, 0);
1736}
1737
1738/**
1739 * ice_vc_config_rss_lut - configure the VF's RSS LUT
1740 * @vf: pointer to the VF info
1741 * @msg: pointer to the msg buffer
1742 *
1743 * Configure the VF's RSS LUT
1744 */
1745static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
1746{
1747        struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
1748        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1749        struct ice_pf *pf = vf->pf;
1750        struct ice_vsi *vsi = NULL;
1751
1752        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1753                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1754                goto error_param;
1755        }
1756
1757        if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
1758                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1759                goto error_param;
1760        }
1761
1762        vsi = pf->vsi[vf->lan_vsi_idx];
1763        if (!vsi) {
1764                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1765                goto error_param;
1766        }
1767
1768        if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
1769                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1770                goto error_param;
1771        }
1772
1773        if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1774                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1775                goto error_param;
1776        }
1777
1778        if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
1779                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1780error_param:
1781        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1782                                     NULL, 0);
1783}
1784
1785/**
1786 * ice_vc_get_stats_msg - get VSI stats for the VF
1787 * @vf: pointer to the VF info
1788 * @msg: pointer to the msg buffer
1789 *
1790 * called from the VF to get VSI stats
1791 */
1792static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1793{
1794        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1795        struct virtchnl_queue_select *vqs =
1796                (struct virtchnl_queue_select *)msg;
1797        struct ice_pf *pf = vf->pf;
1798        struct ice_eth_stats stats;
1799        struct ice_vsi *vsi;
1800
1801        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1802                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1803                goto error_param;
1804        }
1805
1806        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1807                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1808                goto error_param;
1809        }
1810
1811        vsi = pf->vsi[vf->lan_vsi_idx];
1812        if (!vsi) {
1813                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1814                goto error_param;
1815        }
1816
1817        memset(&stats, 0, sizeof(struct ice_eth_stats));
1818        ice_update_eth_stats(vsi);
1819
1820        stats = vsi->eth_stats;
1821
1822error_param:
1823        /* send the response to the VF */
1824        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1825                                     (u8 *)&stats, sizeof(stats));
1826}
1827
1828/**
1829 * ice_vc_ena_qs_msg - enable all or specific queue(s)
1830 * @vf: pointer to the VF info
1831 * @msg: pointer to the msg buffer
1832 *
1833 * called from the VF to enable all or specific queue(s)
1834 */
1835static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
1836{
1837        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1838        struct virtchnl_queue_select *vqs =
1839            (struct virtchnl_queue_select *)msg;
1840        struct ice_pf *pf = vf->pf;
1841        struct ice_vsi *vsi;
1842
1843        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1844                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1845                goto error_param;
1846        }
1847
1848        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1849                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1850                goto error_param;
1851        }
1852
1853        if (!vqs->rx_queues && !vqs->tx_queues) {
1854                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1855                goto error_param;
1856        }
1857
1858        vsi = pf->vsi[vf->lan_vsi_idx];
1859        if (!vsi) {
1860                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1861                goto error_param;
1862        }
1863
1864        /* Enable only Rx rings; Tx rings were enabled by the FW when the
1865         * Tx queue group list was configured and the context bits were
1866         * programmed using ice_vsi_cfg_txqs
1867         */
1868        if (ice_vsi_start_rx_rings(vsi))
1869                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1870
1871        /* Set flag to indicate that queues are enabled */
1872        if (v_ret == VIRTCHNL_STATUS_SUCCESS)
1873                set_bit(ICE_VF_STATE_ENA, vf->vf_states);
1874
1875error_param:
1876        /* send the response to the VF */
1877        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1878                                     NULL, 0);
1879}
1880
1881/**
1882 * ice_vc_dis_qs_msg - disable all or specific queue(s)
1883 * @vf: pointer to the VF info
1884 * @msg: pointer to the msg buffer
1885 *
1886 * called from the VF to disable all or specific
1887 * queue(s)
1888 */
1889static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
1890{
1891        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1892        struct virtchnl_queue_select *vqs =
1893            (struct virtchnl_queue_select *)msg;
1894        struct ice_pf *pf = vf->pf;
1895        struct ice_vsi *vsi;
1896
1897        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
1898            !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
1899                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1900                goto error_param;
1901        }
1902
1903        if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1904                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1905                goto error_param;
1906        }
1907
1908        if (!vqs->rx_queues && !vqs->tx_queues) {
1909                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1910                goto error_param;
1911        }
1912
1913        vsi = pf->vsi[vf->lan_vsi_idx];
1914        if (!vsi) {
1915                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1916                goto error_param;
1917        }
1918
1919        if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
1920                dev_err(&vsi->back->pdev->dev,
1921                        "Failed to stop tx rings on VSI %d\n",
1922                        vsi->vsi_num);
1923                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1924        }
1925
1926        if (ice_vsi_stop_rx_rings(vsi)) {
1927                dev_err(&vsi->back->pdev->dev,
1928                        "Failed to stop rx rings on VSI %d\n",
1929                        vsi->vsi_num);
1930                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1931        }
1932
1933        /* Clear enabled queues flag */
1934        if (v_ret == VIRTCHNL_STATUS_SUCCESS)
1935                clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
1936
1937error_param:
1938        /* send the response to the VF */
1939        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
1940                                     NULL, 0);
1941}
1942
1943/**
1944 * ice_vc_cfg_irq_map_msg - configure the IRQ to queue map
1945 * @vf: pointer to the VF info
1946 * @msg: pointer to the msg buffer
1947 *
1948 * called from the VF to configure the IRQ to queue map
1949 */
1950static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
1951{
1952        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1953        struct virtchnl_irq_map_info *irqmap_info;
1954        u16 vsi_id, vsi_q_id, vector_id;
1955        struct virtchnl_vector_map *map;
1956        struct ice_pf *pf = vf->pf;
1957        u16 num_q_vectors_mapped;
1958        struct ice_vsi *vsi;
1959        unsigned long qmap;
1960        int i;
1961
1962        irqmap_info = (struct virtchnl_irq_map_info *)msg;
1963        num_q_vectors_mapped = irqmap_info->num_vectors;
1964
1965        vsi = pf->vsi[vf->lan_vsi_idx];
1966        if (!vsi) {
1967                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1968                goto error_param;
1969        }
1970
1971        /* Check to make sure number of VF vectors mapped is not greater than
1972         * number of VF vectors originally allocated, and check that
1973         * there is actually at least a single VF queue vector mapped
1974         */
1975        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1976            pf->num_vf_msix < num_q_vectors_mapped ||
1977            !irqmap_info->num_vectors) {
1978                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1979                goto error_param;
1980        }
1981
1982        for (i = 0; i < num_q_vectors_mapped; i++) {
1983                struct ice_q_vector *q_vector;
1984
1985                map = &irqmap_info->vecmap[i];
1986
1987                vector_id = map->vector_id;
1988                vsi_id = map->vsi_id;
1989                /* validate msg params */
1990                if (vector_id >= pf->hw.func_caps.common_cap.num_msix_vectors ||
1991                    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
1992                    (!vector_id && (map->rxq_map || map->txq_map))) {
1993                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1994                        goto error_param;
1995                }
1996
1997                /* No need to map VF miscellaneous or rogue vector */
1998                if (!vector_id)
1999                        continue;
2000
2001                /* Subtract the non-queue vector count from the vector_id
2002                 * passed by the VF to get the VSI queue vector array index
2003                 */
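                    /* e.g. if ICE_NONQ_VECS_VF is 1, VF vector_id 1 is the
                     * first queue vector and maps to vsi->q_vectors[0]
                     */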
2004                q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2005                if (!q_vector) {
2006                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2007                        goto error_param;
2008                }
2009
2010                /* look out for invalid queue indexes */
2011                qmap = map->rxq_map;
2012                q_vector->num_ring_rx = 0;
2013                for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2014                        if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
2015                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2016                                goto error_param;
2017                        }
2018                        q_vector->num_ring_rx++;
2019                        q_vector->rx.itr_idx = map->rxitr_idx;
2020                        vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2021                        ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2022                                              q_vector->rx.itr_idx);
2023                }
2024
2025                qmap = map->txq_map;
2026                q_vector->num_ring_tx = 0;
2027                for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2028                        if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
2029                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2030                                goto error_param;
2031                        }
2032                        q_vector->num_ring_tx++;
2033                        q_vector->tx.itr_idx = map->txitr_idx;
2034                        vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2035                        ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2036                                              q_vector->tx.itr_idx);
2037                }
2038        }
2039
2040error_param:
2041        /* send the response to the VF */
2042        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2043                                     NULL, 0);
2044}
2045
2046/**
2047 * ice_vc_cfg_qs_msg - configure the VF's Rx/Tx queues
2048 * @vf: pointer to the VF info
2049 * @msg: pointer to the msg buffer
2050 *
2051 * called from the VF to configure the Rx/Tx queues
2052 */
2053static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2054{
2055        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2056        struct virtchnl_vsi_queue_config_info *qci =
2057            (struct virtchnl_vsi_queue_config_info *)msg;
2058        struct virtchnl_queue_pair_info *qpi;
2059        struct ice_pf *pf = vf->pf;
2060        struct ice_vsi *vsi;
2061        int i;
2062
2063        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2064                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2065                goto error_param;
2066        }
2067
2068        if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2069                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2070                goto error_param;
2071        }
2072
2073        vsi = pf->vsi[vf->lan_vsi_idx];
2074        if (!vsi) {
                    v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2075                goto error_param;
            }
2076
2077        if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
2078                dev_err(&pf->pdev->dev,
2079                        "VF-%d requesting more than supported number of queues: %d\n",
2080                        vf->vf_id, qci->num_queue_pairs);
2081                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2082                goto error_param;
2083        }
2084
2085        for (i = 0; i < qci->num_queue_pairs; i++) {
2086                qpi = &qci->qpair[i];
2087                if (qpi->txq.vsi_id != qci->vsi_id ||
2088                    qpi->rxq.vsi_id != qci->vsi_id ||
2089                    qpi->rxq.queue_id != qpi->txq.queue_id ||
2090                    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2091                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2092                        goto error_param;
2093                }
2094                /* copy Tx queue info from VF into VSI */
2095                vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2096                vsi->tx_rings[i]->count = qpi->txq.ring_len;
2097                /* copy Rx queue info from VF into VSI */
2098                vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2099                vsi->rx_rings[i]->count = qpi->rxq.ring_len;
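                    /* sanity-check the VF-supplied sizes below: Rx buffers
                     * must stay under 16 KB and frames must be between 64
                     * bytes and just under 16 KB
                     */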
2100                if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
2101                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2102                        goto error_param;
2103                }
2104                vsi->rx_buf_len = qpi->rxq.databuffer_size;
2105                if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2106                    qpi->rxq.max_pkt_size < 64) {
2107                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2108                        goto error_param;
2109                }
2110                vsi->max_frame = qpi->rxq.max_pkt_size;
2111        }
2112
2113        /* The VF can request to configure fewer queues than it was
2114         * allocated by default, so update the VSI with the new number
2115         */
2116        vsi->num_txq = qci->num_queue_pairs;
2117        vsi->num_rxq = qci->num_queue_pairs;
2118        /* All queues of VF VSI are in TC 0 */
2119        vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
2120        vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
2121
2122        if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2123                v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2124
2125error_param:
2126        /* send the response to the VF */
2127        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2128                                     NULL, 0);
2129}
2130
2131/**
2132 * ice_is_vf_trusted - check if the VF is trusted
2133 * @vf: pointer to the VF info
2134 */
2135static bool ice_is_vf_trusted(struct ice_vf *vf)
2136{
2137        return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
2138}
2139
2140/**
2141 * ice_can_vf_change_mac - check if the VF can change its MAC filters
2142 * @vf: pointer to the VF info
2143 *
2144 * Return true if the VF is allowed to change its MAC filters, false otherwise
2145 */
2146static bool ice_can_vf_change_mac(struct ice_vf *vf)
2147{
2148        /* If the VF MAC address has been set administratively (via the
2149         * ndo_set_vf_mac command), then deny permission to the VF to
2150         * add/delete unicast MAC addresses, unless the VF is trusted
2151         */
2152        if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
2153                return false;
2154
2155        return true;
2156}
2157
2158/**
2159 * ice_vc_handle_mac_addr_msg - add or remove guest MAC address filters
2160 * @vf: pointer to the VF info
2161 * @msg: pointer to the msg buffer
2162 * @set: true if MAC filters are being set, false otherwise
2163 *
2164 * add or remove guest MAC address filters based on @set
2165 */
2166static int
2167ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
2168{
2169        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2170        struct virtchnl_ether_addr_list *al =
2171            (struct virtchnl_ether_addr_list *)msg;
2172        struct ice_pf *pf = vf->pf;
2173        enum virtchnl_ops vc_op;
2174        LIST_HEAD(mac_list);
2175        struct ice_vsi *vsi;
2176        int mac_count = 0;
2177        int i;
2178
2179        if (set)
2180                vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
2181        else
2182                vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
2183
2184        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2185            !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2186                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2187                goto handle_mac_exit;
2188        }
2189
2190        if (set && !ice_is_vf_trusted(vf) &&
2191            (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
2192                dev_err(&pf->pdev->dev,
2193                        "Can't add more MAC addresses: VF-%d is not trusted, switch the VF to trusted mode to add more MAC addresses\n",
2194                        vf->vf_id);
2195                /* The request exceeds the untrusted VF's MAC filter quota,
2196                 * so reject it rather than program a partial list
2197                 */
2198                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2199                goto handle_mac_exit;
2200        }
2201
2202        vsi = pf->vsi[vf->lan_vsi_idx];
2203        if (!vsi) {
2204                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2205                goto handle_mac_exit;
2206        }
2207
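            /* validate each address below; valid entries are collected on
             * mac_list and programmed in a single ice_add_mac() /
             * ice_remove_mac() call afterwards
             */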
2208        for (i = 0; i < al->num_elements; i++) {
2209                u8 *maddr = al->list[i].addr;
2210
2211                if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
2212                    is_broadcast_ether_addr(maddr)) {
2213                        if (set) {
2214                                /* VF is trying to add filters that the PF
2215                                 * already added. Just continue.
2216                                 */
2217                                dev_info(&pf->pdev->dev,
2218                                         "MAC %pM already set for VF %d\n",
2219                                         maddr, vf->vf_id);
2220                                continue;
2221                        } else {
2222                                /* VF can't remove dflt_lan_addr/bcast MAC */
2223                                dev_err(&pf->pdev->dev,
2224                                        "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
2225                                        maddr, vf->vf_id);
2226                                continue;
2227                        }
2228                }
2229
2230                /* check for the invalid cases and bail if necessary */
2231                if (is_zero_ether_addr(maddr)) {
2232                        dev_err(&pf->pdev->dev,
2233                                "invalid MAC %pM provided for VF %d\n",
2234                                maddr, vf->vf_id);
2235                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2236                        goto handle_mac_exit;
2237                }
2238
2239                if (is_unicast_ether_addr(maddr) &&
2240                    !ice_can_vf_change_mac(vf)) {
2241                        dev_err(&pf->pdev->dev,
2242                                "can't change unicast MAC for untrusted VF %d\n",
2243                                vf->vf_id);
2244                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2245                        goto handle_mac_exit;
2246                }
2247
2248                /* get here if maddr is multicast or if VF can change MAC */
2249                if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
2250                        v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2251                        goto handle_mac_exit;
2252                }
2253                mac_count++;
2254        }
2255
2256        /* program the updated filter list */
2257        if (set)
2258                v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list));
2259        else
2260                v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list));
2261
2262        if (v_ret) {
2263                dev_err(&pf->pdev->dev,
2264                        "can't update MAC filters for VF %d, error %d\n",
2265                        vf->vf_id, v_ret);
2266        } else {
2267                if (set)
2268                        vf->num_mac += mac_count;
2269                else
2270                        vf->num_mac -= mac_count;
2271        }
2272
2273handle_mac_exit:
2274        ice_free_fltr_list(&pf->pdev->dev, &mac_list);
2275        /* send the response to the VF */
2276        return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
2277}
2278
2279/**
2280 * ice_vc_add_mac_addr_msg - add guest MAC address filter
2281 * @vf: pointer to the VF info
2282 * @msg: pointer to the msg buffer
2283 *
2284 * add guest MAC address filter
2285 */
2286static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2287{
2288        return ice_vc_handle_mac_addr_msg(vf, msg, true);
2289}
2290
2291/**
2292 * ice_vc_del_mac_addr_msg - remove guest MAC address filter
2293 * @vf: pointer to the VF info
2294 * @msg: pointer to the msg buffer
2295 *
2296 * remove guest MAC address filter
2297 */
2298static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2299{
2300        return ice_vc_handle_mac_addr_msg(vf, msg, false);
2301}
2302
2303/**
2304 * ice_vc_request_qs_msg - request a different number of queue pairs
2305 * @vf: pointer to the VF info
2306 * @msg: pointer to the msg buffer
2307 *
2308 * VFs get a default number of queues but can use this message to request a
2309 * different number. If the request is successful, the PF resets the VF and
2310 * returns 0. If unsuccessful, the PF sends a virtchnl response informing the
2311 * VF of the number of available queue pairs.
2312 */
2313static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
2314{
2315        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2316        struct virtchnl_vf_res_request *vfres =
2317                (struct virtchnl_vf_res_request *)msg;
2318        int req_queues = vfres->num_queue_pairs;
2319        struct ice_pf *pf = vf->pf;
2320        int max_allowed_vf_queues;
2321        int tx_rx_queue_left;
2322        int cur_queues;
2323
2324        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2325                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2326                goto error_param;
2327        }
2328
2329        cur_queues = vf->num_vf_qs;
2330        tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
2331        max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
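            /* e.g. a VF currently holding 4 queue pairs while the PF has 12
             * Tx/Rx queues left may be granted up to 16 pairs in total
             */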
2332        if (req_queues <= 0) {
2333                dev_err(&pf->pdev->dev,
2334                        "VF %d tried to request %d queues. Ignoring.\n",
2335                        vf->vf_id, req_queues);
2336        } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
2337                dev_err(&pf->pdev->dev,
2338                        "VF %d tried to request more than %d queues.\n",
2339                        vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
2340                vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
2341        } else if (req_queues - cur_queues > tx_rx_queue_left) {
2342                dev_warn(&pf->pdev->dev,
2343                         "VF %d requested %d more queues, but only %d left.\n",
2344                         vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
2345                vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
2346                                               ICE_MAX_BASE_QS_PER_VF);
2347        } else {
2348                /* request is successful, so reset the VF */
2349                vf->num_req_qs = req_queues;
2350                ice_vc_dis_vf(vf);
2351                dev_info(&pf->pdev->dev,
2352                         "VF %d granted request of %d queues.\n",
2353                         vf->vf_id, req_queues);
2354                return 0;
2355        }
2356
2357error_param:
2358        /* send the response to the VF */
2359        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
2360                                     v_ret, (u8 *)vfres, sizeof(*vfres));
2361}
2362
2363/**
2364 * ice_set_vf_port_vlan - program VF Port VLAN ID and/or QoS
2365 * @netdev: network interface device structure
2366 * @vf_id: VF identifier
2367 * @vlan_id: VLAN ID being set
2368 * @qos: priority setting
2369 * @vlan_proto: VLAN protocol
2370 *
2371 * program VF Port VLAN ID and/or QoS
2372 */
2373int
2374ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
2375                     __be16 vlan_proto)
2376{
2377        u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
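            /* vlanprio packs the VLAN ID into the low bits with the QoS
             * priority above it (shifted by ICE_VLAN_PRIORITY_S); the result
             * is what gets programmed as the VSI's port VLAN (PVID)
             */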
2378        struct ice_netdev_priv *np = netdev_priv(netdev);
2379        struct ice_pf *pf = np->vsi->back;
2380        struct ice_vsi *vsi;
2381        struct ice_vf *vf;
2382        int ret = 0;
2383
2384        /* validate the request */
2385        if (vf_id >= pf->num_alloc_vfs) {
2386                dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
2387                return -EINVAL;
2388        }
2389
2390        if (vlan_id > ICE_MAX_VLANID || qos > 7) {
2391                dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2392                return -EINVAL;
2393        }
2394
2395        if (vlan_proto != htons(ETH_P_8021Q)) {
2396                dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
2397                return -EPROTONOSUPPORT;
2398        }
2399
2400        vf = &pf->vf[vf_id];
2401        vsi = pf->vsi[vf->lan_vsi_idx];
2402        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2403                dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
2404                return -EBUSY;
2405        }
2406
2407        if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
2408                /* duplicate request, so just return success */
2409                dev_info(&pf->pdev->dev,
2410                         "Duplicate pvid %d request\n", vlanprio);
2411                return ret;
2412        }
2413
2414        /* If PVID, then remove all filters on the old VLAN */
2415        if (vsi->info.pvid)
2416                ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2417                                  VLAN_VID_MASK));
2418
2419        if (vlan_id || qos) {
2420                ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
2421                if (ret)
2422                        goto error_set_pvid;
2423        } else {
2424                ice_vsi_manage_pvid(vsi, 0, false);
2425                vsi->info.pvid = 0;
2426        }
2427
2428        if (vlan_id) {
2429                dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2430                         vlan_id, qos, vf_id);
2431
2432                /* add new VLAN filter for each MAC */
2433                ret = ice_vsi_add_vlan(vsi, vlan_id);
2434                if (ret)
2435                        goto error_set_pvid;
2436        }
2437
2438        /* The Port VLAN needs to be saved across resets the same as the
2439         * default LAN MAC address.
2440         */
2441        vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2442
2443error_set_pvid:
2444        return ret;
2445}
2446
2447/**
2448 * ice_vc_process_vlan_msg - add or remove programmed guest VLAN ID
2449 * @vf: pointer to the VF info
2450 * @msg: pointer to the msg buffer
2451 * @add_v: Add VLAN if true, otherwise delete VLAN
2452 *
2453 * Process virtchnl op to add or remove programmed guest VLAN ID
2454 */
2455static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2456{
2457        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2458        struct virtchnl_vlan_filter_list *vfl =
2459            (struct virtchnl_vlan_filter_list *)msg;
2460        struct ice_pf *pf = vf->pf;
2461        bool vlan_promisc = false;
2462        struct ice_vsi *vsi;
2463        struct ice_hw *hw;
2464        int status = 0;
2465        u8 promisc_m;
2466        int i;
2467
2468        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2469                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2470                goto error_param;
2471        }
2472
2473        if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2474                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2475                goto error_param;
2476        }
2477
2478        if (add_v && !ice_is_vf_trusted(vf) &&
2479            vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2480                dev_info(&pf->pdev->dev,
2481                         "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
2482                         vf->vf_id);
2483                /* There is no need to let the VF know about not being
2484                 * trusted, so we can just return a success message here
2485                 */
2486                goto error_param;
2487        }
2488
2489        for (i = 0; i < vfl->num_elements; i++) {
2490                if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
2491                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2492                        dev_err(&pf->pdev->dev,
2493                                "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2494                        goto error_param;
2495                }
2496        }
2497
2498        hw = &pf->hw;
2499        vsi = pf->vsi[vf->lan_vsi_idx];
2500        if (!vsi) {
2501                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2502                goto error_param;
2503        }
2504
2505        if (vsi->info.pvid) {
2506                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2507                goto error_param;
2508        }
2509
2510        if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
2511                dev_err(&pf->pdev->dev,
2512                        "%sable VLAN stripping failed for VSI %i\n",
2513                         add_v ? "en" : "dis", vsi->vsi_num);
2514                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2515                goto error_param;
2516        }
2517
2518        if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2519            test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2520                vlan_promisc = true;
2521
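            /* if the VF is already in unicast/multicast promiscuous mode,
             * mirror added VLANs into VLAN promiscuous mode below instead of
             * enabling VLAN pruning
             */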
2522        if (add_v) {
2523                for (i = 0; i < vfl->num_elements; i++) {
2524                        u16 vid = vfl->vlan_id[i];
2525
2526                        if (!ice_is_vf_trusted(vf) &&
2527                            vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2528                                dev_info(&pf->pdev->dev,
2529                                         "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLAN filters\n",
2530                                         vf->vf_id);
2531                                /* There is no need to let the VF know about
2532                                 * not being trusted, so we can just return a
2533                                 * success message here as well.
2534                                 */
2535                                goto error_param;
2536                        }
2537
2538                        if (ice_vsi_add_vlan(vsi, vid)) {
2539                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2540                                goto error_param;
2541                        }
2542
2543                        vf->num_vlan++;
2544                        /* Enable VLAN pruning when VLAN is added */
2545                        if (!vlan_promisc) {
2546                                status = ice_cfg_vlan_pruning(vsi, true, false);
2547                                if (status) {
2548                                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2549                                        dev_err(&pf->pdev->dev,
2550                                                "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
2551                                                vid, status);
2552                                        goto error_param;
2553                                }
2554                        } else {
2555                                /* Enable Ucast/Mcast VLAN promiscuous mode */
2556                                promisc_m = ICE_PROMISC_VLAN_TX |
2557                                            ICE_PROMISC_VLAN_RX;
2558
2559                                status = ice_set_vsi_promisc(hw, vsi->idx,
2560                                                             promisc_m, vid);
2561                                if (status) {
2562                                        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2563                                        dev_err(&pf->pdev->dev,
2564                                                "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
2565                                                vid, status);
2566                                }
2567                        }
2568                }
2569        } else {
2570                /* For an untrusted VF, the number of VLAN elements passed
2571                 * to the PF for removal may exceed the number of VLAN
2572                 * filters actually programmed for that VF. So cap the loop
2573                 * at the number of VLANs added earlier with the add VLAN
2574                 * opcode, to avoid removing a VLAN that doesn't exist and
2575                 * sending an erroneous failure message back to the VF.
2576                 */
2577                int num_vf_vlan;
2578
2579                num_vf_vlan = vf->num_vlan;
2580                for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
2581                        u16 vid = vfl->vlan_id[i];
2582
2583                        /* Make sure ice_vsi_kill_vlan is successful before
2584                         * updating VLAN information
2585                         */
2586                        if (ice_vsi_kill_vlan(vsi, vid)) {
2587                                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2588                                goto error_param;
2589                        }
2590
2591                        vf->num_vlan--;
2592                        /* Disable VLAN pruning when removing VLAN */
2593                        ice_cfg_vlan_pruning(vsi, false, false);
2594
2595                        /* Disable Unicast/Multicast VLAN promiscuous mode */
2596                        if (vlan_promisc) {
2597                                promisc_m = ICE_PROMISC_VLAN_TX |
2598                                            ICE_PROMISC_VLAN_RX;
2599
2600                                ice_clear_vsi_promisc(hw, vsi->idx,
2601                                                      promisc_m, vid);
2602                        }
2603                }
2604        }
2605
2606error_param:
2607        /* send the response to the VF */
2608        if (add_v)
2609                return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
2610                                             NULL, 0);
2611        else
2612                return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2613                                             NULL, 0);
2614}
2615
2616/**
2617 * ice_vc_add_vlan_msg - Add and program guest VLAN ID
2618 * @vf: pointer to the VF info
2619 * @msg: pointer to the msg buffer
2620 *
2621 * Add and program guest VLAN ID
2622 */
2623static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2624{
2625        return ice_vc_process_vlan_msg(vf, msg, true);
2626}
2627
2628/**
2629 * ice_vc_remove_vlan_msg - remove programmed guest VLAN ID
2630 * @vf: pointer to the VF info
2631 * @msg: pointer to the msg buffer
2632 *
2633 * remove programmed guest VLAN ID
2634 */
2635static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2636{
2637        return ice_vc_process_vlan_msg(vf, msg, false);
2638}
2639
2640/**
2641 * ice_vc_ena_vlan_stripping - enable VLAN header stripping
2642 * @vf: pointer to the VF info
2643 *
2644 * Enable VLAN header stripping for a given VF
2645 */
2646static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2647{
2648        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2649        struct ice_pf *pf = vf->pf;
2650        struct ice_vsi *vsi;
2651
2652        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2653                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2654                goto error_param;
2655        }
2656
2657        vsi = pf->vsi[vf->lan_vsi_idx];
            if (!vsi) {
                    v_ret = VIRTCHNL_STATUS_ERR_PARAM;
                    goto error_param;
            }

2658        if (ice_vsi_manage_vlan_stripping(vsi, true))
2659                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2660
2661error_param:
2662        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2663                                     v_ret, NULL, 0);
2664}
2665
2666/**
2667 * ice_vc_dis_vlan_stripping - disable VLAN header stripping
2668 * @vf: pointer to the VF info
2669 *
2670 * Disable VLAN header stripping for a given VF
2671 */
2672static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
2673{
2674        enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2675        struct ice_pf *pf = vf->pf;
2676        struct ice_vsi *vsi;
2677
2678        if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2679                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2680                goto error_param;
2681        }
2682
2683        vsi = pf->vsi[vf->lan_vsi_idx];
2684        if (!vsi) {
2685                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2686                goto error_param;
2687        }
2688
2689        if (ice_vsi_manage_vlan_stripping(vsi, false))
2690                v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2691
2692error_param:
2693        return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2694                                     v_ret, NULL, 0);
2695}
2696
2697/**
2698 * ice_vc_process_vf_msg - Process request from VF
2699 * @pf: pointer to the PF structure
2700 * @event: pointer to the AQ event
2701 *
2702 * called from the common asq/arq handler to
2703 * process request from VF
2704 */
2705void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
2706{
2707        u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
2708        u16 vf_id = le16_to_cpu(event->desc.retval);
2709        u16 msglen = event->msg_len;
2710        u8 *msg = event->msg_buf;
2711        struct ice_vf *vf = NULL;
2712        int err = 0;
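            /* the mailbox descriptor carries the virtchnl opcode in
             * cookie_high and the source VF ID in the retval field, decoded
             * above
             */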
2713
2714        if (vf_id >= pf->num_alloc_vfs) {
2715                err = -EINVAL;
2716                goto error_handler;
2717        }
2718
2719        vf = &pf->vf[vf_id];
2720
2721        /* Check if VF is disabled. */
2722        if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
2723                err = -EPERM;
2724                goto error_handler;
2725        }
2726
2727        /* Perform basic checks on the msg */
2728        err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
2729        if (err) {
2730                if (err == VIRTCHNL_STATUS_ERR_PARAM)
2731                        err = -EPERM;
2732                else
2733                        err = -EINVAL;
2734                goto error_handler;
2735        }
2736
2737        /* Perform additional checks specific to RSS and Virtchnl */
2738        if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
2739                struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
2740
2741                if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
2742                        err = -EINVAL;
2743        } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
2744                struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2745
2746                if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
2747                        err = -EINVAL;
2748        }
2749
2750error_handler:
2751        if (err) {
2752                ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
2753                                      NULL, 0);
2754                dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
2755                        vf_id, v_opcode, msglen, err);
2756                return;
2757        }
2758
2759        switch (v_opcode) {
2760        case VIRTCHNL_OP_VERSION:
2761                err = ice_vc_get_ver_msg(vf, msg);
2762                break;
2763        case VIRTCHNL_OP_GET_VF_RESOURCES:
2764                err = ice_vc_get_vf_res_msg(vf, msg);
2765                break;
2766        case VIRTCHNL_OP_RESET_VF:
2767                ice_vc_reset_vf_msg(vf);
2768                break;
2769        case VIRTCHNL_OP_ADD_ETH_ADDR:
2770                err = ice_vc_add_mac_addr_msg(vf, msg);
2771                break;
2772        case VIRTCHNL_OP_DEL_ETH_ADDR:
2773                err = ice_vc_del_mac_addr_msg(vf, msg);
2774                break;
2775        case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2776                err = ice_vc_cfg_qs_msg(vf, msg);
2777                break;
2778        case VIRTCHNL_OP_ENABLE_QUEUES:
2779                err = ice_vc_ena_qs_msg(vf, msg);
2780                ice_vc_notify_vf_link_state(vf);
2781                break;
2782        case VIRTCHNL_OP_DISABLE_QUEUES:
2783                err = ice_vc_dis_qs_msg(vf, msg);
2784                break;
2785        case VIRTCHNL_OP_REQUEST_QUEUES:
2786                err = ice_vc_request_qs_msg(vf, msg);
2787                break;
2788        case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2789                err = ice_vc_cfg_irq_map_msg(vf, msg);
2790                break;
2791        case VIRTCHNL_OP_CONFIG_RSS_KEY:
2792                err = ice_vc_config_rss_key(vf, msg);
2793                break;
2794        case VIRTCHNL_OP_CONFIG_RSS_LUT:
2795                err = ice_vc_config_rss_lut(vf, msg);
2796                break;
2797        case VIRTCHNL_OP_GET_STATS:
2798                err = ice_vc_get_stats_msg(vf, msg);
2799                break;
2800        case VIRTCHNL_OP_ADD_VLAN:
2801                err = ice_vc_add_vlan_msg(vf, msg);
2802                break;
2803        case VIRTCHNL_OP_DEL_VLAN:
2804                err = ice_vc_remove_vlan_msg(vf, msg);
2805                break;
2806        case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2807                err = ice_vc_ena_vlan_stripping(vf);
2808                break;
2809        case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2810                err = ice_vc_dis_vlan_stripping(vf);
2811                break;
2812        case VIRTCHNL_OP_UNKNOWN:
2813        default:
2814                dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
2815                        v_opcode, vf_id);
2816                err = ice_vc_send_msg_to_vf(vf, v_opcode,
2817                                            VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
2818                                            NULL, 0);
2819                break;
2820        }
2821        if (err) {
2822                /* Errors from the opcode handlers are only logged here;
2823                 * the message handler must keep servicing pending work.
2824                 */
2825                dev_info(&pf->pdev->dev,
2826                         "PF failed to honor VF %d, opcode %d, error %d\n",
2827                         vf_id, v_opcode, err);
2828        }
2829}
2830
2831/**
2832 * ice_get_vf_cfg - return VF configuration
2833 * @netdev: network interface device structure
2834 * @vf_id: VF identifier
2835 * @ivi: VF configuration structure
2836 *
2837 * return VF configuration
2838 */
2839int
2840ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
2841{
2842        struct ice_netdev_priv *np = netdev_priv(netdev);
2843        struct ice_vsi *vsi = np->vsi;
2844        struct ice_pf *pf = vsi->back;
2845        struct ice_vf *vf;
2846
2847        /* validate the request */
2848        if (vf_id >= pf->num_alloc_vfs) {
2849                netdev_err(netdev, "invalid VF id: %d\n", vf_id);
2850                return -EINVAL;
2851        }
2852
2853        vf = &pf->vf[vf_id];
2854        vsi = pf->vsi[vf->lan_vsi_idx];
2855
2856        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2857                netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
2858                return -EBUSY;
2859        }
2860
2861        ivi->vf = vf_id;
2862        ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
2863
2864        /* VF configuration for VLAN and applicable QoS */
2865        ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
2866        ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
2867                    ICE_VLAN_PRIORITY_S;
2868
2869        ivi->trusted = vf->trusted;
2870        ivi->spoofchk = vf->spoofchk;
2871        if (!vf->link_forced)
2872                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
2873        else if (vf->link_up)
2874                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
2875        else
2876                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
2877        ivi->max_tx_rate = vf->tx_rate;
2878        ivi->min_tx_rate = 0;
2879        return 0;
2880}
2881
2882/**
2883 * ice_set_vf_spoofchk - enable or disable VF spoof checking
2884 * @netdev: network interface device structure
2885 * @vf_id: VF identifier
2886 * @ena: flag to enable or disable feature
2887 *
2888 * Enable or disable VF spoof checking
2889 */
2890int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2891{
2892        struct ice_netdev_priv *np = netdev_priv(netdev);
2893        struct ice_vsi *vsi = np->vsi;
2894        struct ice_pf *pf = vsi->back;
2895        struct ice_vsi_ctx *ctx;
2896        enum ice_status status;
2897        struct ice_vf *vf;
2898        int ret = 0;
2899
2900        /* validate the request */
2901        if (vf_id >= pf->num_alloc_vfs) {
2902                netdev_err(netdev, "invalid VF id: %d\n", vf_id);
2903                return -EINVAL;
2904        }
2905
2906        vf = &pf->vf[vf_id];
2907        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2908                netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
2909                return -EBUSY;
2910        }
2911
2912        if (ena == vf->spoofchk) {
2913                dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
2914                        ena ? "ON" : "OFF");
2915                return 0;
2916        }
2917
2918        ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
2919        if (!ctx)
2920                return -ENOMEM;
2921
2922        ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2923
2924        if (ena) {
2925                ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
2926                ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
2927        }
2928
2929        status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
2930        if (status) {
2931                dev_dbg(&pf->pdev->dev,
2932                        "Error %d, failed to update VSI parameters\n", status);
2933                ret = -EIO;
2934                goto out;
2935        }
2936
2937        vf->spoofchk = ena;
2938        vsi->info.sec_flags = ctx->info.sec_flags;
2939        vsi->info.sw_flags2 = ctx->info.sw_flags2;
2940out:
2941        devm_kfree(&pf->pdev->dev, ctx);
2942        return ret;
2943}
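
    /* The update above relies on valid_sections: firmware applies only the
     * context sections flagged there, so the caller need not read-modify-write
     * the rest of the VSI context. A minimal sketch of the same pattern for
     * the VLAN section (illustrative only; assumes the pvid context field and
     * ICE_AQ_VSI_PROP_VLAN_VALID, and elides the VLAN-flag bookkeeping a real
     * caller would also do):
     */
    #if 0	/* illustrative only */
    static int ice_example_set_pvid(struct ice_vsi *vsi, u16 pvid)
    {
            struct ice_vsi_ctx ctx = { 0 };

            ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
            ctx.info.pvid = cpu_to_le16(pvid);

            if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
                    return -EIO;

            /* cache the new value, as ice_set_vf_spoofchk() does above */
            vsi->info.pvid = ctx.info.pvid;
            return 0;
    }
    #endif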
2944
2945/**
2946 * ice_set_vf_mac - program VF MAC address
2947 * @netdev: network interface device structure
2948 * @vf_id: VF identifier
2949 * @mac: MAC address
2950 *
2951 * Program @mac as the VF's MAC address and reset the VF to apply it.
2952 */
2953int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2954{
2955        struct ice_netdev_priv *np = netdev_priv(netdev);
2956        struct ice_vsi *vsi = np->vsi;
2957        struct ice_pf *pf = vsi->back;
2958        struct ice_vf *vf;
2959        int ret = 0;
2960
2961        /* validate the request */
2962        if (vf_id >= pf->num_alloc_vfs) {
2963                netdev_err(netdev, "invalid VF id: %d\n", vf_id);
2964                return -EINVAL;
2965        }
2966
2967        vf = &pf->vf[vf_id];
2968        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2969                netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
2970                return -EBUSY;
2971        }
2972
2973        if (!is_valid_ether_addr(mac)) {
2974                netdev_err(netdev, "%pM not a valid unicast address\n", mac);
2975                return -EINVAL;
2976        }
2977
2978        /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
2979         * flow will use the updated dflt_lan_addr and add a MAC filter
2980         * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
2981         * set the MAC address for this VF.
2982         */
2983        ether_addr_copy(vf->dflt_lan_addr.addr, mac);
2984        vf->pf_set_mac = true;
2985        netdev_info(netdev,
2986                    "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
2987                    vf_id, mac);
2988
2989        ice_vc_dis_vf(vf);
2990        return ret;
2991}
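
    /* The vf_id range check and the ICE_VF_STATE_INIT test have now appeared
     * verbatim in three consecutive handlers. A consolidation sketch
     * (hypothetical helper, not part of this file), using the kernel's usual
     * ERR_PTR() convention from <linux/err.h>:
     */
    #if 0	/* illustrative only */
    static struct ice_vf *ice_pf_vf_by_id(struct ice_pf *pf, int vf_id)
    {
            struct ice_vf *vf;

            if (vf_id >= pf->num_alloc_vfs)
                    return ERR_PTR(-EINVAL);	/* no such VF allocated */

            vf = &pf->vf[vf_id];
            if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states))
                    return ERR_PTR(-EBUSY);	/* VF still in reset */

            return vf;
    }
    #endif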
2992
2993/**
2994 * ice_set_vf_trust - enable/disable trusted VF
2995 * @netdev: network interface device structure
2996 * @vf_id: VF identifier
2997 * @trusted: Boolean value to enable/disable trusted VF
2998 *
2999 * Enable or disable a given VF as trusted
3000 */
3001int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3002{
3003        struct ice_netdev_priv *np = netdev_priv(netdev);
3004        struct ice_vsi *vsi = np->vsi;
3005        struct ice_pf *pf = vsi->back;
3006        struct ice_vf *vf;
3007
3008        /* validate the request */
3009        if (vf_id >= pf->num_alloc_vfs) {
3010                dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
3011                return -EINVAL;
3012        }
3013
3014        vf = &pf->vf[vf_id];
3015        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3016                dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
3017                return -EBUSY;
3018        }
3019
3020        /* Check if already trusted */
3021        if (trusted == vf->trusted)
3022                return 0;
3023
3024        vf->trusted = trusted;
3025        ice_vc_dis_vf(vf);
3026        dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
3027                 vf_id, trusted ? "" : "un");
3028
3029        return 0;
3030}
3031
3032/**
3033 * ice_set_vf_link_state - set VF's link state
3034 * @netdev: network interface device structure
3035 * @vf_id: VF identifier
3036 * @link_state: required link state
3037 *
3038 * Set a VF's link state, irrespective of the physical link state.
3039 */
3040int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3041{
3042        struct ice_netdev_priv *np = netdev_priv(netdev);
3043        struct ice_pf *pf = np->vsi->back;
3044        struct virtchnl_pf_event pfe = { 0 };
3045        struct ice_link_status *ls;
3046        struct ice_vf *vf;
3047        struct ice_hw *hw;
3048
3049        if (vf_id >= pf->num_alloc_vfs) {
3050                dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
3051                return -EINVAL;
3052        }
3053
3054        vf = &pf->vf[vf_id];
3055        hw = &pf->hw;
3056        ls = &pf->hw.port_info->phy.link_info;
3057
3058        if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3059                dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
3060                return -EBUSY;
3061        }
3062
3063        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
3064        pfe.severity = PF_EVENT_SEVERITY_INFO;
3065
3066        switch (link_state) {
3067        case IFLA_VF_LINK_STATE_AUTO:
3068                vf->link_forced = false;
3069                vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
3070                break;
3071        case IFLA_VF_LINK_STATE_ENABLE:
3072                vf->link_forced = true;
3073                vf->link_up = true;
3074                break;
3075        case IFLA_VF_LINK_STATE_DISABLE:
3076                vf->link_forced = true;
3077                vf->link_up = false;
3078                break;
3079        default:
3080                return -EINVAL;
3081        }
3082
3083        if (vf->link_forced)
3084                ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
3085        else
3086                ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
3087
3088        /* Notify the VF of its new link state */
3089        ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
3090                              VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
3091                              sizeof(pfe), NULL);
3092
3093        return 0;
3094}
3095
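    /* These handlers reach the stack through net_device_ops, as wired up in
     * ice_main.c, and are what "ip link set <pf> vf <n> mac|trust|state|
     * spoofchk ..." ultimately invokes. Abbreviated sketch; the full table
     * lives in ice_main.c:
     */
    #if 0	/* abbreviated copy for illustration */
    static const struct net_device_ops ice_netdev_ops = {
            /* ... */
            .ndo_get_vf_config = ice_get_vf_cfg,
            .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
            .ndo_set_vf_mac = ice_set_vf_mac,
            .ndo_set_vf_trust = ice_set_vf_trust,
            .ndo_set_vf_link_state = ice_set_vf_link_state,
            /* ... */
    };
    #endif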