linux/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
#ifdef NETIF_F_HW_VLAN_CTAG_TX
#include <linux/if_vlan.h>
#endif

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_PCI_IOV
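/**
 * ixgbe_alloc_vf_macvlans - build the free list of VF MACVLAN entries
 * @adapter: Pointer to adapter struct
 * @num_vfs: Number of VFs being enabled
 *
 * Carve the RAR entries left over after the PF and per-VF MAC addresses
 * are accounted for into a free list that VFs can later claim for
 * additional MAC filters.  Allocation failure is not fatal; it only
 * means later VF MACVLAN requests will be refused.
 */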
static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
                                           unsigned int num_vfs)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct vf_macvlans *mv_list;
        int num_vf_macvlans, i;

        num_vf_macvlans = hw->mac.num_rar_entries -
                          (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
        if (num_vf_macvlans <= 0)
                return;

        mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
                          GFP_KERNEL);
        if (mv_list) {
                /* Initialize list of VF macvlans */
                INIT_LIST_HEAD(&adapter->vf_mvs.l);
                for (i = 0; i < num_vf_macvlans; i++) {
                        mv_list[i].vf = -1;
                        mv_list[i].free = true;
                        list_add(&mv_list[i].l, &adapter->vf_mvs.l);
                }
                adapter->mv_list = mv_list;
        }
}

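/**
 * __ixgbe_enable_sriov - common SR-IOV bring-up for both enable paths
 * @adapter: Pointer to adapter struct
 * @num_vfs: Number of VFs to allocate control structures for
 *
 * Shared by the module-parameter and sysfs enable paths: sets the
 * SR-IOV/VMDq flags, allocates per-VF state, selects VEB switching and
 * derives the DCB traffic class limits from the VF count.
 */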
static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                                unsigned int num_vfs)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;

        /* Enable VMDq flag so device will be set in VM mode */
        adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
        if (!adapter->ring_feature[RING_F_VMDQ].limit)
                adapter->ring_feature[RING_F_VMDQ].limit = 1;

        /* Allocate memory for per VF control structures */
        adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
                                  GFP_KERNEL);
        if (!adapter->vfinfo)
                return -ENOMEM;

        adapter->num_vfs = num_vfs;

        ixgbe_alloc_vf_macvlans(adapter, num_vfs);
        adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;

        /* Initialize default switching mode VEB */
        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
        adapter->bridge_mode = BRIDGE_MODE_VEB;

        /* limit traffic classes based on VFs enabled */
        if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
                adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
                adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
        } else if (num_vfs < 32) {
                adapter->dcb_cfg.num_tcs.pg_tcs = 4;
                adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
        } else {
                adapter->dcb_cfg.num_tcs.pg_tcs = 1;
                adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
        }

        /* Disable RSC when in SR-IOV mode */
        adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
                             IXGBE_FLAG2_RSC_ENABLED);

        for (i = 0; i < num_vfs; i++) {
                /* enable spoof checking for all VFs */
                adapter->vfinfo[i].spoofchk_enabled = true;

                /* We support VF RSS querying only for 82599 and x540
                 * devices at the moment. These devices share the RSS
                 * indirection table and RSS hash key with the PF, so
                 * we disable querying by default.
                 */
                adapter->vfinfo[i].rss_query_enabled = false;

                /* Untrust all VFs */
                adapter->vfinfo[i].trusted = false;

                /* set the default xcast mode */
                adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
        }

        e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
        return 0;
}

/**
 * ixgbe_get_vfs - Find and take references to all vf devices
 * @adapter: Pointer to adapter struct
 */
static void ixgbe_get_vfs(struct ixgbe_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u16 vendor = pdev->vendor;
        struct pci_dev *vfdev;
        int vf = 0;
        u16 vf_id;
        int pos;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);

        vfdev = pci_get_device(vendor, vf_id, NULL);
        for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) {
                if (!vfdev->is_virtfn)
                        continue;
                if (vfdev->physfn != pdev)
                        continue;
                if (vf >= adapter->num_vfs)
                        continue;
                pci_dev_get(vfdev);
                adapter->vfinfo[vf].vfdev = vfdev;
                ++vf;
        }
}

/* Note this function is called when the user wants to enable SR-IOV
 * VFs using the now deprecated module parameter
 */
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
{
        int pre_existing_vfs = 0;
        unsigned int num_vfs;

        pre_existing_vfs = pci_num_vf(adapter->pdev);
        if (!pre_existing_vfs && !max_vfs)
                return;

        /* If there are pre-existing VFs then we have to force
         * use of that many - override any module parameter value.
         * This may result from the user unloading the PF driver
         * while VFs were assigned to guest VMs or because the VFs
         * have been created via the new PCI SR-IOV sysfs interface.
         */
        if (pre_existing_vfs) {
                num_vfs = pre_existing_vfs;
                dev_warn(&adapter->pdev->dev,
                         "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
        } else {
                int err;
                /*
                 * The 82599 supports up to 64 VFs per physical function
                 * but this implementation limits allocation to 63 so that
                 * basic networking resources are still available to the
                 * physical function.  If the user requests greater than
                 * 63 VFs then it is an error - reset to default of zero.
                 */
                num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);

                err = pci_enable_sriov(adapter->pdev, num_vfs);
                if (err) {
                        e_err(probe, "Failed to enable PCI sriov: %d\n", err);
                        return;
                }
        }

        if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
                ixgbe_get_vfs(adapter);
                return;
        }

        /* If we have gotten to this point then there is no memory available
         * to manage the VF devices - print message and bail.
         */
        e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n");
        ixgbe_disable_sriov(adapter);
}

#endif /* #ifdef CONFIG_PCI_IOV */
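
/**
 * ixgbe_disable_sriov - release VF resources and leave SR-IOV mode
 * @adapter: Pointer to adapter struct
 *
 * Drops the VF device references, frees the per-VF state and, unless
 * VFs are still assigned to guests, disables SR-IOV in the PCI layer
 * and restores the hardware to non-VM operation.
 */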
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
        unsigned int num_vfs = adapter->num_vfs, vf;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 gpie;
        u32 vmdctl;
        int rss;

        /* set num VFs to 0 to prevent access to vfinfo */
        adapter->num_vfs = 0;

        /* put the reference to all of the vf devices */
        for (vf = 0; vf < num_vfs; ++vf) {
                struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;

                if (!vfdev)
                        continue;
                adapter->vfinfo[vf].vfdev = NULL;
                pci_dev_put(vfdev);
        }

        /* free VF control structures */
        kfree(adapter->vfinfo);
        adapter->vfinfo = NULL;

        /* free macvlan list */
        kfree(adapter->mv_list);
        adapter->mv_list = NULL;

        /* if SR-IOV is already disabled then there is nothing to do */
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return 0;

#ifdef CONFIG_PCI_IOV
        /*
         * If our VFs are assigned we cannot shut down SR-IOV
         * without causing issues, so just leave the hardware
         * available but disabled
         */
        if (pci_vfs_assigned(adapter->pdev)) {
                e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
                return -EPERM;
        }
        /* disable iov and allow time for transactions to clear */
        pci_disable_sriov(adapter->pdev);
#endif

        /* turn off device IOV mode */
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

        /* set default pool back to 0 */
        vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
        IXGBE_WRITE_FLUSH(hw);

        /* Clear the VMDq flag so the device returns to non-VM mode */
        if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
                adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
                adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
                rss = min_t(int, ixgbe_max_rss_indices(adapter),
                            num_online_cpus());
        } else {
                rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
        }

        adapter->ring_feature[RING_F_VMDQ].offset = 0;
        adapter->ring_feature[RING_F_RSS].limit = rss;

        /* take a breather then clean up driver data */
        msleep(100);
        return 0;
}

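/**
 * ixgbe_pci_sriov_enable - sysfs path for enabling SR-IOV
 * @dev: PCI device the request arrived on
 * @num_vfs: Number of VFs requested via sriov_numvfs
 *
 * Validates the request against the current traffic class configuration
 * before allocating VF state and enabling SR-IOV in the PCI layer.
 */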
static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
        int err = 0;
        u8 num_tc;
        int i;
        int pre_existing_vfs = pci_num_vf(dev);

        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
                err = ixgbe_disable_sriov(adapter);
        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
                return num_vfs;

        if (err)
                return err;

        /* While the SR-IOV capability structure reports total VFs to be 64,
         * we limit the actual number allocated as below based on two factors.
         *    Num_TCs   MAX_VFs
         *      1         63
         *      <=4       31
         *      >4        15
         * First, we reserve some transmit/receive resources for the PF.
         * Second, VMDQ also uses the same pools that SR-IOV does. We need to
         * account for this, so that we don't accidentally allocate more VFs
         * than we have available pools. The PCI bus driver already checks for
         * other values out of range.
         */
        num_tc = netdev_get_num_tc(adapter->netdev);

        if (num_tc > 4) {
                if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
                        e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
                        return -EPERM;
                }
        } else if ((num_tc > 1) && (num_tc <= 4)) {
                if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
                        e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
                        return -EPERM;
                }
        } else {
                if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
                        e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
                        return -EPERM;
                }
        }

        err = __ixgbe_enable_sriov(adapter, num_vfs);
        if (err)
                return err;

        for (i = 0; i < num_vfs; i++)
                ixgbe_vf_configuration(dev, (i | 0x10000000));

        /* reset before enabling SRIOV to avoid mailbox issues */
        ixgbe_sriov_reinit(adapter);

        err = pci_enable_sriov(dev, num_vfs);
        if (err) {
                e_dev_warn("Failed to enable PCI sriov: %d\n", err);
                return err;
        }
        ixgbe_get_vfs(adapter);

        return num_vfs;
#else
        return 0;
#endif
}

static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
        int err;
#ifdef CONFIG_PCI_IOV
        u32 current_flags = adapter->flags;
#endif

        err = ixgbe_disable_sriov(adapter);

        /* Only reinit if no error and state changed */
#ifdef CONFIG_PCI_IOV
        if (!err && current_flags != adapter->flags)
                ixgbe_sriov_reinit(adapter);
#endif

        return err;
}

int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
        if (num_vfs == 0)
                return ixgbe_pci_sriov_disable(dev);
        else
                return ixgbe_pci_sriov_enable(dev, num_vfs);
}

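/**
 * ixgbe_set_vf_multicasts - program a VF's multicast hash list
 * @adapter: Pointer to adapter struct
 * @msgbuf: Mailbox message; word 0 carries the entry count, words 1+ the hashes
 * @vf: VF index the message came from
 *
 * VFs are limited to the MTA hash table, so each 12-bit hash is saved
 * for later restore and set in the shared MTA registers.
 */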
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
                                   u32 *msgbuf, u32 vf)
{
        int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
                       >> IXGBE_VT_MSGINFO_SHIFT;
        u16 *hash_list = (u16 *)&msgbuf[1];
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
        u32 vector_bit;
        u32 vector_reg;
        u32 mta_reg;
        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

        /* only so many hash values supported */
        entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);

        /*
         * salt away the number of multicast addresses assigned
         * to this VF for later use to restore when the PF multicast
         * list changes
         */
        vfinfo->num_vf_mc_hashes = entries;

        /*
         * VFs are limited to using the MTA hash table for their multicast
         * addresses
         */
        for (i = 0; i < entries; i++)
                vfinfo->vf_mc_hashes[i] = hash_list[i];

        for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
                vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
                vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
                mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
                mta_reg |= BIT(vector_bit);
                IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
        }
        vmolr |= IXGBE_VMOLR_ROMPE;
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

        return 0;
}

#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct vf_data_storage *vfinfo;
        int i, j;
        u32 vector_bit;
        u32 vector_reg;
        u32 mta_reg;

        for (i = 0; i < adapter->num_vfs; i++) {
                u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));

                vfinfo = &adapter->vfinfo[i];
                for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
                        hw->addr_ctrl.mta_in_use++;
                        vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
                        vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
                        mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
                        mta_reg |= BIT(vector_bit);
                        IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
                }

                if (vfinfo->num_vf_mc_hashes)
                        vmolr |= IXGBE_VMOLR_ROMPE;
                else
                        vmolr &= ~IXGBE_VMOLR_ROMPE;
                IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
        }

        /* Restore any VF macvlans */
        ixgbe_full_sync_mac_table(adapter);
}
#endif

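/**
 * ixgbe_set_vf_vlan - add or remove a VLAN filter on behalf of a VF
 * @adapter: Pointer to adapter struct
 * @add: true to add the filter, false to remove it
 * @vid: VLAN ID being updated
 * @vf: pool/VF index the filter applies to
 */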
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
                             u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int err;

        /* If VLAN overlaps with one the PF is currently monitoring make
         * sure that we are able to allocate a VLVF entry.  This may be
         * redundant but it guarantees PF will maintain visibility to
         * the VLAN.
         */
        if (add && test_bit(vid, adapter->active_vlans)) {
                err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
                if (err)
                        return err;
        }

        err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);

        if (add && !err)
                return err;

        /* If we failed to add the VF VLAN or we are removing the VF VLAN
         * we may need to drop the PF pool bit in order to allow us to free
         * up the VLVF resources.
         */
        if (test_bit(vid, adapter->active_vlans) ||
            (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
                ixgbe_update_pf_promisc_vlvf(adapter, vid);

        return err;
}

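/**
 * ixgbe_set_vf_lpe - handle a VF's max frame size (LPE) request
 * @adapter: Pointer to adapter struct
 * @msgbuf: Mailbox message; word 1 carries the requested max frame size
 * @vf: VF index the message came from
 *
 * On 82599 the PF and all VFs must agree on jumbo frame usage, so the
 * VF Rx path is disabled when a legacy VF cannot safely receive.
 */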
static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = msgbuf[1];
        u32 max_frs;

        /*
         * For 82599EB we have to keep all PFs and VFs operating with
         * the same max_frame value in order to avoid sending an oversize
         * frame to a VF.  In order to guarantee this is handled correctly
         * for all cases we have several special exceptions to take into
         * account before we can enable the VF for receive
         */
        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                struct net_device *dev = adapter->netdev;
                int pf_max_frame = dev->mtu + ETH_HLEN;
                u32 reg_offset, vf_shift, vfre;
                s32 err = 0;

#ifdef CONFIG_FCOE
                if (dev->features & NETIF_F_FCOE_MTU)
                        pf_max_frame = max_t(int, pf_max_frame,
                                             IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
                switch (adapter->vfinfo[vf].vf_api) {
                case ixgbe_mbox_api_11:
                case ixgbe_mbox_api_12:
                case ixgbe_mbox_api_13:
                        /* Version 1.1 supports jumbo frames on VFs if PF has
                         * jumbo frames enabled which means legacy VFs are
                         * disabled
                         */
                        if (pf_max_frame > ETH_FRAME_LEN)
                                break;
                        /* fall through */
                default:
                        /* If the PF or VF are running w/ jumbo frames enabled
                         * we need to shut down the VF Rx path as we cannot
                         * support jumbo frames on legacy VFs
                         */
                        if ((pf_max_frame > ETH_FRAME_LEN) ||
                            (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
                                err = -EINVAL;
                        break;
                }

                /* determine VF receive enable location */
                vf_shift = vf % 32;
                reg_offset = vf / 32;

                /* enable or disable receive depending on error */
                vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
                if (err)
                        vfre &= ~BIT(vf_shift);
                else
                        vfre |= BIT(vf_shift);
                IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);

                if (err) {
                        e_err(drv, "VF max_frame %d out of range\n", max_frame);
                        return err;
                }
        }

        /* MTU < 68 is an error and causes problems on some kernels */
        if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
                e_err(drv, "VF max_frame %d out of range\n", max_frame);
                return -EINVAL;
        }

        /* pull current max frame size from hardware */
        max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
        max_frs &= IXGBE_MHADD_MFS_MASK;
        max_frs >>= IXGBE_MHADD_MFS_SHIFT;

        if (max_frs < max_frame) {
                max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
        }

        e_info(hw, "VF requests change max MTU to %d\n", max_frame);

        return 0;
}

static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

        vmolr |= IXGBE_VMOLR_BAM;
        if (aupe)
                vmolr |= IXGBE_VMOLR_AUPE;
        else
                vmolr &= ~IXGBE_VMOLR_AUPE;
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}

static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
}

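/**
 * ixgbe_clear_vf_vlans - remove all VLAN filters owned by a VF
 * @adapter: Pointer to adapter struct
 * @vf: VF index whose pool bits should be cleared
 *
 * Walks the VLVF table backwards, clearing this VF's pool bit and
 * releasing the VLVF/VFTA entries that no other pool still uses.
 */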
static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 vlvfb_mask, pool_mask, i;

        /* create mask for VF and other pools */
        pool_mask = ~BIT(VMDQ_P(0) % 32);
        vlvfb_mask = BIT(vf % 32);

        /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
        for (i = IXGBE_VLVF_ENTRIES; i--;) {
                u32 bits[2], vlvfb, vid, vfta, vlvf;
                u32 word = i * 2 + vf / 32;
                u32 mask;

                vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));

                /* if our bit isn't set we can skip it */
                if (!(vlvfb & vlvfb_mask))
                        continue;

                /* clear our bit from vlvfb */
                vlvfb ^= vlvfb_mask;

                /* create 64b mask to check to see if we should clear VLVF */
                bits[word % 2] = vlvfb;
                bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));

                /* if other pools are present, just remove ourselves */
                if (bits[(VMDQ_P(0) / 32) ^ 1] ||
                    (bits[VMDQ_P(0) / 32] & pool_mask))
                        goto update_vlvfb;

                /* if PF is present, leave VFTA */
                if (bits[0] || bits[1])
                        goto update_vlvf;

                /* if we cannot determine VLAN just remove ourselves */
                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
                if (!vlvf)
                        goto update_vlvfb;

                vid = vlvf & VLAN_VID_MASK;
                mask = BIT(vid % 32);

                /* clear bit from VFTA */
                vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
                if (vfta & mask)
                        IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
update_vlvf:
                /* clear POOL selection enable */
                IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);

                if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
                        vlvfb = 0;
update_vlvfb:
                /* clear pool bits */
                IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
        }
}

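/**
 * ixgbe_set_vf_macvlan - manage a VF's additional MAC filters
 * @adapter: Pointer to adapter struct
 * @vf: VF index making the request
 * @index: 0 clears all filters, otherwise a filter is being added
 * @mac_addr: MAC address for the new filter when @index is non-zero
 */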
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
                                int vf, int index, unsigned char *mac_addr)
{
        struct vf_macvlans *entry;
        struct list_head *pos;
        int retval = 0;

        if (index <= 1) {
                list_for_each(pos, &adapter->vf_mvs.l) {
                        entry = list_entry(pos, struct vf_macvlans, l);
                        if (entry->vf == vf) {
                                entry->vf = -1;
                                entry->free = true;
                                entry->is_macvlan = false;
                                ixgbe_del_mac_filter(adapter,
                                                     entry->vf_macvlan, vf);
                        }
                }
        }

        /*
         * If index was zero then we were asked to clear the uc list
         * for the VF.  We're done.
         */
        if (!index)
                return 0;

        entry = NULL;

        list_for_each(pos, &adapter->vf_mvs.l) {
                entry = list_entry(pos, struct vf_macvlans, l);
                if (entry->free)
                        break;
        }

        /*
         * If we traversed the entire list and didn't find a free entry
         * then we're out of space on the RAR table.  Note that entry may
         * also be NULL if the original memory allocation for the list
         * failed; that is not fatal, but it does mean we cannot support
         * VF requests for MACVLAN because the list management memory was
         * never allocated.
         */
        if (!entry || !entry->free)
                return -ENOSPC;

        retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
        if (retval < 0)
                return retval;

        entry->free = false;
        entry->is_macvlan = true;
        entry->vf = vf;
        memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);

        return 0;
}

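/**
 * ixgbe_vf_reset_event - restore a VF to its default configuration
 * @adapter: Pointer to adapter struct
 * @vf: VF index being reset
 *
 * Clears VLAN, VMOLR, multicast and MAC filter state for the VF and
 * drops the negotiated mailbox API back to version 1.0.
 */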
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
        u8 num_tcs = netdev_get_num_tc(adapter->netdev);

        /* remove VLAN filters belonging to this VF */
        ixgbe_clear_vf_vlans(adapter, vf);

        /* add back PF assigned VLAN or VLAN 0 */
        ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);

        /* reset offloads to defaults */
        ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);

        /* set outgoing tags for VFs */
        if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
                ixgbe_clear_vmvir(adapter, vf);
        } else {
                if (vfinfo->pf_qos || !num_tcs)
                        ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
                                        vfinfo->pf_qos, vf);
                else
                        ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
                                        adapter->default_up, vf);

                if (vfinfo->spoofchk_enabled)
                        hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
        }

        /* reset multicast table array for vf */
        adapter->vfinfo[vf].num_vf_mc_hashes = 0;

        /* Flush and reset the mta with the new values */
        ixgbe_set_rx_mode(adapter->netdev);

        ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
        ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);

        /* reset VF api back to unknown */
        adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
}

static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
                            int vf, unsigned char *mac_addr)
{
        s32 retval;

        ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
        retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
        if (retval >= 0)
                memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr,
                       ETH_ALEN);
        else
                memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN);

        return retval;
}

int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        unsigned int vfn = (event_mask & 0x3f);
        bool enable = ((event_mask & 0x10000000U) != 0);

        if (enable)
                eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);

        return 0;
}

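/**
 * ixgbe_write_qde - set drop enable bits for all queues of a VF pool
 * @adapter: Pointer to adapter struct
 * @vf: VF/pool index whose queues are updated
 * @qde: QDE bits to program (e.g. IXGBE_QDE_ENABLE, IXGBE_QDE_HIDE_VLAN)
 */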
static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
                                   u32 qde)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
        u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
        int i;

        for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
                u32 reg;

                /* flush previous write */
                IXGBE_WRITE_FLUSH(hw);

                /* indicate to hardware that we want to set drop enable */
                reg = IXGBE_QDE_WRITE | qde;
                reg |= i << IXGBE_QDE_IDX_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
        }
}

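/**
 * ixgbe_vf_reset_msg - respond to a VF reset mailbox request
 * @adapter: Pointer to adapter struct
 * @vf: VF index that requested the reset
 *
 * Resets the VF's filters, re-enables its Tx/Rx paths, clears the
 * TDWBAL/TDWBAH registers an FLR leaves behind, then replies with the
 * VF MAC address and multicast filter type.
 */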
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
        struct ixgbe_hw *hw = &adapter->hw;
        unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
        u32 reg, reg_offset, vf_shift;
        u32 msgbuf[4] = {0, 0, 0, 0};
        u8 *addr = (u8 *)(&msgbuf[1]);
        u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
        int i;

        e_info(probe, "VF Reset msg received from vf %d\n", vf);

        /* reset the filters for the device */
        ixgbe_vf_reset_event(adapter, vf);

        /* set vf mac address */
        if (!is_zero_ether_addr(vf_mac))
                ixgbe_set_vf_mac(adapter, vf, vf_mac);

        vf_shift = vf % 32;
        reg_offset = vf / 32;

        /* enable transmit for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
        reg |= BIT(vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

        /* force drop enable for all VF Rx queues */
        ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);

        /* enable receive for vf */
        reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
        reg |= BIT(vf_shift);
        /*
         * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
         * For more info take a look at ixgbe_set_vf_lpe
         */
        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                struct net_device *dev = adapter->netdev;
                int pf_max_frame = dev->mtu + ETH_HLEN;

#ifdef CONFIG_FCOE
                if (dev->features & NETIF_F_FCOE_MTU)
                        pf_max_frame = max_t(int, pf_max_frame,
                                             IXGBE_FCOE_JUMBO_FRAME_SIZE);

#endif /* CONFIG_FCOE */
                if (pf_max_frame > ETH_FRAME_LEN)
                        reg &= ~BIT(vf_shift);
        }
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

        /* enable VF mailbox for further messages */
        adapter->vfinfo[vf].clear_to_send = true;

        /* Enable counting of spoofed packets in the SSVPC register */
        reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
        reg |= BIT(vf_shift);
        IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

        /*
         * Reset the VFs TDWBAL and TDWBAH registers
         * which are not cleared by an FLR
         */
        for (i = 0; i < q_per_pool; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
        }

        /* reply to reset with ack and vf mac address */
        msgbuf[0] = IXGBE_VF_RESET;
        if (!is_zero_ether_addr(vf_mac)) {
                msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
                memcpy(addr, vf_mac, ETH_ALEN);
        } else {
                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
                dev_warn(&adapter->pdev->dev,
                         "VF %d has no MAC address assigned, you may have to assign one manually\n",
                         vf);
        }

        /*
         * Piggyback the multicast filter type so VF can compute the
         * correct vectors
         */
        msgbuf[3] = hw->mac.mc_filter_type;
        ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

        return 0;
}

static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
        u8 *new_mac = ((u8 *)(&msgbuf[1]));

        if (!is_valid_ether_addr(new_mac)) {
                e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
                return -1;
        }

        if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
            !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
                e_warn(drv,
                       "VF %d attempted to override administratively set MAC address\n"
                       "Reload the VF driver to resume operations\n",
                       vf);
                return -1;
        }

        return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}

static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
{
        u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
        u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
        u8 tcs = netdev_get_num_tc(adapter->netdev);

        if (adapter->vfinfo[vf].pf_vlan || tcs) {
                e_warn(drv,
                       "VF %d attempted to override administratively set VLAN configuration\n"
                       "Reload the VF driver to resume operations\n",
                       vf);
                return -1;
        }

        /* VLAN 0 is a special case, don't allow it to be removed */
        if (!vid && !add)
                return 0;

        return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}

static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
                                    u32 *msgbuf, u32 vf)
{
        u8 *new_mac = ((u8 *)(&msgbuf[1]));
        int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
                    IXGBE_VT_MSGINFO_SHIFT;
        int err;

        if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
            index > 0) {
                e_warn(drv,
                       "VF %d requested MACVLAN filter but is administratively denied\n",
                       vf);
                return -1;
        }

        /* A non-zero index indicates the VF is setting a filter */
        if (index) {
                if (!is_valid_ether_addr(new_mac)) {
                        e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
                        return -1;
                }

                /*
                 * If the VF is allowed to set MAC filters then turn off
                 * anti-spoofing to avoid false positives.
                 */
                if (adapter->vfinfo[vf].spoofchk_enabled) {
                        struct ixgbe_hw *hw = &adapter->hw;

                        hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
                        hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
                }
        }

        err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
        if (err == -ENOSPC)
                e_warn(drv,
                       "VF %d has requested a MACVLAN filter but there is no space for it\n",
                       vf);

        return err < 0;
}

static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
                                  u32 *msgbuf, u32 vf)
{
        int api = msgbuf[1];

        switch (api) {
        case ixgbe_mbox_api_10:
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
        case ixgbe_mbox_api_13:
                adapter->vfinfo[vf].vf_api = api;
                return 0;
        default:
                break;
        }

        e_info(drv, "VF %d requested invalid api version %u\n", vf, api);

        return -1;
}

static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
                               u32 *msgbuf, u32 vf)
{
        struct net_device *dev = adapter->netdev;
        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
        unsigned int default_tc = 0;
        u8 num_tcs = netdev_get_num_tc(dev);

        /* verify the PF is supporting the correct APIs */
        switch (adapter->vfinfo[vf].vf_api) {
        case ixgbe_mbox_api_20:
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
        case ixgbe_mbox_api_13:
                break;
        default:
                return -1;
        }

        /* only allow 1 Tx queue for bandwidth limiting */
        msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
        msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);

        /* if TCs > 1 determine which TC belongs to default user priority */
        if (num_tcs > 1)
                default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);

        /* notify VF of need for VLAN tag stripping, and correct queue */
        if (num_tcs)
                msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
        else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
                msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
        else
                msgbuf[IXGBE_VF_TRANS_VLAN] = 0;

        /* notify VF of default queue */
        msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;

        return 0;
}

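/**
 * ixgbe_get_vf_reta - report the RSS redirection table to a VF
 * @adapter: Pointer to adapter struct
 * @msgbuf: Mailbox message; the compressed RETA is written to words 1+
 * @vf: VF index making the request
 */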
static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
        u32 i, j;
        u32 *out_buf = &msgbuf[1];
        const u8 *reta = adapter->rss_indir_tbl;
        u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter);

        /* Check if operation is permitted */
        if (!adapter->vfinfo[vf].rss_query_enabled)
                return -EPERM;

        /* verify the PF is supporting the correct API */
        switch (adapter->vfinfo[vf].vf_api) {
        case ixgbe_mbox_api_13:
        case ixgbe_mbox_api_12:
                break;
        default:
                return -EOPNOTSUPP;
        }

        /* This mailbox command is supported (required) only for 82599 and x540
         * VFs which support up to 4 RSS queues. Therefore we will compress the
         * RETA by saving only 2 bits from each entry. This way we will be able
         * to transfer the whole RETA in a single mailbox operation.
         */
        for (i = 0; i < reta_size / 16; i++) {
                out_buf[i] = 0;
                for (j = 0; j < 16; j++)
                        out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j);
        }

        return 0;
}

static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
                                u32 *msgbuf, u32 vf)
{
        u32 *rss_key = &msgbuf[1];

        /* Check if the operation is permitted */
        if (!adapter->vfinfo[vf].rss_query_enabled)
                return -EPERM;

        /* verify the PF is supporting the correct API */
        switch (adapter->vfinfo[vf].vf_api) {
        case ixgbe_mbox_api_13:
        case ixgbe_mbox_api_12:
                break;
        default:
                return -EOPNOTSUPP;
        }

        memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE);

        return 0;
}

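/**
 * ixgbe_update_vf_xcast_mode - apply a VF's requested receive mode
 * @adapter: Pointer to adapter struct
 * @msgbuf: Mailbox message; word 1 carries the requested xcast mode
 * @vf: VF index making the request
 *
 * Untrusted VFs are silently capped at multicast mode, and promiscuous
 * mode additionally requires the PF itself to be in promiscuous mode.
 */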
static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
                                      u32 *msgbuf, u32 vf)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int xcast_mode = msgbuf[1];
        u32 vmolr, fctrl, disable, enable;

        /* verify the PF is supporting the correct APIs */
        switch (adapter->vfinfo[vf].vf_api) {
        case ixgbe_mbox_api_12:
                /* promisc introduced in 1.3 version */
                if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
                        return -EOPNOTSUPP;
                /* fall through */
        case ixgbe_mbox_api_13:
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
            !adapter->vfinfo[vf].trusted)
                xcast_mode = IXGBEVF_XCAST_MODE_MULTI;

        if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
                goto out;

        switch (xcast_mode) {
        case IXGBEVF_XCAST_MODE_NONE:
                disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
                          IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                enable = 0;
                break;
        case IXGBEVF_XCAST_MODE_MULTI:
                disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
                break;
        case IXGBEVF_XCAST_MODE_ALLMULTI:
                disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
                break;
        case IXGBEVF_XCAST_MODE_PROMISC:
                if (hw->mac.type <= ixgbe_mac_82599EB)
                        return -EOPNOTSUPP;

                fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                if (!(fctrl & IXGBE_FCTRL_UPE)) {
                        /* VF promisc requires PF in promisc */
                        e_warn(drv,
                               "Enabling VF promisc requires PF in promisc\n");
                        return -EPERM;
                }

                disable = 0;
                enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
                         IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                break;
        default:
                return -EOPNOTSUPP;
        }

        vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
        vmolr &= ~disable;
        vmolr |= enable;
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

        adapter->vfinfo[vf].xcast_mode = xcast_mode;

out:
        msgbuf[1] = xcast_mode;

        return 0;
}

1204static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
1205{
1206        u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
1207        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
1208        struct ixgbe_hw *hw = &adapter->hw;
1209        s32 retval;
1210
1211        retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
1212
1213        if (retval) {
1214                pr_err("Error receiving message from VF\n");
1215                return retval;
1216        }
1217
1218        /* this is a message we already processed, do nothing */
1219        if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
1220                return 0;
1221
1222        /* flush the ack before we write any messages back */
1223        IXGBE_WRITE_FLUSH(hw);
1224
1225        if (msgbuf[0] == IXGBE_VF_RESET)
1226                return ixgbe_vf_reset_msg(adapter, vf);
1227
1228        /*
1229         * until the vf completes a virtual function reset it should not be
1230         * allowed to start any configuration.
1231         */
1232        if (!adapter->vfinfo[vf].clear_to_send) {
1233                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
1234                ixgbe_write_mbx(hw, msgbuf, 1, vf);
1235                return 0;
1236        }
1237
1238        switch ((msgbuf[0] & 0xFFFF)) {
1239        case IXGBE_VF_SET_MAC_ADDR:
1240                retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
1241                break;
1242        case IXGBE_VF_SET_MULTICAST:
1243                retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
1244                break;
1245        case IXGBE_VF_SET_VLAN:
1246                retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
1247                break;
1248        case IXGBE_VF_SET_LPE:
1249                retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
1250                break;
1251        case IXGBE_VF_SET_MACVLAN:
1252                retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
1253                break;
1254        case IXGBE_VF_API_NEGOTIATE:
1255                retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
1256                break;
1257        case IXGBE_VF_GET_QUEUES:
1258                retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
1259                break;
1260        case IXGBE_VF_GET_RETA:
1261                retval = ixgbe_get_vf_reta(adapter, msgbuf, vf);
1262                break;
1263        case IXGBE_VF_GET_RSS_KEY:
1264                retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
1265                break;
1266        case IXGBE_VF_UPDATE_XCAST_MODE:
1267                retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
1268                break;
1269        default:
1270                e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
1271                retval = IXGBE_ERR_MBX;
1272                break;
1273        }
1274
1275        /* notify the VF of the results of what it sent us */
1276        if (retval)
1277                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
1278        else
1279                msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
1280
1281        msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
1282
1283        ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
1284
1285        return retval;
1286}
1287
1288static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
1289{
1290        struct ixgbe_hw *hw = &adapter->hw;
1291        u32 msg = IXGBE_VT_MSGTYPE_NACK;
1292
1293        /* if device isn't clear to send it shouldn't be reading either */
1294        if (!adapter->vfinfo[vf].clear_to_send)
1295                ixgbe_write_mbx(hw, &msg, 1, vf);
1296}
1297
1298void ixgbe_msg_task(struct ixgbe_adapter *adapter)
1299{
1300        struct ixgbe_hw *hw = &adapter->hw;
1301        u32 vf;
1302
1303        for (vf = 0; vf < adapter->num_vfs; vf++) {
1304                /* process any reset requests */
1305                if (!ixgbe_check_for_rst(hw, vf))
1306                        ixgbe_vf_reset_event(adapter, vf);
1307
1308                /* process any messages pending */
1309                if (!ixgbe_check_for_msg(hw, vf))
1310                        ixgbe_rcv_msg_from_vf(adapter, vf);
1311
1312                /* process any acks */
1313                if (!ixgbe_check_for_ack(hw, vf))
1314                        ixgbe_rcv_ack_from_vf(adapter, vf);
1315        }
1316}
1317
1318void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
1319{
1320        struct ixgbe_hw *hw = &adapter->hw;
1321
1322        /* disable transmit and receive for all vfs */
1323        IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
1324        IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
1325
1326        IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
1327        IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
1328}
1329
1330static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
1331{
1332        struct ixgbe_hw *hw = &adapter->hw;
1333        u32 ping;
1334
1335        ping = IXGBE_PF_CONTROL_MSG;
1336        if (adapter->vfinfo[vf].clear_to_send)
1337                ping |= IXGBE_VT_MSGTYPE_CTS;
1338        ixgbe_write_mbx(hw, &ping, 1, vf);
1339}
1340
1341void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
1342{
1343        struct ixgbe_hw *hw = &adapter->hw;
1344        u32 ping;
1345        int i;
1346
1347        for (i = 0 ; i < adapter->num_vfs; i++) {
1348                ping = IXGBE_PF_CONTROL_MSG;
1349                if (adapter->vfinfo[i].clear_to_send)
1350                        ping |= IXGBE_VT_MSGTYPE_CTS;
1351                ixgbe_write_mbx(hw, &ping, 1, i);
1352        }
1353}
1354
1355int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1356{
1357        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1358        s32 retval;
1359
1360        if (vf >= adapter->num_vfs)
1361                return -EINVAL;
1362
1363        if (is_valid_ether_addr(mac)) {
1364                dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
1365                         mac, vf);
1366                dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective.");
1367
1368                retval = ixgbe_set_vf_mac(adapter, vf, mac);
1369                if (retval >= 0) {
1370                        adapter->vfinfo[vf].pf_set_mac = true;
1371
1372                        if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1373                                dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n");
1374                                dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n");
1375                        }
1376                } else {
1377                        dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n");
1378                }
1379        } else if (is_zero_ether_addr(mac)) {
1380                unsigned char *vf_mac_addr =
1381                                           adapter->vfinfo[vf].vf_mac_addresses;
1382
1383                /* nothing to do */
1384                if (is_zero_ether_addr(vf_mac_addr))
1385                        return 0;
1386
1387                dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf);
1388
1389                retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf);
1390                if (retval >= 0) {
1391                        adapter->vfinfo[vf].pf_set_mac = false;
1392                        memcpy(vf_mac_addr, mac, ETH_ALEN);
1393                } else {
1394                        dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n");
1395                }
1396        } else {
1397                retval = -EINVAL;
1398        }
1399
1400        return retval;
1401}
1402
1403static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
1404                                  u16 vlan, u8 qos)
1405{
1406        struct ixgbe_hw *hw = &adapter->hw;
1407        int err;
1408
1409        err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
1410        if (err)
1411                goto out;
1412
1413        /* Revoke tagless access via VLAN 0 */
1414        ixgbe_set_vf_vlan(adapter, false, 0, vf);
1415
1416        ixgbe_set_vmvir(adapter, vlan, qos, vf);
1417        ixgbe_set_vmolr(hw, vf, false);
1418
1419        /* enable hide vlan on X550 */
1420        if (hw->mac.type >= ixgbe_mac_X550)
1421                ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE |
1422                                IXGBE_QDE_HIDE_VLAN);
1423
1424        adapter->vfinfo[vf].pf_vlan = vlan;
1425        adapter->vfinfo[vf].pf_qos = qos;
1426        dev_info(&adapter->pdev->dev,
1427                 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
1428        if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1429                dev_warn(&adapter->pdev->dev,
1430                         "The VF VLAN has been set, but the PF device is not up.\n");
1431                dev_warn(&adapter->pdev->dev,
1432                         "Bring the PF device up before attempting to use the VF device.\n");
1433        }
1434
1435out:
1436        return err;
1437}
1438
1439static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
1440{
1441        struct ixgbe_hw *hw = &adapter->hw;
1442        int err;
1443
1444        err = ixgbe_set_vf_vlan(adapter, false,
1445                                adapter->vfinfo[vf].pf_vlan, vf);
1446        /* Restore tagless access via VLAN 0 */
1447        ixgbe_set_vf_vlan(adapter, true, 0, vf);
1448        ixgbe_clear_vmvir(adapter, vf);
1449        ixgbe_set_vmolr(hw, vf, true);
1450
1451        /* disable hide VLAN on X550 and newer MACs */
1452        if (hw->mac.type >= ixgbe_mac_X550)
1453                ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE);
1454
1455        adapter->vfinfo[vf].pf_vlan = 0;
1456        adapter->vfinfo[vf].pf_qos = 0;
1457
1458        return err;
1459}
1460
1461int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1462                          u8 qos, __be16 vlan_proto)
1463{
1464        int err = 0;
1465        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1466
1467        if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
1468                return -EINVAL;
1469        if (vlan_proto != htons(ETH_P_8021Q))
1470                return -EPROTONOSUPPORT;
1471        if (vlan || qos) {
1472                /* Check whether a port VLAN is already set; if so,
1473                 * the old one must be deleted before the new one
1474                 * can be set.  The usage model previously assumed
1475                 * the user would delete the old port VLAN before
1476                 * setting a new one, but that is not necessarily
1477                 * the case.
1478                 */
1479                if (adapter->vfinfo[vf].pf_vlan)
1480                        err = ixgbe_disable_port_vlan(adapter, vf);
1481                if (err)
1482                        goto out;
1483                err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos);
1484        } else {
1485                err = ixgbe_disable_port_vlan(adapter, vf);
1486        }
1487
1488out:
1489        return err;
1490}
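
/*
 * Illustrative usage (not part of the driver): reached via the
 * ndo_set_vf_vlan netdev op, e.g. with iproute2:
 *
 *   ip link set dev eth0 vf 0 vlan 100 qos 3    # set port VLAN 100, QoS 3
 *   ip link set dev eth0 vf 0 vlan 0            # remove the port VLAN
 *
 * "eth0" is a placeholder PF netdev.  Only 802.1Q is accepted, per the
 * vlan_proto check above.
 */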
1491
1492int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
1493{
1494        switch (adapter->link_speed) {
1495        case IXGBE_LINK_SPEED_100_FULL:
1496                return 100;
1497        case IXGBE_LINK_SPEED_1GB_FULL:
1498                return 1000;
1499        case IXGBE_LINK_SPEED_10GB_FULL:
1500                return 10000;
1501        default:
1502                return 0;
1503        }
1504}
1505
1506static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
1507{
1508        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
1509        struct ixgbe_hw *hw = &adapter->hw;
1510        u32 bcnrc_val = 0;
1511        u16 queue, queues_per_pool;
1512        u16 tx_rate = adapter->vfinfo[vf].tx_rate;
1513
1514        if (tx_rate) {
1515                /* start with base link speed value */
1516                bcnrc_val = adapter->vf_rate_link_speed;
1517
1518                /* Calculate the rate factor values to set */
1519                bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1520                bcnrc_val /= tx_rate;
1521
1522                /* clear everything but the rate factor */
1523                bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1524                             IXGBE_RTTBCNRC_RF_DEC_MASK;
1525
1526                /* enable the rate scheduler */
1527                bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1528        }
1529
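        /*
         * Worked example (illustrative values): at a 10GbE link,
         * vf_rate_link_speed is 10000.  For tx_rate = 1000 Mbps:
         *
         *   (10000 << 14) / 1000 = 163840 = 10 << 14
         *
         * i.e. a rate factor of exactly 10.0 (link speed / tx_rate) in the
         * RTTBCNRC 14-bit fixed-point format, enabled via RS_ENA above.
         */
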
1530        /*
1531         * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
1532         * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
1533         * and 0x004 otherwise.
1534         */
1535        switch (hw->mac.type) {
1536        case ixgbe_mac_82599EB:
1537                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
1538                break;
1539        case ixgbe_mac_X540:
1540                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
1541                break;
1542        default:
1543                break;
1544        }
1545
1546        /* determine how many queues per pool based on VMDq mask */
1547        queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
1548
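        /*
         * Illustrative: assuming the 82599 4-queues-per-pool VMDq mask
         * (vmdq->mask == 0x7C), __ALIGN_MASK(1, ~0x7C) rounds 1 up to the
         * pool stride, so queues_per_pool is 4 and the loop below programs
         * reg_idx values vf*4 .. vf*4 + 3 for this VF.
         */
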
1549        /* write value for all Tx queues belonging to VF */
1550        for (queue = 0; queue < queues_per_pool; queue++) {
1551                unsigned int reg_idx = (vf * queues_per_pool) + queue;
1552
1553                IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
1554                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1555        }
1556}
1557
1558void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1559{
1560        int i;
1561
1562        /* VF Tx rate limit was not set */
1563        if (!adapter->vf_rate_link_speed)
1564                return;
1565
1566        if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
1567                adapter->vf_rate_link_speed = 0;
1568                dev_info(&adapter->pdev->dev,
1569                         "Link speed has changed; VF transmit rate limiting is disabled\n");
1570        }
1571
1572        for (i = 0; i < adapter->num_vfs; i++) {
1573                if (!adapter->vf_rate_link_speed)
1574                        adapter->vfinfo[i].tx_rate = 0;
1575
1576                ixgbe_set_vf_rate_limit(adapter, i);
1577        }
1578}
1579
1580int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
1581                        int max_tx_rate)
1582{
1583        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1584        int link_speed;
1585
1586        /* verify VF is active */
1587        if (vf >= adapter->num_vfs)
1588                return -EINVAL;
1589
1590        /* verify link is up */
1591        if (!adapter->link_up)
1592                return -EINVAL;
1593
1594        /* verify we are linked at 10Gbps */
1595        link_speed = ixgbe_link_mbps(adapter);
1596        if (link_speed != 10000)
1597                return -EINVAL;
1598
1599        if (min_tx_rate)
1600                return -EINVAL;
1601
1602        /* a non-zero rate limit must exceed 10 Mbps and may not exceed link speed */
1603        if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
1604                return -EINVAL;
1605
1606        /* store values */
1607        adapter->vf_rate_link_speed = link_speed;
1608        adapter->vfinfo[vf].tx_rate = max_tx_rate;
1609
1610        /* update hardware configuration */
1611        ixgbe_set_vf_rate_limit(adapter, vf);
1612
1613        return 0;
1614}
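
/*
 * Illustrative usage (not part of the driver): reached via the
 * ndo_set_vf_rate netdev op, e.g.
 *
 *   ip link set dev eth0 vf 0 max_tx_rate 1000   # cap VF 0 at 1000 Mbps
 *
 * "eth0" is a placeholder.  A non-zero min_tx_rate is rejected above, and
 * the cap must be above 10 Mbps and no higher than the 10Gbps link speed.
 */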
1615
1616int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
1617{
1618        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1619        struct ixgbe_hw *hw = &adapter->hw;
1620
1621        if (vf >= adapter->num_vfs)
1622                return -EINVAL;
1623
1624        adapter->vfinfo[vf].spoofchk_enabled = setting;
1625
1626        /* configure MAC spoofing */
1627        hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
1628
1629        /* configure VLAN spoofing */
1630        hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
1631
1632        /* Ensure the LLDP and FC (pause) EtherType filters are set up for Tx
1633         * anti-spoofing before enabling EtherType anti-spoofing on this VF
1634         */
1635        if (hw->mac.ops.set_ethertype_anti_spoofing) {
1636                IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
1637                                (IXGBE_ETQF_FILTER_EN    |
1638                                 IXGBE_ETQF_TX_ANTISPOOF |
1639                                 IXGBE_ETH_P_LLDP));
1640
1641                IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
1642                                (IXGBE_ETQF_FILTER_EN |
1643                                 IXGBE_ETQF_TX_ANTISPOOF |
1644                                 ETH_P_PAUSE));
1645
1646                hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
1647        }
1648
1649        return 0;
1650}
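
/*
 * Illustrative usage (not part of the driver): reached via the
 * ndo_set_vf_spoofchk netdev op, e.g.
 *
 *   ip link set dev eth0 vf 0 spoofchk on
 *
 * which enables MAC and VLAN anti-spoofing for the VF and, on MACs that
 * implement the hook, blocks LLDP and pause-frame EtherTypes from the VF.
 */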
1651
1652int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
1653                                  bool setting)
1654{
1655        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1656
1657        /* This operation is currently supported only for 82599 and X540
1658         * devices.
1659         */
1660        if (adapter->hw.mac.type < ixgbe_mac_82599EB ||
1661            adapter->hw.mac.type >= ixgbe_mac_X550)
1662                return -EOPNOTSUPP;
1663
1664        if (vf >= adapter->num_vfs)
1665                return -EINVAL;
1666
1667        adapter->vfinfo[vf].rss_query_enabled = setting;
1668
1669        return 0;
1670}
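
/*
 * Illustrative usage (not part of the driver): reached via the
 * ndo_set_vf_rss_query_en netdev op; with an iproute2 new enough to
 * support it, something like
 *
 *   ip link set dev eth0 vf 0 query_rss on
 *
 * lets the VF driver request the RSS configuration over the PF-VF mailbox.
 */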
1671
1672int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
1673{
1674        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1675
1676        if (vf >= adapter->num_vfs)
1677                return -EINVAL;
1678
1679        /* nothing to do */
1680        if (adapter->vfinfo[vf].trusted == setting)
1681                return 0;
1682
1683        adapter->vfinfo[vf].trusted = setting;
1684
1685        /* reset VF to reconfigure features */
1686        adapter->vfinfo[vf].clear_to_send = false;
1687        ixgbe_ping_vf(adapter, vf);
1688
1689        e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
1690
1691        return 0;
1692}
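
/*
 * Illustrative usage (not part of the driver): reached via the
 * ndo_set_vf_trust netdev op, e.g.
 *
 *   ip link set dev eth0 vf 0 trust on
 *
 * Dropping clear_to_send and pinging the VF forces its driver through a
 * reset so features are renegotiated under the new trust level.
 */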
1693
1694int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1695                            int vf, struct ifla_vf_info *ivi)
1696{
1697        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1698        if (vf >= adapter->num_vfs)
1699                return -EINVAL;
1700        ivi->vf = vf;
1701        memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
1702        ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
1703        ivi->min_tx_rate = 0;
1704        ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1705        ivi->qos = adapter->vfinfo[vf].pf_qos;
1706        ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
1707        ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
1708        ivi->trusted = adapter->vfinfo[vf].trusted;
1709        return 0;
1710}
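
/*
 * Illustrative (not part of the driver): ndo_get_vf_config backs the
 * per-VF lines of "ip link show dev eth0"; a report might look like
 *
 *   vf 0 MAC 02:11:22:33:44:55, vlan 100, qos 3, spoof checking on, trust off
 *
 * mirroring the vfinfo fields copied above (all values are placeholders).
 */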
1711