linux/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
   1/*******************************************************************************
   2
   3  Intel 10 Gigabit PCI Express Linux driver
   4  Copyright(c) 1999 - 2014 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29#include <linux/types.h>
  30#include <linux/module.h>
  31#include <linux/pci.h>
  32#include <linux/netdevice.h>
  33#include <linux/vmalloc.h>
  34#include <linux/string.h>
  35#include <linux/in.h>
  36#include <linux/ip.h>
  37#include <linux/tcp.h>
  38#include <linux/ipv6.h>
  39#ifdef NETIF_F_HW_VLAN_CTAG_TX
  40#include <linux/if_vlan.h>
  41#endif
  42
  43#include "ixgbe.h"
  44#include "ixgbe_type.h"
  45#include "ixgbe_sriov.h"
  46
  47#ifdef CONFIG_PCI_IOV
  48static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
  49{
  50        struct ixgbe_hw *hw = &adapter->hw;
  51        int num_vf_macvlans, i;
  52        struct vf_macvlans *mv_list;
  53
  54        adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
  55        e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
  56
  57        /* Enable VMDq flag so device will be set in VM mode */
  58        adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
  59        if (!adapter->ring_feature[RING_F_VMDQ].limit)
  60                adapter->ring_feature[RING_F_VMDQ].limit = 1;
  61        adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
  62
  63        num_vf_macvlans = hw->mac.num_rar_entries -
   64                          (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
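             /* e.g. assuming the typical 128 receive address (RAR) entries
              * on 82599/X540 and IXGBE_MAX_PF_MACVLANS == 15, a 16-VF setup
              * leaves 128 - (15 + 1 + 16) = 96 entries for VF macvlans
              */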
  65
  66        adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
  67                                             sizeof(struct vf_macvlans),
  68                                             GFP_KERNEL);
  69        if (mv_list) {
  70                /* Initialize list of VF macvlans */
  71                INIT_LIST_HEAD(&adapter->vf_mvs.l);
  72                for (i = 0; i < num_vf_macvlans; i++) {
  73                        mv_list->vf = -1;
  74                        mv_list->free = true;
  75                        list_add(&mv_list->l, &adapter->vf_mvs.l);
  76                        mv_list++;
  77                }
  78        }
  79
  80        /* Initialize default switching mode VEB */
  81        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
  82        adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
  83
   84        /* Allocate memory for the per-VF control structures.  SR-IOV
   85         * itself is enabled by the callers via pci_enable_sriov().
   86         */
  87        adapter->vfinfo =
  88                kcalloc(adapter->num_vfs,
  89                        sizeof(struct vf_data_storage), GFP_KERNEL);
  90        if (adapter->vfinfo) {
   91                /* limit traffic classes based on VFs enabled */
  92                if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
  93                    (adapter->num_vfs < 16)) {
  94                        adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
  95                        adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
  96                } else if (adapter->num_vfs < 32) {
  97                        adapter->dcb_cfg.num_tcs.pg_tcs = 4;
  98                        adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
  99                } else {
 100                        adapter->dcb_cfg.num_tcs.pg_tcs = 1;
 101                        adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
 102                }
 103
 104                /* We do not support RSS w/ SR-IOV */
 105                adapter->ring_feature[RING_F_RSS].limit = 1;
 106
 107                /* Disable RSC when in SR-IOV mode */
 108                adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
 109                                     IXGBE_FLAG2_RSC_ENABLED);
 110
 111                /* enable spoof checking for all VFs */
 112                for (i = 0; i < adapter->num_vfs; i++)
 113                        adapter->vfinfo[i].spoofchk_enabled = true;
 114                return 0;
 115        }
 116
 117        return -ENOMEM;
 118}
 119
 120/* Note this function is called when the user wants to enable SR-IOV
 121 * VFs using the now deprecated module parameter
 122 */
 123void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
 124{
 125        int pre_existing_vfs = 0;
 126
 127        pre_existing_vfs = pci_num_vf(adapter->pdev);
 128        if (!pre_existing_vfs && !adapter->num_vfs)
 129                return;
 130
 131        /* If there are pre-existing VFs then we have to force
  132         * use of that many - override any module parameter value.
 133         * This may result from the user unloading the PF driver
 134         * while VFs were assigned to guest VMs or because the VFs
 135         * have been created via the new PCI SR-IOV sysfs interface.
 136         */
 137        if (pre_existing_vfs) {
 138                adapter->num_vfs = pre_existing_vfs;
 139                dev_warn(&adapter->pdev->dev,
 140                         "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
 141        } else {
 142                int err;
 143                /*
 144                 * The 82599 supports up to 64 VFs per physical function
 145                 * but this implementation limits allocation to 63 so that
 146                 * basic networking resources are still available to the
  147                 * physical function.  If the user requests more than
  148                 * 63 VFs, the request is clamped to that limit.
 149                 */
  150                adapter->num_vfs = min_t(unsigned int, adapter->num_vfs,
                                              IXGBE_MAX_VFS_DRV_LIMIT);
 151
 152                err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
 153                if (err) {
 154                        e_err(probe, "Failed to enable PCI sriov: %d\n", err);
 155                        adapter->num_vfs = 0;
 156                        return;
 157                }
 158        }
 159
 160        if (!__ixgbe_enable_sriov(adapter))
 161                return;
 162
 163        /* If we have gotten to this point then there is no memory available
 164         * to manage the VF devices - print message and bail.
 165         */
 166        e_err(probe, "Unable to allocate memory for VF Data Storage - "
 167              "SRIOV disabled\n");
 168        ixgbe_disable_sriov(adapter);
 169}
 170
 171#endif /* #ifdef CONFIG_PCI_IOV */
 172int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 173{
 174        struct ixgbe_hw *hw = &adapter->hw;
 175        u32 gpie;
 176        u32 vmdctl;
 177        int rss;
 178
 179        /* set num VFs to 0 to prevent access to vfinfo */
 180        adapter->num_vfs = 0;
 181
 182        /* free VF control structures */
 183        kfree(adapter->vfinfo);
 184        adapter->vfinfo = NULL;
 185
 186        /* free macvlan list */
 187        kfree(adapter->mv_list);
 188        adapter->mv_list = NULL;
 189
 190        /* if SR-IOV is already disabled then there is nothing to do */
 191        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 192                return 0;
 193
 194#ifdef CONFIG_PCI_IOV
 195        /*
 196         * If our VFs are assigned we cannot shut down SR-IOV
 197         * without causing issues, so just leave the hardware
 198         * available but disabled
 199         */
 200        if (pci_vfs_assigned(adapter->pdev)) {
 201                e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
 202                return -EPERM;
 203        }
 204        /* disable iov and allow time for transactions to clear */
 205        pci_disable_sriov(adapter->pdev);
 206#endif
 207
 208        /* turn off device IOV mode */
 209        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
 210        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
 211        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
 212        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 213
 214        /* set default pool back to 0 */
 215        vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
 216        vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
 217        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
 218        IXGBE_WRITE_FLUSH(hw);
 219
  220        /* Disable VMDq flag so device is taken out of VM mode */
 221        if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
 222                adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
 223                adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 224                rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
 225        } else {
 226                rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
 227        }
 228
 229        adapter->ring_feature[RING_F_VMDQ].offset = 0;
 230        adapter->ring_feature[RING_F_RSS].limit = rss;
 231
  232        /* take a breather so any pending transactions can clear */
 233        msleep(100);
 234        return 0;
 235}
 236
 237static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
 238{
 239#ifdef CONFIG_PCI_IOV
 240        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
 241        int err = 0;
 242        int i;
 243        int pre_existing_vfs = pci_num_vf(dev);
 244
 245        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 246                err = ixgbe_disable_sriov(adapter);
 247        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
 248                return num_vfs;
 249
 250        if (err)
 251                return err;
 252
 253        /* While the SR-IOV capability structure reports total VFs to be 64,
 254         * we have to limit the actual number allocated based on two factors.
 255         * First, we reserve some transmit/receive resources for the PF.
 256         * Second, VMDQ also uses the same pools that SR-IOV does. We need to
 257         * account for this, so that we don't accidentally allocate more VFs
 258         * than we have available pools. The PCI bus driver already checks for
 259         * other values out of range.
 260         */
 261        if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS)
 262                return -EPERM;
 263
 264        adapter->num_vfs = num_vfs;
 265
 266        err = __ixgbe_enable_sriov(adapter);
 267        if (err)
  268                return err;
 269
 270        for (i = 0; i < adapter->num_vfs; i++)
 271                ixgbe_vf_configuration(dev, (i | 0x10000000));
 272
 273        err = pci_enable_sriov(dev, num_vfs);
 274        if (err) {
 275                e_dev_warn("Failed to enable PCI sriov: %d\n", err);
 276                return err;
 277        }
 278        ixgbe_sriov_reinit(adapter);
 279
 280        return num_vfs;
 281#else
 282        return 0;
 283#endif
 284}
 285
 286static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
 287{
 288        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
 289        int err;
 290#ifdef CONFIG_PCI_IOV
 291        u32 current_flags = adapter->flags;
 292#endif
 293
 294        err = ixgbe_disable_sriov(adapter);
 295
 296        /* Only reinit if no error and state changed */
 297#ifdef CONFIG_PCI_IOV
 298        if (!err && current_flags != adapter->flags)
 299                ixgbe_sriov_reinit(adapter);
 300#endif
 301
 302        return err;
 303}
 304
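     /* ixgbe_pci_sriov_configure() below is wired up as the driver's PCI
      * sriov_configure callback, so it typically runs when an administrator
      * writes a VF count to sysfs, e.g. (device path is illustrative):
      *
      *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
      *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
      *
      * A value of 0 disables SR-IOV; a non-zero value requests that many VFs.
      */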
 305int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
 306{
 307        if (num_vfs == 0)
 308                return ixgbe_pci_sriov_disable(dev);
 309        else
 310                return ixgbe_pci_sriov_enable(dev, num_vfs);
 311}
 312
 313static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
 314                                   u32 *msgbuf, u32 vf)
 315{
 316        int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
 317                       >> IXGBE_VT_MSGINFO_SHIFT;
 318        u16 *hash_list = (u16 *)&msgbuf[1];
 319        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
 320        struct ixgbe_hw *hw = &adapter->hw;
 321        int i;
 322        u32 vector_bit;
 323        u32 vector_reg;
 324        u32 mta_reg;
 325        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 326
 327        /* only so many hash values supported */
 328        entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
 329
 330        /*
  331         * salt away the number of multicast addresses assigned
  332         * to this VF for later use, to restore when the PF multicast
  333         * list changes
 334         */
 335        vfinfo->num_vf_mc_hashes = entries;
 336
 337        /*
 338         * VFs are limited to using the MTA hash table for their multicast
 339         * addresses
 340         */
 341        for (i = 0; i < entries; i++) {
 342                vfinfo->vf_mc_hashes[i] = hash_list[i];
 343        }
 344
 345        for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
 346                vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
 347                vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
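                     /* the low 12 bits of the hash index a 128 x 32-bit MTA:
                      * the upper 7 bits pick the register, the lower 5 the bit
                      */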
 348                mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
 349                mta_reg |= (1 << vector_bit);
 350                IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
 351        }
 352        vmolr |= IXGBE_VMOLR_ROMPE;
 353        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 354
 355        return 0;
 356}
 357
 358#ifdef CONFIG_PCI_IOV
 359void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
 360{
 361        struct ixgbe_hw *hw = &adapter->hw;
 362        struct vf_data_storage *vfinfo;
 363        int i, j;
 364        u32 vector_bit;
 365        u32 vector_reg;
 366        u32 mta_reg;
 367
 368        for (i = 0; i < adapter->num_vfs; i++) {
 369                u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
 370                vfinfo = &adapter->vfinfo[i];
 371                for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
 372                        hw->addr_ctrl.mta_in_use++;
 373                        vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
 374                        vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
 375                        mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
 376                        mta_reg |= (1 << vector_bit);
 377                        IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
 378                }
 379
 380                if (vfinfo->num_vf_mc_hashes)
 381                        vmolr |= IXGBE_VMOLR_ROMPE;
 382                else
 383                        vmolr &= ~IXGBE_VMOLR_ROMPE;
 384                IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
 385        }
 386
 387        /* Restore any VF macvlans */
 388        ixgbe_full_sync_mac_table(adapter);
 389}
 390#endif
 391
 392static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
 393                             u32 vf)
 394{
 395        /* VLAN 0 is a special case, don't allow it to be removed */
 396        if (!vid && !add)
 397                return 0;
 398
 399        return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 400}
 401
 402static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 403{
 404        struct ixgbe_hw *hw = &adapter->hw;
 405        int max_frame = msgbuf[1];
 406        u32 max_frs;
 407
 408        /*
 409         * For 82599EB we have to keep all PFs and VFs operating with
 410         * the same max_frame value in order to avoid sending an oversize
 411         * frame to a VF.  In order to guarantee this is handled correctly
 412         * for all cases we have several special exceptions to take into
 413         * account before we can enable the VF for receive
 414         */
 415        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 416                struct net_device *dev = adapter->netdev;
 417                int pf_max_frame = dev->mtu + ETH_HLEN;
 418                u32 reg_offset, vf_shift, vfre;
 419                s32 err = 0;
 420
 421#ifdef CONFIG_FCOE
 422                if (dev->features & NETIF_F_FCOE_MTU)
 423                        pf_max_frame = max_t(int, pf_max_frame,
 424                                             IXGBE_FCOE_JUMBO_FRAME_SIZE);
 425
 426#endif /* CONFIG_FCOE */
 427                switch (adapter->vfinfo[vf].vf_api) {
 428                case ixgbe_mbox_api_11:
 429                        /*
  430                         * Version 1.1 supports jumbo frames on VFs if the
  431                         * PF has jumbo frames enabled; legacy VFs are then
  432                         * already disabled, so there is nothing to check
 433                         */
 434                        if (pf_max_frame > ETH_FRAME_LEN)
 435                                break;
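                             /* fall through - non-jumbo PF, apply legacy checks */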
 436                default:
 437                        /*
 438                         * If the PF or VF are running w/ jumbo frames enabled
 439                         * we need to shut down the VF Rx path as we cannot
 440                         * support jumbo frames on legacy VFs
 441                         */
 442                        if ((pf_max_frame > ETH_FRAME_LEN) ||
 443                            (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
 444                                err = -EINVAL;
 445                        break;
 446                }
 447
 448                /* determine VF receive enable location */
 449                vf_shift = vf % 32;
 450                reg_offset = vf / 32;
 451
 452                /* enable or disable receive depending on error */
 453                vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
 454                if (err)
 455                        vfre &= ~(1 << vf_shift);
 456                else
 457                        vfre |= 1 << vf_shift;
 458                IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
 459
 460                if (err) {
 461                        e_err(drv, "VF max_frame %d out of range\n", max_frame);
 462                        return err;
 463                }
 464        }
 465
  466        /* reject frames larger than the maximum supported jumbo frame size */
 467        if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
 468                e_err(drv, "VF max_frame %d out of range\n", max_frame);
 469                return -EINVAL;
 470        }
 471
 472        /* pull current max frame size from hardware */
 473        max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 474        max_frs &= IXGBE_MHADD_MFS_MASK;
 475        max_frs >>= IXGBE_MHADD_MFS_SHIFT;
 476
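             /* MAXFRS is a single device-wide limit shared by the PF and all
              * VFs, so it is only ever grown here, never shrunk
              */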
 477        if (max_frs < max_frame) {
 478                max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
 479                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
 480        }
 481
 482        e_info(hw, "VF requests change max MTU to %d\n", max_frame);
 483
 484        return 0;
 485}
 486
 487static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 488{
 489        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 490        vmolr |= IXGBE_VMOLR_BAM;
 491        if (aupe)
 492                vmolr |= IXGBE_VMOLR_AUPE;
 493        else
 494                vmolr &= ~IXGBE_VMOLR_AUPE;
 495        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 496}
 497
 498static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
 499{
 500        struct ixgbe_hw *hw = &adapter->hw;
 501
 502        IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
  503}

  504static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 505{
 506        struct ixgbe_hw *hw = &adapter->hw;
 507        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
 508        u8 num_tcs = netdev_get_num_tc(adapter->netdev);
 509
 510        /* add PF assigned VLAN or VLAN 0 */
 511        ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
 512
 513        /* reset offloads to defaults */
 514        ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
 515
 516        /* set outgoing tags for VFs */
 517        if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
 518                ixgbe_clear_vmvir(adapter, vf);
 519        } else {
 520                if (vfinfo->pf_qos || !num_tcs)
 521                        ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
 522                                        vfinfo->pf_qos, vf);
 523                else
 524                        ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
 525                                        adapter->default_up, vf);
 526
 527                if (vfinfo->spoofchk_enabled)
 528                        hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
 529        }
 530
 531        /* reset multicast table array for vf */
 532        adapter->vfinfo[vf].num_vf_mc_hashes = 0;
 533
 534        /* Flush and reset the mta with the new values */
 535        ixgbe_set_rx_mode(adapter->netdev);
 536
 537        ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
 538
 539        /* reset VF api back to unknown */
 540        adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
 541}
 542
 543static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
 544                            int vf, unsigned char *mac_addr)
 545{
 546        ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
 547        memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
 548        ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
 549
 550        return 0;
 551}
 552
 553static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 554                                int vf, int index, unsigned char *mac_addr)
 555{
 556        struct list_head *pos;
 557        struct vf_macvlans *entry;
 558
 559        if (index <= 1) {
 560                list_for_each(pos, &adapter->vf_mvs.l) {
 561                        entry = list_entry(pos, struct vf_macvlans, l);
 562                        if (entry->vf == vf) {
 563                                entry->vf = -1;
 564                                entry->free = true;
 565                                entry->is_macvlan = false;
 566                                ixgbe_del_mac_filter(adapter,
 567                                                     entry->vf_macvlan, vf);
 568                        }
 569                }
 570        }
 571
 572        /*
 573         * If index was zero then we were asked to clear the uc list
 574         * for the VF.  We're done.
 575         */
 576        if (!index)
 577                return 0;
 578
 579        entry = NULL;
 580
 581        list_for_each(pos, &adapter->vf_mvs.l) {
 582                entry = list_entry(pos, struct vf_macvlans, l);
 583                if (entry->free)
 584                        break;
 585        }
 586
 587        /*
 588         * If we traversed the entire list and didn't find a free entry
  589         * then we're out of space in the RAR table.  The entry may
  590         * also be NULL if the original list allocation failed; that
  591         * is not fatal, but it does mean the PF cannot honor VF
  592         * MACVLAN requests because there is no memory for the list
  593         * management they require.
 594         */
 595        if (!entry || !entry->free)
 596                return -ENOSPC;
 597
 598        entry->free = false;
 599        entry->is_macvlan = true;
 600        entry->vf = vf;
 601        memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
 602
 603        ixgbe_add_mac_filter(adapter, mac_addr, vf);
 604
 605        return 0;
 606}
 607
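     /* ixgbe_vf_configuration - per-VF hook run while VFs are being enabled.
      * The low 6 bits of event_mask carry the VF number and bit 28
      * (0x10000000) flags enable vs. disable; see the (i | 0x10000000)
      * callers above.
      */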
 608int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 609{
 610        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 611        unsigned int vfn = (event_mask & 0x3f);
 612
 613        bool enable = ((event_mask & 0x10000000U) != 0);
 614
 615        if (enable)
 616                eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
 617
 618        return 0;
 619}
 620
 621static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 622{
 623        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 624        struct ixgbe_hw *hw = &adapter->hw;
 625        unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
 626        u32 reg, reg_offset, vf_shift;
 627        u32 msgbuf[4] = {0, 0, 0, 0};
 628        u8 *addr = (u8 *)(&msgbuf[1]);
 629        u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
 630        int i;
 631
 632        e_info(probe, "VF Reset msg received from vf %d\n", vf);
 633
 634        /* reset the filters for the device */
 635        ixgbe_vf_reset_event(adapter, vf);
 636
 637        /* set vf mac address */
 638        if (!is_zero_ether_addr(vf_mac))
 639                ixgbe_set_vf_mac(adapter, vf, vf_mac);
 640
 641        vf_shift = vf % 32;
 642        reg_offset = vf / 32;
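             /* VFTE/VFRE are pairs of 32-bit registers covering up to 64 VFs;
              * reg_offset picks the register, vf_shift the bit within it
              */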
 643
 644        /* enable transmit for vf */
 645        reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
 646        reg |= 1 << vf_shift;
 647        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 648
 649        /* force drop enable for all VF Rx queues */
 650        for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
 651                /* flush previous write */
 652                IXGBE_WRITE_FLUSH(hw);
 653
 654                /* indicate to hardware that we want to set drop enable */
 655                reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
  656                reg |= i << IXGBE_QDE_IDX_SHIFT;
 657                IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
 658        }
 659
 660        /* enable receive for vf */
 661        reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
 662        reg |= 1 << vf_shift;
 663        /*
 664         * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
 665         * For more info take a look at ixgbe_set_vf_lpe
 666         */
 667        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 668                struct net_device *dev = adapter->netdev;
 669                int pf_max_frame = dev->mtu + ETH_HLEN;
 670
 671#ifdef CONFIG_FCOE
 672                if (dev->features & NETIF_F_FCOE_MTU)
 673                        pf_max_frame = max_t(int, pf_max_frame,
 674                                             IXGBE_FCOE_JUMBO_FRAME_SIZE);
 675
 676#endif /* CONFIG_FCOE */
 677                if (pf_max_frame > ETH_FRAME_LEN)
 678                        reg &= ~(1 << vf_shift);
 679        }
 680        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
 681
 682        /* enable VF mailbox for further messages */
 683        adapter->vfinfo[vf].clear_to_send = true;
 684
 685        /* Enable counting of spoofed packets in the SSVPC register */
 686        reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
 687        reg |= (1 << vf_shift);
 688        IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
 689
 690        /*
  691         * Reset the VF's TDWBAL and TDWBAH registers,
 692         * which are not cleared by an FLR
 693         */
 694        for (i = 0; i < q_per_pool; i++) {
 695                IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
 696                IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
 697        }
 698
 699        /* reply to reset with ack and vf mac address */
 700        msgbuf[0] = IXGBE_VF_RESET;
 701        if (!is_zero_ether_addr(vf_mac)) {
 702                msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
 703                memcpy(addr, vf_mac, ETH_ALEN);
 704        } else {
 705                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
 706                dev_warn(&adapter->pdev->dev,
 707                         "VF %d has no MAC address assigned, you may have to assign one manually\n",
 708                         vf);
 709        }
 710
 711        /*
 712         * Piggyback the multicast filter type so VF can compute the
 713         * correct vectors
 714         */
 715        msgbuf[3] = hw->mac.mc_filter_type;
 716        ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
 717
 718        return 0;
 719}
 720
 721static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
 722                                 u32 *msgbuf, u32 vf)
 723{
 724        u8 *new_mac = ((u8 *)(&msgbuf[1]));
 725
 726        if (!is_valid_ether_addr(new_mac)) {
 727                e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
 728                return -1;
 729        }
 730
 731        if (adapter->vfinfo[vf].pf_set_mac &&
 732            !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
 733                e_warn(drv,
 734                       "VF %d attempted to override administratively set MAC address\n"
 735                       "Reload the VF driver to resume operations\n",
 736                       vf);
 737                return -1;
 738        }
 739
 740        return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
 741}
 742
 743static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
 744{
 745        u32 vlvf;
 746        s32 regindex;
 747
  748        /* shortcut the special case */
 749        if (vlan == 0)
 750                return 0;
 751
 752        /* Search for the vlan id in the VLVF entries */
 753        for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
 754                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
 755                if ((vlvf & VLAN_VID_MASK) == vlan)
 756                        break;
 757        }
 758
 759        /* Return a negative value if not found */
 760        if (regindex >= IXGBE_VLVF_ENTRIES)
 761                regindex = -1;
 762
 763        return regindex;
 764}
 765
 766static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
 767                                 u32 *msgbuf, u32 vf)
 768{
 769        struct ixgbe_hw *hw = &adapter->hw;
 770        int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
 771        int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
 772        int err;
 773        s32 reg_ndx;
 774        u32 vlvf;
 775        u32 bits;
 776        u8 tcs = netdev_get_num_tc(adapter->netdev);
 777
 778        if (adapter->vfinfo[vf].pf_vlan || tcs) {
 779                e_warn(drv,
 780                       "VF %d attempted to override administratively set VLAN configuration\n"
 781                       "Reload the VF driver to resume operations\n",
 782                       vf);
 783                return -1;
 784        }
 785
 786        if (add)
 787                adapter->vfinfo[vf].vlan_count++;
 788        else if (adapter->vfinfo[vf].vlan_count)
 789                adapter->vfinfo[vf].vlan_count--;
 790
 791        /* in case of promiscuous mode any VLAN filter set for a VF must
 792         * also have the PF pool added to it.
 793         */
 794        if (add && adapter->netdev->flags & IFF_PROMISC)
 795                err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
 796
 797        err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
 798        if (!err && adapter->vfinfo[vf].spoofchk_enabled)
 799                hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
 800
 801        /* Go through all the checks to see if the VLAN filter should
 802         * be wiped completely.
 803         */
 804        if (!add && adapter->netdev->flags & IFF_PROMISC) {
 805                reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
 806                if (reg_ndx < 0)
 807                        return err;
 808                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
 809                /* See if any other pools are set for this VLAN filter
 810                 * entry other than the PF.
 811                 */
 812                if (VMDQ_P(0) < 32) {
 813                        bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
 814                        bits &= ~(1 << VMDQ_P(0));
 815                        bits |= IXGBE_READ_REG(hw,
 816                                               IXGBE_VLVFB(reg_ndx * 2) + 1);
 817                } else {
 818                        bits = IXGBE_READ_REG(hw,
 819                                              IXGBE_VLVFB(reg_ndx * 2) + 1);
 820                        bits &= ~(1 << (VMDQ_P(0) - 32));
 821                        bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
 822                }
 823
 824                /* If the filter was removed then ensure PF pool bit
 825                 * is cleared if the PF only added itself to the pool
 826                 * because the PF is in promiscuous mode.
 827                 */
 828                if ((vlvf & VLAN_VID_MASK) == vid &&
 829                    !test_bit(vid, adapter->active_vlans) && !bits)
 830                        ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
 831        }
 832
 833        return err;
 834}
 835
 836static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
 837                                    u32 *msgbuf, u32 vf)
 838{
 839        u8 *new_mac = ((u8 *)(&msgbuf[1]));
 840        int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
 841                    IXGBE_VT_MSGINFO_SHIFT;
 842        int err;
 843
 844        if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
 845                e_warn(drv,
 846                       "VF %d requested MACVLAN filter but is administratively denied\n",
 847                       vf);
 848                return -1;
 849        }
 850
  851        /* A non-zero index indicates the VF is setting a filter */
 852        if (index) {
 853                if (!is_valid_ether_addr(new_mac)) {
 854                        e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
 855                        return -1;
 856                }
 857
 858                /*
 859                 * If the VF is allowed to set MAC filters then turn off
 860                 * anti-spoofing to avoid false positives.
 861                 */
 862                if (adapter->vfinfo[vf].spoofchk_enabled)
 863                        ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
 864        }
 865
 866        err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
 867        if (err == -ENOSPC)
 868                e_warn(drv,
 869                       "VF %d has requested a MACVLAN filter but there is no space for it\n",
 870                       vf);
 871
 872        return err < 0;
 873}
 874
 875static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
 876                                  u32 *msgbuf, u32 vf)
 877{
 878        int api = msgbuf[1];
 879
 880        switch (api) {
 881        case ixgbe_mbox_api_10:
 882        case ixgbe_mbox_api_11:
 883                adapter->vfinfo[vf].vf_api = api;
 884                return 0;
 885        default:
 886                break;
 887        }
 888
 889        e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
 890
 891        return -1;
 892}
 893
 894static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
 895                               u32 *msgbuf, u32 vf)
 896{
 897        struct net_device *dev = adapter->netdev;
 898        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 899        unsigned int default_tc = 0;
 900        u8 num_tcs = netdev_get_num_tc(dev);
 901
 902        /* verify the PF is supporting the correct APIs */
 903        switch (adapter->vfinfo[vf].vf_api) {
 904        case ixgbe_mbox_api_20:
 905        case ixgbe_mbox_api_11:
 906                break;
 907        default:
 908                return -1;
 909        }
 910
  911        /* tell the VF how many Tx/Rx queues its pool owns */
 912        msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
 913        msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
 914
 915        /* if TCs > 1 determine which TC belongs to default user priority */
 916        if (num_tcs > 1)
 917                default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
 918
 919        /* notify VF of need for VLAN tag stripping, and correct queue */
 920        if (num_tcs)
 921                msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
 922        else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
 923                msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
 924        else
 925                msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
 926
 927        /* notify VF of default queue */
 928        msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
 929
 930        return 0;
 931}
 932
 933static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 934{
 935        u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
 936        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 937        struct ixgbe_hw *hw = &adapter->hw;
 938        s32 retval;
 939
 940        retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
 941
 942        if (retval) {
 943                pr_err("Error receiving message from VF\n");
 944                return retval;
 945        }
 946
 947        /* this is a message we already processed, do nothing */
 948        if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
 949                return 0;
 950
 951        /* flush the ack before we write any messages back */
 952        IXGBE_WRITE_FLUSH(hw);
 953
 954        if (msgbuf[0] == IXGBE_VF_RESET)
 955                return ixgbe_vf_reset_msg(adapter, vf);
 956
 957        /*
 958         * until the vf completes a virtual function reset it should not be
 959         * allowed to start any configuration.
 960         */
 961        if (!adapter->vfinfo[vf].clear_to_send) {
 962                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
 963                ixgbe_write_mbx(hw, msgbuf, 1, vf);
 964                return 0;
 965        }
 966
 967        switch ((msgbuf[0] & 0xFFFF)) {
 968        case IXGBE_VF_SET_MAC_ADDR:
 969                retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
 970                break;
 971        case IXGBE_VF_SET_MULTICAST:
 972                retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
 973                break;
 974        case IXGBE_VF_SET_VLAN:
 975                retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
 976                break;
 977        case IXGBE_VF_SET_LPE:
 978                retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
 979                break;
 980        case IXGBE_VF_SET_MACVLAN:
 981                retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
 982                break;
 983        case IXGBE_VF_API_NEGOTIATE:
 984                retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
 985                break;
 986        case IXGBE_VF_GET_QUEUES:
 987                retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
 988                break;
 989        default:
 990                e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
 991                retval = IXGBE_ERR_MBX;
 992                break;
 993        }
 994
 995        /* notify the VF of the results of what it sent us */
 996        if (retval)
 997                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
 998        else
 999                msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
1000
1001        msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
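             /* CTS indicates the PF considers this VF clear to send further
              * messages; the ACK/NACK above carries this message's result
              */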
1002
1003        ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
1004
1005        return retval;
1006}
1007
1008static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
1009{
1010        struct ixgbe_hw *hw = &adapter->hw;
1011        u32 msg = IXGBE_VT_MSGTYPE_NACK;
1012
1013        /* if device isn't clear to send it shouldn't be reading either */
1014        if (!adapter->vfinfo[vf].clear_to_send)
1015                ixgbe_write_mbx(hw, &msg, 1, vf);
1016}
1017
1018void ixgbe_msg_task(struct ixgbe_adapter *adapter)
1019{
1020        struct ixgbe_hw *hw = &adapter->hw;
1021        u32 vf;
1022
1023        for (vf = 0; vf < adapter->num_vfs; vf++) {
1024                /* process any reset requests */
1025                if (!ixgbe_check_for_rst(hw, vf))
1026                        ixgbe_vf_reset_event(adapter, vf);
1027
1028                /* process any messages pending */
1029                if (!ixgbe_check_for_msg(hw, vf))
1030                        ixgbe_rcv_msg_from_vf(adapter, vf);
1031
1032                /* process any acks */
1033                if (!ixgbe_check_for_ack(hw, vf))
1034                        ixgbe_rcv_ack_from_vf(adapter, vf);
1035        }
1036}
1037
1038void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
1039{
1040        struct ixgbe_hw *hw = &adapter->hw;
1041
1042        /* disable transmit and receive for all vfs */
1043        IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
1044        IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
1045
1046        IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
1047        IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
1048}
1049
1050void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
1051{
1052        struct ixgbe_hw *hw = &adapter->hw;
1053        u32 ping;
1054        int i;
1055
1056        for (i = 0 ; i < adapter->num_vfs; i++) {
1057                ping = IXGBE_PF_CONTROL_MSG;
1058                if (adapter->vfinfo[i].clear_to_send)
1059                        ping |= IXGBE_VT_MSGTYPE_CTS;
1060                ixgbe_write_mbx(hw, &ping, 1, i);
1061        }
1062}
1063
1064int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1065{
1066        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1067        if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
1068                return -EINVAL;
1069        adapter->vfinfo[vf].pf_set_mac = true;
1070        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
1071        dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
 1072                                      " change effective.\n");
1073        if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1074                dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
1075                         " but the PF device is not up.\n");
1076                dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
1077                         " attempting to use the VF device.\n");
1078        }
1079        return ixgbe_set_vf_mac(adapter, vf, mac);
1080}
1081
1082int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1083{
1084        int err = 0;
1085        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1086        struct ixgbe_hw *hw = &adapter->hw;
1087
1088        if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
1089                return -EINVAL;
1090        if (vlan || qos) {
1091                if (adapter->vfinfo[vf].pf_vlan)
1092                        err = ixgbe_set_vf_vlan(adapter, false,
1093                                                adapter->vfinfo[vf].pf_vlan,
1094                                                vf);
1095                if (err)
1096                        goto out;
1097                err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
1098                if (err)
1099                        goto out;
1100                ixgbe_set_vmvir(adapter, vlan, qos, vf);
1101                ixgbe_set_vmolr(hw, vf, false);
1102                if (adapter->vfinfo[vf].spoofchk_enabled)
1103                        hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
1104                adapter->vfinfo[vf].vlan_count++;
1105                adapter->vfinfo[vf].pf_vlan = vlan;
1106                adapter->vfinfo[vf].pf_qos = qos;
1107                dev_info(&adapter->pdev->dev,
1108                         "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
1109                if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1110                        dev_warn(&adapter->pdev->dev,
1111                                 "The VF VLAN has been set,"
1112                                 " but the PF device is not up.\n");
1113                        dev_warn(&adapter->pdev->dev,
1114                                 "Bring the PF device up before"
1115                                 " attempting to use the VF device.\n");
1116                }
1117        } else {
1118                err = ixgbe_set_vf_vlan(adapter, false,
1119                                        adapter->vfinfo[vf].pf_vlan, vf);
1120                ixgbe_clear_vmvir(adapter, vf);
1121                ixgbe_set_vmolr(hw, vf, true);
1122                hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
1123                if (adapter->vfinfo[vf].vlan_count)
1124                        adapter->vfinfo[vf].vlan_count--;
1125                adapter->vfinfo[vf].pf_vlan = 0;
1126                adapter->vfinfo[vf].pf_qos = 0;
1127        }
1128out:
1129        return err;
1130}
1131
1132static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
1133{
1134        switch (adapter->link_speed) {
1135        case IXGBE_LINK_SPEED_100_FULL:
1136                return 100;
1137        case IXGBE_LINK_SPEED_1GB_FULL:
1138                return 1000;
1139        case IXGBE_LINK_SPEED_10GB_FULL:
1140                return 10000;
1141        default:
1142                return 0;
1143        }
1144}
1145
1146static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
1147{
1148        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
1149        struct ixgbe_hw *hw = &adapter->hw;
1150        u32 bcnrc_val = 0;
1151        u16 queue, queues_per_pool;
1152        u16 tx_rate = adapter->vfinfo[vf].tx_rate;
1153
1154        if (tx_rate) {
1155                /* start with base link speed value */
1156                bcnrc_val = adapter->vf_rate_link_speed;
1157
1158                /* Calculate the rate factor values to set */
1159                bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1160                bcnrc_val /= tx_rate;
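                     /* e.g. assuming IXGBE_RTTBCNRC_RF_INT_SHIFT == 14, a
                      * 10GbE link limited to 1000 Mbps yields a rate factor
                      * of (10000 << 14) / 1000 = 0x28000
                      */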
1161
1162                /* clear everything but the rate factor */
1163                bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1164                             IXGBE_RTTBCNRC_RF_DEC_MASK;
1165
1166                /* enable the rate scheduler */
1167                bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1168        }
1169
1170        /*
1171         * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
1172         * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
1173         * and 0x004 otherwise.
1174         */
1175        switch (hw->mac.type) {
1176        case ixgbe_mac_82599EB:
1177                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
1178                break;
1179        case ixgbe_mac_X540:
1180                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
1181                break;
1182        default:
1183                break;
1184        }
1185
1186        /* determine how many queues per pool based on VMDq mask */
1187        queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
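             /* __ALIGN_MASK(1, ~mask) rounds 1 up to the VMDq pool stride,
              * i.e. the number of Tx queues each pool (and thus each VF) owns
              */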
1188
1189        /* write value for all Tx queues belonging to VF */
1190        for (queue = 0; queue < queues_per_pool; queue++) {
1191                unsigned int reg_idx = (vf * queues_per_pool) + queue;
1192
1193                IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
1194                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1195        }
1196}
1197
1198void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1199{
1200        int i;
1201
1202        /* VF Tx rate limit was not set */
1203        if (!adapter->vf_rate_link_speed)
1204                return;
1205
1206        if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
1207                adapter->vf_rate_link_speed = 0;
1208                dev_info(&adapter->pdev->dev,
1209                         "Link speed has been changed. VF Transmit rate is disabled\n");
1210        }
1211
1212        for (i = 0; i < adapter->num_vfs; i++) {
1213                if (!adapter->vf_rate_link_speed)
1214                        adapter->vfinfo[i].tx_rate = 0;
1215
1216                ixgbe_set_vf_rate_limit(adapter, i);
1217        }
1218}
1219
1220int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
1221                        int max_tx_rate)
1222{
1223        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1224        int link_speed;
1225
1226        /* verify VF is active */
1227        if (vf >= adapter->num_vfs)
1228                return -EINVAL;
1229
1230        /* verify link is up */
1231        if (!adapter->link_up)
1232                return -EINVAL;
1233
1234        /* verify we are linked at 10Gbps */
1235        link_speed = ixgbe_link_mbps(adapter);
1236        if (link_speed != 10000)
1237                return -EINVAL;
1238
1239        if (min_tx_rate)
1240                return -EINVAL;
1241
 1242        /* rate limit must be more than 10 Mbps and no more than link speed */
1243        if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
1244                return -EINVAL;
1245
1246        /* store values */
1247        adapter->vf_rate_link_speed = link_speed;
1248        adapter->vfinfo[vf].tx_rate = max_tx_rate;
1249
1250        /* update hardware configuration */
1251        ixgbe_set_vf_rate_limit(adapter, vf);
1252
1253        return 0;
1254}
1255
1256int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
1257{
1258        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1259        int vf_target_reg = vf >> 3;
1260        int vf_target_shift = vf % 8;
1261        struct ixgbe_hw *hw = &adapter->hw;
1262        u32 regval;
1263
1264        if (vf >= adapter->num_vfs)
1265                return -EINVAL;
1266
1267        adapter->vfinfo[vf].spoofchk_enabled = setting;
1268
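             /* PFVFSPOOF packs eight VFs per register: MAC anti-spoof enables
              * sit in the low byte, VLAN anti-spoof enables
              * IXGBE_SPOOF_VLANAS_SHIFT bits above them (handled below)
              */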
1269        regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1270        regval &= ~(1 << vf_target_shift);
1271        regval |= (setting << vf_target_shift);
1272        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
1273
1274        if (adapter->vfinfo[vf].vlan_count) {
1275                vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
1276                regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1277                regval &= ~(1 << vf_target_shift);
1278                regval |= (setting << vf_target_shift);
1279                IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
1280        }
1281
1282        return 0;
1283}
1284
1285int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1286                            int vf, struct ifla_vf_info *ivi)
1287{
1288        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1289        if (vf >= adapter->num_vfs)
1290                return -EINVAL;
1291        ivi->vf = vf;
1292        memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
1293        ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
1294        ivi->min_tx_rate = 0;
1295        ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1296        ivi->qos = adapter->vfinfo[vf].pf_qos;
1297        ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
1298        return 0;
1299}
1300