linux/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
   1/*******************************************************************************
   2
   3  Intel 10 Gigabit PCI Express Linux driver
   4  Copyright(c) 1999 - 2013 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  25
  26*******************************************************************************/
  27
  28#include <linux/types.h>
  29#include <linux/module.h>
  30#include <linux/pci.h>
  31#include <linux/netdevice.h>
  32#include <linux/vmalloc.h>
  33#include <linux/string.h>
  34#include <linux/in.h>
  35#include <linux/ip.h>
  36#include <linux/tcp.h>
  37#include <linux/ipv6.h>
  38#ifdef NETIF_F_HW_VLAN_CTAG_TX
  39#include <linux/if_vlan.h>
  40#endif
  41
  42#include "ixgbe.h"
  43#include "ixgbe_type.h"
  44#include "ixgbe_sriov.h"
  45
  46#ifdef CONFIG_PCI_IOV
  47static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
  48{
  49        struct ixgbe_hw *hw = &adapter->hw;
  50        int num_vf_macvlans, i;
  51        struct vf_macvlans *mv_list;
  52
  53        adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
  54        e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
  55
  56        /* Enable VMDq flag so device will be set in VM mode */
  57        adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
  58        if (!adapter->ring_feature[RING_F_VMDQ].limit)
  59                adapter->ring_feature[RING_F_VMDQ].limit = 1;
  60        adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
  61
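             /* carve the remaining RAR (MAC filter) entries into VF macvlan
              * slots: the table must first hold the PF macvlan entries, one
              * entry for the PF's own MAC and one primary MAC per VF, and
              * whatever is left over can be handed out to VFs on request
              */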
  62        num_vf_macvlans = hw->mac.num_rar_entries -
  63        (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
  64
  65        adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
  66                                             sizeof(struct vf_macvlans),
  67                                             GFP_KERNEL);
  68        if (mv_list) {
  69                /* Initialize list of VF macvlans */
  70                INIT_LIST_HEAD(&adapter->vf_mvs.l);
  71                for (i = 0; i < num_vf_macvlans; i++) {
  72                        mv_list->vf = -1;
  73                        mv_list->free = true;
  74                        mv_list->rar_entry = hw->mac.num_rar_entries -
  75                                (i + adapter->num_vfs + 1);
  76                        list_add(&mv_list->l, &adapter->vf_mvs.l);
  77                        mv_list++;
  78                }
  79        }
  80
  81        /* Initialize default switching mode VEB */
  82        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
  83        adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
  84
  85        /* If call to enable VFs succeeded then allocate memory
  86         * for per VF control structures.
  87         */
  88        adapter->vfinfo =
  89                kcalloc(adapter->num_vfs,
  90                        sizeof(struct vf_data_storage), GFP_KERNEL);
  91        if (adapter->vfinfo) {
   92                /* limit traffic classes based on VFs enabled */
  93                if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
  94                    (adapter->num_vfs < 16)) {
  95                        adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
  96                        adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
  97                } else if (adapter->num_vfs < 32) {
  98                        adapter->dcb_cfg.num_tcs.pg_tcs = 4;
  99                        adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
 100                } else {
 101                        adapter->dcb_cfg.num_tcs.pg_tcs = 1;
 102                        adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
 103                }
 104
 105                /* We do not support RSS w/ SR-IOV */
 106                adapter->ring_feature[RING_F_RSS].limit = 1;
 107
 108                /* Disable RSC when in SR-IOV mode */
 109                adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
 110                                     IXGBE_FLAG2_RSC_ENABLED);
 111
 112                /* enable spoof checking for all VFs */
 113                for (i = 0; i < adapter->num_vfs; i++)
 114                        adapter->vfinfo[i].spoofchk_enabled = true;
 115                return 0;
 116        }
 117
 118        return -ENOMEM;
 119}
 120
 121/* Note this function is called when the user wants to enable SR-IOV
 122 * VFs using the now deprecated module parameter
 123 */
 124void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
 125{
 126        int pre_existing_vfs = 0;
 127
 128        pre_existing_vfs = pci_num_vf(adapter->pdev);
 129        if (!pre_existing_vfs && !adapter->num_vfs)
 130                return;
 131
 132        if (!pre_existing_vfs)
 133                dev_warn(&adapter->pdev->dev,
 134                         "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
 135
 136        /* If there are pre-existing VFs then we have to force
  137         * use of that many - override any module parameter value.
 138         * This may result from the user unloading the PF driver
 139         * while VFs were assigned to guest VMs or because the VFs
 140         * have been created via the new PCI SR-IOV sysfs interface.
 141         */
 142        if (pre_existing_vfs) {
 143                adapter->num_vfs = pre_existing_vfs;
 144                dev_warn(&adapter->pdev->dev,
 145                         "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
 146        } else {
 147                int err;
 148                /*
 149                 * The 82599 supports up to 64 VFs per physical function
 150                 * but this implementation limits allocation to 63 so that
 151                 * basic networking resources are still available to the
  152                 * physical function.  If the user requests greater than
 153                 * 63 VFs then it is an error - reset to default of zero.
 154                 */
 155                adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
 156
 157                err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
 158                if (err) {
 159                        e_err(probe, "Failed to enable PCI sriov: %d\n", err);
 160                        adapter->num_vfs = 0;
 161                        return;
 162                }
 163        }
 164
 165        if (!__ixgbe_enable_sriov(adapter))
 166                return;
 167
 168        /* If we have gotten to this point then there is no memory available
 169         * to manage the VF devices - print message and bail.
 170         */
 171        e_err(probe, "Unable to allocate memory for VF Data Storage - "
 172              "SRIOV disabled\n");
 173        ixgbe_disable_sriov(adapter);
 174}
 175
 176#endif /* #ifdef CONFIG_PCI_IOV */
 177int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 178{
 179        struct ixgbe_hw *hw = &adapter->hw;
 180        u32 gpie;
 181        u32 vmdctl;
 182        int rss;
 183
 184        /* set num VFs to 0 to prevent access to vfinfo */
 185        adapter->num_vfs = 0;
 186
 187        /* free VF control structures */
 188        kfree(adapter->vfinfo);
 189        adapter->vfinfo = NULL;
 190
 191        /* free macvlan list */
 192        kfree(adapter->mv_list);
 193        adapter->mv_list = NULL;
 194
 195        /* if SR-IOV is already disabled then there is nothing to do */
 196        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 197                return 0;
 198
 199#ifdef CONFIG_PCI_IOV
 200        /*
 201         * If our VFs are assigned we cannot shut down SR-IOV
 202         * without causing issues, so just leave the hardware
 203         * available but disabled
 204         */
 205        if (pci_vfs_assigned(adapter->pdev)) {
 206                e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
 207                return -EPERM;
 208        }
 209        /* disable iov and allow time for transactions to clear */
 210        pci_disable_sriov(adapter->pdev);
 211#endif
 212
 213        /* turn off device IOV mode */
 214        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
 215        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
 216        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
 217        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 218
 219        /* set default pool back to 0 */
 220        vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
 221        vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
 222        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
 223        IXGBE_WRITE_FLUSH(hw);
 224
  225        /* Disable VMDq flag if no other pools need it so device leaves VM mode */
 226        if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
 227                adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
 228        adapter->ring_feature[RING_F_VMDQ].offset = 0;
 229
 230        rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
 231        adapter->ring_feature[RING_F_RSS].limit = rss;
 232
 233        /* take a breather then clean up driver data */
 234        msleep(100);
 235
 236        adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
 237        return 0;
 238}
 239
 240static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
 241{
 242#ifdef CONFIG_PCI_IOV
 243        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
 244        int err = 0;
 245        int i;
 246        int pre_existing_vfs = pci_num_vf(dev);
 247
 248        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 249                err = ixgbe_disable_sriov(adapter);
 250        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
 251                goto out;
 252
 253        if (err)
 254                goto err_out;
 255
 256        /* While the SR-IOV capability structure reports total VFs to be
 257         * 64 we limit the actual number that can be allocated to 63 so
 258         * that some transmit/receive resources can be reserved to the
 259         * PF.  The PCI bus driver already checks for other values out of
 260         * range.
 261         */
 262        if (num_vfs > 63) {
 263                err = -EPERM;
 264                goto err_out;
 265        }
 266
 267        adapter->num_vfs = num_vfs;
 268
 269        err = __ixgbe_enable_sriov(adapter);
 270        if (err)
 271                goto err_out;
 272
 273        for (i = 0; i < adapter->num_vfs; i++)
 274                ixgbe_vf_configuration(dev, (i | 0x10000000));
 275
 276        err = pci_enable_sriov(dev, num_vfs);
 277        if (err) {
 278                e_dev_warn("Failed to enable PCI sriov: %d\n", err);
 279                goto err_out;
 280        }
 281        ixgbe_sriov_reinit(adapter);
 282
 283out:
 284        return num_vfs;
 285
 286err_out:
 287        return err;
 288#endif
 289        return 0;
 290}
 291
 292static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
 293{
 294        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
 295        int err;
 296        u32 current_flags = adapter->flags;
 297
 298        err = ixgbe_disable_sriov(adapter);
 299
 300        /* Only reinit if no error and state changed */
 301        if (!err && current_flags != adapter->flags) {
 302                /* ixgbe_disable_sriov() doesn't clear VMDQ flag */
 303                adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
 304#ifdef CONFIG_PCI_IOV
 305                ixgbe_sriov_reinit(adapter);
 306#endif
 307        }
 308
 309        return err;
 310}
 311
 312int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
 313{
 314        if (num_vfs == 0)
 315                return ixgbe_pci_sriov_disable(dev);
 316        else
 317                return ixgbe_pci_sriov_enable(dev, num_vfs);
 318}
 319
 320static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
 321                                   u32 *msgbuf, u32 vf)
 322{
 323        int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
 324                       >> IXGBE_VT_MSGINFO_SHIFT;
 325        u16 *hash_list = (u16 *)&msgbuf[1];
 326        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
 327        struct ixgbe_hw *hw = &adapter->hw;
 328        int i;
 329        u32 vector_bit;
 330        u32 vector_reg;
 331        u32 mta_reg;
 332
 333        /* only so many hash values supported */
 334        entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
 335
 336        /*
  337         * salt away the number of multicast addresses assigned
  338         * to this VF for later use to restore when the PF multicast
 339         * list changes
 340         */
 341        vfinfo->num_vf_mc_hashes = entries;
 342
 343        /*
 344         * VFs are limited to using the MTA hash table for their multicast
 345         * addresses
 346         */
 347        for (i = 0; i < entries; i++) {
 348                vfinfo->vf_mc_hashes[i] = hash_list[i];
 349        }
 350
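             /* each 12-bit hash value selects one bit in the 128 x 32-bit
              * MTA: bits [11:5] pick the register and bits [4:0] the bit
              * within it, e.g. hash 0x0ABC sets MTA[0x55] bit 0x1C
              */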
 351        for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
 352                vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
 353                vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
 354                mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
 355                mta_reg |= (1 << vector_bit);
 356                IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
 357        }
 358
 359        return 0;
 360}
 361
 362static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
 363{
 364        struct ixgbe_hw *hw = &adapter->hw;
 365        struct list_head *pos;
 366        struct vf_macvlans *entry;
 367
 368        list_for_each(pos, &adapter->vf_mvs.l) {
 369                entry = list_entry(pos, struct vf_macvlans, l);
 370                if (!entry->free)
 371                        hw->mac.ops.set_rar(hw, entry->rar_entry,
 372                                            entry->vf_macvlan,
 373                                            entry->vf, IXGBE_RAH_AV);
 374        }
 375}
 376
 377void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
 378{
 379        struct ixgbe_hw *hw = &adapter->hw;
 380        struct vf_data_storage *vfinfo;
 381        int i, j;
 382        u32 vector_bit;
 383        u32 vector_reg;
 384        u32 mta_reg;
 385
 386        for (i = 0; i < adapter->num_vfs; i++) {
 387                vfinfo = &adapter->vfinfo[i];
 388                for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
 389                        hw->addr_ctrl.mta_in_use++;
 390                        vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
 391                        vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
 392                        mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
 393                        mta_reg |= (1 << vector_bit);
 394                        IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
 395                }
 396        }
 397
 398        /* Restore any VF macvlans */
 399        ixgbe_restore_vf_macvlans(adapter);
 400}
 401
 402static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
 403                             u32 vf)
 404{
 405        /* VLAN 0 is a special case, don't allow it to be removed */
 406        if (!vid && !add)
 407                return 0;
 408
 409        return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 410}
 411
 412static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 413{
 414        struct ixgbe_hw *hw = &adapter->hw;
 415        int max_frame = msgbuf[1];
 416        u32 max_frs;
 417
 418        /*
 419         * For 82599EB we have to keep all PFs and VFs operating with
 420         * the same max_frame value in order to avoid sending an oversize
 421         * frame to a VF.  In order to guarantee this is handled correctly
 422         * for all cases we have several special exceptions to take into
 423         * account before we can enable the VF for receive
 424         */
 425        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 426                struct net_device *dev = adapter->netdev;
 427                int pf_max_frame = dev->mtu + ETH_HLEN;
 428                u32 reg_offset, vf_shift, vfre;
 429                s32 err = 0;
 430
 431#ifdef CONFIG_FCOE
 432                if (dev->features & NETIF_F_FCOE_MTU)
 433                        pf_max_frame = max_t(int, pf_max_frame,
 434                                             IXGBE_FCOE_JUMBO_FRAME_SIZE);
 435
 436#endif /* CONFIG_FCOE */
 437                switch (adapter->vfinfo[vf].vf_api) {
 438                case ixgbe_mbox_api_11:
  439                        /*
  440                         * Version 1.1 supports jumbo frames on VFs if the PF
  441                         * has jumbo frames enabled, which implies any legacy
  442                         * VFs are already disabled
  443                         */
 444                        if (pf_max_frame > ETH_FRAME_LEN)
 445                                break;
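                             /* PF is not using jumbo frames; fall through and
                              * treat this VF like a legacy (1.0) VF
                              */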
 446                default:
 447                        /*
 448                         * If the PF or VF are running w/ jumbo frames enabled
 449                         * we need to shut down the VF Rx path as we cannot
 450                         * support jumbo frames on legacy VFs
 451                         */
 452                        if ((pf_max_frame > ETH_FRAME_LEN) ||
 453                            (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
 454                                err = -EINVAL;
 455                        break;
 456                }
 457
 458                /* determine VF receive enable location */
 459                vf_shift = vf % 32;
 460                reg_offset = vf / 32;
 461
 462                /* enable or disable receive depending on error */
 463                vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
 464                if (err)
 465                        vfre &= ~(1 << vf_shift);
 466                else
 467                        vfre |= 1 << vf_shift;
 468                IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
 469
 470                if (err) {
 471                        e_err(drv, "VF max_frame %d out of range\n", max_frame);
 472                        return err;
 473                }
 474        }
 475
  476        /* reject a max frame size larger than the device supports */
 477        if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
 478                e_err(drv, "VF max_frame %d out of range\n", max_frame);
 479                return -EINVAL;
 480        }
 481
 482        /* pull current max frame size from hardware */
 483        max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 484        max_frs &= IXGBE_MHADD_MFS_MASK;
 485        max_frs >>= IXGBE_MHADD_MFS_SHIFT;
 486
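             /* MFS is a device wide value, so only ever grow it; shrinking it
              * could cut off frames already permitted for other pools
              */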
 487        if (max_frs < max_frame) {
 488                max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
 489                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
 490        }
 491
  492        e_info(hw, "VF requested change of max frame size to %d\n", max_frame);
 493
 494        return 0;
 495}
 496
 497static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 498{
 499        u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 500        vmolr |= (IXGBE_VMOLR_ROMPE |
 501                  IXGBE_VMOLR_BAM);
 502        if (aupe)
 503                vmolr |= IXGBE_VMOLR_AUPE;
 504        else
 505                vmolr &= ~IXGBE_VMOLR_AUPE;
 506        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 507}
 508
 509static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
 510{
 511        struct ixgbe_hw *hw = &adapter->hw;
 512
 513        IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
 514}
 515static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 516{
 517        struct ixgbe_hw *hw = &adapter->hw;
 518        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
 519        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 520        u8 num_tcs = netdev_get_num_tc(adapter->netdev);
 521
 522        /* add PF assigned VLAN or VLAN 0 */
 523        ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
 524
 525        /* reset offloads to defaults */
 526        ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
 527
 528        /* set outgoing tags for VFs */
 529        if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
 530                ixgbe_clear_vmvir(adapter, vf);
 531        } else {
 532                if (vfinfo->pf_qos || !num_tcs)
 533                        ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
 534                                        vfinfo->pf_qos, vf);
 535                else
 536                        ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
 537                                        adapter->default_up, vf);
 538
 539                if (vfinfo->spoofchk_enabled)
 540                        hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
 541        }
 542
 543        /* reset multicast table array for vf */
 544        adapter->vfinfo[vf].num_vf_mc_hashes = 0;
 545
 546        /* Flush and reset the mta with the new values */
 547        ixgbe_set_rx_mode(adapter->netdev);
 548
 549        hw->mac.ops.clear_rar(hw, rar_entry);
 550
 551        /* reset VF api back to unknown */
 552        adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
 553}
 554
 555static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
 556                            int vf, unsigned char *mac_addr)
 557{
 558        struct ixgbe_hw *hw = &adapter->hw;
 559        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 560
  561        memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
 562        hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
 563
 564        return 0;
 565}
 566
 567static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 568                                int vf, int index, unsigned char *mac_addr)
 569{
 570        struct ixgbe_hw *hw = &adapter->hw;
 571        struct list_head *pos;
 572        struct vf_macvlans *entry;
 573
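             /* index 0 asks us to flush the VF's unicast list and index 1 is
              * the first entry of a new list, so in both cases release any
              * macvlan RAR entries this VF already owns
              */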
 574        if (index <= 1) {
 575                list_for_each(pos, &adapter->vf_mvs.l) {
 576                        entry = list_entry(pos, struct vf_macvlans, l);
 577                        if (entry->vf == vf) {
 578                                entry->vf = -1;
 579                                entry->free = true;
 580                                entry->is_macvlan = false;
 581                                hw->mac.ops.clear_rar(hw, entry->rar_entry);
 582                        }
 583                }
 584        }
 585
 586        /*
 587         * If index was zero then we were asked to clear the uc list
 588         * for the VF.  We're done.
 589         */
 590        if (!index)
 591                return 0;
 592
 593        entry = NULL;
 594
 595        list_for_each(pos, &adapter->vf_mvs.l) {
 596                entry = list_entry(pos, struct vf_macvlans, l);
 597                if (entry->free)
 598                        break;
 599        }
 600
 601        /*
 602         * If we traversed the entire list and didn't find a free entry
 603         * then we're out of space on the RAR table.  Also entry may
 604         * be NULL because the original memory allocation for the list
 605         * failed, which is not fatal but does mean we can't support
 606         * VF requests for MACVLAN because we couldn't allocate
 607         * memory for the list management required.
 608         */
 609        if (!entry || !entry->free)
 610                return -ENOSPC;
 611
 612        entry->free = false;
 613        entry->is_macvlan = true;
 614        entry->vf = vf;
 615        memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
 616
 617        hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
 618
 619        return 0;
 620}
 621
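     /* event_mask encodes the VF number in its low 6 bits; bit 28 set means
      * the VF is being enabled (see the (vf | 0x10000000) callers)
      */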
 622int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 623{
  624        unsigned char vf_mac_addr[ETH_ALEN];
 625        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 626        unsigned int vfn = (event_mask & 0x3f);
 627
 628        bool enable = ((event_mask & 0x10000000U) != 0);
 629
 630        if (enable) {
 631                eth_zero_addr(vf_mac_addr);
  632                memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, ETH_ALEN);
 633        }
 634
 635        return 0;
 636}
 637
 638static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 639{
 640        struct ixgbe_hw *hw = &adapter->hw;
 641        unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
 642        u32 reg, reg_offset, vf_shift;
 643        u32 msgbuf[4] = {0, 0, 0, 0};
 644        u8 *addr = (u8 *)(&msgbuf[1]);
 645
 646        e_info(probe, "VF Reset msg received from vf %d\n", vf);
 647
 648        /* reset the filters for the device */
 649        ixgbe_vf_reset_event(adapter, vf);
 650
 651        /* set vf mac address */
 652        if (!is_zero_ether_addr(vf_mac))
 653                ixgbe_set_vf_mac(adapter, vf, vf_mac);
 654
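             /* VFTE/VFRE hold one enable bit per VF, 32 VFs per register */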
 655        vf_shift = vf % 32;
 656        reg_offset = vf / 32;
 657
 658        /* enable transmit for vf */
 659        reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
 660        reg |= 1 << vf_shift;
 661        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 662
 663        /* enable receive for vf */
 664        reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
 665        reg |= 1 << vf_shift;
 666        /*
 667         * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
 668         * For more info take a look at ixgbe_set_vf_lpe
 669         */
 670        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 671                struct net_device *dev = adapter->netdev;
 672                int pf_max_frame = dev->mtu + ETH_HLEN;
 673
 674#ifdef CONFIG_FCOE
 675                if (dev->features & NETIF_F_FCOE_MTU)
 676                        pf_max_frame = max_t(int, pf_max_frame,
 677                                             IXGBE_FCOE_JUMBO_FRAME_SIZE);
 678
 679#endif /* CONFIG_FCOE */
 680                if (pf_max_frame > ETH_FRAME_LEN)
 681                        reg &= ~(1 << vf_shift);
 682        }
 683        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
 684
 685        /* enable VF mailbox for further messages */
 686        adapter->vfinfo[vf].clear_to_send = true;
 687
 688        /* Enable counting of spoofed packets in the SSVPC register */
 689        reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
 690        reg |= (1 << vf_shift);
 691        IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
 692
 693        /* reply to reset with ack and vf mac address */
 694        msgbuf[0] = IXGBE_VF_RESET;
 695        if (!is_zero_ether_addr(vf_mac)) {
 696                msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
 697                memcpy(addr, vf_mac, ETH_ALEN);
 698        } else {
 699                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
 700                dev_warn(&adapter->pdev->dev,
 701                         "VF %d has no MAC address assigned, you may have to assign one manually\n",
 702                         vf);
 703        }
 704
 705        /*
 706         * Piggyback the multicast filter type so VF can compute the
 707         * correct vectors
 708         */
 709        msgbuf[3] = hw->mac.mc_filter_type;
 710        ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
 711
 712        return 0;
 713}
 714
 715static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
 716                                 u32 *msgbuf, u32 vf)
 717{
 718        u8 *new_mac = ((u8 *)(&msgbuf[1]));
 719
 720        if (!is_valid_ether_addr(new_mac)) {
 721                e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
 722                return -1;
 723        }
 724
 725        if (adapter->vfinfo[vf].pf_set_mac &&
 726            memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
 727                   ETH_ALEN)) {
 728                e_warn(drv,
 729                       "VF %d attempted to override administratively set MAC address\n"
 730                       "Reload the VF driver to resume operations\n",
 731                       vf);
 732                return -1;
 733        }
 734
 735        return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
 736}
 737
 738static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
 739{
 740        u32 vlvf;
 741        s32 regindex;
 742
 743        /* short cut the special case */
 744        if (vlan == 0)
 745                return 0;
 746
 747        /* Search for the vlan id in the VLVF entries */
 748        for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
 749                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
 750                if ((vlvf & VLAN_VID_MASK) == vlan)
 751                        break;
 752        }
 753
 754        /* Return a negative value if not found */
 755        if (regindex >= IXGBE_VLVF_ENTRIES)
 756                regindex = -1;
 757
 758        return regindex;
 759}
 760
 761static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
 762                                 u32 *msgbuf, u32 vf)
 763{
 764        struct ixgbe_hw *hw = &adapter->hw;
 765        int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
 766        int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
 767        int err;
 768        s32 reg_ndx;
 769        u32 vlvf;
 770        u32 bits;
 771        u8 tcs = netdev_get_num_tc(adapter->netdev);
 772
 773        if (adapter->vfinfo[vf].pf_vlan || tcs) {
 774                e_warn(drv,
 775                       "VF %d attempted to override administratively set VLAN configuration\n"
 776                       "Reload the VF driver to resume operations\n",
 777                       vf);
 778                return -1;
 779        }
 780
 781        if (add)
 782                adapter->vfinfo[vf].vlan_count++;
 783        else if (adapter->vfinfo[vf].vlan_count)
 784                adapter->vfinfo[vf].vlan_count--;
 785
 786        /* in case of promiscuous mode any VLAN filter set for a VF must
 787         * also have the PF pool added to it.
 788         */
 789        if (add && adapter->netdev->flags & IFF_PROMISC)
 790                err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
 791
 792        err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
 793        if (!err && adapter->vfinfo[vf].spoofchk_enabled)
 794                hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
 795
 796        /* Go through all the checks to see if the VLAN filter should
 797         * be wiped completely.
 798         */
 799        if (!add && adapter->netdev->flags & IFF_PROMISC) {
 800                reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
 801                if (reg_ndx < 0)
 802                        goto out;
 803                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
 804                /* See if any other pools are set for this VLAN filter
 805                 * entry other than the PF.
 806                 */
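                     /* each VLVF entry has a pair of VLVFB pool enable
                      * dwords: the first covers pools 0-31 and the second
                      * pools 32-63
                      */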
 807                if (VMDQ_P(0) < 32) {
 808                        bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
 809                        bits &= ~(1 << VMDQ_P(0));
 810                        bits |= IXGBE_READ_REG(hw,
  811                                               IXGBE_VLVFB(reg_ndx * 2 + 1));
 812                } else {
 813                        bits = IXGBE_READ_REG(hw,
  814                                              IXGBE_VLVFB(reg_ndx * 2 + 1));
 815                        bits &= ~(1 << (VMDQ_P(0) - 32));
 816                        bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
 817                }
 818
 819                /* If the filter was removed then ensure PF pool bit
 820                 * is cleared if the PF only added itself to the pool
 821                 * because the PF is in promiscuous mode.
 822                 */
 823                if ((vlvf & VLAN_VID_MASK) == vid &&
 824                    !test_bit(vid, adapter->active_vlans) && !bits)
 825                        ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
 826        }
 827
 828out:
 829
 830        return err;
 831}
 832
 833static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
 834                                    u32 *msgbuf, u32 vf)
 835{
 836        u8 *new_mac = ((u8 *)(&msgbuf[1]));
 837        int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
 838                    IXGBE_VT_MSGINFO_SHIFT;
 839        int err;
 840
 841        if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
 842                e_warn(drv,
 843                       "VF %d requested MACVLAN filter but is administratively denied\n",
 844                       vf);
 845                return -1;
 846        }
 847
  848        /* A non-zero index indicates the VF is setting a filter */
 849        if (index) {
 850                if (!is_valid_ether_addr(new_mac)) {
 851                        e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
 852                        return -1;
 853                }
 854
 855                /*
 856                 * If the VF is allowed to set MAC filters then turn off
 857                 * anti-spoofing to avoid false positives.
 858                 */
 859                if (adapter->vfinfo[vf].spoofchk_enabled)
 860                        ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
 861        }
 862
 863        err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
 864        if (err == -ENOSPC)
 865                e_warn(drv,
 866                       "VF %d has requested a MACVLAN filter but there is no space for it\n",
 867                       vf);
 868
 869        return err < 0;
 870}
 871
 872static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
 873                                  u32 *msgbuf, u32 vf)
 874{
 875        int api = msgbuf[1];
 876
 877        switch (api) {
 878        case ixgbe_mbox_api_10:
 879        case ixgbe_mbox_api_11:
 880                adapter->vfinfo[vf].vf_api = api;
 881                return 0;
 882        default:
 883                break;
 884        }
 885
 886        e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
 887
 888        return -1;
 889}
 890
 891static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
 892                               u32 *msgbuf, u32 vf)
 893{
 894        struct net_device *dev = adapter->netdev;
 895        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 896        unsigned int default_tc = 0;
 897        u8 num_tcs = netdev_get_num_tc(dev);
 898
 899        /* verify the PF is supporting the correct APIs */
 900        switch (adapter->vfinfo[vf].vf_api) {
 901        case ixgbe_mbox_api_20:
 902        case ixgbe_mbox_api_11:
 903                break;
 904        default:
 905                return -1;
 906        }
 907
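             /* __ALIGN_MASK(1, ~vmdq->mask) rounds 1 up to the VMDq pool
              * stride, i.e. the number of queues in each pool
              */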
  908        /* report the Tx/Rx queue counts for this VF's VMDq pool */
 909        msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
 910        msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
 911
 912        /* if TCs > 1 determine which TC belongs to default user priority */
 913        if (num_tcs > 1)
 914                default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
 915
 916        /* notify VF of need for VLAN tag stripping, and correct queue */
 917        if (num_tcs)
 918                msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
 919        else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
 920                msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
 921        else
 922                msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
 923
 924        /* notify VF of default queue */
 925        msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
 926
 927        return 0;
 928}
 929
 930static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 931{
 932        u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
 933        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 934        struct ixgbe_hw *hw = &adapter->hw;
 935        s32 retval;
 936
 937        retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
 938
 939        if (retval) {
 940                pr_err("Error receiving message from VF\n");
 941                return retval;
 942        }
 943
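             /* msgbuf[0] carries the message type in its low 16 bits and the
              * ACK/NACK/CTS status flags in the bits above them
              */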
 944        /* this is a message we already processed, do nothing */
 945        if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
 946                return retval;
 947
 948        /* flush the ack before we write any messages back */
 949        IXGBE_WRITE_FLUSH(hw);
 950
 951        if (msgbuf[0] == IXGBE_VF_RESET)
 952                return ixgbe_vf_reset_msg(adapter, vf);
 953
 954        /*
 955         * until the vf completes a virtual function reset it should not be
 956         * allowed to start any configuration.
 957         */
 958        if (!adapter->vfinfo[vf].clear_to_send) {
 959                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
 960                ixgbe_write_mbx(hw, msgbuf, 1, vf);
 961                return retval;
 962        }
 963
 964        switch ((msgbuf[0] & 0xFFFF)) {
 965        case IXGBE_VF_SET_MAC_ADDR:
 966                retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
 967                break;
 968        case IXGBE_VF_SET_MULTICAST:
 969                retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
 970                break;
 971        case IXGBE_VF_SET_VLAN:
 972                retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
 973                break;
 974        case IXGBE_VF_SET_LPE:
 975                retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
 976                break;
 977        case IXGBE_VF_SET_MACVLAN:
 978                retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
 979                break;
 980        case IXGBE_VF_API_NEGOTIATE:
 981                retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
 982                break;
 983        case IXGBE_VF_GET_QUEUES:
 984                retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
 985                break;
 986        default:
 987                e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
 988                retval = IXGBE_ERR_MBX;
 989                break;
 990        }
 991
 992        /* notify the VF of the results of what it sent us */
 993        if (retval)
 994                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
 995        else
 996                msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
 997
 998        msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
 999
1000        ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
1001
1002        return retval;
1003}
1004
1005static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
1006{
1007        struct ixgbe_hw *hw = &adapter->hw;
1008        u32 msg = IXGBE_VT_MSGTYPE_NACK;
1009
1010        /* if device isn't clear to send it shouldn't be reading either */
1011        if (!adapter->vfinfo[vf].clear_to_send)
1012                ixgbe_write_mbx(hw, &msg, 1, vf);
1013}
1014
1015void ixgbe_msg_task(struct ixgbe_adapter *adapter)
1016{
1017        struct ixgbe_hw *hw = &adapter->hw;
1018        u32 vf;
1019
1020        for (vf = 0; vf < adapter->num_vfs; vf++) {
1021                /* process any reset requests */
1022                if (!ixgbe_check_for_rst(hw, vf))
1023                        ixgbe_vf_reset_event(adapter, vf);
1024
1025                /* process any messages pending */
1026                if (!ixgbe_check_for_msg(hw, vf))
1027                        ixgbe_rcv_msg_from_vf(adapter, vf);
1028
1029                /* process any acks */
1030                if (!ixgbe_check_for_ack(hw, vf))
1031                        ixgbe_rcv_ack_from_vf(adapter, vf);
1032        }
1033}
1034
1035void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
1036{
1037        struct ixgbe_hw *hw = &adapter->hw;
1038
1039        /* disable transmit and receive for all vfs */
1040        IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
1041        IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
1042
1043        IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
1044        IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
1045}
1046
1047void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
1048{
1049        struct ixgbe_hw *hw = &adapter->hw;
1050        u32 ping;
1051        int i;
1052
1053        for (i = 0 ; i < adapter->num_vfs; i++) {
1054                ping = IXGBE_PF_CONTROL_MSG;
1055                if (adapter->vfinfo[i].clear_to_send)
1056                        ping |= IXGBE_VT_MSGTYPE_CTS;
1057                ixgbe_write_mbx(hw, &ping, 1, i);
1058        }
1059}
1060
1061int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1062{
1063        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1064        if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
1065                return -EINVAL;
1066        adapter->vfinfo[vf].pf_set_mac = true;
1067        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
1068        dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
 1069                                      " change effective.\n");
1070        if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1071                dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
1072                         " but the PF device is not up.\n");
1073                dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
1074                         " attempting to use the VF device.\n");
1075        }
1076        return ixgbe_set_vf_mac(adapter, vf, mac);
1077}
1078
1079int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1080{
1081        int err = 0;
1082        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1083        struct ixgbe_hw *hw = &adapter->hw;
1084
1085        if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
1086                return -EINVAL;
1087        if (vlan || qos) {
1088                if (adapter->vfinfo[vf].pf_vlan)
1089                        err = ixgbe_set_vf_vlan(adapter, false,
1090                                                adapter->vfinfo[vf].pf_vlan,
1091                                                vf);
1092                if (err)
1093                        goto out;
1094                err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
1095                if (err)
1096                        goto out;
1097                ixgbe_set_vmvir(adapter, vlan, qos, vf);
1098                ixgbe_set_vmolr(hw, vf, false);
1099                if (adapter->vfinfo[vf].spoofchk_enabled)
1100                        hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
1101                adapter->vfinfo[vf].vlan_count++;
1102                adapter->vfinfo[vf].pf_vlan = vlan;
1103                adapter->vfinfo[vf].pf_qos = qos;
1104                dev_info(&adapter->pdev->dev,
1105                         "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
1106                if (test_bit(__IXGBE_DOWN, &adapter->state)) {
1107                        dev_warn(&adapter->pdev->dev,
1108                                 "The VF VLAN has been set,"
1109                                 " but the PF device is not up.\n");
1110                        dev_warn(&adapter->pdev->dev,
1111                                 "Bring the PF device up before"
1112                                 " attempting to use the VF device.\n");
1113                }
1114        } else {
1115                err = ixgbe_set_vf_vlan(adapter, false,
1116                                        adapter->vfinfo[vf].pf_vlan, vf);
1117                ixgbe_clear_vmvir(adapter, vf);
1118                ixgbe_set_vmolr(hw, vf, true);
1119                hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
1120                if (adapter->vfinfo[vf].vlan_count)
1121                        adapter->vfinfo[vf].vlan_count--;
1122                adapter->vfinfo[vf].pf_vlan = 0;
1123                adapter->vfinfo[vf].pf_qos = 0;
 1124        }
 1125out:
 1126        return err;
1127}
1128
1129static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
1130{
1131        switch (adapter->link_speed) {
1132        case IXGBE_LINK_SPEED_100_FULL:
1133                return 100;
1134        case IXGBE_LINK_SPEED_1GB_FULL:
1135                return 1000;
1136        case IXGBE_LINK_SPEED_10GB_FULL:
1137                return 10000;
1138        default:
1139                return 0;
1140        }
1141}
1142
1143static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf)
1144{
1145        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
1146        struct ixgbe_hw *hw = &adapter->hw;
1147        u32 bcnrc_val = 0;
1148        u16 queue, queues_per_pool;
1149        u16 tx_rate = adapter->vfinfo[vf].tx_rate;
1150
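             /* RTTBCNRC takes a fixed point rate factor of
              * link_speed / tx_rate, e.g. limiting a 10000 Mbps link to
              * 1000 Mbps programs a factor of 10.0
              */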
1151        if (tx_rate) {
1152                /* start with base link speed value */
1153                bcnrc_val = adapter->vf_rate_link_speed;
1154
1155                /* Calculate the rate factor values to set */
1156                bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
1157                bcnrc_val /= tx_rate;
1158
1159                /* clear everything but the rate factor */
1160                bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
1161                             IXGBE_RTTBCNRC_RF_DEC_MASK;
1162
1163                /* enable the rate scheduler */
1164                bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
1165        }
1166
1167        /*
1168         * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
1169         * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
1170         * and 0x004 otherwise.
1171         */
1172        switch (hw->mac.type) {
1173        case ixgbe_mac_82599EB:
1174                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
1175                break;
1176        case ixgbe_mac_X540:
1177                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
1178                break;
1179        default:
1180                break;
1181        }
1182
1183        /* determine how many queues per pool based on VMDq mask */
1184        queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
1185
1186        /* write value for all Tx queues belonging to VF */
1187        for (queue = 0; queue < queues_per_pool; queue++) {
1188                unsigned int reg_idx = (vf * queues_per_pool) + queue;
1189
1190                IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx);
1191                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
1192        }
1193}
1194
1195void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
1196{
1197        int i;
1198
1199        /* VF Tx rate limit was not set */
1200        if (!adapter->vf_rate_link_speed)
1201                return;
1202
1203        if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) {
1204                adapter->vf_rate_link_speed = 0;
1205                dev_info(&adapter->pdev->dev,
1206                         "Link speed has been changed. VF Transmit rate is disabled\n");
1207        }
1208
1209        for (i = 0; i < adapter->num_vfs; i++) {
1210                if (!adapter->vf_rate_link_speed)
1211                        adapter->vfinfo[i].tx_rate = 0;
1212
1213                ixgbe_set_vf_rate_limit(adapter, i);
1214        }
1215}
1216
1217int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
1218{
1219        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1220        int link_speed;
1221
1222        /* verify VF is active */
1223        if (vf >= adapter->num_vfs)
1224                return -EINVAL;
1225
1226        /* verify link is up */
1227        if (!adapter->link_up)
1228                return -EINVAL;
1229
1230        /* verify we are linked at 10Gbps */
1231        link_speed = ixgbe_link_mbps(adapter);
1232        if (link_speed != 10000)
1233                return -EINVAL;
1234
 1235        /* rate limit must be more than 10 Mbps and no greater than link speed */
1236        if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
1237                return -EINVAL;
1238
1239        /* store values */
1240        adapter->vf_rate_link_speed = link_speed;
1241        adapter->vfinfo[vf].tx_rate = tx_rate;
1242
1243        /* update hardware configuration */
1244        ixgbe_set_vf_rate_limit(adapter, vf);
1245
1246        return 0;
1247}
1248
1249int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
1250{
1251        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1252        int vf_target_reg = vf >> 3;
1253        int vf_target_shift = vf % 8;
1254        struct ixgbe_hw *hw = &adapter->hw;
1255        u32 regval;
1256
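             /* each PFVFSPOOF register covers 8 VFs: bit (vf % 8) enables MAC
              * anti-spoofing and the same bit shifted up by
              * IXGBE_SPOOF_VLANAS_SHIFT enables VLAN anti-spoofing
              */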
1257        adapter->vfinfo[vf].spoofchk_enabled = setting;
1258
1259        regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1260        regval &= ~(1 << vf_target_shift);
1261        regval |= (setting << vf_target_shift);
1262        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
1263
1264        if (adapter->vfinfo[vf].vlan_count) {
1265                vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
1266                regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
1267                regval &= ~(1 << vf_target_shift);
1268                regval |= (setting << vf_target_shift);
1269                IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
1270        }
1271
1272        return 0;
1273}
1274
1275int ixgbe_ndo_get_vf_config(struct net_device *netdev,
1276                            int vf, struct ifla_vf_info *ivi)
1277{
1278        struct ixgbe_adapter *adapter = netdev_priv(netdev);
1279        if (vf >= adapter->num_vfs)
1280                return -EINVAL;
1281        ivi->vf = vf;
1282        memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
1283        ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
1284        ivi->vlan = adapter->vfinfo[vf].pf_vlan;
1285        ivi->qos = adapter->vfinfo[vf].pf_qos;
1286        ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
1287        return 0;
1288}
1289