linux/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
   1/*******************************************************************************
   2
   3  Intel 10 Gigabit PCI Express Linux driver
   4  Copyright(c) 1999 - 2016 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29#include "ixgbe.h"
  30#include "ixgbe_sriov.h"
  31
  32#ifdef CONFIG_IXGBE_DCB
  33/**
  34 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
  35 * @adapter: board private structure to initialize
  36 *
  37 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
  38 * will also try to cache the proper offsets if RSS/FCoE are enabled along
  39 * with VMDq.
  40 *
  41 **/
  42static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
  43{
  44#ifdef IXGBE_FCOE
  45        struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
  46#endif /* IXGBE_FCOE */
  47        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
  48        int i;
  49        u16 reg_idx;
  50        u8 tcs = netdev_get_num_tc(adapter->netdev);
  51
  52        /* verify we have DCB queueing enabled before proceeding */
  53        if (tcs <= 1)
  54                return false;
  55
  56        /* verify we have VMDq enabled before proceeding */
  57        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  58                return false;
  59
  60        /* start at VMDq register offset for SR-IOV enabled setups */
  61        reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
  62        for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
  63                /* once we have used this pool's share of queues, move to the next pool */
  64                if ((reg_idx & ~vmdq->mask) >= tcs)
  65                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
  66                adapter->rx_ring[i]->reg_idx = reg_idx;
  67        }
  68
  69        reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
  70        for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
  71                /* once we have used this pool's share of queues, move to the next pool */
  72                if ((reg_idx & ~vmdq->mask) >= tcs)
  73                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
  74                adapter->tx_ring[i]->reg_idx = reg_idx;
  75        }
  76
  77#ifdef IXGBE_FCOE
  78        /* nothing to do if FCoE is disabled */
  79        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
  80                return true;
  81
  82        /* The work is already done if the FCoE ring is shared */
  83        if (fcoe->offset < tcs)
  84                return true;
  85
  86        /* The FCoE rings exist separately, we need to move their reg_idx */
  87        if (fcoe->indices) {
  88                u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
  89                u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
  90
  91                reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
  92                for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
  93                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
  94                        adapter->rx_ring[i]->reg_idx = reg_idx;
  95                        reg_idx++;
  96                }
  97
  98                reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
  99                for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
 100                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
 101                        adapter->tx_ring[i]->reg_idx = reg_idx;
 102                        reg_idx++;
 103                }
 104        }
 105
 106#endif /* IXGBE_FCOE */
 107        return true;
 108}
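
The register-index math above is driven entirely by __ALIGN_MASK(x, mask) = ((x) + (mask)) & ~(mask) from the kernel's headers: __ALIGN_MASK(1, ~vmdq->mask) is the number of hardware queues per VMDq pool, reg_idx & ~vmdq->mask is a queue's position within its pool, and __ALIGN_MASK(reg_idx, ~vmdq->mask) rounds up to the start of the next pool. The standalone userspace sketch below walks the Rx loop with an assumed 4-queues-per-pool mask (0x7C), pool offset 1 and two traffic classes; it is illustrative only, not driver code.

#include <stdio.h>

/* same definition as the kernel's __ALIGN_MASK() */
#define __ALIGN_MASK(x, mask)   (((x) + (mask)) & ~(mask))

int main(void)
{
        unsigned int vmdq_mask = 0x7C;  /* assumed 82599 "4 queues per pool" mask */
        unsigned int vmdq_offset = 1;   /* assumed first pool used by this function */
        unsigned int tcs = 2;           /* assumed two traffic classes */
        unsigned int reg_idx = vmdq_offset * __ALIGN_MASK(1, ~vmdq_mask);
        unsigned int i;

        printf("queues per pool: %u\n", __ALIGN_MASK(1, ~vmdq_mask));

        /* mirrors the Rx loop above: hop to the next pool once 'tcs' queues are used */
        for (i = 0; i < 6; i++, reg_idx++) {
                if ((reg_idx & ~vmdq_mask) >= tcs)
                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq_mask);
                printf("rx_ring[%u] -> reg_idx %u\n", i, reg_idx);
        }
        return 0;
}

With these assumed values the rings land on register indices 4, 5, 8, 9, 12, 13: two rings per pool, skipping the unused half of each 4-queue pool.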
 109
 110/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
 111static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
 112                                    unsigned int *tx, unsigned int *rx)
 113{
 114        struct net_device *dev = adapter->netdev;
 115        struct ixgbe_hw *hw = &adapter->hw;
 116        u8 num_tcs = netdev_get_num_tc(dev);
 117
 118        *tx = 0;
 119        *rx = 0;
 120
 121        switch (hw->mac.type) {
 122        case ixgbe_mac_82598EB:
 123                /* TxQs/TC: 4   RxQs/TC: 8 */
 124                *tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
 125                *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
 126                break;
 127        case ixgbe_mac_82599EB:
 128        case ixgbe_mac_X540:
 129        case ixgbe_mac_X550:
 130        case ixgbe_mac_X550EM_x:
 131        case ixgbe_mac_x550em_a:
 132                if (num_tcs > 4) {
 133                        /*
 134                         * TCs    : TC0/1 TC2/3 TC4-7
 135                         * TxQs/TC:    32    16     8
 136                         * RxQs/TC:    16    16    16
 137                         */
 138                        *rx = tc << 4;
 139                        if (tc < 3)
 140                                *tx = tc << 5;          /*   0,  32,  64 */
 141                        else if (tc < 5)
 142                                *tx = (tc + 2) << 4;    /*  80,  96 */
 143                        else
 144                                *tx = (tc + 8) << 3;    /* 104, 112, 120 */
 145                } else {
 146                        /*
 147                         * TCs    : TC0 TC1 TC2/3
 148                         * TxQs/TC:  64  32    16
 149                         * RxQs/TC:  32  32    32
 150                         */
 151                        *rx = tc << 5;
 152                        if (tc < 2)
 153                                *tx = tc << 6;          /*  0,  64 */
 154                        else
 155                                *tx = (tc + 4) << 4;    /* 96, 112 */
 156                }
 157        default:
 158                break;
 159        }
 160}
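
To make the shift arithmetic above concrete, the sketch below replays the more-than-4-TC branch for the 82599-class MACs and prints the first Tx/Rx register index of every traffic class (per the table in the code, TC0-1 own 32 Tx queues each, TC2-3 own 16, TC4-7 own 8, and every TC owns 16 Rx queues). It is a standalone illustration, not part of the driver.

#include <stdio.h>

/* userspace rendering of the ">4 TCs" branch above (82599/X540/X550 class MACs) */
static void first_reg_idx_8tc(unsigned int tc, unsigned int *tx, unsigned int *rx)
{
        *rx = tc << 4;                  /* Rx bases: 0, 16, 32, ... 112 */
        if (tc < 3)
                *tx = tc << 5;          /* Tx bases 0, 32, 64 */
        else if (tc < 5)
                *tx = (tc + 2) << 4;    /* Tx bases 80, 96 */
        else
                *tx = (tc + 8) << 3;    /* Tx bases 104, 112, 120 */
}

int main(void)
{
        unsigned int tc, tx, rx;

        for (tc = 0; tc < 8; tc++) {
                first_reg_idx_8tc(tc, &tx, &rx);
                printf("TC%u: first Tx reg %3u, first Rx reg %3u\n", tc, tx, rx);
        }
        return 0;
}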
 161
 162/**
 163 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 164 * @adapter: board private structure to initialize
 165 *
 166 * Cache the descriptor ring offsets for DCB to the assigned rings.
 167 *
 168 **/
 169static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 170{
 171        struct net_device *dev = adapter->netdev;
 172        unsigned int tx_idx, rx_idx;
 173        int tc, offset, rss_i, i;
 174        u8 num_tcs = netdev_get_num_tc(dev);
 175
 176        /* verify we have DCB queueing enabled before proceeding */
 177        if (num_tcs <= 1)
 178                return false;
 179
 180        rss_i = adapter->ring_feature[RING_F_RSS].indices;
 181
 182        for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
 183                ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
 184                for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
 185                        adapter->tx_ring[offset + i]->reg_idx = tx_idx;
 186                        adapter->rx_ring[offset + i]->reg_idx = rx_idx;
 187                        adapter->tx_ring[offset + i]->dcb_tc = tc;
 188                        adapter->rx_ring[offset + i]->dcb_tc = tc;
 189                }
 190        }
 191
 192        return true;
 193}
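
In other words, rings are laid out TC-major: TC n owns the rss_i consecutive rings starting at n * rss_i, and each ring's register index is that TC's base (from ixgbe_get_first_reg_idx()) plus its position within the TC. A short standalone sketch, assuming an 82598 in 8-TC mode with rss_i = 4:

#include <stdio.h>

int main(void)
{
        unsigned int num_tcs = 8, rss_i = 4;    /* assumed 82598, 4 RSS queues per TC */
        unsigned int tc, i;

        for (tc = 0; tc < num_tcs; tc++) {
                unsigned int offset = tc * rss_i;       /* first ring owned by this TC */
                unsigned int tx_base = tc << 2;         /* 4 TxQs per TC on 82598 */
                unsigned int rx_base = tc << 3;         /* 8 RxQs per TC on 82598 */

                for (i = 0; i < rss_i; i++)
                        printf("ring %2u: TC%u  tx reg %2u  rx reg %2u\n",
                               offset + i, tc, tx_base + i, rx_base + i);
        }
        return 0;
}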
 194
 195#endif
 196/**
 197 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 198 * @adapter: board private structure to initialize
 199 *
 200 * SR-IOV doesn't use any descriptor rings but changes the default if
 201 * no other mapping is used.
 202 *
 203 */
 204static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
 205{
 206#ifdef IXGBE_FCOE
 207        struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
 208#endif /* IXGBE_FCOE */
 209        struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 210        struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
 211        int i;
 212        u16 reg_idx;
 213
 214        /* only proceed if VMDq is enabled */
 215        if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
 216                return false;
 217
 218        /* start at VMDq register offset for SR-IOV enabled setups */
 219        reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
 220        for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
 221#ifdef IXGBE_FCOE
 222                /* Allow first FCoE queue to be mapped as RSS */
 223                if (fcoe->offset && (i > fcoe->offset))
 224                        break;
 225#endif
 226                /* once we have used this pool's share of queues, move to the next pool */
 227                if ((reg_idx & ~vmdq->mask) >= rss->indices)
 228                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
 229                adapter->rx_ring[i]->reg_idx = reg_idx;
 230        }
 231
 232#ifdef IXGBE_FCOE
 233        /* FCoE uses a linear block of queues so just assign them 1:1 */
 234        for (; i < adapter->num_rx_queues; i++, reg_idx++)
 235                adapter->rx_ring[i]->reg_idx = reg_idx;
 236
 237#endif
 238        reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
 239        for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
 240#ifdef IXGBE_FCOE
 241                /* Allow first FCoE queue to be mapped as RSS */
 242                if (fcoe->offset && (i > fcoe->offset))
 243                        break;
 244#endif
 245                /* once we have used this pool's share of queues, move to the next pool */
 246                if ((reg_idx & rss->mask) >= rss->indices)
 247                        reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
 248                adapter->tx_ring[i]->reg_idx = reg_idx;
 249        }
 250
 251#ifdef IXGBE_FCOE
 252        /* FCoE uses a linear block of queues so just assign them 1:1 */
 253        for (; i < adapter->num_tx_queues; i++, reg_idx++)
 254                adapter->tx_ring[i]->reg_idx = reg_idx;
 255
 256#endif
 257
 258        return true;
 259}
 260
 261/**
 262 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 263 * @adapter: board private structure to initialize
 264 *
 265 * Cache the descriptor ring offsets for RSS to the assigned rings.
 266 *
 267 **/
 268static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 269{
 270        int i;
 271
 272        for (i = 0; i < adapter->num_rx_queues; i++)
 273                adapter->rx_ring[i]->reg_idx = i;
 274        for (i = 0; i < adapter->num_tx_queues; i++)
 275                adapter->tx_ring[i]->reg_idx = i;
 276
 277        return true;
 278}
 279
 280/**
 281 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 282 * @adapter: board private structure to initialize
 283 *
 284 * Once we know the feature-set enabled for the device, we'll cache
 285 * the register offset the descriptor ring is assigned to.
 286 *
 287 * Note, the order of the various feature calls is important.  It must start
 288 * with the "most" features enabled at the same time, then trickle down to
 289 * the fewest features turned on at once.
 290 **/
 291static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 292{
 293        /* start with default case */
 294        adapter->rx_ring[0]->reg_idx = 0;
 295        adapter->tx_ring[0]->reg_idx = 0;
 296
 297#ifdef CONFIG_IXGBE_DCB
 298        if (ixgbe_cache_ring_dcb_sriov(adapter))
 299                return;
 300
 301        if (ixgbe_cache_ring_dcb(adapter))
 302                return;
 303
 304#endif
 305        if (ixgbe_cache_ring_sriov(adapter))
 306                return;
 307
 308        ixgbe_cache_ring_rss(adapter);
 309}
 310
 311#define IXGBE_RSS_16Q_MASK      0xF
 312#define IXGBE_RSS_8Q_MASK       0x7
 313#define IXGBE_RSS_4Q_MASK       0x3
 314#define IXGBE_RSS_2Q_MASK       0x1
 315#define IXGBE_RSS_DISABLED_MASK 0x0
 316
 317#ifdef CONFIG_IXGBE_DCB
 318/**
 319 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 320 * @adapter: board private structure to initialize
 321 *
 322 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 323 * and VM pools where appropriate.  Also assign queues based on DCB
 324 * priorities and map accordingly.
 325 *
 326 **/
 327static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
 328{
 329        int i;
 330        u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
 331        u16 vmdq_m = 0;
 332#ifdef IXGBE_FCOE
 333        u16 fcoe_i = 0;
 334#endif
 335        u8 tcs = netdev_get_num_tc(adapter->netdev);
 336
 337        /* verify we have DCB queueing enabled before proceeding */
 338        if (tcs <= 1)
 339                return false;
 340
 341        /* verify we have VMDq enabled before proceeding */
 342        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 343                return false;
 344
 345        /* Add starting offset to total pool count */
 346        vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
 347
 348        /* 16 pools w/ 8 TC per pool */
 349        if (tcs > 4) {
 350                vmdq_i = min_t(u16, vmdq_i, 16);
 351                vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
 352        /* 32 pools w/ 4 TC per pool */
 353        } else {
 354                vmdq_i = min_t(u16, vmdq_i, 32);
 355                vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
 356        }
 357
 358#ifdef IXGBE_FCOE
 359        /* queues in the remaining pools are available for FCoE */
 360        fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
 361
 362#endif
 363        /* remove the starting offset from the pool count */
 364        vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
 365
 366        /* save features for later use */
 367        adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
 368        adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
 369
 370        /*
 371         * We do not support DCB, VMDq, and RSS all simultaneously
 372         * so we will disable RSS since it is the lowest priority
 373         */
 374        adapter->ring_feature[RING_F_RSS].indices = 1;
 375        adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
 376
 377        /* disable ATR as it is not supported when VMDq is enabled */
 378        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 379
 380        adapter->num_rx_pools = vmdq_i;
 381        adapter->num_rx_queues_per_pool = tcs;
 382
 383        adapter->num_tx_queues = vmdq_i * tcs;
 384        adapter->num_rx_queues = vmdq_i * tcs;
 385
 386#ifdef IXGBE_FCOE
 387        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 388                struct ixgbe_ring_feature *fcoe;
 389
 390                fcoe = &adapter->ring_feature[RING_F_FCOE];
 391
 392                /* limit ourselves based on feature limits */
 393                fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
 394
 395                if (fcoe_i) {
 396                        /* alloc queues for FCoE separately */
 397                        fcoe->indices = fcoe_i;
 398                        fcoe->offset = vmdq_i * tcs;
 399
 400                        /* add queues to adapter */
 401                        adapter->num_tx_queues += fcoe_i;
 402                        adapter->num_rx_queues += fcoe_i;
 403                } else if (tcs > 1) {
 404                        /* use queue belonging to FCoE TC */
 405                        fcoe->indices = 1;
 406                        fcoe->offset = ixgbe_fcoe_get_tc(adapter);
 407                } else {
 408                        adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
 409
 410                        fcoe->indices = 0;
 411                        fcoe->offset = 0;
 412                }
 413        }
 414
 415#endif /* IXGBE_FCOE */
 416        /* configure TC to queue mapping */
 417        for (i = 0; i < tcs; i++)
 418                netdev_set_tc_queue(adapter->netdev, i, 1, i);
 419
 420        return true;
 421}
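
A rough sketch of the sizing above: with an assumed 8 VMDq pools (offset 0) and 8 traffic classes, the 8-queues-per-pool mask is selected, the pools consume 64 Tx/Rx queues, and the 8 leftover pools (one FCoE queue each here) remain for FCoE. The mask values below are assumptions standing in for IXGBE_82599_VMDQ_8Q_MASK/4Q_MASK; this is illustrative userspace code, not the driver.

#include <stdio.h>

#define __ALIGN_MASK(x, mask)   (((x) + (mask)) & ~(mask))
#define VMDQ_8Q_MASK 0x78       /* assumed value of IXGBE_82599_VMDQ_8Q_MASK */
#define VMDQ_4Q_MASK 0x7C       /* assumed value of IXGBE_82599_VMDQ_4Q_MASK */

int main(void)
{
        unsigned int vmdq_i = 8, vmdq_offset = 0, tcs = 8;      /* assumed config */
        unsigned int vmdq_m, qpp, fcoe_i;

        vmdq_i += vmdq_offset;
        if (tcs > 4) {                          /* 16 pools w/ 8 TCs per pool */
                vmdq_i = vmdq_i > 16 ? 16 : vmdq_i;
                vmdq_m = VMDQ_8Q_MASK;
        } else {                                /* 32 pools w/ 4 TCs per pool */
                vmdq_i = vmdq_i > 32 ? 32 : vmdq_i;
                vmdq_m = VMDQ_4Q_MASK;
        }

        qpp = __ALIGN_MASK(1, ~vmdq_m);         /* queues per pool from the mask */
        fcoe_i = 128 / qpp - vmdq_i;            /* leftover pools usable for FCoE */
        vmdq_i -= vmdq_offset;

        printf("%u pools of %u queues, %u Tx/Rx queues, %u leftover pools for FCoE\n",
               vmdq_i, qpp, vmdq_i * tcs, fcoe_i);
        return 0;
}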
 422
 423static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 424{
 425        struct net_device *dev = adapter->netdev;
 426        struct ixgbe_ring_feature *f;
 427        int rss_i, rss_m, i;
 428        int tcs;
 429
 430        /* Map queue offset and counts onto allocated tx queues */
 431        tcs = netdev_get_num_tc(dev);
 432
 433        /* verify we have DCB queueing enabled before proceeding */
 434        if (tcs <= 1)
 435                return false;
 436
 437        /* determine the upper limit for our current DCB mode */
 438        rss_i = dev->num_tx_queues / tcs;
 439        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 440                /* 8 TC w/ 4 queues per TC */
 441                rss_i = min_t(u16, rss_i, 4);
 442                rss_m = IXGBE_RSS_4Q_MASK;
 443        } else if (tcs > 4) {
 444                /* 8 TC w/ 8 queues per TC */
 445                rss_i = min_t(u16, rss_i, 8);
 446                rss_m = IXGBE_RSS_8Q_MASK;
 447        } else {
 448                /* 4 TC w/ 16 queues per TC */
 449                rss_i = min_t(u16, rss_i, 16);
 450                rss_m = IXGBE_RSS_16Q_MASK;
 451        }
 452
 453        /* set RSS mask and indices */
 454        f = &adapter->ring_feature[RING_F_RSS];
 455        rss_i = min_t(int, rss_i, f->limit);
 456        f->indices = rss_i;
 457        f->mask = rss_m;
 458
 459        /* disable ATR as it is not supported when multiple TCs are enabled */
 460        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 461
 462#ifdef IXGBE_FCOE
 463        /* FCoE enabled queues require special configuration indexed
 464         * by feature specific indices and offset. Here we map FCoE
 465         * indices onto the DCB queue pairs allowing FCoE to own
 466         * configuration later.
 467         */
 468        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 469                u8 tc = ixgbe_fcoe_get_tc(adapter);
 470
 471                f = &adapter->ring_feature[RING_F_FCOE];
 472                f->indices = min_t(u16, rss_i, f->limit);
 473                f->offset = rss_i * tc;
 474        }
 475
 476#endif /* IXGBE_FCOE */
 477        for (i = 0; i < tcs; i++)
 478                netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
 479
 480        adapter->num_tx_queues = rss_i * tcs;
 481        adapter->num_rx_queues = rss_i * tcs;
 482
 483        return true;
 484}
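
For example, on an assumed 82599-class device registered with 64 Tx queues, 4 traffic classes and an RSS limit of 16, each TC ends up owning 16 queue pairs, and with FCoE pinned to TC 3 its feature offset lands at ring 48. A standalone sketch of that arithmetic (all values assumed for illustration):

#include <stdio.h>

int main(void)
{
        unsigned int num_tx_queues = 64, tcs = 4, rss_limit = 16, fcoe_tc = 3;
        unsigned int rss_i = num_tx_queues / tcs;       /* upper bound per TC */
        unsigned int i;

        rss_i = rss_i > 16 ? 16 : rss_i;        /* "4 TC w/ 16 queues per TC" mode */
        rss_i = rss_i > rss_limit ? rss_limit : rss_i;

        /* mirrors netdev_set_tc_queue(dev, i, rss_i, rss_i * i) */
        for (i = 0; i < tcs; i++)
                printf("TC%u -> queues %2u..%2u\n", i, rss_i * i, rss_i * i + rss_i - 1);
        printf("FCoE rides on TC%u, RING_F_FCOE offset = %u\n", fcoe_tc, rss_i * fcoe_tc);
        return 0;
}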
 485
 486#endif
 487/**
 488 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 489 * @adapter: board private structure to initialize
 490 *
 491 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 492 * and VM pools where appropriate.  If RSS is available, then also try and
 493 * enable RSS and map accordingly.
 494 *
 495 **/
 496static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 497{
 498        u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
 499        u16 vmdq_m = 0;
 500        u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
 501        u16 rss_m = IXGBE_RSS_DISABLED_MASK;
 502#ifdef IXGBE_FCOE
 503        u16 fcoe_i = 0;
 504#endif
 505        bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
 506
 507        /* only proceed if SR-IOV is enabled */
 508        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 509                return false;
 510
 511        /* Add starting offset to total pool count */
 512        vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
 513
 514        /* double check we are limited to maximum pools */
 515        vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
 516
 517        /* 64 pool mode with 2 queues per pool */
 518        if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
 519                vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
 520                rss_m = IXGBE_RSS_2Q_MASK;
 521                rss_i = min_t(u16, rss_i, 2);
 522        /* 32 pool mode with 4 queues per pool */
 523        } else {
 524                vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
 525                rss_m = IXGBE_RSS_4Q_MASK;
 526                rss_i = 4;
 527        }
 528
 529#ifdef IXGBE_FCOE
 530        /* queues in the remaining pools are available for FCoE */
 531        fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
 532
 533#endif
 534        /* remove the starting offset from the pool count */
 535        vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
 536
 537        /* save features for later use */
 538        adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
 539        adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
 540
 541        /* limit RSS based on user input and save for later use */
 542        adapter->ring_feature[RING_F_RSS].indices = rss_i;
 543        adapter->ring_feature[RING_F_RSS].mask = rss_m;
 544
 545        adapter->num_rx_pools = vmdq_i;
 546        adapter->num_rx_queues_per_pool = rss_i;
 547
 548        adapter->num_rx_queues = vmdq_i * rss_i;
 549        adapter->num_tx_queues = vmdq_i * rss_i;
 550
 551        /* disable ATR as it is not supported when VMDq is enabled */
 552        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 553
 554#ifdef IXGBE_FCOE
 555        /*
 556         * FCoE can use rings from adjacent buffers to allow RSS
 557         * like behavior.  To account for this we need to add the
 558         * FCoE indices to the total ring count.
 559         */
 560        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 561                struct ixgbe_ring_feature *fcoe;
 562
 563                fcoe = &adapter->ring_feature[RING_F_FCOE];
 564
 565                /* limit ourselves based on feature limits */
 566                fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
 567
 568                if (vmdq_i > 1 && fcoe_i) {
 569                        /* alloc queues for FCoE separately */
 570                        fcoe->indices = fcoe_i;
 571                        fcoe->offset = vmdq_i * rss_i;
 572                } else {
 573                        /* merge FCoE queues with RSS queues */
 574                        fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());
 575
 576                        /* limit indices to rss_i if MSI-X is disabled */
 577                        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 578                                fcoe_i = rss_i;
 579
 580                        /* attempt to reserve some queues for just FCoE */
 581                        fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
 582                        fcoe->offset = fcoe_i - fcoe->indices;
 583
 584                        fcoe_i -= rss_i;
 585                }
 586
 587                /* add queues to adapter */
 588                adapter->num_tx_queues += fcoe_i;
 589                adapter->num_rx_queues += fcoe_i;
 590        }
 591
 592#endif
 593        return true;
 594}
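
The pool-mode decision above can be sketched as follows: with more than 32 pools (or fewer than 4 RSS queues, or pools already claimed by the L2 forwarding offload via the fwd_bitmask check) the device falls back to 64 pools of 2 queues; otherwise it runs 32 pools of 4 queues, and whatever queues are not claimed by pools remain for FCoE. The VF counts below are assumptions for illustration only.

#include <stdio.h>

static void sriov_pool_mode(unsigned int vmdq_i, unsigned int rss_i, int macvlan_pools)
{
        unsigned int qpp;

        if (vmdq_i > 32 || rss_i < 4 || (vmdq_i > 16 && macvlan_pools)) {
                qpp = 2;                        /* 64 pool mode with 2 queues per pool */
                rss_i = rss_i > 2 ? 2 : rss_i;
        } else {
                qpp = 4;                        /* 32 pool mode with 4 queues per pool */
                rss_i = 4;
        }

        printf("%2u pools x %u queues: %3u data queues, %3u left for FCoE\n",
               vmdq_i, qpp, vmdq_i * rss_i, 128 - vmdq_i * qpp);
}

int main(void)
{
        sriov_pool_mode(9, 4, 0);       /* e.g. 8 VFs plus the PF's own pool */
        sriov_pool_mode(41, 4, 0);      /* e.g. 40 VFs plus the PF's pool forces 2Q mode */
        return 0;
}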
 595
 596/**
 597 * ixgbe_set_rss_queues - Allocate queues for RSS
 598 * @adapter: board private structure to initialize
 599 *
 600 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 601 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 602 *
 603 **/
 604static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 605{
 606        struct ixgbe_ring_feature *f;
 607        u16 rss_i;
 608
 609        /* set mask for 16 queue limit of RSS */
 610        f = &adapter->ring_feature[RING_F_RSS];
 611        rss_i = f->limit;
 612
 613        f->indices = rss_i;
 614        f->mask = IXGBE_RSS_16Q_MASK;
 615
 616        /* disable ATR by default, it will be configured below */
 617        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 618
 619        /*
 620         * Use Flow Director in addition to RSS to ensure the best
 621         * distribution of flows across cores, even when an FDIR flow
 622         * isn't matched.
 623         */
 624        if (rss_i > 1 && adapter->atr_sample_rate) {
 625                f = &adapter->ring_feature[RING_F_FDIR];
 626
 627                rss_i = f->indices = f->limit;
 628
 629                if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
 630                        adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 631        }
 632
 633#ifdef IXGBE_FCOE
 634        /*
 635         * FCoE can exist on the same rings as standard network traffic;
 636         * however, it is preferred to avoid that if possible.  In order
 637         * to get the best performance we allocate as many FCoE queues
 638         * as we can and we place them at the end of the ring array to
 639         * avoid sharing queues with standard RSS on systems with 24 or
 640         * more CPUs.
 641         */
 642        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 643                struct net_device *dev = adapter->netdev;
 644                u16 fcoe_i;
 645
 646                f = &adapter->ring_feature[RING_F_FCOE];
 647
 648                /* merge FCoE queues with RSS queues */
 649                fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
 650                fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
 651
 652                /* limit indices to rss_i if MSI-X is disabled */
 653                if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 654                        fcoe_i = rss_i;
 655
 656                /* attempt to reserve some queues for just FCoE */
 657                f->indices = min_t(u16, fcoe_i, f->limit);
 658                f->offset = fcoe_i - f->indices;
 659                rss_i = max_t(u16, fcoe_i, rss_i);
 660        }
 661
 662#endif /* IXGBE_FCOE */
 663        adapter->num_rx_queues = rss_i;
 664        adapter->num_tx_queues = rss_i;
 665
 666        return true;
 667}
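
One subtlety worth calling out: when ATR sampling is active, the queue count is taken from the Flow Director feature limit rather than the RSS limit, so the device can end up with more queues than RSS alone would have requested. A minimal sketch with assumed limits (and ignoring the perfect-filter flag that would suppress hash-filter mode):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        unsigned int rss_limit = 8, fdir_limit = 63, atr_sample_rate = 20;  /* assumed */
        unsigned int rss_i = rss_limit;
        bool fdir_hash_capable = false;

        if (rss_i > 1 && atr_sample_rate) {
                rss_i = fdir_limit;     /* Flow Director widens the queue count */
                fdir_hash_capable = true;
        }

        printf("num_rx_queues = num_tx_queues = %u, ATR hash filters %s\n",
               rss_i, fdir_hash_capable ? "on" : "off");
        return 0;
}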
 668
 669/**
 670 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 671 * @adapter: board private structure to initialize
 672 *
 673 * This is the top level queue allocation routine.  The order here is very
 674 * important, starting with the largest set of features turned on at once,
 675 * and ending with the smallest set of features.  This way large combinations
 676 * can be allocated if they're turned on, and smaller combinations are the
 677 * fallthrough conditions.
 678 *
 679 **/
 680static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 681{
 682        /* Start with base case */
 683        adapter->num_rx_queues = 1;
 684        adapter->num_tx_queues = 1;
 685        adapter->num_rx_pools = adapter->num_rx_queues;
 686        adapter->num_rx_queues_per_pool = 1;
 687
 688#ifdef CONFIG_IXGBE_DCB
 689        if (ixgbe_set_dcb_sriov_queues(adapter))
 690                return;
 691
 692        if (ixgbe_set_dcb_queues(adapter))
 693                return;
 694
 695#endif
 696        if (ixgbe_set_sriov_queues(adapter))
 697                return;
 698
 699        ixgbe_set_rss_queues(adapter);
 700}
 701
 702/**
 703 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 704 * @adapter: board private structure
 705 *
 706 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 707 * return a negative error code if unable to acquire MSI-X vectors for any
 708 * reason.
 709 */
 710static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
 711{
 712        struct ixgbe_hw *hw = &adapter->hw;
 713        int i, vectors, vector_threshold;
 714
 715        /* We start by asking for one vector per queue pair */
 716        vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
 717
 718        /* It is easy to be greedy for MSI-X vectors. However, it really
 719         * doesn't do much good if we have a lot more vectors than CPUs. We'll
 720         * be somewhat conservative and only ask for (roughly) the same number
 721         * of vectors as there are CPUs.
 722         */
 723        vectors = min_t(int, vectors, num_online_cpus());
 724
 725        /* Some vectors are necessary for non-queue interrupts */
 726        vectors += NON_Q_VECTORS;
 727
 728        /* Hardware can only support a maximum of hw.mac->max_msix_vectors.
 729         * With features such as RSS and VMDq, we can easily surpass the
 730         * number of Rx and Tx descriptor queues supported by our device.
 731         * Thus, we cap the maximum in the rare cases where the CPU count also
 732         * exceeds our vector limit
 733         */
 734        vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
 735
 736        /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
 737         * handler, and (2) an Other (Link Status Change, etc.) handler.
 738         */
 739        vector_threshold = MIN_MSIX_COUNT;
 740
 741        adapter->msix_entries = kcalloc(vectors,
 742                                        sizeof(struct msix_entry),
 743                                        GFP_KERNEL);
 744        if (!adapter->msix_entries)
 745                return -ENOMEM;
 746
 747        for (i = 0; i < vectors; i++)
 748                adapter->msix_entries[i].entry = i;
 749
 750        vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
 751                                        vector_threshold, vectors);
 752
 753        if (vectors < 0) {
 754                /* A negative count of allocated vectors indicates an error in
 755                 * acquiring within the specified range of MSI-X vectors
 756                 */
 757                e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
 758                           vectors);
 759
 760                adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 761                kfree(adapter->msix_entries);
 762                adapter->msix_entries = NULL;
 763
 764                return vectors;
 765        }
 766
 767        /* we successfully allocated some number of vectors within our
 768         * requested range.
 769         */
 770        adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
 771
 772        /* Adjust for only the vectors we'll use, which is minimum
 773         * of max_q_vectors, or the number of vectors we were allocated.
 774         */
 775        vectors -= NON_Q_VECTORS;
 776        adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
 777
 778        return 0;
 779}
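
The vector budget works out to min(max(Rx queues, Tx queues), online CPUs) + NON_Q_VECTORS, clamped to what the MAC advertises. A standalone sketch with assumed numbers (63 queue pairs, 16 CPUs, a 64-vector MAC, and NON_Q_VECTORS taken to be 1):

#include <stdio.h>

#define NON_Q_VECTORS 1         /* assumed: one "other causes" (link etc.) vector */

static int msix_vectors_to_request(int rxq, int txq, int cpus, int hw_max)
{
        int vectors = rxq > txq ? rxq : txq;            /* one vector per queue pair */

        vectors = vectors > cpus ? cpus : vectors;      /* don't exceed CPU count */
        vectors += NON_Q_VECTORS;                       /* room for non-queue interrupts */
        return vectors > hw_max ? hw_max : vectors;     /* hardware ceiling */
}

int main(void)
{
        /* with the assumed values this prints "request 17 MSI-X vectors" */
        printf("request %d MSI-X vectors\n", msix_vectors_to_request(63, 63, 16, 64));
        return 0;
}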
 780
 781static void ixgbe_add_ring(struct ixgbe_ring *ring,
 782                           struct ixgbe_ring_container *head)
 783{
 784        ring->next = head->ring;
 785        head->ring = ring;
 786        head->count++;
 787}
 788
 789/**
 790 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 791 * @adapter: board private structure to initialize
 792 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 793 * @v_idx: index of vector in adapter struct
 794 * @txr_count: total number of Tx rings to allocate
 795 * @txr_idx: index of first Tx ring to allocate
 796 * @rxr_count: total number of Rx rings to allocate
 797 * @rxr_idx: index of first Rx ring to allocate
 798 *
 799 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 800 **/
 801static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 802                                int v_count, int v_idx,
 803                                int txr_count, int txr_idx,
 804                                int rxr_count, int rxr_idx)
 805{
 806        struct ixgbe_q_vector *q_vector;
 807        struct ixgbe_ring *ring;
 808        int node = NUMA_NO_NODE;
 809        int cpu = -1;
 810        int ring_count, size;
 811        u8 tcs = netdev_get_num_tc(adapter->netdev);
 812
 813        ring_count = txr_count + rxr_count;
 814        size = sizeof(struct ixgbe_q_vector) +
 815               (sizeof(struct ixgbe_ring) * ring_count);
 816
 817        /* customize cpu for Flow Director mapping */
 818        if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
 819                u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
 820                if (rss_i > 1 && adapter->atr_sample_rate) {
 821                        if (cpu_online(v_idx)) {
 822                                cpu = v_idx;
 823                                node = cpu_to_node(cpu);
 824                        }
 825                }
 826        }
 827
 828        /* allocate q_vector and rings */
 829        q_vector = kzalloc_node(size, GFP_KERNEL, node);
 830        if (!q_vector)
 831                q_vector = kzalloc(size, GFP_KERNEL);
 832        if (!q_vector)
 833                return -ENOMEM;
 834
 835        /* setup affinity mask and node */
 836        if (cpu != -1)
 837                cpumask_set_cpu(cpu, &q_vector->affinity_mask);
 838        q_vector->numa_node = node;
 839
 840#ifdef CONFIG_IXGBE_DCA
 841        /* initialize CPU for DCA */
 842        q_vector->cpu = -1;
 843
 844#endif
 845        /* initialize NAPI */
 846        netif_napi_add(adapter->netdev, &q_vector->napi,
 847                       ixgbe_poll, 64);
 848
 849#ifdef CONFIG_NET_RX_BUSY_POLL
 850        /* initialize busy poll */
 851        atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);
 852
 853#endif
 854        /* tie q_vector and adapter together */
 855        adapter->q_vector[v_idx] = q_vector;
 856        q_vector->adapter = adapter;
 857        q_vector->v_idx = v_idx;
 858
 859        /* initialize work limits */
 860        q_vector->tx.work_limit = adapter->tx_work_limit;
 861
 862        /* initialize pointer to rings */
 863        ring = q_vector->ring;
 864
 865        /* initialize ITR */
 866        if (txr_count && !rxr_count) {
 867                /* tx only vector */
 868                if (adapter->tx_itr_setting == 1)
 869                        q_vector->itr = IXGBE_12K_ITR;
 870                else
 871                        q_vector->itr = adapter->tx_itr_setting;
 872        } else {
 873                /* rx or rx/tx vector */
 874                if (adapter->rx_itr_setting == 1)
 875                        q_vector->itr = IXGBE_20K_ITR;
 876                else
 877                        q_vector->itr = adapter->rx_itr_setting;
 878        }
 879
 880        while (txr_count) {
 881                /* assign generic ring traits */
 882                ring->dev = &adapter->pdev->dev;
 883                ring->netdev = adapter->netdev;
 884
 885                /* configure backlink on ring */
 886                ring->q_vector = q_vector;
 887
 888                /* update q_vector Tx values */
 889                ixgbe_add_ring(ring, &q_vector->tx);
 890
 891                /* apply Tx specific ring traits */
 892                ring->count = adapter->tx_ring_count;
 893                if (adapter->num_rx_pools > 1)
 894                        ring->queue_index =
 895                                txr_idx % adapter->num_rx_queues_per_pool;
 896                else
 897                        ring->queue_index = txr_idx;
 898
 899                /* assign ring to adapter */
 900                adapter->tx_ring[txr_idx] = ring;
 901
 902                /* update count and index */
 903                txr_count--;
 904                txr_idx += v_count;
 905
 906                /* push pointer to next ring */
 907                ring++;
 908        }
 909
 910        while (rxr_count) {
 911                /* assign generic ring traits */
 912                ring->dev = &adapter->pdev->dev;
 913                ring->netdev = adapter->netdev;
 914
 915                /* configure backlink on ring */
 916                ring->q_vector = q_vector;
 917
 918                /* update q_vector Rx values */
 919                ixgbe_add_ring(ring, &q_vector->rx);
 920
 921                /*
 922                 * 82599 errata, UDP frames with a 0 checksum
 923                 * can be marked as checksum errors.
 924                 */
 925                if (adapter->hw.mac.type == ixgbe_mac_82599EB)
 926                        set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
 927
 928#ifdef IXGBE_FCOE
 929                if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
 930                        struct ixgbe_ring_feature *f;
 931                        f = &adapter->ring_feature[RING_F_FCOE];
 932                        if ((rxr_idx >= f->offset) &&
 933                            (rxr_idx < f->offset + f->indices))
 934                                set_bit(__IXGBE_RX_FCOE, &ring->state);
 935                }
 936
 937#endif /* IXGBE_FCOE */
 938                /* apply Rx specific ring traits */
 939                ring->count = adapter->rx_ring_count;
 940                if (adapter->num_rx_pools > 1)
 941                        ring->queue_index =
 942                                rxr_idx % adapter->num_rx_queues_per_pool;
 943                else
 944                        ring->queue_index = rxr_idx;
 945
 946                /* assign ring to adapter */
 947                adapter->rx_ring[rxr_idx] = ring;
 948
 949                /* update count and index */
 950                rxr_count--;
 951                rxr_idx += v_count;
 952
 953                /* push pointer to next ring */
 954                ring++;
 955        }
 956
 957        return 0;
 958}
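
Note the queue_index assignment above: once more than one Rx pool exists (SR-IOV/VMDq), the netdev-visible queue index wraps modulo the per-pool queue count, so each pool sees queues numbered from zero. A tiny standalone sketch, assuming 4 pools of 2 queues:

#include <stdio.h>

int main(void)
{
        int num_rx_pools = 4, num_rx_queues_per_pool = 2;       /* assumed 2Q mode */
        int num_tx_queues = num_rx_pools * num_rx_queues_per_pool;
        int txr_idx;

        for (txr_idx = 0; txr_idx < num_tx_queues; txr_idx++) {
                int queue_index = num_rx_pools > 1 ?
                                  txr_idx % num_rx_queues_per_pool : txr_idx;
                printf("tx ring %d -> queue_index %d (pool %d)\n",
                       txr_idx, queue_index, txr_idx / num_rx_queues_per_pool);
        }
        return 0;
}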
 959
 960/**
 961 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 962 * @adapter: board private structure to initialize
 963 * @v_idx: Index of vector to be freed
 964 *
 965 * This function frees the memory allocated to the q_vector.  In addition if
 966 * This function frees the memory allocated to the q_vector.  In addition, if
 967 * NAPI is enabled, it will delete any references to the NAPI struct prior
 968 **/
 969static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
 970{
 971        struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
 972        struct ixgbe_ring *ring;
 973
 974        ixgbe_for_each_ring(ring, q_vector->tx)
 975                adapter->tx_ring[ring->queue_index] = NULL;
 976
 977        ixgbe_for_each_ring(ring, q_vector->rx)
 978                adapter->rx_ring[ring->queue_index] = NULL;
 979
 980        adapter->q_vector[v_idx] = NULL;
 981        napi_hash_del(&q_vector->napi);
 982        netif_napi_del(&q_vector->napi);
 983
 984        /*
 985         * ixgbe_get_stats64() might access the rings on this vector,
 986         * we must wait a grace period before freeing it.
 987         */
 988        kfree_rcu(q_vector, rcu);
 989}
 990
 991/**
 992 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 993 * @adapter: board private structure to initialize
 994 *
 995 * We allocate one q_vector per queue interrupt.  If allocation fails we
 996 * return -ENOMEM.
 997 **/
 998static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 999{
1000        int q_vectors = adapter->num_q_vectors;
1001        int rxr_remaining = adapter->num_rx_queues;
1002        int txr_remaining = adapter->num_tx_queues;
1003        int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1004        int err;
1005
1006        /* only one q_vector if MSI-X is disabled. */
1007        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1008                q_vectors = 1;
1009
1010        if (q_vectors >= (rxr_remaining + txr_remaining)) {
1011                for (; rxr_remaining; v_idx++) {
1012                        err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
1013                                                   0, 0, 1, rxr_idx);
1014
1015                        if (err)
1016                                goto err_out;
1017
1018                        /* update counts and index */
1019                        rxr_remaining--;
1020                        rxr_idx++;
1021                }
1022        }
1023
1024        for (; v_idx < q_vectors; v_idx++) {
1025                int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1026                int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1027                err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
1028                                           tqpv, txr_idx,
1029                                           rqpv, rxr_idx);
1030
1031                if (err)
1032                        goto err_out;
1033
1034                /* update counts and index */
1035                rxr_remaining -= rqpv;
1036                txr_remaining -= tqpv;
1037                rxr_idx++;
1038                txr_idx++;
1039        }
1040
1041        return 0;
1042
1043err_out:
1044        adapter->num_tx_queues = 0;
1045        adapter->num_rx_queues = 0;
1046        adapter->num_q_vectors = 0;
1047
1048        while (v_idx--)
1049                ixgbe_free_q_vector(adapter, v_idx);
1050
1051        return -ENOMEM;
1052}
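
The net effect of the DIV_ROUND_UP() split above, combined with the v_count stride inside ixgbe_alloc_q_vector(), is that rings are dealt out round-robin across the vectors. A standalone sketch, assuming 4 q_vectors sharing 6 Rx and 6 Tx rings (so the Rx-only pass is skipped):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        int q_vectors = 4, rxr_remaining = 6, txr_remaining = 6;        /* assumed */
        int rxr_idx = 0, txr_idx = 0, v_idx;

        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
                int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
                int i;

                printf("vector %d:", v_idx);
                /* ixgbe_alloc_q_vector() strides ring indices by v_count (== q_vectors) */
                for (i = 0; i < rqpv; i++)
                        printf(" rx%d", rxr_idx + i * q_vectors);
                for (i = 0; i < tqpv; i++)
                        printf(" tx%d", txr_idx + i * q_vectors);
                printf("\n");

                rxr_remaining -= rqpv;
                txr_remaining -= tqpv;
                rxr_idx++;
                txr_idx++;
        }
        return 0;
}

With these assumed counts, vectors 0 and 1 each pick up two Rx/Tx ring pairs (rings 0/4 and 1/5) and vectors 2 and 3 pick up one pair each, covering all six rings exactly once.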
1053
1054/**
1055 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
1056 * @adapter: board private structure to initialize
1057 *
1058 * This function frees the memory allocated to the q_vectors.  In addition, if
1059 * NAPI is enabled, it will delete any references to the NAPI struct prior
1060 * to freeing the q_vector.
1061 **/
1062static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
1063{
1064        int v_idx = adapter->num_q_vectors;
1065
1066        adapter->num_tx_queues = 0;
1067        adapter->num_rx_queues = 0;
1068        adapter->num_q_vectors = 0;
1069
1070        while (v_idx--)
1071                ixgbe_free_q_vector(adapter, v_idx);
1072}
1073
1074static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
1075{
1076        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1077                adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1078                pci_disable_msix(adapter->pdev);
1079                kfree(adapter->msix_entries);
1080                adapter->msix_entries = NULL;
1081        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1082                adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
1083                pci_disable_msi(adapter->pdev);
1084        }
1085}
1086
1087/**
1088 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
1089 * @adapter: board private structure to initialize
1090 *
1091 * Attempt to configure the interrupts using the best available
1092 * capabilities of the hardware and the kernel.
1093 **/
1094static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
1095{
1096        int err;
1097
1098        /* We will try to get MSI-X interrupts first */
1099        if (!ixgbe_acquire_msix_vectors(adapter))
1100                return;
1101
1102        /* At this point, we do not have MSI-X capabilities. We need to
1103         * reconfigure or disable various features which require MSI-X
1104         * capability.
1105         */
1106
1107        /* Disable DCB unless we only have a single traffic class */
1108        if (netdev_get_num_tc(adapter->netdev) > 1) {
1109                e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
1110                netdev_reset_tc(adapter->netdev);
1111
1112                if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1113                        adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
1114
1115                adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1116                adapter->temp_dcb_cfg.pfc_mode_enable = false;
1117                adapter->dcb_cfg.pfc_mode_enable = false;
1118        }
1119
1120        adapter->dcb_cfg.num_tcs.pg_tcs = 1;
1121        adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
1122
1123        /* Disable SR-IOV support */
1124        e_dev_warn("Disabling SR-IOV support\n");
1125        ixgbe_disable_sriov(adapter);
1126
1127        /* Disable RSS */
1128        e_dev_warn("Disabling RSS support\n");
1129        adapter->ring_feature[RING_F_RSS].limit = 1;
1130
1131        /* recalculate number of queues now that many features have been
1132         * changed or disabled.
1133         */
1134        ixgbe_set_num_queues(adapter);
1135        adapter->num_q_vectors = 1;
1136
1137        err = pci_enable_msi(adapter->pdev);
1138        if (err)
1139                e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
1140                           err);
1141        else
1142                adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
1143}
1144
1145/**
1146 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
1147 * @adapter: board private structure to initialize
1148 *
1149 * We determine which interrupt scheme to use based on...
1150 * - Kernel support (MSI, MSI-X)
1151 *   - which can be user-defined (via MODULE_PARAM)
1152 * - Hardware queue count (num_*_queues)
1153 *   - defined by miscellaneous hardware support/features (RSS, etc.)
1154 **/
1155int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
1156{
1157        int err;
1158
1159        /* Number of supported queues */
1160        ixgbe_set_num_queues(adapter);
1161
1162        /* Set interrupt mode */
1163        ixgbe_set_interrupt_capability(adapter);
1164
1165        err = ixgbe_alloc_q_vectors(adapter);
1166        if (err) {
1167                e_dev_err("Unable to allocate memory for queue vectors\n");
1168                goto err_alloc_q_vectors;
1169        }
1170
1171        ixgbe_cache_ring_register(adapter);
1172
1173        e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
1174                   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
1175                   adapter->num_rx_queues, adapter->num_tx_queues);
1176
1177        set_bit(__IXGBE_DOWN, &adapter->state);
1178
1179        return 0;
1180
1181err_alloc_q_vectors:
1182        ixgbe_reset_interrupt_capability(adapter);
1183        return err;
1184}
1185
1186/**
1187 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
1188 * @adapter: board private structure to clear interrupt scheme on
1189 *
1190 * We go through and clear interrupt specific resources and reset the structure
1191 * to pre-load conditions
1192 **/
1193void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
1194{
1195        adapter->num_tx_queues = 0;
1196        adapter->num_rx_queues = 0;
1197
1198        ixgbe_free_q_vectors(adapter);
1199        ixgbe_reset_interrupt_capability(adapter);
1200}
1201
1202void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
1203                       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
1204{
1205        struct ixgbe_adv_tx_context_desc *context_desc;
1206        u16 i = tx_ring->next_to_use;
1207
1208        context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
1209
1210        i++;
1211        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1212
1213        /* set bits to identify this as an advanced context descriptor */
1214        type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1215
1216        context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
1217        context_desc->seqnum_seed       = cpu_to_le32(fcoe_sof_eof);
1218        context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
1219        context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
1220}
1221
1222