linux/drivers/net/ethernet/intel/igb/igb.h
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/* Linux PRO/1000 Ethernet Driver main header file */

#ifndef _IGB_H_
#define _IGB_H_

#include "e1000_mac.h"
#include "e1000_82575.h"

#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
#include <linux/mdio.h>

struct igb_adapter;

#define E1000_PCS_CFG_IGN_SD    1

/* Interrupt defines */
#define IGB_START_ITR           648 /* ~6000 ints/sec */
#define IGB_4K_ITR              980
#define IGB_20K_ITR             196
#define IGB_70K_ITR             56
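/* These values are written to the EITR interval field, which appears to count
 * in units of roughly 0.25 usec on the parts this driver supports; e.g.
 * IGB_4K_ITR = 980 gives an interval of about 245 usec (~4000 interrupts/sec)
 * and IGB_START_ITR = 648 gives ~162 usec, matching the ~6000 ints/sec comment
 * above.
 */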

/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD         256
#define IGB_DEFAULT_TX_WORK     128
#define IGB_MIN_TXD             80
#define IGB_MAX_TXD             4096

#define IGB_DEFAULT_RXD         256
#define IGB_MIN_RXD             80
#define IGB_MAX_RXD             4096

#define IGB_DEFAULT_ITR         3 /* dynamic */
#define IGB_MAX_ITR_USECS       10000
#define IGB_MIN_ITR_USECS       10
#define NON_Q_VECTORS           1
#define MAX_Q_VECTORS           8
#define MAX_MSIX_ENTRIES        10

/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES       8
#define IGB_MAX_RX_QUEUES_82575 4
#define IGB_MAX_RX_QUEUES_I211  2
#define IGB_MAX_TX_QUEUES       8
#define IGB_MAX_VF_MC_ENTRIES   30
#define IGB_MAX_VF_FUNCTIONS    8
#define IGB_MAX_VFTA_ENTRIES    128
#define IGB_82576_VF_DEV_ID     0x10CA
#define IGB_I350_VF_DEV_ID      0x1520

/* NVM version defines */
#define IGB_MAJOR_MASK          0xF000
#define IGB_MINOR_MASK          0x0FF0
#define IGB_BUILD_MASK          0x000F
#define IGB_COMB_VER_MASK       0x00FF
#define IGB_MAJOR_SHIFT         12
#define IGB_MINOR_SHIFT         4
#define IGB_COMB_VER_SHFT       8
#define IGB_NVM_VER_INVALID     0xFFFF
#define IGB_ETRACK_SHIFT        16
#define NVM_ETRACK_WORD         0x0042
#define NVM_COMB_VER_OFF        0x0083
#define NVM_COMB_VER_PTR        0x003d
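/* Illustrative decode (the 0x3451 value below is hypothetical): for an NVM
 * version word of 0x3451,
 *
 *	major = (0x3451 & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT = 3
 *	minor = (0x3451 & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT = 0x45
 *	build =  0x3451 & IGB_BUILD_MASK                     = 1
 *
 * These masks are used by igb_set_fw_version() when building
 * adapter->fw_version.
 */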

struct vf_data_storage {
        unsigned char vf_mac_addresses[ETH_ALEN];
        u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
        u16 num_vf_mc_hashes;
        u16 vlans_enabled;
        u32 flags;
        unsigned long last_nack;
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
        u16 tx_rate;
        bool spoofchk_enabled;
};

#define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC    0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC  0x00000004 /* VF has multicast promisc */
#define IGB_VF_FLAG_PF_SET_MAC     0x00000008 /* PF has set MAC address */

/* RX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGB_RX_PTHRESH  ((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_RX_HTHRESH  8
#define IGB_TX_PTHRESH  ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH  1
#define IGB_RX_WTHRESH  ((hw->mac.type == e1000_82576 && \
                          (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH  ((hw->mac.type == e1000_82576 && \
                          (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
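/* Note: the *THRESH macros above expand to expressions referencing local
 * variables named 'hw' and 'adapter'; they are only meant to be used inside
 * functions (e.g. the ring configuration paths) where those locals are in
 * scope.
 */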

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
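/* 1522 = 1500 byte payload + 14 byte Ethernet header + 4 byte VLAN tag
 * + 4 byte FCS.
 */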

/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256        256
#define IGB_RXBUFFER_2048       2048
#define IGB_RX_HDR_LEN          IGB_RXBUFFER_256
#define IGB_RX_BUFSZ            IGB_RXBUFFER_2048

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IGB_RX_BUFFER_WRITE     16 /* Must be power of 2 */

#define AUTO_ALL_MODES          0
#define IGB_EEPROM_APME         0x0400

#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define IGB_MASTER_SLAVE        e1000_ms_hw_default
#endif

#define IGB_MNG_VLAN_NONE       -1

enum igb_tx_flags {
        /* cmd_type flags */
        IGB_TX_FLAGS_VLAN       = 0x01,
        IGB_TX_FLAGS_TSO        = 0x02,
        IGB_TX_FLAGS_TSTAMP     = 0x04,

        /* olinfo flags */
        IGB_TX_FLAGS_IPV4       = 0x10,
        IGB_TX_FLAGS_CSUM       = 0x20,
};

/* VLAN info */
#define IGB_TX_FLAGS_VLAN_MASK  0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
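/* The VLAN tag travels in the upper 16 bits of igb_tx_buffer.tx_flags.
 * Illustrative sketch ('vlan_tag' is a placeholder for the tag taken from
 * the skb at transmit time):
 *
 *	tx_flags |= IGB_TX_FLAGS_VLAN;
 *	tx_flags |= (vlan_tag << IGB_TX_FLAGS_VLAN_SHIFT);
 */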

/* The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR 15
#define IGB_MAX_DATA_PER_TXD    (1 << IGB_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
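/* Worked example: a 60000 byte chunk of data needs
 * TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 32768) = 2 descriptors.
 * DESC_NEEDED is a worst-case estimate for one skb: one descriptor per
 * possible fragment plus a little headroom (e.g. for the head data and a
 * context descriptor).
 */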

/* EEPROM byte offsets */
#define IGB_SFF_8472_SWAP               0x5C
#define IGB_SFF_8472_COMP               0x5E

/* Bitmasks */
#define IGB_SFF_ADDRESSING_MODE         0x4
#define IGB_SFF_8472_UNSUP              0x00

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igb_tx_buffer {
        union e1000_adv_tx_desc *next_to_watch;
        unsigned long time_stamp;
        struct sk_buff *skb;
        unsigned int bytecount;
        u16 gso_segs;
        __be16 protocol;
        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
};

struct igb_rx_buffer {
        dma_addr_t dma;
        struct page *page;
        unsigned int page_offset;
};

struct igb_tx_queue_stats {
        u64 packets;
        u64 bytes;
        u64 restart_queue;
        u64 restart_queue2;
};

struct igb_rx_queue_stats {
        u64 packets;
        u64 bytes;
        u64 drops;
        u64 csum_err;
        u64 alloc_failed;
};

struct igb_ring_container {
        struct igb_ring *ring;          /* pointer to linked list of rings */
        unsigned int total_bytes;       /* total bytes processed this int */
        unsigned int total_packets;     /* total packets processed this int */
        u16 work_limit;                 /* total work allowed per interrupt */
        u8 count;                       /* total number of rings in vector */
        u8 itr;                         /* current ITR setting for ring */
};

struct igb_ring {
        struct igb_q_vector *q_vector;  /* backlink to q_vector */
        struct net_device *netdev;      /* back pointer to net_device */
        struct device *dev;             /* device pointer for dma mapping */
        union {                         /* array of buffer info structs */
                struct igb_tx_buffer *tx_buffer_info;
                struct igb_rx_buffer *rx_buffer_info;
        };
        void *desc;                     /* descriptor ring memory */
        unsigned long flags;            /* ring specific flags */
        void __iomem *tail;             /* pointer to ring tail register */
        dma_addr_t dma;                 /* phys address of the ring */
        unsigned int  size;             /* length of desc. ring in bytes */

        u16 count;                      /* number of desc. in the ring */
        u8 queue_index;                 /* logical index of the ring */
        u8 reg_idx;                     /* physical index of the ring */

        /* everything past this point is written often */
        u16 next_to_clean;
        u16 next_to_use;
        u16 next_to_alloc;

        union {
                /* TX */
                struct {
                        struct igb_tx_queue_stats tx_stats;
                        struct u64_stats_sync tx_syncp;
                        struct u64_stats_sync tx_syncp2;
                };
                /* RX */
                struct {
                        struct sk_buff *skb;
                        struct igb_rx_queue_stats rx_stats;
                        struct u64_stats_sync rx_syncp;
                };
        };
} ____cacheline_internodealigned_in_smp;

struct igb_q_vector {
        struct igb_adapter *adapter;    /* backlink */
        int cpu;                        /* CPU for DCA */
        u32 eims_value;                 /* EIMS mask value */

        u16 itr_val;
        u8 set_itr;
        void __iomem *itr_register;

        struct igb_ring_container rx, tx;

        struct napi_struct napi;
        struct rcu_head rcu;    /* to avoid race with update stats on free */
        char name[IFNAMSIZ + 9];

        /* for dynamic allocation of rings associated with this q_vector */
        struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
};

enum e1000_ring_flags_t {
        IGB_RING_FLAG_RX_SCTP_CSUM,
        IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
        IGB_RING_FLAG_TX_CTX_IDX,
        IGB_RING_FLAG_TX_DETECT_HANG
};

#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)

#define IGB_RX_DESC(R, i)       \
        (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
#define IGB_TX_DESC(R, i)       \
        (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
#define IGB_TX_CTXTDESC(R, i)   \
        (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
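/* Example (illustrative): the accessor macros above simply index the ring's
 * descriptor memory by casting it to the appropriate descriptor layout, e.g.
 *
 *	union e1000_adv_rx_desc *rx_desc = IGB_RX_DESC(rx_ring, i);
 */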

/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
                                      const u32 stat_err_bits)
{
        return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
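/* Typical use (illustrative): check whether hardware has written the
 * descriptor back before touching it, e.g.
 *
 *	if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
 *		break;
 */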

/* igb_desc_unused - calculate how many descriptors are unused */
static inline int igb_desc_unused(struct igb_ring *ring)
{
        if (ring->next_to_clean > ring->next_to_use)
                return ring->next_to_clean - ring->next_to_use - 1;

        return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
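/* Worked example: with count = 256, next_to_use = 10 and next_to_clean = 5,
 * 256 + 5 - 10 - 1 = 250 descriptors are free.  The "- 1" keeps one slot
 * permanently empty so that next_to_use never catches up with next_to_clean
 * on a full ring.
 */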

#ifdef CONFIG_IGB_HWMON

#define IGB_HWMON_TYPE_LOC      0
#define IGB_HWMON_TYPE_TEMP     1
#define IGB_HWMON_TYPE_CAUTION  2
#define IGB_HWMON_TYPE_MAX      3

struct hwmon_attr {
        struct device_attribute dev_attr;
        struct e1000_hw *hw;
        struct e1000_thermal_diode_data *sensor;
        char name[12];
};

struct hwmon_buff {
        struct attribute_group group;
        const struct attribute_group *groups[2];
        struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
        struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
        unsigned int n_hwmon;
};
#endif

#define IGB_RETA_SIZE   128

/* board specific private data structure */
struct igb_adapter {
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

        struct net_device *netdev;

        unsigned long state;
        unsigned int flags;

        unsigned int num_q_vectors;
        struct msix_entry msix_entries[MAX_MSIX_ENTRIES];

        /* Interrupt Throttle Rate */
        u32 rx_itr_setting;
        u32 tx_itr_setting;
        u16 tx_itr;
        u16 rx_itr;

        /* TX */
        u16 tx_work_limit;
        u32 tx_timeout_count;
        int num_tx_queues;
        struct igb_ring *tx_ring[16];

        /* RX */
        int num_rx_queues;
        struct igb_ring *rx_ring[16];

        u32 max_frame_size;
        u32 min_frame_size;

        struct timer_list watchdog_timer;
        struct timer_list phy_info_timer;

        u16 mng_vlan_id;
        u32 bd_number;
        u32 wol;
        u32 en_mng_pt;
        u16 link_speed;
        u16 link_duplex;

        struct work_struct reset_task;
        struct work_struct watchdog_task;
        bool fc_autoneg;
        u8  tx_timeout_factor;
        struct timer_list blink_timer;
        unsigned long led_status;

        /* OS defined structs */
        struct pci_dev *pdev;

        spinlock_t stats64_lock;
        struct rtnl_link_stats64 stats64;

        /* structs defined in e1000_hw.h */
        struct e1000_hw hw;
        struct e1000_hw_stats stats;
        struct e1000_phy_info phy_info;
        struct e1000_phy_stats phy_stats;

        u32 test_icr;
        struct igb_ring test_tx_ring;
        struct igb_ring test_rx_ring;

        int msg_enable;

        struct igb_q_vector *q_vector[MAX_Q_VECTORS];
        u32 eims_enable_mask;
        u32 eims_other;

        /* to not mess up cache alignment, always add to the bottom */
        u16 tx_ring_count;
        u16 rx_ring_count;
        unsigned int vfs_allocated_count;
        struct vf_data_storage *vf_data;
        int vf_rate_link_speed;
        u32 rss_queues;
        u32 wvbr;
        u32 *shadow_vfta;

        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_caps;
        struct delayed_work ptp_overflow_work;
        struct work_struct ptp_tx_work;
        struct sk_buff *ptp_tx_skb;
        struct hwtstamp_config tstamp_config;
        unsigned long ptp_tx_start;
        unsigned long last_rx_ptp_check;
        unsigned long last_rx_timestamp;
        spinlock_t tmreg_lock;
        struct cyclecounter cc;
        struct timecounter tc;
        u32 tx_hwtstamp_timeouts;
        u32 rx_hwtstamp_cleared;

        char fw_version[32];
#ifdef CONFIG_IGB_HWMON
        struct hwmon_buff *igb_hwmon_buff;
        bool ets;
#endif
        struct i2c_algo_bit_data i2c_algo;
        struct i2c_adapter i2c_adap;
        struct i2c_client *i2c_client;
        u32 rss_indir_tbl_init;
        u8 rss_indir_tbl[IGB_RETA_SIZE];

        unsigned long link_check_timeout;
        int copper_tries;
        struct e1000_info ei;
        u16 eee_advert;
};

#define IGB_FLAG_HAS_MSI                (1 << 0)
#define IGB_FLAG_DCA_ENABLED            (1 << 1)
#define IGB_FLAG_QUAD_PORT_A            (1 << 2)
#define IGB_FLAG_QUEUE_PAIRS            (1 << 3)
#define IGB_FLAG_DMAC                   (1 << 4)
#define IGB_FLAG_PTP                    (1 << 5)
#define IGB_FLAG_RSS_FIELD_IPV4_UDP     (1 << 6)
#define IGB_FLAG_RSS_FIELD_IPV6_UDP     (1 << 7)
#define IGB_FLAG_WOL_SUPPORTED          (1 << 8)
#define IGB_FLAG_NEED_LINK_UPDATE       (1 << 9)
#define IGB_FLAG_MEDIA_RESET            (1 << 10)
#define IGB_FLAG_MAS_CAPABLE            (1 << 11)
#define IGB_FLAG_MAS_ENABLE             (1 << 12)
#define IGB_FLAG_HAS_MSIX               (1 << 13)
#define IGB_FLAG_EEE                    (1 << 14)
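/* The flag bits above live in igb_adapter.flags and are tested with plain
 * bitwise checks, e.g. (illustrative):
 *
 *	if (adapter->flags & IGB_FLAG_HAS_MSIX)
 *		...
 */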

/* Media Auto Sense */
#define IGB_MAS_ENABLE_0                0x0001
#define IGB_MAS_ENABLE_1                0x0002
#define IGB_MAS_ENABLE_2                0x0004
#define IGB_MAS_ENABLE_3                0x0008

/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE        20408
#define IGB_TX_BUF_4096         4096
#define IGB_DMCTLX_DCFLUSH_DIS  0x80000000  /* Disable DMA Coal Flush */

#define IGB_82576_TSYNC_SHIFT   19
#define IGB_TS_HDR_LEN          16
enum e1000_state_t {
        __IGB_TESTING,
        __IGB_RESETTING,
        __IGB_DOWN,
        __IGB_PTP_TX_IN_PROGRESS,
};

enum igb_boards {
        board_82575,
};

extern char igb_driver_name[];
extern char igb_driver_version[];

int igb_up(struct igb_adapter *);
void igb_down(struct igb_adapter *);
void igb_reinit_locked(struct igb_adapter *);
void igb_reset(struct igb_adapter *);
int igb_reinit_queues(struct igb_adapter *);
void igb_write_rss_indir_tbl(struct igb_adapter *);
int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
bool igb_has_link(struct igb_adapter *adapter);
void igb_set_ethtool_ops(struct net_device *);
void igb_power_up_link(struct igb_adapter *);
void igb_set_fw_version(struct igb_adapter *);
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
                         struct sk_buff *skb);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
#endif
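
/* The inline wrappers below guard against PHY operations that a given MAC
 * type does not implement: each one calls the function pointer only if it is
 * non-NULL and otherwise returns 0 (success), so callers don't have to check
 * the ops table themselves.
 */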
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
        if (hw->phy.ops.reset)
                return hw->phy.ops.reset(hw);

        return 0;
}

static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
        if (hw->phy.ops.read_reg)
                return hw->phy.ops.read_reg(hw, offset, data);

        return 0;
}

static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
{
        if (hw->phy.ops.write_reg)
                return hw->phy.ops.write_reg(hw, offset, data);

        return 0;
}

static inline s32 igb_get_phy_info(struct e1000_hw *hw)
{
        if (hw->phy.ops.get_phy_info)
                return hw->phy.ops.get_phy_info(hw);

        return 0;
}

static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
{
        return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}

#endif /* _IGB_H_ */