/* linux/drivers/net/ethernet/intel/ixgbe/ixgbe.h */
   1/*******************************************************************************
   2
   3  Intel 10 Gigabit PCI Express Linux driver
   4  Copyright(c) 1999 - 2016 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29#ifndef _IXGBE_H_
  30#define _IXGBE_H_
  31
  32#include <linux/bitops.h>
  33#include <linux/types.h>
  34#include <linux/pci.h>
  35#include <linux/netdevice.h>
  36#include <linux/cpumask.h>
  37#include <linux/aer.h>
  38#include <linux/if_vlan.h>
  39#include <linux/jiffies.h>
  40
  41#include <linux/timecounter.h>
  42#include <linux/net_tstamp.h>
  43#include <linux/ptp_clock_kernel.h>
  44
  45#include "ixgbe_type.h"
  46#include "ixgbe_common.h"
  47#include "ixgbe_dcb.h"
  48#if IS_ENABLED(CONFIG_FCOE)
  49#define IXGBE_FCOE
  50#include "ixgbe_fcoe.h"
  51#endif /* IS_ENABLED(CONFIG_FCOE) */
  52#ifdef CONFIG_IXGBE_DCA
  53#include <linux/dca.h>
  54#endif
  55#include "ixgbe_ipsec.h"
  56
  57#include <net/xdp.h>
  58#include <net/busy_poll.h>
  59
  60/* common prefix used by pr_<> macros */
  61#undef pr_fmt
  62#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  63
  64/* TX/RX descriptor defines */
  65#define IXGBE_DEFAULT_TXD                   512
  66#define IXGBE_DEFAULT_TX_WORK               256
  67#define IXGBE_MAX_TXD                      4096
  68#define IXGBE_MIN_TXD                        64
  69
  70#if (PAGE_SIZE < 8192)
  71#define IXGBE_DEFAULT_RXD                   512
  72#else
  73#define IXGBE_DEFAULT_RXD                   128
  74#endif
  75#define IXGBE_MAX_RXD                      4096
  76#define IXGBE_MIN_RXD                        64
  77
  78#define IXGBE_ETH_P_LLDP                 0x88CC
  79
  80/* flow control */
  81#define IXGBE_MIN_FCRTL                    0x40
  82#define IXGBE_MAX_FCRTL                 0x7FF80
  83#define IXGBE_MIN_FCRTH                   0x600
  84#define IXGBE_MAX_FCRTH                 0x7FFF0
  85#define IXGBE_DEFAULT_FCPAUSE            0xFFFF
  86#define IXGBE_MIN_FCPAUSE                     0
  87#define IXGBE_MAX_FCPAUSE                0xFFFF
  88
  89/* Supported Rx Buffer Sizes */
  90#define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
  91#define IXGBE_RXBUFFER_1536  1536
  92#define IXGBE_RXBUFFER_2K    2048
  93#define IXGBE_RXBUFFER_3K    3072
  94#define IXGBE_RXBUFFER_4K    4096
  95#define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
  96
  97/* Attempt to maximize the headroom available for incoming frames.  We
  98 * use a 2K buffer for receives and need 1536/1534 to store the data for
  99 * the frame.  This leaves us with 512 bytes of room.  From that we need
 100 * to deduct the space needed for the shared info and the padding needed
 101 * to IP align the frame.
 102 *
 103 * Note: For cache line sizes 256 or larger this value is going to end
 104 *       up negative.  In these cases we should fall back to the 3K
 105 *       buffers.
 106 */
 107#if (PAGE_SIZE < 8192)
 108#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
 109#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
 110((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
 111
 112static inline int ixgbe_compute_pad(int rx_buf_len)
 113{
 114        int page_size, pad_size;
 115
 116        page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
 117        pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
 118
 119        return pad_size;
 120}
 121
 122static inline int ixgbe_skb_pad(void)
 123{
 124        int rx_buf_len;
 125
 126        /* If a 2K buffer cannot handle a standard Ethernet frame then
 127         * optimize padding for a 3K buffer instead of a 1.5K buffer.
 128         *
 129         * For a 3K buffer we need to add enough padding to allow for
 130         * tailroom due to NET_IP_ALIGN possibly shifting us out of
 131         * cache-line alignment.
 132         */
 133        if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
 134                rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
 135        else
 136                rx_buf_len = IXGBE_RXBUFFER_1536;
 137
 138        /* if needed make room for NET_IP_ALIGN */
 139        rx_buf_len -= NET_IP_ALIGN;
 140
 141        return ixgbe_compute_pad(rx_buf_len);
 142}
 143
 144#define IXGBE_SKB_PAD   ixgbe_skb_pad()
 145#else
 146#define IXGBE_SKB_PAD   (NET_SKB_PAD + NET_IP_ALIGN)
 147#endif
 148
 149/*
 150 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 151 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 152 * this adds up to 448 bytes of extra data.
 153 *
 154 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 155 * of 256 and the resultant skb will have a truesize of 960 or less.
 156 */
 157#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
 158
 159/* How many Rx Buffers do we bundle into one write to the hardware ? */
 160#define IXGBE_RX_BUFFER_WRITE   16      /* Must be power of 2 */
 161
 162#define IXGBE_RX_DMA_ATTR \
 163        (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
 164
 165enum ixgbe_tx_flags {
 166        /* cmd_type flags */
 167        IXGBE_TX_FLAGS_HW_VLAN  = 0x01,
 168        IXGBE_TX_FLAGS_TSO      = 0x02,
 169        IXGBE_TX_FLAGS_TSTAMP   = 0x04,
 170
 171        /* olinfo flags */
 172        IXGBE_TX_FLAGS_CC       = 0x08,
 173        IXGBE_TX_FLAGS_IPV4     = 0x10,
 174        IXGBE_TX_FLAGS_CSUM     = 0x20,
 175        IXGBE_TX_FLAGS_IPSEC    = 0x40,
 176
 177        /* software defined flags */
 178        IXGBE_TX_FLAGS_SW_VLAN  = 0x80,
 179        IXGBE_TX_FLAGS_FCOE     = 0x100,
 180};
 181
 182/* VLAN info */
 183#define IXGBE_TX_FLAGS_VLAN_MASK        0xffff0000
 184#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 185#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
 186#define IXGBE_TX_FLAGS_VLAN_SHIFT       16
 187
 188#define IXGBE_MAX_VF_MC_ENTRIES         30
 189#define IXGBE_MAX_VF_FUNCTIONS          64
 190#define IXGBE_MAX_VFTA_ENTRIES          128
 191#define MAX_EMULATION_MAC_ADDRS         16
 192#define IXGBE_MAX_PF_MACVLANS           15
 193#define VMDQ_P(p)   ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
 194#define IXGBE_82599_VF_DEVICE_ID        0x10ED
 195#define IXGBE_X540_VF_DEVICE_ID         0x1515
 196
/* Per-VF state the PF tracks for SR-IOV administration. */
struct vf_data_storage {
	struct pci_dev *vfdev;				/* the VF's PCI device */
	unsigned char vf_mac_addresses[ETH_ALEN];	/* MAC assigned to the VF */
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];	/* VF multicast hash list */
	u16 num_vf_mc_hashes;				/* valid entries in vf_mc_hashes[] */
	bool clear_to_send;
	bool pf_set_mac;		/* PF administratively set the VF MAC */
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;			/* Tx rate limit for the VF */
	u8 spoofchk_enabled;
	bool rss_query_enabled;
	u8 trusted;			/* VF granted trusted status */
	int xcast_mode;			/* one of enum ixgbevf_xcast_modes */
	unsigned int vf_api;		/* VF API version in use */
};
 213
/* Multicast/promiscuous filtering modes a VF can request. */
enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,	/* no multicast filtering requested */
	IXGBEVF_XCAST_MODE_MULTI,	/* receive subscribed multicast */
	IXGBEVF_XCAST_MODE_ALLMULTI,	/* receive all multicast */
	IXGBEVF_XCAST_MODE_PROMISC,	/* full promiscuous mode */
};
 220
/* List node describing one MACVLAN address slot usable by a VF. */
struct vf_macvlans {
	struct list_head l;		/* linkage in the adapter's mv_list */
	int vf;				/* VF this entry belongs to */
	bool free;			/* entry is available for reuse */
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];	/* MAC address held by this entry */
};
 228
 229#define IXGBE_MAX_TXD_PWR       14
 230#define IXGBE_MAX_DATA_PER_TXD  (1u << IXGBE_MAX_TXD_PWR)
 231
 232/* Tx Descriptors needed, worst case */
 233#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
 234#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 235
 236/* wrapper around a pointer to a socket buffer,
 237 * so a DMA handle can be stored along with the buffer */
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;	/* descriptor checked for completion */
	unsigned long time_stamp;		/* jiffies when the buffer was queued */
	union {
		struct sk_buff *skb;
		/* XDP uses address ptr on irq_clean */
		void *data;
	};
	unsigned int bytecount;			/* total bytes, for stats accounting */
	unsigned short gso_segs;		/* segment count, for stats accounting */
	__be16 protocol;			/* EtherType of the frame */
	DEFINE_DMA_UNMAP_ADDR(dma);		/* DMA address for later unmap */
	DEFINE_DMA_UNMAP_LEN(len);		/* DMA length for later unmap */
	u32 tx_flags;				/* enum ixgbe_tx_flags bits */
};
 253
/* State for one receive buffer: the backing page plus DMA bookkeeping. */
struct ixgbe_rx_buffer {
	struct sk_buff *skb;		/* skb being assembled from this buffer */
	dma_addr_t dma;			/* DMA mapping of the page */
	struct page *page;		/* page backing the receive buffer */
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;		/* 16 bits cannot index large pages */
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;		/* refcount bias used for page reuse */
};
 265
/* Counters common to both Tx and Rx rings. */
struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
};
 270
/* Tx-ring-specific counters. */
struct ixgbe_tx_queue_stats {
	u64 restart_queue;	/* times the queue was restarted */
	u64 tx_busy;		/* times a send found the ring full */
	u64 tx_done_old;	/* snapshot used by hang detection */
};
 276
/* Rx-ring-specific counters. */
struct ixgbe_rx_queue_stats {
	u64 rsc_count;			/* receive side coalescing events */
	u64 rsc_flush;
	u64 non_eop_descs;		/* descriptors without end-of-packet */
	u64 alloc_rx_page;		/* pages allocated for Rx */
	u64 alloc_rx_page_failed;	/* page allocation failures */
	u64 alloc_rx_buff_failed;	/* buffer allocation failures */
	u64 csum_err;			/* hardware checksum errors */
};
 286
 287#define IXGBE_TS_HDR_LEN 8
 288
/* Bit numbers used in ixgbe_ring.state (manipulated with test/set/clear_bit). */
enum ixgbe_ring_state_t {
	__IXGBE_RX_3K_BUFFER,		/* ring uses 3K receive buffers */
	__IXGBE_RX_BUILD_SKB_ENABLED,	/* build_skb receive path in use */
	__IXGBE_RX_RSC_ENABLED,		/* receive side coalescing enabled */
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
	__IXGBE_RX_FCOE,		/* ring carries FCoE traffic */
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_XPS_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,		/* hang detection requested */
	__IXGBE_HANG_CHECK_ARMED,	/* hang check armed for this ring */
	__IXGBE_TX_XDP_RING,		/* ring transmits XDP frames */
};
 301
 302#define ring_uses_build_skb(ring) \
 303        test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
 304
/* State for one L2 forwarding (macvlan offload) accelerator instance. */
struct ixgbe_fwd_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];	/* VLANs on this device */
	struct net_device *netdev;		/* the offloaded upper device */
	struct ixgbe_adapter *real_adapter;	/* owning PF adapter */
	unsigned int tx_base_queue;		/* first Tx queue of the pool */
	unsigned int rx_base_queue;		/* first Rx queue of the pool */
	int pool;				/* pool index in fwd_bitmask */
};
 313
 314#define check_for_tx_hang(ring) \
 315        test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 316#define set_check_for_tx_hang(ring) \
 317        set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 318#define clear_check_for_tx_hang(ring) \
 319        clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 320#define ring_is_rsc_enabled(ring) \
 321        test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 322#define set_ring_rsc_enabled(ring) \
 323        set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 324#define clear_ring_rsc_enabled(ring) \
 325        clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 326#define ring_is_xdp(ring) \
 327        test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
 328#define set_ring_xdp(ring) \
 329        set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
 330#define clear_ring_xdp(ring) \
 331        clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
/* Descriptor ring: one instance per Tx, Rx, or XDP queue. */
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct bpf_prog *xdp_prog;	/* XDP program attached, if any */
	struct device *dev;		/* device for DMA mapping */
	void *desc;			/* descriptor ring memory */
	union {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;		/* enum ixgbe_ring_state_t bits */
	u8 __iomem *tail;		/* mapped tail register address */
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u16 count;			/* amount of descriptors */

	u8 queue_index; /* needed for multiqueue queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u16 next_to_use;		/* next descriptor to hand to hardware */
	u16 next_to_clean;		/* next descriptor to process */

	unsigned long last_rx_timestamp;

	union {
		u16 next_to_alloc;	/* Rx: next buffer slot to refill */
		struct {		/* Tx: ATR flow-director sampling state */
			u8 atr_sample_rate;
			u8 atr_count;
		};
	};

	u8 dcb_tc;			/* DCB traffic class of this ring */
	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;	/* guards 64-bit stats on 32-bit hosts */
	union {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
	struct xdp_rxq_info xdp_rxq;	/* XDP rxq registration for this ring */
} ____cacheline_internodealigned_in_smp;
 378
/* Indices into adapter->ring_feature[]; one slot per ring feature. */
enum ixgbe_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
	RING_F_RSS,
	RING_F_FDIR,
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */

	RING_F_ARRAY_SIZE      /* must be last in enum set */
};
 390
 391#define IXGBE_MAX_RSS_INDICES           16
 392#define IXGBE_MAX_RSS_INDICES_X550      63
 393#define IXGBE_MAX_VMDQ_INDICES          64
 394#define IXGBE_MAX_FDIR_INDICES          63      /* based on q_vector limit */
 395#define IXGBE_MAX_FCOE_INDICES          8
 396#define MAX_RX_QUEUES                   (IXGBE_MAX_FDIR_INDICES + 1)
 397#define MAX_TX_QUEUES                   (IXGBE_MAX_FDIR_INDICES + 1)
 398#define MAX_XDP_QUEUES                  (IXGBE_MAX_FDIR_INDICES + 1)
 399#define IXGBE_MAX_L2A_QUEUES            4
 400#define IXGBE_BAD_L2A_QUEUE             3
 401#define IXGBE_MAX_MACVLANS              63
 402
/* Ring allocation bookkeeping for one feature (RSS, VMDQ, FDIR, FCoE). */
struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;
 409
 410#define IXGBE_82599_VMDQ_8Q_MASK 0x78
 411#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
 412#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
 413
 414/*
 415 * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
 416 * this is twice the size of a half page we need to double the page order
 417 * for FCoE enabled Rx queues.
 418 */
 419static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 420{
 421        if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
 422                return IXGBE_RXBUFFER_3K;
 423#if (PAGE_SIZE < 8192)
 424        if (ring_uses_build_skb(ring))
 425                return IXGBE_MAX_2K_FRAME_BUILD_SKB;
 426#endif
 427        return IXGBE_RXBUFFER_2K;
 428}
 429
 430static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 431{
 432#if (PAGE_SIZE < 8192)
 433        if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
 434                return 1;
 435#endif
 436        return 0;
 437}
 438#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
 439
 440#define IXGBE_ITR_ADAPTIVE_MIN_INC      2
 441#define IXGBE_ITR_ADAPTIVE_MIN_USECS    10
 442#define IXGBE_ITR_ADAPTIVE_MAX_USECS    126
 443#define IXGBE_ITR_ADAPTIVE_LATENCY      0x80
 444#define IXGBE_ITR_ADAPTIVE_BULK         0x00
 445
/* Group of rings of one direction (Tx or Rx) serviced by a q_vector. */
struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned long next_update;	/* jiffies value of last update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};
 455
 456/* iterator for handling rings in ring container */
 457#define ixgbe_for_each_ring(pos, head) \
 458        for (pos = (head).ring; pos != NULL; pos = pos->next)
 459
 460#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
 461                              ? 8 : 1)
 462#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
 463
 464/* MAX_Q_VECTORS of these are allocated,
 465 * but we only use one per queue-specific vector.
 466 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;	/* backpointer to owning adapter */
#ifdef CONFIG_IXGBE_DCA
	int cpu;	    /* CPU for DCA */
#endif
	u16 v_idx;		/* index of q_vector within array, also used for
				 * finding the bit in EICR and friends that
				 * represents the vector for this ring */
	u16 itr;		/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;	/* rings serviced by this vector */

	struct napi_struct napi;	/* NAPI context for polling */
	cpumask_t affinity_mask;	/* CPUs this vector's IRQ prefers */
	int numa_node;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];	/* vector name; sized for netdev name + suffix */

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
 487
 488#ifdef CONFIG_IXGBE_HWMON
 489
 490#define IXGBE_HWMON_TYPE_LOC            0
 491#define IXGBE_HWMON_TYPE_TEMP           1
 492#define IXGBE_HWMON_TYPE_CAUTION        2
 493#define IXGBE_HWMON_TYPE_MAX            3
 494
/* One hwmon sysfs attribute bound to a thermal sensor. */
struct hwmon_attr {
	struct device_attribute dev_attr;	/* the sysfs attribute itself */
	struct ixgbe_hw *hw;			/* hw struct to query */
	struct ixgbe_thermal_diode_data *sensor; /* sensor backing this attr */
	char name[12];				/* attribute name buffer */
};
 501
/* All hwmon attributes for one adapter: up to 4 per sensor plus terminator. */
struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];	/* group + NULL terminator */
	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1]; /* NULL-terminated */
	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
	unsigned int n_hwmon;				/* attributes in use */
};
 509#endif /* CONFIG_IXGBE_HWMON */
 510
 511/*
 512 * microsecond values for various ITR rates shifted by 2 to fit itr register
 513 * with the first 3 bits reserved 0
 514 */
 515#define IXGBE_MIN_RSC_ITR       24
 516#define IXGBE_100K_ITR          40
 517#define IXGBE_20K_ITR           200
 518#define IXGBE_12K_ITR           336
 519
 520/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	/* convert the mask to little endian so the descriptor word can be
	 * tested in place without byte-swapping the hardware value
	 */
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
 526
 527static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 528{
 529        u16 ntc = ring->next_to_clean;
 530        u16 ntu = ring->next_to_use;
 531
 532        return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 533}
 534
 535#define IXGBE_RX_DESC(R, i)         \
 536        (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
 537#define IXGBE_TX_DESC(R, i)         \
 538        (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
 539#define IXGBE_TX_CTXTDESC(R, i)     \
 540        (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 541
 542#define IXGBE_MAX_JUMBO_FRAME_SIZE      9728 /* Maximum Supported Size 9.5KB */
 543#ifdef IXGBE_FCOE
 544/* Use 3K as the baby jumbo frame size for FCoE */
 545#define IXGBE_FCOE_JUMBO_FRAME_SIZE       3072
 546#endif /* IXGBE_FCOE */
 547
 548#define OTHER_VECTOR 1
 549#define NON_Q_VECTORS (OTHER_VECTOR)
 550
 551#define MAX_MSIX_VECTORS_82599 64
 552#define MAX_Q_VECTORS_82599 64
 553#define MAX_MSIX_VECTORS_82598 18
 554#define MAX_Q_VECTORS_82598 16
 555
/* Entry in the software MAC filter table. */
struct ixgbe_mac_addr {
	u8 addr[ETH_ALEN];	/* the MAC address */
	u16 pool;		/* pool/VF the address is assigned to */
	u16 state; /* bitmask */
};
 561
 562#define IXGBE_MAC_STATE_DEFAULT         0x1
 563#define IXGBE_MAC_STATE_MODIFIED        0x2
 564#define IXGBE_MAC_STATE_IN_USE          0x4
 565
 566#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
 567#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
 568
 569#define MIN_MSIX_Q_VECTORS 1
 570#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
 571
 572/* default to trying for four seconds */
 573#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
 574#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */
 575
 576/* board specific private data structure */
struct ixgbe_adapter {
	/* VLAN IDs currently configured on the port */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* OS defined structs */
	struct net_device *netdev;
	struct bpf_prog *xdp_prog;	/* attached XDP program, if any */
	struct pci_dev *pdev;

	unsigned long state;		/* bitmap of enum ixgbe_state_t bits */

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)
#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE	BIT(28)

	u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED	BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
#define IXGBE_FLAG2_RX_LEGACY			BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED		BIT(17)

	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr_setting;
	u16 tx_work_limit;
	u64 tx_ipsec;		/* IPsec Tx offload counter */

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr_setting;
	u64 rx_ipsec;		/* IPsec Rx offload counter */

	/* Port number used to identify VXLAN traffic */
	__be16 vxlan_port;
	__be16 geneve_port;	/* likewise for GENEVE traffic */

	/* XDP */
	int num_xdp_queues;
	struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];

	/* TX */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

	u64 restart_queue;	/* aggregate of per-ring restart counts */
	u64 lsc_int;		/* link status change interrupts */
	u32 tx_timeout_count;

	/* RX */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
	u32 alloc_rx_page;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

	/* DCB parameters */
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	u8 hw_tcs;		/* traffic classes configured in hardware */
	u8 dcb_set_bitmap;
	u8 dcbx_cap;
	enum ixgbe_fc_mode last_lfc_mode;

	int num_q_vectors;	/* current number of q_vectors for device */
	int max_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	u32 test_icr;		/* interrupt cause captured by selftest */
	struct ixgbe_ring test_tx_ring;	/* rings used only by ethtool selftest */
	struct ixgbe_ring test_rx_ring;

	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	u16 msg_enable;		/* netif message level bitmap */
	struct ixgbe_hw_stats stats;

	u64 tx_busy;
	unsigned int tx_ring_count;	/* descriptors per Tx ring */
	unsigned int xdp_ring_count;	/* descriptors per XDP ring */
	unsigned int rx_ring_count;	/* descriptors per Rx ring */

	u32 link_speed;
	bool link_up;
	unsigned long sfp_poll_time;		/* jiffies of next SFP poll */
	unsigned long link_check_timeout;

	struct timer_list service_timer;	/* periodically schedules service_task */
	struct work_struct service_task;

	/* Flow Director state */
	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow; /* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	u32 fdir_pballoc;
	u32 atr_sample_rate;
	spinlock_t fdir_perfect_lock;	/* guards the perfect filter list */

#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 wol;		/* wake-on-LAN configuration */

	u16 bridge_mode;

	char eeprom_id[NVM_VER_SIZE];	/* firmware/NVM version string */
	u16 eeprom_cap;

	u32 interrupt_event;
	u32 led_reg;

	/* PTP / hardware timestamping state */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;	/* skb awaiting a Tx timestamp */
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_overflow_check;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	spinlock_t tmreg_lock;		/* guards the timecounter/cyclecounter */
	struct cyclecounter hw_cc;
	struct timecounter hw_tc;
	u32 base_incval;
	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	void (*ptp_setup_sdp)(struct ixgbe_adapter *);

	/* SR-IOV */
	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;	/* array of num_vfs entries */
	int vf_rate_link_speed;
	struct vf_macvlans vf_mvs;	/* head of the VF macvlan list */
	struct vf_macvlans *mv_list;

	u32 timer_event_accumulator;
	u32 vferr_refcount;
	struct ixgbe_mac_addr *mac_table;	/* software MAC filter table */
	struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
	struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ixgbe_dbg_adapter;
#endif /*CONFIG_DEBUG_FS*/

	u8 default_up;
	/* Bitmask indicating in use pools */
	DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);

#define IXGBE_MAX_LINK_HANDLE 10
	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
	unsigned long tables;

/* maximum number of RETA entries among all devices supported by ixgbe
 * driver: currently it's x550 device in non-SRIOV mode
 */
#define IXGBE_MAX_RETA_ENTRIES 512
	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];

#define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
	u32 *rss_key;

#ifdef CONFIG_XFRM
	struct ixgbe_ipsec *ipsec;	/* IPsec offload state */
#endif /* CONFIG_XFRM */
};
 794
 795static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
 796{
 797        switch (adapter->hw.mac.type) {
 798        case ixgbe_mac_82598EB:
 799        case ixgbe_mac_82599EB:
 800        case ixgbe_mac_X540:
 801                return IXGBE_MAX_RSS_INDICES;
 802        case ixgbe_mac_X550:
 803        case ixgbe_mac_X550EM_x:
 804        case ixgbe_mac_x550em_a:
 805                return IXGBE_MAX_RSS_INDICES_X550;
 806        default:
 807                return 0;
 808        }
 809}
 810
/* One user-configured Flow Director filter. */
struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;	/* linkage in fdir_filter_list */
	union ixgbe_atr_input filter;	/* match fields for the filter */
	u16 sw_idx;			/* software index of the filter */
	u64 action;			/* queue/drop action on match */
};
 817
/* Bit numbers used in adapter->state. */
enum ixgbe_state_t {
	__IXGBE_TESTING,		/* ethtool selftest in progress */
	__IXGBE_RESETTING,
	__IXGBE_DOWN,			/* interface is down */
	__IXGBE_DISABLED,
	__IXGBE_REMOVING,		/* device is being removed */
	__IXGBE_SERVICE_SCHED,		/* service task is scheduled */
	__IXGBE_SERVICE_INITED,
	__IXGBE_IN_SFP_INIT,
	__IXGBE_PTP_RUNNING,
	__IXGBE_PTP_TX_IN_PROGRESS,	/* a Tx timestamp is outstanding */
	__IXGBE_RESET_REQUESTED,
};
 831
/* Driver-private data stored in skb->cb (see IXGBE_CB macro below). */
struct ixgbe_cb {
	union {				/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
	dma_addr_t dma;			/* DMA address associated with the skb */
	u16 append_cnt;			/* fragments appended (RSC) */
	bool page_released;		/* backing page already returned */
};
 841#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
 842
/* Board identifiers; index into the driver's board info table. */
enum ixgbe_boards {
	board_82598,
	board_82599,
	board_X540,
	board_X550,
	board_X550EM_x,
	board_x550em_x_fw,
	board_x550em_a,
	board_x550em_a_fw,
};
 853
/* Per-board info tables, one for each enum ixgbe_boards entry above */
extern const struct ixgbe_info ixgbe_82598_info;
extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif

/* Driver identification strings */
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
 871
/* Core driver entry points and ring/resource management */
int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			 u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
/* MAC filter table and VLAN management */
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
/* Transmit path and per-queue helpers */
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
				  struct ixgbe_ring *);
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
				      struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
/* NOTE(review): generic, un-prefixed global name - verify this is still referenced */
int ethtool_ioctl(struct ifreq *ifr);
/* Flow Director (82599-class) table setup and filter programming */
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx);
/* Rx filtering mode, traffic classes, reset and hwmon/sysfs hooks */
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
/* FCoE offload support (compiled only when IXGBE_FCOE is defined) */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
	      u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
/* debugfs hooks; empty inlines when CONFIG_DEBUG_FS is off so callers
 * need no #ifdefs of their own.
 */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
 973static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
 974{
 975        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
 976}
 977
/* PTP / hardware timestamping support */
void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
 986static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
 987                                         union ixgbe_adv_rx_desc *rx_desc,
 988                                         struct sk_buff *skb)
 989{
 990        if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
 991                ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
 992                return;
 993        }
 994
 995        if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
 996                return;
 997
 998        ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
 999
1000        /* Update the last_rx_timestamp timer in order to enable watchdog check
1001         * for error case of latched timestamp on a dropped packet.
1002         */
1003        rx_ring->last_rx_timestamp = jiffies;
1004}
1005
1006int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
1007int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
1008void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
1009void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
1010void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
1011#ifdef CONFIG_PCI_IOV
1012void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
1013#endif
1014
1015netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
1016                                  struct ixgbe_adapter *adapter,
1017                                  struct ixgbe_ring *tx_ring);
1018u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
1019void ixgbe_store_key(struct ixgbe_adapter *adapter);
1020void ixgbe_store_reta(struct ixgbe_adapter *adapter);
1021s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
1022                       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
/* IPsec (XFRM) offload; no-op inline stubs when CONFIG_XFRM_OFFLOAD is off
 * so callers need no #ifdefs of their own.
 */
#ifdef CONFIG_XFRM_OFFLOAD
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb);
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd);
#else
/* Spurious trailing semicolons after the stub bodies removed: a bare ';'
 * at file scope is not valid strict ISO C and trips checkpatch.
 */
static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb) { }
static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
				 struct ixgbe_tx_buffer *first,
				 struct ixgbe_ipsec_tx_data *itd) { return 0; }
#endif /* CONFIG_XFRM_OFFLOAD */
1043#endif /* _IXGBE_H_ */
1044