dpdk/drivers/net/e1000/e1000_ethdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_

#include <stdint.h>

#include <rte_flow.h>
#include <rte_time.h>
#include <rte_pci.h>

#define E1000_INTEL_VENDOR_ID 0x8086

/* interrupt flag bits: link update needed, mailbox event pending */
#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define E1000_FLAG_MAILBOX          (uint32_t)(1 << 1)

/*
 * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD
 * driver.
 */
#define E1000_ADVTXD_POPTS_TXSM     0x00000200 /* L4 Checksum offload request */
#define E1000_ADVTXD_POPTS_IXSM     0x00000100 /* IP Checksum offload request */
#define E1000_ADVTXD_TUCMD_L4T_RSV  0x00001800 /* Reserved L4 packet type */
#define E1000_RXD_STAT_TMST         0x10000    /* Timestamped packet indication */
#define E1000_RXD_ERR_CKSUM_BIT     29
#define E1000_RXD_ERR_CKSUM_MSK     3
#define E1000_ADVTXD_MACLEN_SHIFT   9          /* Bit shift for l2_len */
#define E1000_CTRL_EXT_EXTEND_VLAN  (1<<26)    /* EXTENDED VLAN */
#define IGB_VFTA_SIZE 128

#define IGB_HKEY_MAX_INDEX             10
#define IGB_MAX_RX_QUEUE_NUM           8
#define IGB_MAX_RX_QUEUE_NUM_82576     16

#define E1000_I219_MAX_RX_QUEUE_NUM             2
#define E1000_I219_MAX_TX_QUEUE_NUM             2

#define E1000_SYN_FILTER_ENABLE        0x00000001 /* syn filter enable field */
#define E1000_SYN_FILTER_QUEUE         0x0000000E /* syn filter queue field */
#define E1000_SYN_FILTER_QUEUE_SHIFT   1          /* syn filter queue field shift */
#define E1000_RFCTL_SYNQFP             0x00080000 /* SYNQFP in RFCTL register */

#define E1000_ETQF_ETHERTYPE           0x0000FFFF
#define E1000_ETQF_QUEUE               0x00070000
#define E1000_ETQF_QUEUE_SHIFT         16
#define E1000_MAX_ETQF_FILTERS         8

#define E1000_IMIR_DSTPORT             0x0000FFFF
#define E1000_IMIR_PRIORITY            0xE0000000
#define E1000_MAX_TTQF_FILTERS         8
#define E1000_2TUPLE_MAX_PRI           7

#define E1000_MAX_FLEX_FILTERS           8
#define E1000_MAX_FHFT                   4
#define E1000_MAX_FHFT_EXT               4
#define E1000_FHFT_SIZE_IN_DWD           64
#define E1000_MAX_FLEX_FILTER_PRI        7
#define E1000_MAX_FLEX_FILTER_LEN        128
#define E1000_MAX_FLEX_FILTER_DWDS \
        (E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
#define E1000_FLEX_FILTERS_MASK_SIZE \
        (E1000_MAX_FLEX_FILTER_DWDS / 2)
#define E1000_FHFT_QUEUEING_LEN          0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE        0x00000700
#define E1000_FHFT_QUEUEING_PRIO         0x00070000
#define E1000_FHFT_QUEUEING_OFFSET       0xFC
#define E1000_FHFT_QUEUEING_QUEUE_SHIFT  8
#define E1000_FHFT_QUEUEING_PRIO_SHIFT   16
#define E1000_WUFC_FLEX_HQ               0x00004000

#define E1000_SPQF_SRCPORT               0x0000FFFF

#define E1000_MAX_FTQF_FILTERS           8
#define E1000_FTQF_PROTOCOL_MASK         0x000000FF
#define E1000_FTQF_5TUPLE_MASK_SHIFT     28
#define E1000_FTQF_QUEUE_MASK            0x03ff0000
#define E1000_FTQF_QUEUE_SHIFT           16
#define E1000_FTQF_QUEUE_ENABLE          0x00000100

#define IGB_RSS_OFFLOAD_ALL ( \
        ETH_RSS_IPV4 | \
        ETH_RSS_NONFRAG_IPV4_TCP | \
        ETH_RSS_NONFRAG_IPV4_UDP | \
        ETH_RSS_IPV6 | \
        ETH_RSS_NONFRAG_IPV6_TCP | \
        ETH_RSS_NONFRAG_IPV6_UDP | \
        ETH_RSS_IPV6_EX | \
        ETH_RSS_IPV6_TCP_EX | \
        ETH_RSS_IPV6_UDP_EX)

/*
 * The overhead from MTU to max frame size:
 * the Ethernet header, the CRC and one VLAN tag have to be counted.
 */
#define E1000_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
                                VLAN_TAG_SIZE)

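/*
 * Illustrative sketch (not part of the upstream header): with the overhead
 * above, the max frame size corresponding to a configured MTU is simply
 * MTU + E1000_ETH_OVERHEAD. The helper name is made up for the example.
 */
static inline uint32_t
e1000_example_mtu_to_frame_size(uint16_t mtu)
{
        /* e.g. a 1500-byte MTU maps to 1500 + 14 + 4 + 4 = 1522 bytes */
        return (uint32_t)mtu + E1000_ETH_OVERHEAD;
}
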
/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
#define E1000_MIN_RING_DESC     32
#define E1000_MAX_RING_DESC     4096

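/*
 * Illustrative sketch (not part of the upstream header): a check of the ring
 * sizing rule stated above. The descriptor size is passed in by the caller
 * (16 bytes for the legacy and advanced descriptors) and 128 bytes is the
 * RDLEN/TDLEN granularity. The helper name is made up for the example.
 */
static inline int
e1000_example_ring_size_ok(uint16_t nb_desc, uint32_t desc_size)
{
        if (nb_desc < E1000_MIN_RING_DESC || nb_desc > E1000_MAX_RING_DESC)
                return 0;
        return ((uint32_t)nb_desc * desc_size) % 128 == 0;
}
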
/*
 * TDBA/RDBA need only be aligned on a 16-byte boundary, but TDLEN/RDLEN must
 * be a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
 * This also plays well with cache lines: the hardware supports cache line
 * sizes of up to 128 bytes.
 */
#define E1000_ALIGN     128

#define IGB_RXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
#define IGB_TXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))

#define EM_RXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_rx_desc))
#define EM_TXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_data_desc))

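/*
 * Illustrative sketch (not part of the upstream header): the *XD_ALIGN macros
 * above give the descriptor-count granularity that keeps a ring length a
 * multiple of 128 bytes (128 / 16 = 8 descriptors), and the base-address
 * alignment chosen above reduces to a simple mask test. The helper name is
 * made up for the example.
 */
static inline int
e1000_example_ring_base_aligned(uint64_t dma_addr)
{
        return (dma_addr & (E1000_ALIGN - 1)) == 0;
}
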
#define E1000_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
#define E1000_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET

#define IGB_TX_MAX_SEG     UINT8_MAX
#define IGB_TX_MAX_MTU_SEG UINT8_MAX
#define EM_TX_MAX_SEG      UINT8_MAX
#define EM_TX_MAX_MTU_SEG  UINT8_MAX

#define MAC_TYPE_FILTER_SUP(type)    do {\
        if ((type) != e1000_82580 && (type) != e1000_i350 &&\
                (type) != e1000_82576 && (type) != e1000_i210 &&\
                (type) != e1000_i211)\
                return -ENOTSUP;\
} while (0)

#define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
        if ((type) != e1000_82580 && (type) != e1000_i350 &&\
                (type) != e1000_i210 && (type) != e1000_i211)\
                return -ENOTSUP; \
} while (0)

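/*
 * Usage sketch (not part of the upstream header): the two macros above return
 * -ENOTSUP straight from the calling function when the MAC type does not
 * support the requested filter family. The helper name is made up;
 * enum e1000_mac_type comes from the base driver headers included ahead of
 * this one.
 */
static inline int
e1000_example_check_filter_support(enum e1000_mac_type type)
{
        MAC_TYPE_FILTER_SUP(type);      /* bails out with -ENOTSUP if unsupported */
        return 0;                       /* filters are supported on this MAC */
}
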
/* structure for interrupt-related data */
struct e1000_interrupt {
        uint32_t flags;
        uint32_t mask;
};

/* local VFTA copy */
struct e1000_vfta {
        uint32_t vfta[IGB_VFTA_SIZE];
};

/*
 * VF data, used by the PF host only
 */
#define E1000_MAX_VF_MC_ENTRIES         30
struct e1000_vf_info {
        uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
        uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];
        uint16_t num_vf_mc_hashes;
        uint16_t default_vf_vlan_id;
        uint16_t vlans_enabled;
        uint16_t pf_qos;
        uint16_t vlan_count;
        uint16_t tx_rate;
};

TAILQ_HEAD(e1000_flex_filter_list, e1000_flex_filter);

struct e1000_flex_filter_info {
        uint16_t len;
        uint32_t dwords[E1000_MAX_FLEX_FILTER_DWDS]; /* flex bytes in dwords. */
        /* if a mask bit is 1b, do not compare the corresponding byte in dwords. */
        uint8_t mask[E1000_FLEX_FILTERS_MASK_SIZE];
        uint8_t priority;
};

/* Flex filter structure */
struct e1000_flex_filter {
        TAILQ_ENTRY(e1000_flex_filter) entries;
        uint16_t index; /* index of the flex filter */
        struct e1000_flex_filter_info filter_info;
        uint16_t queue; /* RX queue assigned to */
};

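/*
 * Illustrative sketch (an assumption, not part of the upstream header): the
 * mask holds one "do not compare" bit per pattern byte, so with
 * E1000_MAX_FLEX_FILTER_LEN = 128 it is 128 bits (16 bytes) long. Assuming
 * bit i of the mask corresponds to byte i of the pattern, a lookup could be
 * written as below; the helper name is made up.
 */
static inline int
e1000_example_flex_byte_ignored(const struct e1000_flex_filter_info *info,
                                uint16_t byte_idx)
{
        return (info->mask[byte_idx / 8] >> (byte_idx % 8)) & 1;
}
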
TAILQ_HEAD(e1000_5tuple_filter_list, e1000_5tuple_filter);
TAILQ_HEAD(e1000_2tuple_filter_list, e1000_2tuple_filter);

struct e1000_5tuple_filter_info {
        uint32_t dst_ip;
        uint32_t src_ip;
        uint16_t dst_port;
        uint16_t src_port;
        uint8_t proto;           /* l4 protocol. */
        /* a packet that matches the 5-tuple above and carries any of
         * these set TCP flags will hit this filter. */
        uint8_t tcp_flags;
        uint8_t priority;        /* seven levels (001b-111b), 111b is highest,
                                      used when more than one filter matches. */
        uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
                src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
                dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
                src_port_mask:1, /* if mask is 1b, do not compare src port. */
                proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

struct e1000_2tuple_filter_info {
        uint16_t dst_port;
        uint8_t proto;           /* l4 protocol. */
        /* a packet that matches the 2-tuple above and carries any of
         * these set TCP flags will hit this filter. */
        uint8_t tcp_flags;
        uint8_t priority;        /* seven levels (001b-111b), 111b is highest,
                                      used when more than one filter matches. */
        uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
                src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
                dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
                src_port_mask:1, /* if mask is 1b, do not compare src port. */
                proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

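/*
 * Illustrative sketch (not part of the upstream header): a 5-tuple match on
 * "TCP to destination port 80, any addresses, any source port". The per-field
 * mask bits flag the fields that must NOT be compared; the helper name and
 * the chosen values are only an example.
 */
static inline void
e1000_example_5tuple_http(struct e1000_5tuple_filter_info *info)
{
        *info = (struct e1000_5tuple_filter_info) {
                .dst_port = 80,      /* compared against the packet */
                .proto = 6,          /* TCP, compared against the packet */
                .priority = 7,       /* highest of the seven levels */
                .dst_ip_mask = 1,    /* do not compare destination IP */
                .src_ip_mask = 1,    /* do not compare source IP */
                .src_port_mask = 1,  /* do not compare source port */
        };
}
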
/* 5-tuple filter structure */
struct e1000_5tuple_filter {
        TAILQ_ENTRY(e1000_5tuple_filter) entries;
        uint16_t index;       /* the index of the 5-tuple filter */
        struct e1000_5tuple_filter_info filter_info;
        uint16_t queue;       /* RX queue assigned to */
};

/* 2-tuple filter structure */
struct e1000_2tuple_filter {
        TAILQ_ENTRY(e1000_2tuple_filter) entries;
        uint16_t index;       /* the index of the 2-tuple filter */
        struct e1000_2tuple_filter_info filter_info;
        uint16_t queue;       /* RX queue assigned to */
};

/* ethertype filter structure */
struct igb_ethertype_filter {
        uint16_t ethertype;
        uint32_t etqf;
};

struct igb_rte_flow_rss_conf {
        struct rte_flow_action_rss conf; /**< RSS parameters. */
        uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
        /* Queue indices to use. */
        uint16_t queue[IGB_MAX_RX_QUEUE_NUM_82576];
};

/*
 * Structure to store filters' info.
 */
struct e1000_filter_info {
        uint8_t ethertype_mask; /* Bit mask of used ethertype filters */
        /* store used ethertype filters */
        struct igb_ethertype_filter ethertype_filters[E1000_MAX_ETQF_FILTERS];
        uint8_t flex_mask;      /* Bit mask of used flex filters */
        struct e1000_flex_filter_list flex_list;
        /* Bit mask of used 5-tuple filters */
        uint8_t fivetuple_mask;
        struct e1000_5tuple_filter_list fivetuple_list;
        /* Bit mask of used 2-tuple filters */
        uint8_t twotuple_mask;
        struct e1000_2tuple_filter_list twotuple_list;
        /* store the SYN filter info */
        uint32_t syn_info;
        /* store the RSS filter info */
        struct igb_rte_flow_rss_conf rss_info;
};

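/*
 * Usage sketch (not part of the upstream header): the *_mask fields above
 * track which hardware filter slots are in use, one bit per slot. Marking
 * slot `idx` of the ethertype table as used could look like the helper
 * below; the helper name is made up.
 */
static inline void
e1000_example_mark_ethertype_slot(struct e1000_filter_info *filter_info,
                                  uint8_t idx)
{
        if (idx < E1000_MAX_ETQF_FILTERS)
                filter_info->ethertype_mask |= 1 << idx;
}
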
/*
 * Structure to store private data for each driver instance (for each port).
 */
struct e1000_adapter {
        struct e1000_hw         hw;
        struct e1000_hw_stats   stats;
        struct e1000_interrupt  intr;
        struct e1000_vfta       shadow_vfta;
        struct e1000_vf_info    *vfdata;
        struct e1000_filter_info filter;
        bool stopped;
        struct rte_timecounter  systime_tc;
        struct rte_timecounter  rx_tstamp_tc;
        struct rte_timecounter  tx_tstamp_tc;
};

#define E1000_DEV_PRIVATE(adapter) \
        ((struct e1000_adapter *)adapter)

#define E1000_DEV_PRIVATE_TO_HW(adapter) \
        (&((struct e1000_adapter *)adapter)->hw)

#define E1000_DEV_PRIVATE_TO_STATS(adapter) \
        (&((struct e1000_adapter *)adapter)->stats)

#define E1000_DEV_PRIVATE_TO_INTR(adapter) \
        (&((struct e1000_adapter *)adapter)->intr)

#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
        (&((struct e1000_adapter *)adapter)->shadow_vfta)

#define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \
        (&((struct e1000_adapter *)adapter)->vfdata)

#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
        (&((struct e1000_adapter *)adapter)->filter)

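/*
 * Usage sketch (not part of the upstream header): the accessor macros above
 * take the ethdev private data pointer, so a typical driver routine grabs
 * its sub-structures as shown below. The helper name is made up; the ethdev
 * types come from the ethdev headers included ahead of this one.
 */
static inline struct e1000_hw *
e1000_example_dev_to_hw(struct rte_eth_dev *dev)
{
        return E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
}
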
struct rte_flow {
        enum rte_filter_type filter_type;
        void *rule;
};

/* ntuple filter list structure */
struct igb_ntuple_filter_ele {
        TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
        struct rte_eth_ntuple_filter filter_info;
};

/* ethertype filter list structure */
struct igb_ethertype_filter_ele {
        TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
        struct rte_eth_ethertype_filter filter_info;
};

/* syn filter list structure */
struct igb_eth_syn_filter_ele {
        TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
        struct rte_eth_syn_filter filter_info;
};

#define IGB_FLEX_FILTER_MAXLEN  128     /**< bytes to use in flex filter. */
#define IGB_FLEX_FILTER_MASK_SIZE       \
        (RTE_ALIGN(IGB_FLEX_FILTER_MAXLEN, CHAR_BIT) / CHAR_BIT)
                                        /**< mask bytes in flex filter. */

/**
 * A structure used to define the flex filter entry
 * to support RTE_ETH_FILTER_FLEXIBLE data representation.
 */
struct igb_flex_filter {
        uint16_t len;
        uint8_t bytes[IGB_FLEX_FILTER_MAXLEN]; /**< flex bytes in big endian. */
        uint8_t mask[IGB_FLEX_FILTER_MASK_SIZE];
                /**< if mask bit is 1b, do not compare corresponding byte. */
        uint8_t priority;
        uint16_t queue;       /**< Queue assigned to when matched. */
};

/* flex filter list structure */
struct igb_flex_filter_ele {
        TAILQ_ENTRY(igb_flex_filter_ele) entries;
        struct igb_flex_filter filter_info;
};

/* rss filter list structure */
struct igb_rss_conf_ele {
        TAILQ_ENTRY(igb_rss_conf_ele) entries;
        struct igb_rte_flow_rss_conf filter_info;
};

/* igb_flow memory list structure */
struct igb_flow_mem {
        TAILQ_ENTRY(igb_flow_mem) entries;
        struct rte_flow *flow;
        struct rte_eth_dev *dev;
};

TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
extern struct igb_ntuple_filter_list igb_filter_ntuple_list;
TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
extern struct igb_ethertype_filter_list igb_filter_ethertype_list;
TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
extern struct igb_syn_filter_list igb_filter_syn_list;
TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
extern struct igb_flex_filter_list igb_filter_flex_list;
TAILQ_HEAD(igb_rss_filter_list, igb_rss_conf_ele);
extern struct igb_rss_filter_list igb_filter_rss_list;
TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
extern struct igb_flow_mem_list igb_flow_list;

extern const struct rte_flow_ops igb_flow_ops;

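/*
 * Illustrative sketch (not part of the upstream header): flow rules created
 * through igb_flow_ops are remembered in the per-type lists above, so walking
 * one of them is a plain TAILQ traversal. The helper name is made up.
 */
static inline unsigned int
igb_example_count_ntuple_rules(void)
{
        struct igb_ntuple_filter_ele *ele;
        unsigned int n = 0;

        TAILQ_FOREACH(ele, &igb_filter_ntuple_list, entries)
                n++;
        return n;
}
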
/*
 * RX/TX IGB function prototypes
 */
void eth_igb_tx_queue_release(void *txq);
void eth_igb_rx_queue_release(void *rxq);
void igb_dev_clear_queues(struct rte_eth_dev *dev);
void igb_dev_free_queues(struct rte_eth_dev *dev);

uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);

uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
                uint16_t rx_queue_id);

int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);

int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);

uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);

int eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt);

int eth_igb_rx_init(struct rte_eth_dev *dev);

void eth_igb_tx_init(struct rte_eth_dev *dev);

uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t eth_igb_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

uint16_t eth_igb_recv_scattered_pkts(void *rxq,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

int eth_igb_rss_hash_update(struct rte_eth_dev *dev,
                            struct rte_eth_rss_conf *rss_conf);

int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
                              struct rte_eth_rss_conf *rss_conf);

int eth_igbvf_rx_init(struct rte_eth_dev *dev);

void eth_igbvf_tx_init(struct rte_eth_dev *dev);

/*
 * misc function prototypes
 */
void igb_pf_host_init(struct rte_eth_dev *eth_dev);

void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo);

void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);

uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);

/*
 * RX/TX EM function prototypes
 */
void eth_em_tx_queue_release(void *txq);
void eth_em_rx_queue_release(void *rxq);

void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);

uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);

uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
                uint16_t rx_queue_id);

int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);

int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);

uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);

int eth_em_rx_init(struct rte_eth_dev *dev);

void eth_em_tx_init(struct rte_eth_dev *dev);

uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t eth_em_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo);

void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);

void igb_pf_host_uninit(struct rte_eth_dev *dev);

void igb_filterlist_flush(struct rte_eth_dev *dev);
int igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
                struct e1000_5tuple_filter *filter);
int igb_delete_2tuple_filter(struct rte_eth_dev *dev,
                struct e1000_2tuple_filter *filter);
void igb_remove_flex_filter(struct rte_eth_dev *dev,
                        struct e1000_flex_filter *filter);
int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
        uint8_t idx);
int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
                struct rte_eth_ntuple_filter *ntuple_filter, bool add);
int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter,
                        bool add);
int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter,
                        bool add);
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
                        struct igb_flex_filter *filter,
                        bool add);
int igb_rss_conf_init(struct rte_eth_dev *dev,
                      struct igb_rte_flow_rss_conf *out,
                      const struct rte_flow_action_rss *in);
int igb_action_rss_same(const struct rte_flow_action_rss *comp,
                        const struct rte_flow_action_rss *with);
int igb_config_rss_filter(struct rte_eth_dev *dev,
                        struct igb_rte_flow_rss_conf *conf,
                        bool add);
void em_flush_desc_rings(struct rte_eth_dev *dev);

#endif /* _E1000_ETHDEV_H_ */