dpdk/drivers/net/e1000/e1000_ethdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_

#include <stdint.h>
#include <sys/queue.h>

#include <rte_flow.h>
#include <rte_time.h>
#include <rte_pci.h>

#define E1000_INTEL_VENDOR_ID 0x8086

/* bit flag: a link status update is needed */
#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define E1000_FLAG_MAILBOX          (uint32_t)(1 << 1)

/*
 * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD
 * driver.
 */
#define E1000_ADVTXD_POPTS_TXSM     0x00000200 /* L4 Checksum offload request */
#define E1000_ADVTXD_POPTS_IXSM     0x00000100 /* IP Checksum offload request */
#define E1000_ADVTXD_TUCMD_L4T_RSV  0x00001800 /* L4 Packet TYPE of Reserved */
#define E1000_RXD_STAT_TMST         0x10000    /* Timestamped Packet indication */
#define E1000_RXD_ERR_CKSUM_BIT     29
#define E1000_RXD_ERR_CKSUM_MSK     3
#define E1000_ADVTXD_MACLEN_SHIFT   9          /* Bit shift for l2_len */
#define E1000_CTRL_EXT_EXTEND_VLAN  (1<<26)    /* EXTENDED VLAN */
#define IGB_VFTA_SIZE 128

#define IGB_HKEY_MAX_INDEX             10
#define IGB_MAX_RX_QUEUE_NUM           8
#define IGB_MAX_RX_QUEUE_NUM_82576     16

#define E1000_I219_MAX_RX_QUEUE_NUM             2
#define E1000_I219_MAX_TX_QUEUE_NUM             2

#define E1000_SYN_FILTER_ENABLE        0x00000001 /* syn filter enable field */
#define E1000_SYN_FILTER_QUEUE         0x0000000E /* syn filter queue field */
#define E1000_SYN_FILTER_QUEUE_SHIFT   1          /* syn filter queue field shift */
#define E1000_RFCTL_SYNQFP             0x00080000 /* SYNQFP in RFCTL register */

#define E1000_ETQF_ETHERTYPE           0x0000FFFF
#define E1000_ETQF_QUEUE               0x00070000
#define E1000_ETQF_QUEUE_SHIFT         16
#define E1000_MAX_ETQF_FILTERS         8

#define E1000_IMIR_DSTPORT             0x0000FFFF
#define E1000_IMIR_PRIORITY            0xE0000000
#define E1000_MAX_TTQF_FILTERS         8
#define E1000_2TUPLE_MAX_PRI           7

#define E1000_MAX_FLEX_FILTERS           8
#define E1000_MAX_FHFT                   4
#define E1000_MAX_FHFT_EXT               4
#define E1000_FHFT_SIZE_IN_DWD           64
#define E1000_MAX_FLEX_FILTER_PRI        7
#define E1000_MAX_FLEX_FILTER_LEN        128
#define E1000_MAX_FLEX_FILTER_DWDS \
        (E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
#define E1000_FLEX_FILTERS_MASK_SIZE \
        (E1000_MAX_FLEX_FILTER_DWDS / 2)
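
/*
 * For reference, derived from the definitions above: with a maximum flex
 * filter length of 128 bytes, E1000_MAX_FLEX_FILTER_DWDS evaluates to
 * 128 / sizeof(uint32_t) = 32 dwords, and E1000_FLEX_FILTERS_MASK_SIZE to
 * 32 / 2 = 16 mask bytes, i.e. one mask bit per flex filter byte.
 */
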
#define E1000_FHFT_QUEUEING_LEN          0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE        0x00000700
#define E1000_FHFT_QUEUEING_PRIO         0x00070000
#define E1000_FHFT_QUEUEING_OFFSET       0xFC
#define E1000_FHFT_QUEUEING_QUEUE_SHIFT  8
#define E1000_FHFT_QUEUEING_PRIO_SHIFT   16
#define E1000_WUFC_FLEX_HQ               0x00004000

#define E1000_SPQF_SRCPORT               0x0000FFFF

#define E1000_MAX_FTQF_FILTERS           8
#define E1000_FTQF_PROTOCOL_MASK         0x000000FF
#define E1000_FTQF_5TUPLE_MASK_SHIFT     28
#define E1000_FTQF_QUEUE_MASK            0x03ff0000
#define E1000_FTQF_QUEUE_SHIFT           16
#define E1000_FTQF_QUEUE_ENABLE          0x00000100

#define IGB_RSS_OFFLOAD_ALL ( \
        RTE_ETH_RSS_IPV4 | \
        RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
        RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
        RTE_ETH_RSS_IPV6 | \
        RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
        RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
        RTE_ETH_RSS_IPV6_EX | \
        RTE_ETH_RSS_IPV6_TCP_EX | \
        RTE_ETH_RSS_IPV6_UDP_EX)

/*
 * The overhead from MTU to max frame size.
 * A single VLAN tag is also counted in the overhead.
 */
#define E1000_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
                                VLAN_TAG_SIZE)
#define E1000_ETH_MAX_LEN (RTE_ETHER_MTU + E1000_ETH_OVERHEAD)
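
/*
 * Worked example, assuming the usual DPDK values (RTE_ETHER_HDR_LEN = 14,
 * RTE_ETHER_CRC_LEN = 4, VLAN_TAG_SIZE = 4, RTE_ETHER_MTU = 1500):
 * E1000_ETH_OVERHEAD = 14 + 4 + 4 = 22 bytes and
 * E1000_ETH_MAX_LEN = 1500 + 22 = 1522 bytes.
 */
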
/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
#define E1000_MIN_RING_DESC     32
#define E1000_MAX_RING_DESC     4096

/*
 * TDBA/RDBA should be aligned on a 16 byte boundary, but TDLEN/RDLEN must be
 * a multiple of 128 bytes, so we align TDBA/RDBA on a 128 byte boundary.
 * This also plays well with the cache line size; the hardware supports cache
 * line sizes of up to 128 bytes.
 */
#define E1000_ALIGN     128

#define IGB_RXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
#define IGB_TXD_ALIGN   (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))

#define EM_RXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_rx_desc))
#define EM_TXD_ALIGN    (E1000_ALIGN / sizeof(struct e1000_data_desc))
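
/*
 * For reference (assuming the usual 16-byte descriptor layouts): both the
 * advanced (igb) and legacy (em) descriptors are 16 bytes, so each of the
 * alignment values above works out to 128 / 16 = 8, i.e. ring sizes are
 * rounded to a multiple of 8 descriptors so that RDLEN/TDLEN stays a
 * multiple of 128 bytes.
 */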

#define E1000_MISC_VEC_ID               RTE_INTR_VEC_ZERO_OFFSET
#define E1000_RX_VEC_START              RTE_INTR_VEC_RXTX_OFFSET

#define IGB_TX_MAX_SEG     UINT8_MAX
#define IGB_TX_MAX_MTU_SEG UINT8_MAX
#define EM_TX_MAX_SEG      UINT8_MAX
#define EM_TX_MAX_MTU_SEG  UINT8_MAX

#define MAC_TYPE_FILTER_SUP(type)    do {\
        if ((type) != e1000_82580 && (type) != e1000_i350 &&\
                (type) != e1000_82576 && (type) != e1000_i210 &&\
                (type) != e1000_i211)\
                return -ENOTSUP;\
} while (0)

#define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
        if ((type) != e1000_82580 && (type) != e1000_i350 &&\
                (type) != e1000_i210 && (type) != e1000_i211)\
                return -ENOTSUP; \
} while (0)
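
/*
 * Usage sketch (illustrative only; the function name below is hypothetical):
 * these macros are placed at the top of a filter-programming routine and make
 * the caller return -ENOTSUP when the MAC type does not support the filter.
 *
 *	static int
 *	example_add_ntuple_filter(struct rte_eth_dev *dev)
 *	{
 *		struct e1000_hw *hw =
 *			E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 *
 *		MAC_TYPE_FILTER_SUP(hw->mac.type); // may return -ENOTSUP here
 *		// ... program the filter registers ...
 *		return 0;
 *	}
 */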

/* structure for interrupt-related data */
struct e1000_interrupt {
        uint32_t flags;
        uint32_t mask;
};

/* local VFTA (VLAN filter table) copy */
struct e1000_vfta {
        uint32_t vfta[IGB_VFTA_SIZE];
};

/*
 * VF data used by the PF host only
 */
#define E1000_MAX_VF_MC_ENTRIES         30
struct e1000_vf_info {
        uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
        uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];
        uint16_t num_vf_mc_hashes;
        uint16_t default_vf_vlan_id;
        uint16_t vlans_enabled;
        uint16_t pf_qos;
        uint16_t vlan_count;
        uint16_t tx_rate;
};

TAILQ_HEAD(e1000_flex_filter_list, e1000_flex_filter);

struct e1000_flex_filter_info {
        uint16_t len;
        uint32_t dwords[E1000_MAX_FLEX_FILTER_DWDS]; /* flex bytes in dword. */
        /* if mask bit is 1b, do not compare corresponding byte in dwords. */
        uint8_t mask[E1000_FLEX_FILTERS_MASK_SIZE];
        uint8_t priority;
};

/* Flex filter structure */
struct e1000_flex_filter {
        TAILQ_ENTRY(e1000_flex_filter) entries;
        uint16_t index; /* index of flex filter */
        struct e1000_flex_filter_info filter_info;
        uint16_t queue; /* rx queue this filter is assigned to */
};

TAILQ_HEAD(e1000_5tuple_filter_list, e1000_5tuple_filter);
TAILQ_HEAD(e1000_2tuple_filter_list, e1000_2tuple_filter);

struct e1000_5tuple_filter_info {
        uint32_t dst_ip;
        uint32_t src_ip;
        uint16_t dst_port;
        uint16_t src_port;
        uint8_t proto;           /* l4 protocol. */
        /* a packet matching the above 5-tuple and carrying any of these TCP flags hits this filter. */
        uint8_t tcp_flags;
        uint8_t priority;        /* seven levels (001b-111b), 111b is highest,
                                      used when more than one filter matches. */
        uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
                src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
                dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
                src_port_mask:1, /* if mask is 1b, do not compare src port. */
                proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};
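
/*
 * Illustrative example (not taken from the driver): to steer TCP packets with
 * destination port 80 to a queue regardless of addresses and source port, set
 * dst_port = 80 and proto = IPPROTO_TCP, set dst_ip_mask, src_ip_mask and
 * src_port_mask to 1 so those fields are ignored, and leave dst_port_mask and
 * proto_mask at 0 so they are compared.
 */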

struct e1000_2tuple_filter_info {
        uint16_t dst_port;
        uint8_t proto;           /* l4 protocol. */
        /* a packet matching the above 2-tuple and carrying any of these TCP flags hits this filter. */
        uint8_t tcp_flags;
        uint8_t priority;        /* seven levels (001b-111b), 111b is highest,
                                      used when more than one filter matches. */
        uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
                src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
                dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
                src_port_mask:1, /* if mask is 1b, do not compare src port. */
                proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

/* 5tuple filter structure */
struct e1000_5tuple_filter {
        TAILQ_ENTRY(e1000_5tuple_filter) entries;
        uint16_t index;       /* the index of 5tuple filter */
        struct e1000_5tuple_filter_info filter_info;
        uint16_t queue;       /* rx queue this filter is assigned to */
};

/* 2tuple filter structure */
struct e1000_2tuple_filter {
        TAILQ_ENTRY(e1000_2tuple_filter) entries;
        uint16_t index;         /* the index of 2tuple filter */
        struct e1000_2tuple_filter_info filter_info;
        uint16_t queue;       /* rx queue this filter is assigned to */
};

/* ethertype filter structure */
struct igb_ethertype_filter {
        uint16_t ethertype;
        uint32_t etqf;
};

struct igb_rte_flow_rss_conf {
        struct rte_flow_action_rss conf; /**< RSS parameters. */
        uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
        /* Queue indices to use. */
        uint16_t queue[IGB_MAX_RX_QUEUE_NUM_82576];
};

/*
 * Structure to store filters' info.
 */
struct e1000_filter_info {
        uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
        /* store used ethertype filters */
        struct igb_ethertype_filter ethertype_filters[E1000_MAX_ETQF_FILTERS];
        uint8_t flex_mask;      /* Bit mask for every used flex filter */
        struct e1000_flex_filter_list flex_list;
        /* Bit mask for every used 5tuple filter */
        uint8_t fivetuple_mask;
        struct e1000_5tuple_filter_list fivetuple_list;
        /* Bit mask for every used 2tuple filter */
        uint8_t twotuple_mask;
        struct e1000_2tuple_filter_list twotuple_list;
        /* store the SYN filter info */
        uint32_t syn_info;
        /* store the rss filter info */
        struct igb_rte_flow_rss_conf rss_info;
};

/*
 * Structure to store private data for each driver instance (for each port).
 */
struct e1000_adapter {
        struct e1000_hw         hw;
        struct e1000_hw_stats   stats;
        struct e1000_interrupt  intr;
        struct e1000_vfta       shadow_vfta;
        struct e1000_vf_info    *vfdata;
        struct e1000_filter_info filter;
        bool stopped;
        struct rte_timecounter  systime_tc;
        struct rte_timecounter  rx_tstamp_tc;
        struct rte_timecounter  tx_tstamp_tc;
};

#define E1000_DEV_PRIVATE(adapter) \
        ((struct e1000_adapter *)adapter)

#define E1000_DEV_PRIVATE_TO_HW(adapter) \
        (&((struct e1000_adapter *)adapter)->hw)

#define E1000_DEV_PRIVATE_TO_STATS(adapter) \
        (&((struct e1000_adapter *)adapter)->stats)

#define E1000_DEV_PRIVATE_TO_INTR(adapter) \
        (&((struct e1000_adapter *)adapter)->intr)

#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
        (&((struct e1000_adapter *)adapter)->shadow_vfta)

#define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \
        (&((struct e1000_adapter *)adapter)->vfdata)

#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
        (&((struct e1000_adapter *)adapter)->filter)
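
/*
 * Usage sketch (illustrative only): driver callbacks receive a
 * struct rte_eth_dev and use the macros above to reach the per-port state
 * kept in dev->data->dev_private, e.g.
 *
 *	struct e1000_hw *hw =
 *		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 *	struct e1000_filter_info *filter_info =
 *		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 */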

struct rte_flow {
        enum rte_filter_type filter_type;
        void *rule;
};
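
/*
 * Note (an assumption based on the filter lists declared below): filter_type
 * selects the kind of rule, and the rule pointer then refers to the matching
 * list element, e.g. an igb_ntuple_filter_ele for an n-tuple rule or an
 * igb_rss_conf_ele for an RSS rule.
 */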

/* ntuple filter list structure */
struct igb_ntuple_filter_ele {
        TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
        struct rte_eth_ntuple_filter filter_info;
};

/* ethertype filter list structure */
struct igb_ethertype_filter_ele {
        TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
        struct rte_eth_ethertype_filter filter_info;
};

/* syn filter list structure */
struct igb_eth_syn_filter_ele {
        TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
        struct rte_eth_syn_filter filter_info;
};

#define IGB_FLEX_FILTER_MAXLEN  128     /**< bytes to use in flex filter. */
#define IGB_FLEX_FILTER_MASK_SIZE       \
        (RTE_ALIGN(IGB_FLEX_FILTER_MAXLEN, CHAR_BIT) / CHAR_BIT)
                                        /**< mask bytes in flex filter. */
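
/*
 * For reference: with IGB_FLEX_FILTER_MAXLEN = 128 and CHAR_BIT = 8,
 * IGB_FLEX_FILTER_MASK_SIZE evaluates to RTE_ALIGN(128, 8) / 8 = 16 mask
 * bytes, i.e. one mask bit per flex byte, mirroring
 * E1000_FLEX_FILTERS_MASK_SIZE above.
 */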

/**
 * A structure used to define the flex filter entry
 * to support RTE_ETH_FILTER_FLEXIBLE data representation.
 */
struct igb_flex_filter {
        uint16_t len;
        uint8_t bytes[IGB_FLEX_FILTER_MAXLEN]; /**< flex bytes in big endian. */
        uint8_t mask[IGB_FLEX_FILTER_MASK_SIZE];
                /**< if mask bit is 1b, do not compare corresponding byte. */
        uint8_t priority;
        uint16_t queue;       /**< Queue assigned to matching packets. */
};

/* flex filter list structure */
struct igb_flex_filter_ele {
        TAILQ_ENTRY(igb_flex_filter_ele) entries;
        struct igb_flex_filter filter_info;
};

/* rss filter list structure */
struct igb_rss_conf_ele {
        TAILQ_ENTRY(igb_rss_conf_ele) entries;
        struct igb_rte_flow_rss_conf filter_info;
};

/* igb_flow memory list structure */
struct igb_flow_mem {
        TAILQ_ENTRY(igb_flow_mem) entries;
        struct rte_flow *flow;
        struct rte_eth_dev *dev;
};

TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
extern struct igb_ntuple_filter_list igb_filter_ntuple_list;
TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
extern struct igb_ethertype_filter_list igb_filter_ethertype_list;
TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
extern struct igb_syn_filter_list igb_filter_syn_list;
TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
extern struct igb_flex_filter_list igb_filter_flex_list;
TAILQ_HEAD(igb_rss_filter_list, igb_rss_conf_ele);
extern struct igb_rss_filter_list igb_filter_rss_list;
TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
extern struct igb_flow_mem_list igb_flow_list;

extern const struct rte_flow_ops igb_flow_ops;

/*
 * RX/TX IGB function prototypes
 */
void eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void igb_dev_clear_queues(struct rte_eth_dev *dev);
void igb_dev_free_queues(struct rte_eth_dev *dev);

uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);

uint32_t eth_igb_rx_queue_count(void *rx_queue);

int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);

uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);

int eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt);

int eth_igb_rx_init(struct rte_eth_dev *dev);

void eth_igb_tx_init(struct rte_eth_dev *dev);

uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t eth_igb_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

uint16_t eth_igb_recv_scattered_pkts(void *rxq,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

int eth_igb_rss_hash_update(struct rte_eth_dev *dev,
                            struct rte_eth_rss_conf *rss_conf);

int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
                              struct rte_eth_rss_conf *rss_conf);

int eth_igbvf_rx_init(struct rte_eth_dev *dev);

void eth_igbvf_tx_init(struct rte_eth_dev *dev);

/*
 * misc function prototypes
 */
void igb_pf_host_init(struct rte_eth_dev *eth_dev);

void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo);

void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);

uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);

/*
 * RX/TX EM function prototypes
 */
void eth_em_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void eth_em_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);

uint64_t em_get_rx_port_offloads_capa(void);
uint64_t em_get_rx_queue_offloads_capa(void);

int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);

uint32_t eth_em_rx_queue_count(void *rx_queue);

int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);

uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);

int eth_em_rx_init(struct rte_eth_dev *dev);

void eth_em_tx_init(struct rte_eth_dev *dev);

uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t eth_em_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo);

void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);

void igb_pf_host_uninit(struct rte_eth_dev *dev);

void igb_filterlist_flush(struct rte_eth_dev *dev);
int igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
                struct e1000_5tuple_filter *filter);
int igb_delete_2tuple_filter(struct rte_eth_dev *dev,
                struct e1000_2tuple_filter *filter);
void igb_remove_flex_filter(struct rte_eth_dev *dev,
                        struct e1000_flex_filter *filter);
int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
        uint8_t idx);
int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
                struct rte_eth_ntuple_filter *ntuple_filter, bool add);
int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter,
                        bool add);
int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter,
                        bool add);
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
                        struct igb_flex_filter *filter,
                        bool add);
int igb_rss_conf_init(struct rte_eth_dev *dev,
                      struct igb_rte_flow_rss_conf *out,
                      const struct rte_flow_action_rss *in);
int igb_action_rss_same(const struct rte_flow_action_rss *comp,
                        const struct rte_flow_action_rss *with);
int igb_config_rss_filter(struct rte_eth_dev *dev,
                        struct igb_rte_flow_rss_conf *conf,
                        bool add);
void em_flush_desc_rings(struct rte_eth_dev *dev);

#endif /* _E1000_ETHDEV_H_ */