dpdk/drivers/net/iavf/iavf_rxtx.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IAVF_RXTX_H_
#define _IAVF_RXTX_H_

/* The ring length (QLEN) must be a whole multiple of 32 descriptors. */
#define IAVF_ALIGN_RING_DESC      32
#define IAVF_MIN_RING_DESC        64
#define IAVF_MAX_RING_DESC        4096
#define IAVF_DMA_MEM_ALIGN        4096
/* Base address of the HW descriptor ring should be 128B aligned. */
#define IAVF_RING_BASE_ALIGN      128
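
/* Illustrative sketch (not part of the driver): one way a queue setup
 * path could clamp and align a requested ring size against the
 * constants above. The helper name is hypothetical; RTE_ALIGN_CEIL
 * (rte_common.h) requires a power-of-two alignment, which
 * IAVF_ALIGN_RING_DESC is.
 */
static inline uint16_t
iavf_example_align_ring_size(uint16_t requested)
{
        uint32_t nb_desc = RTE_ALIGN_CEIL(requested, IAVF_ALIGN_RING_DESC);

        if (nb_desc < IAVF_MIN_RING_DESC)
                nb_desc = IAVF_MIN_RING_DESC;
        else if (nb_desc > IAVF_MAX_RING_DESC)
                nb_desc = IAVF_MAX_RING_DESC;
        return (uint16_t)nb_desc;
}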

/* used for Rx Bulk Allocate */
#define IAVF_RX_MAX_BURST         32

/* used for Vector PMD */
#define IAVF_VPMD_RX_MAX_BURST    32
#define IAVF_VPMD_TX_MAX_BURST    32
#define IAVF_RXQ_REARM_THRESH     32
#define IAVF_VPMD_DESCS_PER_LOOP  4
#define IAVF_VPMD_TX_MAX_FREE_BUF 64

#define IAVF_NO_VECTOR_FLAGS (                           \
                DEV_TX_OFFLOAD_MULTI_SEGS |              \
                DEV_TX_OFFLOAD_VLAN_INSERT |             \
                DEV_TX_OFFLOAD_SCTP_CKSUM |              \
                DEV_TX_OFFLOAD_UDP_CKSUM |               \
                DEV_TX_OFFLOAD_TCP_TSO |                 \
                DEV_TX_OFFLOAD_TCP_CKSUM)

#define DEFAULT_TX_RS_THRESH     32
#define DEFAULT_TX_FREE_THRESH   32

#define IAVF_MIN_TSO_MSS          256
#define IAVF_MAX_TSO_MSS          9668
#define IAVF_TSO_MAX_SEG          UINT8_MAX
#define IAVF_TX_MAX_MTU_SEG       8

#define IAVF_TX_CKSUM_OFFLOAD_MASK (             \
                PKT_TX_IP_CKSUM |                \
                PKT_TX_L4_MASK |                 \
                PKT_TX_TCP_SEG)

#define IAVF_TX_OFFLOAD_MASK (  \
                PKT_TX_OUTER_IPV6 |              \
                PKT_TX_OUTER_IPV4 |              \
                PKT_TX_IPV6 |                    \
                PKT_TX_IPV4 |                    \
                PKT_TX_VLAN_PKT |                \
                PKT_TX_IP_CKSUM |                \
                PKT_TX_L4_MASK |                 \
                PKT_TX_TCP_SEG)

#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
                (PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
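
/* Illustrative sketch (not part of the driver): how a Tx prepare path
 * can use IAVF_TX_OFFLOAD_NOTSUP_MASK to reject an mbuf requesting an
 * offload this PMD does not support. The helper name is hypothetical.
 */
static inline int
iavf_example_check_tx_offload(const struct rte_mbuf *m)
{
        if (m->ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK)
                return -ENOTSUP; /* unsupported offload requested */
        return 0;
}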

/**
 * Rx Flex Descriptors
 * These descriptors are used in place of the legacy descriptors
 */
union iavf_16b_rx_flex_desc {
        struct {
                __le64 pkt_addr; /* Packet buffer address */
                __le64 hdr_addr; /* Header buffer address */
                                 /* bit 0 of hdr_addr is DD bit */
        } read;
        struct {
                /* Qword 0 */
                u8 rxdid; /* descriptor builder profile ID */
                u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
                __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
                __le16 pkt_len; /* [15:14] are reserved */
                __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
                                                /* sph=[11:11] */
                                                /* ff1/ext=[15:12] */

                /* Qword 1 */
                __le16 status_error0;
                __le16 l2tag1;
                __le16 flex_meta0;
                __le16 flex_meta1;
        } wb; /* writeback */
};

union iavf_32b_rx_flex_desc {
        struct {
                __le64 pkt_addr; /* Packet buffer address */
                __le64 hdr_addr; /* Header buffer address */
                                 /* bit 0 of hdr_addr is DD bit */
                __le64 rsvd1;
                __le64 rsvd2;
        } read;
        struct {
                /* Qword 0 */
                u8 rxdid; /* descriptor builder profile ID */
                u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
                __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
                __le16 pkt_len; /* [15:14] are reserved */
                __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
                                                /* sph=[11:11] */
                                                /* ff1/ext=[15:12] */

                /* Qword 1 */
                __le16 status_error0;
                __le16 l2tag1;
                __le16 flex_meta0;
                __le16 flex_meta1;

                /* Qword 2 */
                __le16 status_error1;
                u8 flex_flags2;
                u8 time_stamp_low;
                __le16 l2tag2_1st;
                __le16 l2tag2_2nd;

                /* Qword 3 */
                __le16 flex_meta2;
                __le16 flex_meta3;
                union {
                        struct {
                                __le16 flex_meta4;
                                __le16 flex_meta5;
                        } flex;
                        __le32 ts_high;
                } flex_ts;
        } wb; /* writeback */
};

/* HW desc structure, both 16-byte and 32-byte types are supported */
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
#define iavf_rx_desc iavf_16byte_rx_desc
#define iavf_rx_flex_desc iavf_16b_rx_flex_desc
#else
#define iavf_rx_desc iavf_32byte_rx_desc
#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
#endif

typedef void (*iavf_rxd_to_pkt_fields_t)(struct iavf_rx_queue *rxq,
                                struct rte_mbuf *mb,
                                volatile union iavf_rx_flex_desc *rxdp);

struct iavf_rxq_ops {
        void (*release_mbufs)(struct iavf_rx_queue *rxq);
};

struct iavf_txq_ops {
        void (*release_mbufs)(struct iavf_tx_queue *txq);
};

/* Structure associated with each Rx queue. */
struct iavf_rx_queue {
        struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
        const struct rte_memzone *mz; /* memzone for Rx ring */
        volatile union iavf_rx_desc *rx_ring; /* Rx ring virtual address */
        uint64_t rx_ring_phys_addr;   /* Rx ring DMA address */
        struct rte_mbuf **sw_ring;     /* address of SW ring */
        uint16_t nb_rx_desc;          /* ring length */
        uint16_t rx_tail;             /* current value of tail */
        volatile uint8_t *qrx_tail;   /* register address of tail */
        uint16_t rx_free_thresh;      /* max free RX desc to hold */
        uint16_t nb_rx_hold;          /* number of held free RX desc */
        struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
        struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
        struct rte_mbuf fake_mbuf;      /* dummy mbuf */
        uint8_t rxdid;

        /* used for VPMD */
        uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
        uint16_t rxrearm_start;    /* the idx we start the re-arming from */
        uint64_t mbuf_initializer; /* value to init mbufs */

        /* for rx bulk */
        uint16_t rx_nb_avail;      /* number of staged packets ready */
        uint16_t rx_next_avail;    /* index of next staged packets */
        uint16_t rx_free_trigger;  /* triggers rx buffer allocation */
        struct rte_mbuf *rx_stage[IAVF_RX_MAX_BURST * 2]; /* store mbuf */

        uint16_t port_id;        /* device port ID */
        uint8_t crc_len;        /* 0 if CRC stripped, 4 otherwise */
        uint8_t fdir_enabled;   /* 0 if FDIR disabled, 1 when enabled */
        uint16_t queue_id;      /* Rx queue index */
        uint16_t rx_buf_len;    /* The packet buffer size */
        uint16_t rx_hdr_len;    /* The header buffer size */
        uint16_t max_pkt_len;   /* Maximum packet length */
        struct iavf_vsi *vsi; /**< the VSI this queue belongs to */

        bool q_set;             /* if rx queue has been configured */
        bool rx_deferred_start; /* don't start this queue in dev start */
        const struct iavf_rxq_ops *ops;
        uint8_t proto_xtr; /* protocol extraction type */
        uint64_t xtr_ol_flag;
                /* flexible descriptor metadata extraction offload flag */
        iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
                                /* handle flexible descriptor by RXDID */
};
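
/* Illustrative sketch (not part of the driver): the tail-update policy
 * implied by rx_free_thresh/nb_rx_hold. The tail register is written
 * only after enough descriptors have been refilled, and it points one
 * entry behind the software tail so HW never owns that descriptor.
 * The helper name is hypothetical.
 */
static inline void
iavf_example_rx_tail_update(struct iavf_rx_queue *rxq, uint16_t nb_hold)
{
        nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
        if (nb_hold > rxq->rx_free_thresh) {
                uint16_t rx_id = (uint16_t)((rxq->rx_tail == 0) ?
                        (rxq->nb_rx_desc - 1) : (rxq->rx_tail - 1));

                IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
                nb_hold = 0;
        }
        rxq->nb_rx_hold = nb_hold;
}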

struct iavf_tx_entry {
        struct rte_mbuf *mbuf;
        uint16_t next_id;
        uint16_t last_id;
};

struct iavf_tx_vec_entry {
        struct rte_mbuf *mbuf;
};

/* Structure associated with each TX queue. */
struct iavf_tx_queue {
        const struct rte_memzone *mz;  /* memzone for Tx ring */
        volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
        uint64_t tx_ring_phys_addr;    /* Tx ring DMA address */
        struct iavf_tx_entry *sw_ring;  /* address array of SW ring */
        uint16_t nb_tx_desc;           /* ring length */
        uint16_t tx_tail;              /* current value of tail */
        volatile uint8_t *qtx_tail;    /* register address of tail */
        /* number of used desc since RS bit set */
        uint16_t nb_used;
        uint16_t nb_free;
        uint16_t last_desc_cleaned;    /* last descriptor that has been cleaned */
        uint16_t free_thresh;
        uint16_t rs_thresh;

        uint16_t port_id;
        uint16_t queue_id;
        uint64_t offloads;
        uint16_t next_dd;              /* next to check DD, for VPMD */
        uint16_t next_rs;              /* next to set RS, for VPMD */

        bool q_set;                    /* if tx queue has been configured */
        bool tx_deferred_start;        /* don't start this queue in dev start */
        const struct iavf_txq_ops *ops;
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1       BIT(0)
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2       BIT(1)
        uint8_t vlan_flag;
};

/* Offload features */
union iavf_tx_offload {
        uint64_t data;
        struct {
                uint64_t l2_len:7; /* L2 (MAC) Header Length. */
                uint64_t l3_len:9; /* L3 (IP) Header Length. */
                uint64_t l4_len:8; /* L4 Header Length. */
                uint64_t tso_segsz:16; /* TCP TSO segment size */
                /* uint64_t unused : 24; */
        };
};
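
/* Illustrative sketch (not part of the driver): packing an outgoing
 * mbuf's header lengths into the union above, as a Tx context setup
 * path typically would. The helper name is hypothetical.
 */
static inline uint64_t
iavf_example_fill_tx_offload(const struct rte_mbuf *m)
{
        union iavf_tx_offload tx_offload = { .data = 0 };

        tx_offload.l2_len = m->l2_len;
        tx_offload.l3_len = m->l3_len;
        tx_offload.l4_len = m->l4_len;
        tx_offload.tso_segsz = m->tso_segsz;
        return tx_offload.data;
}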

/* Rx Flex Descriptor
 * RxDID Profile ID 16-21
 * Flex-field 0: RSS hash lower 16-bits
 * Flex-field 1: RSS hash upper 16-bits
 * Flex-field 2: Flow ID lower 16-bits
 * Flex-field 3: Flow ID upper 16-bits
 * Flex-field 4: AUX0
 * Flex-field 5: AUX1
 */
struct iavf_32b_rx_flex_desc_comms {
        /* Qword 0 */
        u8 rxdid;
        u8 mir_id_umb_cast;
        __le16 ptype_flexi_flags0;
        __le16 pkt_len;
        __le16 hdr_len_sph_flex_flags1;

        /* Qword 1 */
        __le16 status_error0;
        __le16 l2tag1;
        __le32 rss_hash;

        /* Qword 2 */
        __le16 status_error1;
        u8 flexi_flags2;
        u8 ts_low;
        __le16 l2tag2_1st;
        __le16 l2tag2_2nd;

        /* Qword 3 */
        __le32 flow_id;
        union {
                struct {
                        __le16 aux0;
                        __le16 aux1;
                } flex;
                __le32 ts_high;
        } flex_ts;
};

/* Rx Flex Descriptor
 * RxDID Profile ID 22-23 (swap Hash and FlowID)
 * Flex-field 0: Flow ID lower 16-bits
 * Flex-field 1: Flow ID upper 16-bits
 * Flex-field 2: RSS hash lower 16-bits
 * Flex-field 3: RSS hash upper 16-bits
 * Flex-field 4: AUX0
 * Flex-field 5: AUX1
 */
struct iavf_32b_rx_flex_desc_comms_ovs {
        /* Qword 0 */
        u8 rxdid;
        u8 mir_id_umb_cast;
        __le16 ptype_flexi_flags0;
        __le16 pkt_len;
        __le16 hdr_len_sph_flex_flags1;

        /* Qword 1 */
        __le16 status_error0;
        __le16 l2tag1;
        __le32 flow_id;

        /* Qword 2 */
        __le16 status_error1;
        u8 flexi_flags2;
        u8 ts_low;
        __le16 l2tag2_1st;
        __le16 l2tag2_2nd;

        /* Qword 3 */
        __le32 rss_hash;
        union {
                struct {
                        __le16 aux0;
                        __le16 aux1;
                } flex;
                __le32 ts_high;
        } flex_ts;
};

/* Receive Flex Descriptor profile IDs: there are a total of 64
 * profiles. Profile IDs 0 and 1 are legacy profiles, and profiles
 * 2-63 are flex profiles that can be programmed to extract specific
 * metadata (profile 7 is reserved for HW).
 */
enum iavf_rxdid {
        IAVF_RXDID_LEGACY_0             = 0,
        IAVF_RXDID_LEGACY_1             = 1,
        IAVF_RXDID_FLEX_NIC             = 2,
        IAVF_RXDID_FLEX_NIC_2           = 6,
        IAVF_RXDID_HW                   = 7,
        IAVF_RXDID_COMMS_GENERIC        = 16,
        IAVF_RXDID_COMMS_AUX_VLAN       = 17,
        IAVF_RXDID_COMMS_AUX_IPV4       = 18,
        IAVF_RXDID_COMMS_AUX_IPV6       = 19,
        IAVF_RXDID_COMMS_AUX_IPV6_FLOW  = 20,
        IAVF_RXDID_COMMS_AUX_TCP        = 21,
        IAVF_RXDID_COMMS_OVS_1          = 22,
        IAVF_RXDID_COMMS_OVS_2          = 23,
        IAVF_RXDID_COMMS_AUX_IP_OFFSET  = 25,
        IAVF_RXDID_LAST                 = 63,
};

enum iavf_rx_flex_desc_status_error_0_bits {
        /* Note: These are predefined bit offsets */
        IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
        IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
        IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
        IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
        IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
        IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
        IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
        IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
        IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
        IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
        IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
        IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
        IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
        IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
        IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
        IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
        IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
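
/* Illustrative sketch (not part of the driver): testing the DD (done)
 * bit of a flex descriptor with the bit offsets above, as the Rx burst
 * functions do before reading the rest of the write-back. The helper
 * name is hypothetical.
 */
static inline int
iavf_example_flex_desc_done(volatile union iavf_rx_flex_desc *rxdp)
{
        uint16_t stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);

        return !!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S));
}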

enum iavf_rx_flex_desc_status_error_1_bits {
        /* Note: These are predefined bit offsets */
        IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
        IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
        IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
        /* [10:6] reserved */
        IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
        IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
        IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
        IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
        IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
        IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};

/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
#define IAVF_RX_FLEX_DESC_PTYPE_M       (0x3FF) /* 10-bits */

/* for iavf_32b_rx_flex_desc.pkt_len member */
#define IAVF_RX_FLX_DESC_PKT_LEN_M      (0x3FFF) /* 14-bits */
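
/* Illustrative sketch (not part of the driver): extracting the packet
 * type and packet length from a flex descriptor write-back using the
 * masks above. The helper names are hypothetical.
 */
static inline uint16_t
iavf_example_flex_desc_ptype(volatile union iavf_rx_flex_desc *rxdp)
{
        return rte_le_to_cpu_16(rxdp->wb.ptype_flex_flags0) &
                IAVF_RX_FLEX_DESC_PTYPE_M;
}

static inline uint16_t
iavf_example_flex_desc_pkt_len(volatile union iavf_rx_flex_desc *rxdp)
{
        return rte_le_to_cpu_16(rxdp->wb.pkt_len) &
                IAVF_RX_FLX_DESC_PKT_LEN_M;
}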

int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
                           const struct rte_eth_rxconf *rx_conf,
                           struct rte_mempool *mp);

int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void iavf_dev_rx_queue_release(void *rxq);

int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
                           const struct rte_eth_txconf *tx_conf);
int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
void iavf_dev_tx_queue_release(void *txq);
void iavf_stop_queues(struct rte_eth_dev *dev);
uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts);
uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue,
                                 struct rte_mbuf **rx_pkts,
                                 uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts(void *rx_queue,
                                 struct rte_mbuf **rx_pkts,
                                 uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
                                           struct rte_mbuf **rx_pkts,
                                           uint16_t nb_pkts);
uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts);
uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts);
void iavf_set_rx_function(struct rte_eth_dev *dev);
void iavf_set_tx_function(struct rte_eth_dev *dev);
void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                          struct rte_eth_rxq_info *qinfo);
void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                          struct rte_eth_txq_info *qinfo);
uint32_t iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
int iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);

uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                           uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
                                     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec(void *rx_queue,
                                     struct rte_mbuf **rx_pkts,
                                     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
                                               struct rte_mbuf **rx_pkts,
                                               uint16_t nb_pkts);
uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                                 uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue,
                                          struct rte_mbuf **rx_pkts,
                                          uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue,
                                           struct rte_mbuf **rx_pkts,
                                           uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
                                                    struct rte_mbuf **rx_pkts,
                                                    uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                            uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                                 uint16_t nb_pkts);
int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
                                   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_flex_rxd(void *rx_queue,
                                            struct rte_mbuf **rx_pkts,
                                            uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512(void *rx_queue,
                                             struct rte_mbuf **rx_pkts,
                                             uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
                                                      struct rte_mbuf **rx_pkts,
                                                      uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
                                   uint16_t nb_pkts);
int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);

uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);

const uint32_t *iavf_get_default_ptype_table(void);

static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
                            const volatile void *desc,
                            uint16_t rx_id)
{
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
        const volatile union iavf_16byte_rx_desc *rx_desc = desc;

        printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
               rxq->queue_id, rx_id, rx_desc->read.pkt_addr,
               rx_desc->read.hdr_addr);
#else
        const volatile union iavf_32byte_rx_desc *rx_desc = desc;

        printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
               " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
               rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
               rx_desc->read.rsvd1, rx_desc->read.rsvd2);
#endif
}

/* All Tx descriptors are 16 bytes, so just use one layout
 * to print the qwords
 */
static inline
void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
                            const volatile void *desc, uint16_t tx_id)
{
        const char *name;
        const volatile struct iavf_tx_desc *tx_desc = desc;
        enum iavf_tx_desc_dtype_value type;

        type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
                tx_desc->cmd_type_offset_bsz &
                rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
        switch (type) {
        case IAVF_TX_DESC_DTYPE_DATA:
                name = "Tx_data_desc";
                break;
        case IAVF_TX_DESC_DTYPE_CONTEXT:
                name = "Tx_context_desc";
                break;
        default:
                name = "unknown_desc";
                break;
        }

        printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
               txq->queue_id, name, tx_id, tx_desc->buffer_addr,
               tx_desc->cmd_type_offset_bsz);
}

#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
        int i; \
        for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
                struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
                if (!rxq) \
                        continue; \
                rxq->fdir_enabled = on; \
        } \
        PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
} while (0)

/* Enable/disable flow director Rx processing in data path. */
static inline
void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
{
        if (on) {
                /* enable flow director processing */
                FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
                ad->fdir_ref_cnt++;
        } else {
                if (ad->fdir_ref_cnt >= 1) {
                        ad->fdir_ref_cnt--;

                        if (ad->fdir_ref_cnt == 0)
                                FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
                }
        }
}
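
/* Illustrative sketch (not part of the driver): the enable/disable
 * calls above are reference counted, so rule create and destroy paths
 * can simply mirror each other. The wrapper name is hypothetical.
 */
static inline void
iavf_example_fdir_rule_lifecycle(struct iavf_adapter *ad)
{
        iavf_fdir_rx_proc_enable(ad, true);  /* on FDIR rule create */
        /* ... Rx path tags mbufs with FDIR IDs while enabled ... */
        iavf_fdir_rx_proc_enable(ad, false); /* on FDIR rule destroy */
}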

#ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
        iavf_dump_rx_descriptor(rxq, desc, rx_id)
#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) \
        iavf_dump_tx_descriptor(txq, desc, tx_id)
#else
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
#endif

#endif /* _IAVF_RXTX_H_ */