dpdk/drivers/net/hns3/hns3_rxtx.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#ifndef _HNS3_RXTX_H_
#define _HNS3_RXTX_H_

#include <stdint.h>
#include <rte_mbuf_core.h>

#define HNS3_MIN_RING_DESC      64
#define HNS3_MAX_RING_DESC      32768
#define HNS3_DEFAULT_RING_DESC  1024
#define HNS3_ALIGN_RING_DESC    32
#define HNS3_RING_BASE_ALIGN    128
#define HNS3_BULK_ALLOC_MBUF_NUM        32

#define HNS3_DEFAULT_RX_FREE_THRESH     32
#define HNS3_DEFAULT_TX_FREE_THRESH     32
#define HNS3_DEFAULT_TX_RS_THRESH       32
#define HNS3_TX_FAST_FREE_AHEAD         64

#define HNS3_DEFAULT_RX_BURST           32
#if (HNS3_DEFAULT_RX_BURST > 64)
#error "PMD HNS3: HNS3_DEFAULT_RX_BURST must be <= 64"
#endif
#define HNS3_DEFAULT_DESCS_PER_LOOP     4
#define HNS3_SVE_DEFAULT_DESCS_PER_LOOP 8
#if (HNS3_DEFAULT_DESCS_PER_LOOP > HNS3_SVE_DEFAULT_DESCS_PER_LOOP)
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_DEFAULT_DESCS_PER_LOOP
#else
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_SVE_DEFAULT_DESCS_PER_LOOP
#endif
#define HNS3_DEFAULT_RXQ_REARM_THRESH   64
#define HNS3_UINT8_BIT                  8
#define HNS3_UINT16_BIT                 16
#define HNS3_UINT32_BIT                 32

#define HNS3_512_BD_BUF_SIZE    512
#define HNS3_1K_BD_BUF_SIZE     1024
#define HNS3_2K_BD_BUF_SIZE     2048
#define HNS3_4K_BD_BUF_SIZE     4096

#define HNS3_MIN_BD_BUF_SIZE    HNS3_512_BD_BUF_SIZE
#define HNS3_MAX_BD_BUF_SIZE    HNS3_4K_BD_BUF_SIZE

#define HNS3_BD_SIZE_512_TYPE                   0
#define HNS3_BD_SIZE_1024_TYPE                  1
#define HNS3_BD_SIZE_2048_TYPE                  2
#define HNS3_BD_SIZE_4096_TYPE                  3

#define HNS3_RX_FLAG_VLAN_PRESENT               0x1
#define HNS3_RX_FLAG_L3ID_IPV4                  0x0
#define HNS3_RX_FLAG_L3ID_IPV6                  0x1
#define HNS3_RX_FLAG_L4ID_UDP                   0x0
#define HNS3_RX_FLAG_L4ID_TCP                   0x1

#define HNS3_RXD_DMAC_S                         0
#define HNS3_RXD_DMAC_M                         (0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S                         2
#define HNS3_RXD_VLAN_M                         (0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S                         4
#define HNS3_RXD_L3ID_M                         (0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S                         8
#define HNS3_RXD_L4ID_M                         (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B                         12
#define HNS3_RXD_STRP_TAGP_S                    13
#define HNS3_RXD_STRP_TAGP_M                    (0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B                          16
#define HNS3_RXD_L3E_B                          17
#define HNS3_RXD_L4E_B                          18
#define HNS3_RXD_TRUNCATE_B                     19
#define HNS3_RXD_HOI_B                          20
#define HNS3_RXD_DOI_B                          21
#define HNS3_RXD_OL3E_B                         22
#define HNS3_RXD_OL4E_B                         23
#define HNS3_RXD_GRO_COUNT_S                    24
#define HNS3_RXD_GRO_COUNT_M                    (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B                    30
#define HNS3_RXD_GRO_ECN_B                      31

#define HNS3_RXD_ODMAC_S                        0
#define HNS3_RXD_ODMAC_M                        (0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S                        2
#define HNS3_RXD_OVLAN_M                        (0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S                        4
#define HNS3_RXD_OL3ID_M                        (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S                        8
#define HNS3_RXD_OL4ID_M                        (0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_FBHI_S                         12
#define HNS3_RXD_FBHI_M                         (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S                         14
#define HNS3_RXD_FBLI_M                         (0x3 << HNS3_RXD_FBLI_S)

#define HNS3_RXD_BDTYPE_S                       0
#define HNS3_RXD_BDTYPE_M                       (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B                          4
#define HNS3_RXD_UDP0_B                         5
#define HNS3_RXD_EXTEND_B                       7
#define HNS3_RXD_FE_B                           8
#define HNS3_RXD_LUM_B                          9
#define HNS3_RXD_CRCP_B                         10
#define HNS3_RXD_L3L4P_B                        11
#define HNS3_RXD_TSIND_S                        12
#define HNS3_RXD_TSIND_M                        (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B                         15
#define HNS3_RXD_GRO_SIZE_S                     16
#define HNS3_RXD_GRO_SIZE_M                     (0x3fff << HNS3_RXD_GRO_SIZE_S)

#define HNS3_TXD_L3T_S                          0
#define HNS3_TXD_L3T_M                          (0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S                          2
#define HNS3_TXD_L4T_M                          (0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B                         4
#define HNS3_TXD_L4CS_B                         5
#define HNS3_TXD_VLAN_B                         6
#define HNS3_TXD_TSO_B                          7

#define HNS3_TXD_L2LEN_S                        8
#define HNS3_TXD_L2LEN_M                        (0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S                        16
#define HNS3_TXD_L3LEN_M                        (0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S                        24
#define HNS3_TXD_L4LEN_M                        (0xffUL << HNS3_TXD_L4LEN_S)

#define HNS3_TXD_OL3T_S                         0
#define HNS3_TXD_OL3T_M                         (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B                        2
#define HNS3_TXD_MACSEC_B                       3
#define HNS3_TXD_TUNTYPE_S                      4
#define HNS3_TXD_TUNTYPE_M                      (0xf << HNS3_TXD_TUNTYPE_S)

#define HNS3_TXD_BDTYPE_S                       0
#define HNS3_TXD_BDTYPE_M                       (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B                           4
#define HNS3_TXD_SC_S                           5
#define HNS3_TXD_SC_M                           (0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B                       7
#define HNS3_TXD_VLD_B                          8
#define HNS3_TXD_RI_B                           9
#define HNS3_TXD_RA_B                           10
#define HNS3_TXD_TSYN_B                         11
#define HNS3_TXD_DECTTL_S                       12
#define HNS3_TXD_DECTTL_M                       (0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_MSS_S                          0
#define HNS3_TXD_MSS_M                          (0x3fff << HNS3_TXD_MSS_S)

#define HNS3_L2_LEN_UNIT                        1UL
#define HNS3_L3_LEN_UNIT                        2UL
#define HNS3_L4_LEN_UNIT                        2UL

#define HNS3_TXD_DEFAULT_BDTYPE         0
#define HNS3_TXD_VLD_CMD                (0x1 << HNS3_TXD_VLD_B)
#define HNS3_TXD_FE_CMD                 (0x1 << HNS3_TXD_FE_B)
#define HNS3_TXD_DEFAULT_VLD_FE_BDTYPE          \
                (HNS3_TXD_VLD_CMD | HNS3_TXD_FE_CMD | HNS3_TXD_DEFAULT_BDTYPE)
#define HNS3_TXD_SEND_SIZE_SHIFT        16
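
/*
 * Illustrative sketch (not part of the driver API) of how the default
 * command word above is typically written into the Tx BD control
 * field; "txq" and "desc" are hypothetical locals:
 *
 *      struct hns3_desc *desc = &txq->tx_ring[txq->next_to_use];
 *
 *      desc->tx.tp_fe_sc_vld_ra_ri =
 *              rte_cpu_to_le_16(HNS3_TXD_DEFAULT_VLD_FE_BDTYPE);
 */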

enum hns3_pkt_l2t_type {
        HNS3_L2_TYPE_UNICAST,
        HNS3_L2_TYPE_MULTICAST,
        HNS3_L2_TYPE_BROADCAST,
        HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
        HNS3_L3T_NONE,
        HNS3_L3T_IPV6,
        HNS3_L3T_IPV4,
        HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
        HNS3_L4T_UNKNOWN,
        HNS3_L4T_TCP,
        HNS3_L4T_UDP,
        HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
        HNS3_OL3T_NONE,
        HNS3_OL3T_IPV6,
        HNS3_OL3T_IPV4_NO_CSUM,
        HNS3_OL3T_IPV4_CSUM
};

enum hns3_pkt_tun_type {
        HNS3_TUN_NONE,
        HNS3_TUN_MAC_IN_UDP,
        HNS3_TUN_NVGRE,
        HNS3_TUN_OTHER
};

/* hardware spec ring buffer format */
struct hns3_desc {
        union {
                uint64_t addr;
                struct {
                        uint32_t addr0;
                        uint32_t addr1;
                };
        };
        union {
                struct {
                        uint16_t vlan_tag;
                        uint16_t send_size;
                        union {
                                /*
                                 * L3T | L4T | L3CS | L4CS | VLAN | TSO |
                                 * L2_LEN
                                 */
                                uint32_t type_cs_vlan_tso_len;
                                struct {
                                        uint8_t type_cs_vlan_tso;
                                        uint8_t l2_len;
                                        uint8_t l3_len;
                                        uint8_t l4_len;
                                };
                        };
                        uint16_t outer_vlan_tag;
                        uint16_t tv;
                        union {
                                /* OL3T | OVLAN | MACSEC */
                                uint32_t ol_type_vlan_len_msec;
                                struct {
                                        uint8_t ol_type_vlan_msec;
                                        uint8_t ol2_len;
                                        uint8_t ol3_len;
                                        uint8_t ol4_len;
                                };
                        };

                        uint32_t paylen;
                        uint16_t tp_fe_sc_vld_ra_ri;
                        uint16_t mss;
                } tx;

                struct {
                        uint32_t l234_info;
                        uint16_t pkt_len;
                        uint16_t size;
                        uint32_t rss_hash;
                        uint16_t fd_id;
                        uint16_t vlan_tag;
                        union {
                                uint32_t ol_info;
                                struct {
                                        uint16_t o_dm_vlan_id_fb;
                                        uint16_t ot_vlan_tag;
                                };
                        };
                        union {
                                uint32_t bd_base_info;
                                struct {
                                        uint16_t bdtype_vld_udp0;
                                        uint16_t fe_lum_crcp_l3l4p;
                                };
                        };
                } rx;
        };
} __rte_packed;
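
/*
 * A compile-time size check one might add (a sketch, assuming C11
 * _Static_assert is available): each hardware BD defined above is
 * expected to occupy exactly 32 bytes.
 *
 *      _Static_assert(sizeof(struct hns3_desc) == 32,
 *                     "hns3 BD must be 32 bytes");
 */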

struct hns3_entry {
        struct rte_mbuf *mbuf;
};

struct hns3_rx_basic_stats {
        uint64_t packets;
        uint64_t bytes;
        uint64_t errors;
};

struct hns3_rx_dfx_stats {
        uint64_t l3_csum_errors;
        uint64_t l4_csum_errors;
        uint64_t ol3_csum_errors;
        uint64_t ol4_csum_errors;
};

struct hns3_rx_bd_errors_stats {
        uint64_t l2_errors;
        uint64_t pkt_len_errors;
};

struct hns3_rx_queue {
        void *io_base;
        volatile void *io_head_reg;
        struct hns3_adapter *hns;
        struct hns3_ptype_table *ptype_tbl;
        struct rte_mempool *mb_pool;
        struct hns3_desc *rx_ring;
        uint64_t rx_ring_phys_addr; /* RX ring DMA address */
        const struct rte_memzone *mz;
        struct hns3_entry *sw_ring;
        struct rte_mbuf *pkt_first_seg;
        struct rte_mbuf *pkt_last_seg;

        uint16_t queue_id;
        uint16_t port_id;
        uint16_t nb_rx_desc;
        uint16_t rx_buf_len;
        /*
         * Threshold for the number of BDs held back from hardware. Once the
         * number of held BDs exceeds this threshold, the driver returns them
         * to hardware.
         */
        uint16_t rx_free_thresh;
        uint16_t next_to_use;    /* index of the next BD to be polled */
        uint16_t rx_free_hold;   /* number of BDs not yet returned to hardware */
        uint16_t rx_rearm_start; /* index of the BD the driver re-arms from */
        uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */

        /* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
        uint8_t crc_len;

        bool rx_deferred_start; /* don't start this queue in dev start */
        bool configured;        /* indicates whether the Rx queue is configured */
        /*
         * Indicates whether to ignore the outer VLAN field in the Rx BD
         * reported by hardware. On hardware network engines whose VLAN mode
         * is HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 920, the outer
         * VLAN field holds the PVID when a PVID is set, and that VLAN must
         * not be passed to the upper-layer application. On hardware network
         * engines whose VLAN mode is HNS3_HW_SHIFT_AND_DISCARD_MODE, such as
         * Kunpeng 930, the PVID is never reported in the BD, so the PMD does
         * not need to perform any PVID-related operation in Rx; in that case
         * pvid_sw_discard_en is false.
         */
        bool pvid_sw_discard_en;
        bool enabled;           /* indicates whether the Rx queue is enabled */

        struct hns3_rx_basic_stats basic_stats;
        /* DFX statistics for errors that do not require dropping the packet */
        struct hns3_rx_dfx_stats dfx_stats;
        /* statistics for errors that require the driver to drop the packet */
        struct hns3_rx_bd_errors_stats err_stats;

        struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
        uint16_t bulk_mbuf_num;

        /* offset_table: used by vector Rx to handle out-of-order execution */
        uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];
        uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
        struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
};
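
/*
 * A minimal sketch of the rx_free_thresh mechanism described above
 * (illustrative only; hns3_write_head_reg() stands in for whichever
 * register-write helper the driver actually uses):
 *
 *      rxq->rx_free_hold += nb_rx_bd;          // BDs consumed this burst
 *      if (rxq->rx_free_hold > rxq->rx_free_thresh) {
 *              // return the held BDs to hardware in one register write
 *              hns3_write_head_reg(rxq->io_head_reg, rxq->rx_free_hold);
 *              rxq->rx_free_hold = 0;
 *      }
 */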

struct hns3_tx_basic_stats {
        uint64_t packets;
        uint64_t bytes;
};

/*
 * The following items are used for statistics on abnormal errors in the
 * Tx datapath. When an upper-layer application calls the rte_eth_tx_burst
 * API to send multiple packets at a time on the hns3 network engine, some
 * abnormal conditions can prevent the driver from programming the hardware
 * to send the packets correctly.
 * Note: when the first abnormal error is detected within a burst, the
 * relevant error statistic is incremented by one and the transmit loop of
 * the burst is exited. So even if several packets in one burst would
 * trigger abnormal errors, the corresponding statistic is increased by
 * only one for that burst (see the sketch after the structure definition
 * below).
 * The Tx abnormal error statistics are described in detail below:
 * - over_length_pkt_cnt
 *     Total number of packets larger than the HNS3_MAX_FRAME_LEN the
 *     driver supports.
 *
 * - exceed_limit_bd_pkt_cnt
 *     Total number of packets that need more BDs than the hardware limit
 *     allows.
 *
 * - exceed_limit_bd_reassem_fail
 *     Total number of packets that need more BDs than the hardware limit
 *     allows and for which reassembly into fewer BDs failed.
 *
 * - unsupported_tunnel_pkt_cnt
 *     Total number of packets with an unsupported tunnel type. The
 *     unsupported tunnel types are: vxlan_gpe, gtp, ipip and MPLS-in-UDP
 *     (a packet with an MPLS-in-UDP header, RFC 7510).
 *
 * - queue_full_cnt
 *     Total number of times the number of available BDs in the current BD
 *     queue was less than the number of BDs the packet needed.
 *
 * - pkt_padding_fail_cnt
 *     Total number of packets shorter than the minimum packet length
 *     (struct hns3_tx_queue::min_tx_pkt_len) supported by hardware in the
 *     Tx direction that could not be zero-padded.
 */
struct hns3_tx_dfx_stats {
        uint64_t over_length_pkt_cnt;
        uint64_t exceed_limit_bd_pkt_cnt;
        uint64_t exceed_limit_bd_reassem_fail;
        uint64_t unsupported_tunnel_pkt_cnt;
        uint64_t queue_full_cnt;
        uint64_t pkt_padding_fail_cnt;
};
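
/*
 * Sketch of the "count once, then abort the burst" pattern described
 * above (illustrative only; pkt_exceeds_max_frame_len() is a
 * hypothetical helper, not a driver function):
 *
 *      for (i = 0; i < nb_pkts; i++) {
 *              if (pkt_exceeds_max_frame_len(tx_pkts[i])) {
 *                      txq->dfx_stats.over_length_pkt_cnt++;
 *                      break;  // the rest of the burst is not sent
 *              }
 *              // ...fill BDs for tx_pkts[i]...
 *      }
 */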

struct hns3_tx_queue {
        void *io_base;
        volatile void *io_tail_reg;
        struct hns3_adapter *hns;
        struct hns3_desc *tx_ring;
        uint64_t tx_ring_phys_addr; /* TX ring DMA address */
        const struct rte_memzone *mz;
        struct hns3_entry *sw_ring;

        uint16_t queue_id;
        uint16_t port_id;
        uint16_t nb_tx_desc;
        /*
         * index of the next BD whose corresponding rte_mbuf can be released
         * by the driver.
         */
        uint16_t next_to_clean;
        /* index of the next BD to be filled by the driver to send a packet */
        uint16_t next_to_use;
        /* number of BDs still available for the driver to fill */
        uint16_t tx_bd_ready;

        /* free Tx mbufs when the number of available BDs drops below this */
        uint16_t tx_free_thresh;

        /*
         * For better performance in the Tx datapath, mbufs must be released
         * in batches.
         * Checking only the VLD bit of the last descriptor in a batch of
         * tx_rs_thresh descriptors does not guarantee that all descriptors in
         * the batch were sent by hardware successfully, so the VLD bits of
         * all descriptors must be checked to be cleared before freeing all
         * mbufs in the batch (see the sketch after this structure).
         * - tx_rs_thresh
         *   Number of mbufs released at a time.
         *
         * - free
         *   Array that temporarily holds the addresses of mbufs being
         *   released back to the mempool when mbufs are freed in batches.
         */
        uint16_t tx_rs_thresh;
        struct rte_mbuf **free;

        /*
         * TSO mode.
         * value range:
         *      HNS3_TSO_SW_CAL_PSEUDO_H_CSUM/HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
         *
         *  - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
         *     In this mode, because of a hardware constraint, the driver must
         *     erase the L4 length from the TCP pseudo header and recalculate
         *     the TCP pseudo header checksum of packets that need TSO.
         *
         *  - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
         *     In this mode, hardware can recalculate the TCP pseudo header
         *     checksum of packets that need TSO, so the driver does not need
         *     to recalculate it.
         */
        uint8_t tso_mode;
        /*
         * The minimum packet length supported by hardware in the Tx
         * direction.
         */
        uint32_t min_tx_pkt_len;

        uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
        bool tx_deferred_start; /* don't start this queue in dev start */
        bool configured;        /* indicates whether the Tx queue is configured */
        /*
         * Indicates whether the vlan_tci of the mbuf is written to the inner
         * VLAN field of the Tx BD. On hardware network engines whose VLAN
         * mode is HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 920, the
         * outer VLAN is always the PVID when a PVID is set, so the PVID would
         * overwrite the outer VLAN field of the Tx BD. On hardware network
         * engines whose VLAN mode is HNS3_HW_SHIFT_AND_DISCARD_MODE, such as
         * Kunpeng 930, hardware shifts the VLAN field automatically when a
         * PVID is set, so the PMD does not need to perform any PVID-related
         * operation in Tx; in that case pvid_sw_shift_en is false.
         */
        bool pvid_sw_shift_en;
        bool enabled;           /* indicates whether the Tx queue is enabled */

        struct hns3_tx_basic_stats basic_stats;
        struct hns3_tx_dfx_stats dfx_stats;
};
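
/*
 * Sketch of the batched mbuf release described for tx_rs_thresh above
 * (illustrative only; the real implementation lives in hns3_rxtx.c):
 *
 *      struct hns3_desc *desc = &txq->tx_ring[txq->next_to_clean];
 *      uint16_t i;
 *
 *      for (i = 0; i < txq->tx_rs_thresh; i++, desc++) {
 *              // hardware still owns a BD of the batch: free nothing yet
 *              if (desc->tx.tp_fe_sc_vld_ra_ri &
 *                  rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
 *                      return;
 *      }
 *      // all VLD bits are cleared: free the whole batch of mbufs
 */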

#define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
                ((txq)->nb_tx_desc - 1 - (txq)->tx_bd_ready)
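
/*
 * Worked example: with nb_tx_desc = 1024 and tx_bd_ready = 1000, the
 * macro yields 1024 - 1 - 1000 = 23 BDs still pending in hardware.
 */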

struct hns3_queue_info {
        const char *type;   /* points to the queue memory name */
        const char *ring_name;  /* points to the hardware ring name */
        uint16_t idx;
        uint16_t nb_desc;
        unsigned int socket_id;
};

#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
        PKT_TX_OUTER_IP_CKSUM | \
        PKT_TX_IP_CKSUM | \
        PKT_TX_TCP_SEG | \
        PKT_TX_L4_MASK)
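
/*
 * Sketch of how a Tx path might test an mbuf against the mask above
 * ("m" is a hypothetical struct rte_mbuf pointer):
 *
 *      if (m->ol_flags & HNS3_TX_CKSUM_OFFLOAD_MASK) {
 *              // checksum and/or TSO offload requested: the BD fields
 *              // type_cs_vlan_tso_len / ol_type_vlan_len_msec must be set
 *      }
 */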

enum hns3_cksum_status {
        HNS3_CKSUM_NONE = 0,
        HNS3_L3_CKSUM_ERR = 1,
        HNS3_L4_CKSUM_ERR = 2,
        HNS3_OUTER_L3_CKSUM_ERR = 4,
        HNS3_OUTER_L4_CKSUM_ERR = 8
};

static inline int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
                   uint32_t bd_base_info, uint32_t l234_info,
                   uint32_t *cksum_err)
{
#define L2E_TRUNC_ERR_FLAG      (BIT(HNS3_RXD_L2E_B) | \
                                 BIT(HNS3_RXD_TRUNCATE_B))
#define CHECKSUM_ERR_FLAG       (BIT(HNS3_RXD_L3E_B) | \
                                 BIT(HNS3_RXD_L4E_B) | \
                                 BIT(HNS3_RXD_OL3E_B) | \
                                 BIT(HNS3_RXD_OL4E_B))

        uint32_t tmp = 0;

        /*
         * If the packet length is larger than the MTU when receiving with the
         * non-scattered algorithm, the first n BDs will not have the FE bit
         * set; this situation must be handled here.
         * Note: no statistics counter needs to be updated here because the
         *       last BD, which carries the FE bit, will have the
         *       HNS3_RXD_L2E_B bit marked.
         */
        if (unlikely((bd_base_info & BIT(HNS3_RXD_FE_B)) == 0))
                return -EINVAL;

        if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
                if (l234_info & BIT(HNS3_RXD_L2E_B))
                        rxq->err_stats.l2_errors++;
                else
                        rxq->err_stats.pkt_len_errors++;
                return -EINVAL;
        }

        if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
                if (likely((l234_info & CHECKSUM_ERR_FLAG) == 0)) {
                        *cksum_err = 0;
                        return 0;
                }

                if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
                        rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                        rxq->dfx_stats.l3_csum_errors++;
                        tmp |= HNS3_L3_CKSUM_ERR;
                }

                if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
                        rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                        rxq->dfx_stats.l4_csum_errors++;
                        tmp |= HNS3_L4_CKSUM_ERR;
                }

                if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
                        rxq->dfx_stats.ol3_csum_errors++;
                        tmp |= HNS3_OUTER_L3_CKSUM_ERR;
                }

                if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
                        rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
                        rxq->dfx_stats.ol4_csum_errors++;
                        tmp |= HNS3_OUTER_L4_CKSUM_ERR;
                }
        }
        *cksum_err = tmp;

        return 0;
}

static inline void
hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, const uint64_t packet_type,
                       const uint32_t cksum_err)
{
        if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
                if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
                    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
                        rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
                    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
                        rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
                if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
                    (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
                        rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
        } else {
                if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
                    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
                        rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
                    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
                        rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
        }
}

static inline uint32_t
hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
                   const uint32_t ol_info)
{
        const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
        uint32_t l2id, l3id, l4id;
        uint32_t ol3id, ol4id, ol2id;

        ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
        ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
        ol2id = hns3_get_field(ol_info, HNS3_RXD_OVLAN_M, HNS3_RXD_OVLAN_S);
        l2id = hns3_get_field(l234_info, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S);
        l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
        l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);

        if (unlikely(ptype_tbl->ol4table[ol4id]))
                return ptype_tbl->inner_l2table[l2id] |
                        ptype_tbl->inner_l3table[l3id] |
                        ptype_tbl->inner_l4table[l4id] |
                        ptype_tbl->ol3table[ol3id] |
                        ptype_tbl->ol4table[ol4id] | ptype_tbl->ol2table[ol2id];
        else
                return ptype_tbl->l2l3table[l2id][l3id] |
                        ptype_tbl->l4table[l4id];
}
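
/*
 * Illustrative Rx-path usage of the three helpers above (a sketch, not
 * the driver's actual burst loop; "rxm", "bd_base_info", "l234_info"
 * and "ol_info" are hypothetical locals parsed from the current BD):
 *
 *      uint32_t cksum_err;
 *
 *      if (unlikely(hns3_handle_bdinfo(rxq, rxm, bd_base_info,
 *                                      l234_info, &cksum_err) != 0)) {
 *              rte_pktmbuf_free(rxm);  // erroneous BD: drop the packet
 *              continue;
 *      }
 *      rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
 *      if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
 *              hns3_rx_set_cksum_flag(rxm, rxm->packet_type, cksum_err);
 */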

void hns3_dev_rx_queue_release(void *queue);
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
int hns3_init_queues(struct hns3_adapter *hns, bool reset_queue);
void hns3_start_tqps(struct hns3_hw *hw);
void hns3_stop_tqps(struct hns3_hw *hw);
int hns3_rxq_iterate(struct rte_eth_dev *dev,
                 int (*callback)(struct hns3_rx_queue *, void *), void *arg);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                        unsigned int socket, const struct rte_eth_rxconf *conf,
                        struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
                        unsigned int socket, const struct rte_eth_txconf *conf);
uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
uint16_t hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts);
uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                            uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts,
                                uint16_t nb_pkts);
int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
                           __rte_unused uint16_t queue_id,
                           struct rte_eth_burst_mode *mode);
int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts);
uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                            uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
                                uint16_t nb_pkts);
int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
                           __rte_unused uint16_t queue_id,
                           struct rte_eth_burst_mode *mode);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
                            uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
                            uint16_t rl_value);
void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
                            uint16_t ql_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
                                  uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);
int hns3_restore_gro_conf(struct hns3_hw *hw);
void hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw);
void hns3_rx_scattered_reset(struct rte_eth_dev *dev);
void hns3_rx_scattered_calc(struct rte_eth_dev *dev);
int hns3_tx_check_vec_support(struct rte_eth_dev *dev);
void hns3_rxq_vec_setup(struct hns3_rx_queue *rxq);
void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                       struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                       struct rte_eth_txq_info *qinfo);
uint32_t hns3_get_tqp_reg_offset(uint16_t idx);
int hns3_start_all_txqs(struct rte_eth_dev *dev);
int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);

#endif /* _HNS3_RXTX_H_ */