dpdk/drivers/net/hns3/hns3_rxtx_vec_sve.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#include <arm_sve.h>
#include <rte_io.h>
#include <ethdev_driver.h>

#include "hns3_ethdev.h"
#include "hns3_rxtx.h"
#include "hns3_rxtx_vec.h"

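/*
 * Fixed-width predicates used below. Each macro names the number of
 * active lanes it enables for a given element size, e.g. PG64_256BIT
 * enables four 64-bit lanes (256 bits of data). Note that svwhilelt
 * caps the lane count at the hardware vector length, so narrower SVE
 * implementations would activate fewer lanes.
 */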
#define PG16_128BIT             svwhilelt_b16(0, 8)
#define PG16_256BIT             svwhilelt_b16(0, 16)
#define PG32_256BIT             svwhilelt_b32(0, 8)
#define PG64_64BIT              svwhilelt_b64(0, 1)
#define PG64_128BIT             svwhilelt_b64(0, 2)
#define PG64_256BIT             svwhilelt_b64(0, 4)
#define PG64_ALLBIT             svptrue_b64()

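/*
 * Byte offsets of the descriptor fields accessed below. Each Rx/Tx BD
 * is BD_SIZE (32) bytes, so these feed svindex_*() to build per-lane
 * gather/scatter offsets across consecutive descriptors.
 */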
#define BD_SIZE                 32
#define BD_FIELD_ADDR_OFFSET    0
#define BD_FIELD_L234_OFFSET    8
#define BD_FIELD_XLEN_OFFSET    12
#define BD_FIELD_RSS_OFFSET     16
#define BD_FIELD_OL_OFFSET      24
#define BD_FIELD_VALID_OFFSET   28

typedef struct {
        uint32_t l234_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
        uint32_t ol_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
        uint32_t bd_base_info[HNS3_SVE_DEFAULT_DESCS_PER_LOOP];
} HNS3_SVE_KEY_FIELD_S;

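/*
 * Post-process the descriptor fields stashed by the burst loop: set
 * ol_flags, validate the BD info and resolve the packet type for each
 * of the first bd_vld_num packets. Returns a bitmap with bit i set for
 * each packet whose BD info failed validation.
 */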
static inline uint32_t
hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
                          struct rte_mbuf **rx_pkts,
                          HNS3_SVE_KEY_FIELD_S *key,
                          uint32_t bd_vld_num)
{
        uint32_t retcode = 0;
        int ret, i;

        for (i = 0; i < (int)bd_vld_num; i++) {
                /* init the last 64 bits of the mbuf rearm zone (ol_flags) */
                rx_pkts[i]->ol_flags = PKT_RX_RSS_HASH;

                ret = hns3_handle_bdinfo(rxq, rx_pkts[i], key->bd_base_info[i],
                                         key->l234_info[i]);
                if (unlikely(ret)) {
                        retcode |= 1u << i;
                        continue;
                }

                rx_pkts[i]->packet_type = hns3_rx_calc_ptype(rxq,
                                        key->l234_info[i], key->ol_info[i]);

                /* Increment bytes counter */
                rxq->basic_stats.bytes += rx_pkts[i]->pkt_len;
        }

        return retcode;
}

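/*
 * Load eight mbuf pointers from the software ring into two vectors and
 * issue gather prefetches on them, pulling the mbuf headers toward L1
 * before the parse/rearm code dereferences them.
 */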
static inline void
hns3_rx_prefetch_mbuf_sve(struct hns3_entry *sw_ring)
{
        svuint64_t prf1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[0]);
        svuint64_t prf2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[4]);
        svprfd_gather_u64base(PG64_256BIT, prf1st, SV_PLDL1KEEP);
        svprfd_gather_u64base(PG64_256BIT, prf2st, SV_PLDL1KEEP);
}

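/*
 * Receive up to nb_pkts packets, HNS3_SVE_DEFAULT_DESCS_PER_LOOP
 * descriptors per iteration. Per-packet parse errors are reported
 * through *bd_err_mask (bit n corresponds to rx_pkts[n]); the caller
 * compacts the error packets out afterwards.
 */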
static inline uint16_t
hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq,
                        struct rte_mbuf **__restrict rx_pkts,
                        uint16_t nb_pkts,
                        uint64_t *bd_err_mask)
{
#define XLEN_ADJUST_LEN         32
#define RSS_ADJUST_LEN          16
/* byte indices 28,24,...,0: reverse-pick one byte per 32-bit lane */
#define GEN_VLD_U8_ZIP_INDEX    svindex_s8(28, -4)
        uint16_t rx_id = rxq->next_to_use;
        struct hns3_entry *sw_ring = &rxq->sw_ring[rx_id];
        struct hns3_desc *rxdp = &rxq->rx_ring[rx_id];
        struct hns3_desc *rxdp2;
        HNS3_SVE_KEY_FIELD_S key_field;
        uint64_t bd_valid_num;
        uint32_t parse_retcode;
        uint16_t nb_rx = 0;
        int pos, offset;

        /*
         * svtbl() indices for redistributing the gathered 16-bit xlen
         * words into per-mbuf 64-bit lanes; 0xffff is out of range and
         * therefore zeroes the corresponding lane.
         */
        uint16_t xlen_adjust[XLEN_ADJUST_LEN] = {
                0,  0xffff, 1,  0xffff,    /* 1st mbuf: pkt_len and data_len */
                2,  0xffff, 3,  0xffff,    /* 2nd mbuf: pkt_len and data_len */
                4,  0xffff, 5,  0xffff,    /* 3rd mbuf: pkt_len and data_len */
                6,  0xffff, 7,  0xffff,    /* 4th mbuf: pkt_len and data_len */
                8,  0xffff, 9,  0xffff,    /* 5th mbuf: pkt_len and data_len */
                10, 0xffff, 11, 0xffff,    /* 6th mbuf: pkt_len and data_len */
                12, 0xffff, 13, 0xffff,    /* 7th mbuf: pkt_len and data_len */
                14, 0xffff, 15, 0xffff,    /* 8th mbuf: pkt_len and data_len */
        };

        /* likewise for the 32-bit RSS hash words */
        uint32_t rss_adjust[RSS_ADJUST_LEN] = {
                0, 0xffff,        /* 1st mbuf: rss */
                1, 0xffff,        /* 2nd mbuf: rss */
                2, 0xffff,        /* 3rd mbuf: rss */
                3, 0xffff,        /* 4th mbuf: rss */
                4, 0xffff,        /* 5th mbuf: rss */
                5, 0xffff,        /* 6th mbuf: rss */
                6, 0xffff,        /* 7th mbuf: rss */
                7, 0xffff,        /* 8th mbuf: rss */
        };

        svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);
        svuint16_t xlen_tbl1 = svld1_u16(PG16_256BIT, xlen_adjust);
        svuint16_t xlen_tbl2 = svld1_u16(PG16_256BIT, &xlen_adjust[16]);
        svuint32_t rss_tbl1 = svld1_u32(PG32_256BIT, rss_adjust);
        svuint32_t rss_tbl2 = svld1_u32(PG32_256BIT, &rss_adjust[8]);

        /* compile-time check that the mbuf layout matches the xlen_adjust mask */
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
                         offsetof(struct rte_mbuf, pkt_len) + 4);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
                         offsetof(struct rte_mbuf, data_len) + 2);

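        /*
         * Each iteration: gather the VLD words of the next 8 BDs, count
         * the leading consecutive valid BDs, gather the remaining field
         * words from the descriptors, then scatter rearm data, lengths
         * and RSS hash straight into the 8 mbufs. Mbuf pointer loads and
         * stores are interleaved with the gathers to hide latency.
         */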
        for (pos = 0; pos < nb_pkts; pos += HNS3_SVE_DEFAULT_DESCS_PER_LOOP,
                                     rxdp += HNS3_SVE_DEFAULT_DESCS_PER_LOOP) {
                svuint64_t vld_clz, mbp1st, mbp2st, mbuf_init;
                svuint64_t xlen1st, xlen2st, rss1st, rss2st;
                svuint32_t l234, ol, vld, vld2, xlen, rss;
                svuint8_t  vld_u8;

                /* calc how many BDs are valid, part 1: gather the VLD words
                 * and turn each lane's VLD bit into all-ones/all-zeros by a
                 * logical shift left followed by an arithmetic shift right
                 */
                vld = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp,
                        svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
                vld2 = svlsl_n_u32_z(pg32, vld,
                                    HNS3_UINT32_BIT - 1 - HNS3_RXD_VLD_B);
                vld2 = svreinterpret_u32_s32(svasr_n_s32_z(pg32,
                        svreinterpret_s32_u32(vld2), HNS3_UINT32_BIT - 1));

                /* load the first 4 mbuf pointers */
                mbp1st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos]);

                /* calc how many BDs are valid, part 2: compress one byte per
                 * lane in reverse order so BD0 lands in the top byte, invert
                 * and count leading zero bits; dividing by 8 yields the
                 * number of leading consecutive valid BDs
                 */
                vld_u8 = svtbl_u8(svreinterpret_u8_u32(vld2),
                                  svreinterpret_u8_s8(GEN_VLD_U8_ZIP_INDEX));
                vld_clz = svnot_u64_z(PG64_64BIT, svreinterpret_u64_u8(vld_u8));
                vld_clz = svclz_u64_z(PG64_64BIT, vld_clz);
                svst1_u64(PG64_64BIT, &bd_valid_num, vld_clz);
                bd_valid_num /= HNS3_UINT8_BIT;

                /* load 4 more mbuf pointers */
                mbp2st = svld1_u64(PG64_256BIT, (uint64_t *)&sw_ring[pos + 4]);

                /* use an offset looked up from bd_valid_num to order the
                 * field loads below after the VLD check above
                 */
                offset = rxq->offset_table[bd_valid_num];
                rxdp2 = rxdp + offset;

                /* store the first 4 mbuf pointers into rx_pkts */
                svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos], mbp1st);

                /* load key fields into vector registers */
                l234 = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
                                svindex_u32(BD_FIELD_L234_OFFSET, BD_SIZE));
                ol = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
                                svindex_u32(BD_FIELD_OL_OFFSET, BD_SIZE));

                /* store the remaining 4 mbuf pointers into rx_pkts */
                svst1_u64(PG64_256BIT, (uint64_t *)&rx_pkts[pos + 4], mbp2st);

                /* load data_len, pkt_len and rss_hash */
                xlen = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
                                svindex_u32(BD_FIELD_XLEN_OFFSET, BD_SIZE));
                rss = svld1_gather_u32offset_u32(pg32, (uint32_t *)rxdp2,
                                svindex_u32(BD_FIELD_RSS_OFFSET, BD_SIZE));

                /* store the key fields to the stash buffer */
                svst1_u32(pg32, (uint32_t *)key_field.l234_info, l234);
                svst1_u32(pg32, (uint32_t *)key_field.bd_base_info, vld);
                svst1_u32(pg32, (uint32_t *)key_field.ol_info, ol);

                /* subtract crc_len from pkt_len and data_len */
                xlen = svreinterpret_u32_u16(svsub_n_u16_z(PG16_256BIT,
                        svreinterpret_u16_u32(xlen), rxq->crc_len));

                /* init mbuf_initializer */
                mbuf_init = svdup_n_u64(rxq->mbuf_initializer);

                /* extract data_len, pkt_len and rss from xlen and rss */
                xlen1st = svreinterpret_u64_u16(
                        svtbl_u16(svreinterpret_u16_u32(xlen), xlen_tbl1));
                xlen2st = svreinterpret_u64_u16(
                        svtbl_u16(svreinterpret_u16_u32(xlen), xlen_tbl2));
                rss1st = svreinterpret_u64_u32(svtbl_u32(rss, rss_tbl1));
                rss2st = svreinterpret_u64_u32(svtbl_u32(rss, rss_tbl2));

                /* save mbuf_initializer */
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
                        offsetof(struct rte_mbuf, rearm_data), mbuf_init);
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
                        offsetof(struct rte_mbuf, rearm_data), mbuf_init);

                /* save data_len, pkt_len and rss */
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
                        offsetof(struct rte_mbuf, pkt_len), xlen1st);
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp1st,
                        offsetof(struct rte_mbuf, hash.rss), rss1st);
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
                        offsetof(struct rte_mbuf, pkt_len), xlen2st);
                svst1_scatter_u64base_offset_u64(PG64_256BIT, mbp2st,
                        offsetof(struct rte_mbuf, hash.rss), rss2st);

                rte_prefetch_non_temporal(rxdp +
                                          HNS3_SVE_DEFAULT_DESCS_PER_LOOP);

                parse_retcode = hns3_desc_parse_field_sve(rxq, &rx_pkts[pos],
                                        &key_field, bd_valid_num);
                if (unlikely(parse_retcode))
                        (*bd_err_mask) |= ((uint64_t)parse_retcode) << pos;

                hns3_rx_prefetch_mbuf_sve(&sw_ring[pos +
                                        HNS3_SVE_DEFAULT_DESCS_PER_LOOP]);

                nb_rx += bd_valid_num;
                if (unlikely(bd_valid_num < HNS3_SVE_DEFAULT_DESCS_PER_LOOP))
                        break;
        }

        rxq->rx_rearm_nb += nb_rx;
        rxq->next_to_use += nb_rx;
        if (rxq->next_to_use >= rxq->nb_rx_desc)
                rxq->next_to_use = 0;

        return nb_rx;
}

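/*
 * Refill HNS3_DEFAULT_RXQ_REARM_THRESH software-ring slots with fresh
 * mbufs from the mempool, prefetch them, then scatter their buffer
 * IOVAs into the descriptors and zero the BDs' last 8 bytes (which
 * contain the VLD bit) so hardware sees them as writable again. The
 * head-register write at the end hands the rearmed BDs to hardware.
 */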
static inline void
hns3_rxq_rearm_mbuf_sve(struct hns3_rx_queue *rxq)
{
#define REARM_LOOP_STEP_NUM     4
        struct hns3_entry *rxep = &rxq->sw_ring[rxq->rx_rearm_start];
        struct hns3_desc *rxdp = rxq->rx_ring + rxq->rx_rearm_start;
        struct hns3_entry *rxep_tmp = rxep;
        int i;

        if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
                                          HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) {
                rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
                return;
        }

        /* prefetch the freshly allocated mbufs, 4 per gather */
        for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
                rxep_tmp += REARM_LOOP_STEP_NUM) {
                svuint64_t prf = svld1_u64(PG64_256BIT, (uint64_t *)rxep_tmp);
                svprfd_gather_u64base(PG64_256BIT, prf, SV_PLDL1STRM);
        }

        for (i = 0; i < HNS3_DEFAULT_RXQ_REARM_THRESH; i += REARM_LOOP_STEP_NUM,
                rxep += REARM_LOOP_STEP_NUM, rxdp += REARM_LOOP_STEP_NUM) {
                uint64_t iova[REARM_LOOP_STEP_NUM];
                iova[0] = rxep[0].mbuf->buf_iova;
                iova[1] = rxep[1].mbuf->buf_iova;
                iova[2] = rxep[2].mbuf->buf_iova;
                iova[3] = rxep[3].mbuf->buf_iova;
                svuint64_t siova = svld1_u64(PG64_256BIT, iova);
                siova = svadd_n_u64_z(PG64_256BIT, siova, RTE_PKTMBUF_HEADROOM);
                svuint64_t ol_base = svdup_n_u64(0);
                /* scatter the buffer addresses into the BDs' addr field */
                svst1_scatter_u64offset_u64(PG64_256BIT,
                        (uint64_t *)&rxdp[0].addr,
                        svindex_u64(BD_FIELD_ADDR_OFFSET, BD_SIZE), siova);
                /* zero the BDs' last 8 bytes, clearing the VLD bit */
                svst1_scatter_u64offset_u64(PG64_256BIT,
                        (uint64_t *)&rxdp[0].addr,
                        svindex_u64(BD_FIELD_OL_OFFSET, BD_SIZE), ol_base);
        }

        rxq->rx_rearm_start += HNS3_DEFAULT_RXQ_REARM_THRESH;
        if (rxq->rx_rearm_start >= rxq->nb_rx_desc)
                rxq->rx_rearm_start = 0;

        rxq->rx_rearm_nb -= HNS3_DEFAULT_RXQ_REARM_THRESH;

        hns3_write_reg_opt(rxq->io_head_reg, HNS3_DEFAULT_RXQ_REARM_THRESH);
}

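/*
 * Burst receive entry point. nb_pkts is rounded down to a multiple of
 * HNS3_SVE_DEFAULT_DESCS_PER_LOOP, and requests larger than
 * HNS3_DEFAULT_RX_BURST are handled in chunks, keeping one
 * pkt_err_mask bit per packet within the 64-bit mask.
 */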
uint16_t
hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
                       struct rte_mbuf **__restrict rx_pkts,
                       uint16_t nb_pkts)
{
        struct hns3_rx_queue *rxq = rx_queue;
        struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
        uint64_t pkt_err_mask;  /* bit mask indicating which packets are in error */
        uint16_t nb_rx;

        rte_prefetch_non_temporal(rxdp);

        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);

        if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
                hns3_rxq_rearm_mbuf_sve(rxq);

        if (unlikely(!(rxdp->rx.bd_base_info &
                        rte_cpu_to_le_32(1u << HNS3_RXD_VLD_B))))
                return 0;

        hns3_rx_prefetch_mbuf_sve(&rxq->sw_ring[rxq->next_to_use]);

        if (likely(nb_pkts <= HNS3_DEFAULT_RX_BURST)) {
                pkt_err_mask = 0;
                nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts,
                                                &pkt_err_mask);
                nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, pkt_err_mask);
                return nb_rx;
        }

        nb_rx = 0;
        while (nb_pkts > 0) {
                uint16_t ret, n;

                n = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
                pkt_err_mask = 0;
                ret = hns3_recv_burst_vec_sve(rxq, &rx_pkts[nb_rx], n,
                                              &pkt_err_mask);
                nb_pkts -= ret;
                nb_rx += hns3_rx_reassemble_pkts(&rx_pkts[nb_rx], ret,
                                                 pkt_err_mask);
                if (ret < n)
                        break;

                if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
                        hns3_rxq_rearm_mbuf_sve(rxq);
        }

        return nb_rx;
}

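/*
 * Reclaim the oldest tx_rs_thresh descriptors if hardware is done with
 * them. Hardware clears a BD's VLD bit once it has been sent, so the
 * batch is reclaimable only when an OR across the batch's VLD bits is
 * zero. tx_rs_thresh is expected to be a multiple of
 * HNS3_SVE_CHECK_DESCS_PER_LOOP here.
 */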
static inline void
hns3_tx_free_buffers_sve(struct hns3_tx_queue *txq)
{
#define HNS3_SVE_CHECK_DESCS_PER_LOOP   8
#define TX_VLD_U8_ZIP_INDEX             svindex_u8(0, 4)
        svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_CHECK_DESCS_PER_LOOP);
        svuint32_t vld, vld2;
        svuint8_t vld_u8;
        uint64_t vld_all;
        struct hns3_desc *tx_desc;
        int i;

        /*
         * All mbufs can be released only when the VLD bits of all
         * descriptors in a batch are cleared, so OR together the valid
         * field of every descriptor in the batch.
         */
        vld = svdup_n_u32(0);
        tx_desc = &txq->tx_ring[txq->next_to_clean];
        for (i = 0; i < txq->tx_rs_thresh; i += HNS3_SVE_CHECK_DESCS_PER_LOOP,
                                tx_desc += HNS3_SVE_CHECK_DESCS_PER_LOOP) {
                vld2 = svld1_gather_u32offset_u32(pg32, (uint32_t *)tx_desc,
                                svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
                vld = svorr_u32_z(pg32, vld, vld2);
        }
        /* shift left and then right to isolate the valid bit per lane */
        vld = svlsl_n_u32_z(pg32, vld,
                            HNS3_UINT32_BIT - 1 - HNS3_TXD_VLD_B);
        vld = svreinterpret_u32_s32(svasr_n_s32_z(pg32,
                svreinterpret_s32_u32(vld), HNS3_UINT32_BIT - 1));
        /* use tbl to compress each 32-bit lane to an 8-bit lane */
        vld_u8 = svtbl_u8(svreinterpret_u8_u32(vld), TX_VLD_U8_ZIP_INDEX);
        /* dump the compressed 64 bits to a scalar variable */
        svst1_u64(PG64_64BIT, &vld_all, svreinterpret_u64_u8(vld_u8));
        if (vld_all > 0)
                return;

        hns3_tx_bulk_free_buffers(txq);
}

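/*
 * Write nb_pkts transmit descriptors, svcntd() packets per iteration.
 * The mbuf pointers are loaded as a vector of base addresses and the
 * per-field offsets added to them, so gather loads can pull buf_iova,
 * data_off and data_len straight out of the mbufs; the assembled words
 * are then scattered into the four 8-byte columns of the descriptors.
 */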
static inline void
hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq,
                         struct rte_mbuf **pkts,
                         uint16_t nb_pkts)
{
#define DATA_OFF_LEN_VAL_MASK   0xFFFF
        struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
        struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
        const uint64_t valid_bit = (BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B)) <<
                                   HNS3_UINT32_BIT;
        svuint64_t base_addr, buf_iova, data_off, data_len, addr;
        svuint64_t offsets = svindex_u64(0, BD_SIZE);
        uint32_t i = 0;
        svbool_t pg = svwhilelt_b64_u32(i, nb_pkts);

        do {
                base_addr = svld1_u64(pg, (uint64_t *)pkts);
                /* calc the address of each mbuf's buf_iova field */
                buf_iova = svadd_n_u64_z(pg, base_addr,
                                         offsetof(struct rte_mbuf, buf_iova));
                /* calc the address of each mbuf's data_off field */
                data_off = svadd_n_u64_z(pg, base_addr,
                                         offsetof(struct rte_mbuf, data_off));
                /* calc the address of each mbuf's data_len field */
                data_len = svadd_n_u64_z(pg, base_addr,
                                         offsetof(struct rte_mbuf, data_len));
                /* store the mbuf pointers to tx_entry */
                svst1_u64(pg, (uint64_t *)tx_entry, base_addr);
                /* gather pkts->buf_iova */
                buf_iova = svld1_gather_u64base_u64(pg, buf_iova);
                /* gather 64 bits starting at pkts->data_off */
                data_off = svld1_gather_u64base_u64(pg, data_off);
                /* gather 64 bits starting at pkts->data_len */
                data_len = svld1_gather_u64base_u64(pg, data_len);
                /* mask off the upper 48 bits of data_off */
                data_off = svand_n_u64_z(pg, data_off, DATA_OFF_LEN_VAL_MASK);
                /* mask off the upper 48 bits of data_len */
                data_len = svand_n_u64_z(pg, data_len, DATA_OFF_LEN_VAL_MASK);
                /* calc the IOVA of each mbuf's data region */
                addr = svadd_u64_z(pg, buf_iova, data_off);
                /* shift left 16 bits: data_len lives at byte offset 2 of the
                 * BD's second 8-byte word
                 */
                data_len = svlsl_n_u64_z(pg, data_len, HNS3_UINT16_BIT);
                /* save bytes 0~7 of every BD */
                svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->addr,
                                            offsets, addr);
                /* save bytes 8~15 of every BD */
                svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.vlan_tag,
                                            offsets, data_len);
                /* save bytes 16~23 of every BD */
                svst1_scatter_u64offset_u64(pg,
                                (uint64_t *)&txdp->tx.outer_vlan_tag,
                                offsets, svdup_n_u64(0));
                /* save bytes 24~31 of every BD */
                svst1_scatter_u64offset_u64(pg,
                                (uint64_t *)&txdp->tx.paylen_fd_dop_ol4cs,
                                offsets, svdup_n_u64(valid_bit));

                /* Increment bytes counter, only over the lanes that are
                 * actually active this iteration (the tail may be partial)
                 */
                uint32_t idx;
                uint32_t valid_num = RTE_MIN((uint32_t)svcntd(),
                                             (uint32_t)nb_pkts - i);
                for (idx = 0; idx < valid_num; idx++)
                        txq->basic_stats.bytes += pkts[idx]->pkt_len;

                /* update indexes for the next loop */
                i += svcntd();
                pkts += svcntd();
                txdp += svcntd();
                tx_entry += svcntd();
                pg = svwhilelt_b64_u32(i, nb_pkts);
        } while (svptest_any(svptrue_b64(), pg));
}

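/*
 * Transmit at most min(nb_pkts, tx_bd_ready) packets, splitting the
 * descriptor-ring fill in two around the ring wrap point, then bump
 * the tail register to hand the new BDs to hardware.
 */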
static uint16_t
hns3_xmit_fixed_burst_vec_sve(void *__restrict tx_queue,
                              struct rte_mbuf **__restrict tx_pkts,
                              uint16_t nb_pkts)
{
        struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
        uint16_t nb_tx = 0;

        if (txq->tx_bd_ready < txq->tx_free_thresh)
                hns3_tx_free_buffers_sve(txq);

        nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
        if (unlikely(nb_pkts == 0)) {
                txq->dfx_stats.queue_full_cnt++;
                return 0;
        }

        /* fill up to the ring end first, then wrap to the start */
        if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
                nb_tx = txq->nb_tx_desc - txq->next_to_use;
                hns3_tx_fill_hw_ring_sve(txq, tx_pkts, nb_tx);
                txq->next_to_use = 0;
        }

        hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
        txq->next_to_use += nb_pkts - nb_tx;

        txq->tx_bd_ready -= nb_pkts;
        hns3_write_txq_tail_reg(txq, nb_pkts);

        return nb_pkts;
}

uint16_t
hns3_xmit_pkts_vec_sve(void *tx_queue,
                       struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts)
{
        struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
        uint16_t ret, new_burst;
        uint16_t nb_tx = 0;

        /* submit in bursts of at most tx_rs_thresh packets */
        while (nb_pkts) {
                new_burst = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
                ret = hns3_xmit_fixed_burst_vec_sve(tx_queue, &tx_pkts[nb_tx],
                                                    new_burst);
                nb_tx += ret;
                nb_pkts -= ret;
                if (ret < new_burst)
                        break;
        }

        return nb_tx;
}