dpdk/drivers/net/hns3/hns3_rxtx_vec.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020-2021 HiSilicon Limited.
 */

#ifndef _HNS3_RXTX_VEC_H_
#define _HNS3_RXTX_VEC_H_

#include "hns3_rxtx.h"
#include "hns3_ethdev.h"

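/*
 * Bulk-free the tx_rs_thresh transmitted mbufs starting at next_to_clean in
 * the software ring. Mbufs from the same mempool are batched and returned
 * with a single rte_mempool_put_bulk() call; the batch is flushed whenever
 * the mempool changes.
 */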
static inline void
hns3_tx_bulk_free_buffers(struct hns3_tx_queue *txq)
{
        struct rte_mbuf **free = txq->free;
        struct hns3_entry *tx_entry;
        struct rte_mbuf *m;
        int nb_free = 0;
        int i;

        tx_entry = &txq->sw_ring[txq->next_to_clean];
        for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) {
                m = rte_pktmbuf_prefree_seg(tx_entry->mbuf);
                tx_entry->mbuf = NULL;

                if (m == NULL)
                        continue;

                if (nb_free && m->pool != free[0]->pool) {
                        rte_mempool_put_bulk(free[0]->pool, (void **)free,
                                             nb_free);
                        nb_free = 0;
                }
                free[nb_free++] = m;
        }

        if (nb_free)
                rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);

        /* Update the number of available descriptors since buffers were freed. */
        txq->tx_bd_ready += txq->tx_rs_thresh;
        txq->next_to_clean += txq->tx_rs_thresh;
        if (txq->next_to_clean >= txq->nb_tx_desc)
                txq->next_to_clean = 0;
}

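/*
 * Free the mbufs of one completed Tx batch. This is a no-op if any
 * descriptor in the batch is still owned by hardware.
 */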
static inline void
hns3_tx_free_buffers(struct hns3_tx_queue *txq)
{
        struct hns3_desc *tx_desc;
        int i;

        /*
         * All mbufs can be released only when the VLD bits of all
         * descriptors in a batch are cleared.
         */
        tx_desc = &txq->tx_ring[txq->next_to_clean];
        for (i = 0; i < txq->tx_rs_thresh; i++, tx_desc++) {
                if (tx_desc->tx.tp_fe_sc_vld_ra_ri &
                                rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B)))
                        return;
        }

        hns3_tx_bulk_free_buffers(txq);
}

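/*
 * Drop the mbufs whose bit is set in pkt_err_mask and compact the remaining
 * good mbufs to the front of rx_pkts. Returns the number of packets kept.
 */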
static inline uint16_t
hns3_rx_reassemble_pkts(struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts,
                        uint64_t pkt_err_mask)
{
        uint16_t count, i;
        uint64_t mask;

        if (likely(pkt_err_mask == 0))
                return nb_pkts;

        count = 0;
        for (i = 0; i < nb_pkts; i++) {
                mask = ((uint64_t)1u) << i;
                if (pkt_err_mask & mask)
                        rte_pktmbuf_free_seg(rx_pkts[i]);
                else
                        rx_pkts[count++] = rx_pkts[i];
        }

        return count;
}
#endif /* _HNS3_RXTX_VEC_H_ */