/* dpdk/drivers/net/axgbe/axgbe_rxtx.h */
   1/*   SPDX-License-Identifier: BSD-3-Clause
   2 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
   3 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
   4 */
   5
#ifndef _AXGBE_RXTX_H_
#define _AXGBE_RXTX_H_

/* Suppress gcc warnings from casting const/volatile qualifiers away on
 * descriptor pointers.
 * NOTE(review): pragma lives in a header, so it also affects every file
 * that includes it -- presumably intentional, confirm against DPDK policy.
 */
#ifdef RTE_TOOLCHAIN_GCC
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

/* clang honours GCC diagnostic pragmas, hence the identical directive */
#ifdef RTE_TOOLCHAIN_CLANG
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

/* Descriptor related defines */
#define AXGBE_MAX_RING_DESC             4096 /* must be a power of 2 */
#define AXGBE_TX_DESC_MIN_FREE          (AXGBE_MAX_RING_DESC >> 3)
#define AXGBE_TX_DESC_MAX_PROC          (AXGBE_MAX_RING_DESC >> 1)
#define AXGBE_MIN_RING_DESC             32
#define RTE_AXGBE_DESCS_PER_LOOP        4
#define RTE_AXGBE_MAX_RX_BURST          32

/* Default free-descriptor thresholds (in descriptors) */
#define AXGBE_RX_FREE_THRESH            32
#define AXGBE_TX_FREE_THRESH            32

#define AXGBE_DESC_ALIGN                128
/* OWN flag (bit 31) of desc3: set while the descriptor belongs to h/w */
#define AXGBE_DESC_OWN                  0x80000000
/* Error-status field of the Rx writeback descriptor and two specific
 * checksum-error codes within that field
 */
#define AXGBE_ERR_STATUS                0x000f0000
#define AXGBE_L3_CSUM_ERR               0x00050000
#define AXGBE_L4_CSUM_ERR               0x00060000

#include "axgbe_common.h"
  36
/* Return a pointer to the descriptor at slot (_idx mod ring size).
 * Relies on (_queue)->nb_desc being a power of two so the bitwise AND
 * wraps the free-running index onto the ring.
 */
#define AXGBE_GET_DESC_PT(_queue, _idx)                 \
        (((_queue)->desc) +                             \
        ((_idx) & ((_queue)->nb_desc - 1)))
  40
  41#define AXGBE_GET_DESC_IDX(_queue, _idx)                        \
  42        ((_idx) & ((_queue)->nb_desc - 1))                      \
  43
/* Rx descriptor format: a single 16-byte h/w-shared slot overlaid with
 * two views.  "read" is the layout the driver programs (DMA buffer
 * address plus two control words); "write" is presumably the writeback
 * layout reported by hardware -- field meanings defined by the h/w spec.
 */
union axgbe_rx_desc {
        struct {
                uint64_t baddr;  /* DMA address of the rx buffer */
                uint32_t desc2;
                uint32_t desc3;
        } read;
        struct {
                uint32_t desc0;
                uint32_t desc1;
                uint32_t desc2;
                uint32_t desc3;  /* carries OWN bit / error status */
        } write;
};
  58
/* Per-queue Rx state.  cur/dirty are free-running 64-bit counters;
 * actual ring slots are derived via AXGBE_GET_DESC_IDX (nb_desc is a
 * power of two).
 */
struct axgbe_rx_queue {
        /* mbuf pool used to allocate rx buffers */
        struct rte_mempool *mb_pool;
        /* H/w Rx buffer size configured in DMA */
        unsigned int buf_size;
        /* CRC h/w offload */
        uint16_t crc_len;
        /* address of s/w rx buffers */
        struct rte_mbuf **sw_ring;
        /* Port private data */
        struct axgbe_port *pdata;
        /* Number of Rx descriptors in queue */
        uint16_t nb_desc;
        /* max free RX desc to hold */
        uint16_t free_thresh;
        /* Index of descriptor to check for packet availability */
        uint64_t cur;
        /* Index of descriptor to check for buffer reallocation */
        uint64_t dirty;
        /* Rx descriptor ring (virtual address); shared with hardware */
        volatile union axgbe_rx_desc *desc;
        /* Ring physical address */
        uint64_t ring_phys_addr;
        /* Dma Channel register address */
        void *dma_regs;
        /* Dma channel tail register address*/
        volatile uint32_t *dma_tail_reg;
        /* DPDK queue index */
        uint16_t queue_id;
        /* dpdk port id*/
        uint16_t port_id;
        /* queue stats */
        uint64_t pkts;
        uint64_t bytes;
        uint64_t errors;
        uint64_t rx_mbuf_alloc_failed;
        /* Number of mbufs currently allocated from the pool */
        uint64_t mbuf_alloc;

} __rte_cache_aligned;
  99
/* Tx descriptor format: buffer DMA address plus two h/w control words
 * (desc3 carries the OWN bit).
 */
struct axgbe_tx_desc {
        phys_addr_t baddr;  /* DMA address of the packet buffer */
        uint32_t desc2;
        uint32_t desc3;
};
 106
/* Per-queue Tx state.  cur/dirty are free-running 64-bit counters,
 * wrapped onto the ring via AXGBE_GET_DESC_IDX.
 */
struct axgbe_tx_queue {
        /* Port private data reference */
        struct axgbe_port *pdata;
        /* Number of Tx descriptors in queue*/
        uint16_t nb_desc;
        /* Start freeing TX buffers if there are less free descriptors than
         * this value
         */
        uint16_t free_thresh;
        /* Available descriptors for Tx processing*/
        uint16_t nb_desc_free;
        /* Batch of mbufs/descs to release */
        uint16_t free_batch_cnt;
        /* Non-zero disables the vectorized Tx path */
        uint16_t vector_disable;
        /* Index of descriptor to be used for current transfer */
        uint64_t cur;
        /* Index of descriptor to check for transfer complete */
        uint64_t dirty;
        /* Virtual address of ring; shared with hardware */
        volatile struct axgbe_tx_desc *desc;
        /* Physical address of ring */
        uint64_t ring_phys_addr;
        /* Dma channel register space */
        void  *dma_regs;
        /* Dma tail register address of ring*/
        volatile uint32_t *dma_tail_reg;
        /* Tx queue index/id*/
        uint16_t queue_id;
        /* Reference to hold Tx mbufs mapped to Tx descriptors freed
         * after transmission confirmation
         */
        struct rte_mbuf **sw_ring;
        /* dpdk port id*/
        uint16_t port_id;
        /* queue stats */
        uint64_t pkts;
        uint64_t bytes;
        uint64_t errors;

} __rte_cache_aligned;
 148
/* Queue related APIs */

/*
 * RX/TX function prototypes (implemented in axgbe_rxtx.c and the
 * vector variants)
 */

/* Tx queue setup/teardown and start/stop */
void axgbe_dev_tx_queue_release(void *txq);
int  axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                              uint16_t nb_tx_desc, unsigned int socket_id,
                              const struct rte_eth_txconf *tx_conf);
void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
                        char *fw_version, size_t fw_size);

/* Tx burst functions: scalar and vectorized */
uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts);
uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts);

/* Rx queue setup/teardown and start/stop */
void axgbe_dev_rx_queue_release(void *rxq);
int  axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                              uint16_t nb_rx_desc, unsigned int socket_id,
                              const struct rte_eth_rxconf *rx_conf,
                              struct rte_mempool *mb_pool);
void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/* Rx burst functions: simple, scattered, and threshold-refresh variants */
uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts);
uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
                                           struct rte_mbuf **rx_pkts,
                                           uint16_t nb_pkts);
void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
int axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
 193
 194#endif /* _AXGBE_RXTX_H_ */
 195