/* dpdk/drivers/net/ena/ena_ethdev.h */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
   3 * All rights reserved.
   4 */
   5
   6#ifndef _ENA_ETHDEV_H_
   7#define _ENA_ETHDEV_H_
   8
   9#include <rte_atomic.h>
  10#include <rte_ether.h>
  11#include <ethdev_driver.h>
  12#include <ethdev_pci.h>
  13#include <rte_cycles.h>
  14#include <rte_pci.h>
  15#include <rte_bus_pci.h>
  16#include <rte_timer.h>
  17#include <rte_dev.h>
  18#include <rte_net.h>
  19
  20#include "ena_com.h"
  21
/* PCI BAR indices: BAR 0 holds device registers, BAR 2 device memory. */
#define ENA_REGS_BAR    0
#define ENA_MEM_BAR     2

/* Driver-wide limits. */
#define ENA_MAX_NUM_QUEUES      128
#define ENA_MIN_FRAME_LEN       64
#define ENA_NAME_MAX_LEN        20
/* Max number of buffer segments a single packet may span (matches ena_bufs[]). */
#define ENA_PKT_MAX_BUFS        17
#define ENA_RX_BUF_MIN_SIZE     1400
#define ENA_DEFAULT_RING_SIZE   1024

#define ENA_MIN_MTU             128

/* Flag for the ena_com MMIO read mechanism: bypass the read register. */
#define ENA_MMIO_DISABLE_REG_READ       BIT(0)

/* Keep-alive watchdog timeout, converted to rte timer cycles. */
#define ENA_WD_TIMEOUT_SEC      3
#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())

/* Missing-Tx-completion detection: timeout (timer cycles), number of Tx
 * queues examined per check pass, and default completion budget.
 */
#define ENA_TX_TIMEOUT                  (5 * rte_get_timer_hz())
#define ENA_MONITORED_TX_QUEUES         3
#define ENA_DEFAULT_MISSING_COMP        256U
  42
/* While processing submitted and completed descriptors (Rx and Tx path
 * respectively) in a loop, it is desirable to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during the cleanup phase
 * Hence the utilization ratio of 1/8 of the queue size, or the max value if
 * the size of the ring is very big - like 8k Rx rings.
 */
#define ENA_REFILL_THRESH_DIVIDER      8
#define ENA_REFILL_THRESH_PACKET       256

/* Ring index arithmetic; 'mask' is assumed to be ring_size - 1
 * (rings are power-of-2 sized — see size_mask in struct ena_ring).
 */
#define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask))
#define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask))

/* RSS redirection table: 2^7 = 128 entries. */
#define ENA_RX_RSS_TABLE_LOG_SIZE       7
#define ENA_RX_RSS_TABLE_SIZE           (1 << ENA_RX_RSS_TABLE_LOG_SIZE)

/* RSS hash key size, in bytes. */
#define ENA_HASH_KEY_SIZE               40

/* RSS hash field types handled by this PMD. */
#define ENA_ALL_RSS_HF (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
                        RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)

/* Tx and Rx IO queues share one index space: even indices are Tx,
 * odd indices are Rx.
 */
#define ENA_IO_TXQ_IDX(q)               (2 * (q))
#define ENA_IO_RXQ_IDX(q)               (2 * (q) + 1)
/* Reversed version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)           (((q) - 1) / 2)
  68
/* Process-shared PMD data; the definition lives in the PMD .c file. */
extern struct ena_shared_data *ena_shared_data;

struct ena_adapter; /* Forward declaration; full definition below. */
  72
/* Discriminates the Rx/Tx unions inside struct ena_ring. */
enum ena_ring_type {
        ENA_RING_TYPE_RX = 1,
        ENA_RING_TYPE_TX = 2,
};
  77
/* Per-slot Tx context, kept from submission until completion. */
struct ena_tx_buffer {
        struct rte_mbuf *mbuf;          /* Head of the submitted mbuf chain */
        unsigned int tx_descs;          /* Descriptors consumed by this packet */
        unsigned int num_of_bufs;       /* Valid entries in bufs[] below */
        /* Submission timestamp; presumably compared against ENA_TX_TIMEOUT
         * for missing-completion detection — confirm in ena_ethdev.c.
         */
        uint64_t timestamp;
        bool print_once;                /* Limits repeated log output for this slot */
        struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
};
  86
/* Rx buffer holds only pointer to the mbuf - may be expanded in the future */
struct ena_rx_buffer {
        struct rte_mbuf *mbuf;          /* Mbuf backing this Rx descriptor */
        struct ena_com_buf ena_buf;     /* Buffer descriptor handed to ena_com */
};
  92
/* In/out context for computing the maximum supported queue sizes and
 * scatter-gather list lengths from the device feature set.
 */
struct ena_calc_queue_size_ctx {
        struct ena_com_dev_get_features_ctx *get_feat_ctx; /* Device features (input) */
        struct ena_com_dev *ena_dev;    /* ena_com device handle (input) */
        u32 max_rx_queue_size;          /* Result: max Rx ring size */
        u32 max_tx_queue_size;          /* Result: max Tx ring size */
        u16 max_tx_sgl_size;            /* Result: max Tx SGL entries */
        u16 max_rx_sgl_size;            /* Result: max Rx SGL entries */
};
 101
/* Per-Tx-queue statistics; field names mirror the xstats the PMD exposes. */
struct ena_stats_tx {
        u64 cnt;                /* Packets transmitted */
        u64 bytes;              /* Bytes transmitted */
        u64 prepare_ctx_err;    /* Tx context preparation failures */
        u64 linearize;          /* Mbuf chains that had to be linearized */
        u64 linearize_failed;   /* Linearization attempts that failed */
        u64 tx_poll;            /* Completion poll invocations */
        u64 doorbells;          /* Doorbell writes issued */
        u64 bad_req_id;         /* Completions with an invalid request id */
        u64 available_desc;     /* Free descriptors (snapshot, not a counter?) — verify in .c */
        u64 missed_tx;          /* Packets with missing Tx completions */
};
 114
/* Per-Rx-queue statistics; field names mirror the xstats the PMD exposes. */
struct ena_stats_rx {
        u64 cnt;                /* Packets received */
        u64 bytes;              /* Bytes received */
        u64 refill_partial;     /* Refills that could not fill the whole batch */
        u64 bad_csum;           /* Packets with a bad checksum reported by HW */
        u64 mbuf_alloc_fail;    /* Mbuf allocation failures during refill */
        u64 bad_desc_num;       /* Completions with an invalid descriptor count */
        u64 bad_req_id;         /* Completions with an invalid request id */
};
 124
/*
 * Descriptor ring state shared by the Rx and Tx fast paths. The 'type'
 * field selects which members of the anonymous unions below are valid.
 */
struct ena_ring {
        u16 next_to_use;                /* Producer index */
        u16 next_to_clean;              /* Consumer index */
        uint64_t last_cleanup_ticks;    /* Timestamp of the last cleanup pass */

        enum ena_ring_type type;        /* ENA_RING_TYPE_RX or ENA_RING_TYPE_TX */
        enum ena_admin_placement_policy_type tx_mem_queue_type; /* Regular vs LLQ placement */

        /* Indicate there are Tx packets pushed to the device and wait for db */
        bool pkts_without_db;

        /* Holds the empty requests for TX/RX OOO completions */
        union {
                uint16_t *empty_tx_reqs;
                uint16_t *empty_rx_reqs;
        };

        union {
                struct ena_tx_buffer *tx_buffer_info; /* context of tx packet */
                struct ena_rx_buffer *rx_buffer_info; /* context of rx packet */
        };
        /* Scratch array used when bulk-allocating mbufs for Rx refill */
        struct rte_mbuf **rx_refill_buffer;
        unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
        unsigned int size_mask; /* ring_size - 1; assumes power-of-2 ring size */

        struct ena_com_io_cq *ena_com_io_cq;    /* ena_com completion queue handle */
        struct ena_com_io_sq *ena_com_io_sq;    /* ena_com submission queue handle */

        union {
                uint16_t tx_free_thresh;
                uint16_t rx_free_thresh;
        };

        struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]
                                                __rte_cache_aligned;

        struct rte_mempool *mb_pool;    /* Mempool for Rx mbuf allocation */
        unsigned int port_id;           /* Owning ethdev port id */
        unsigned int id;                /* Queue index within the port */
        /* Max length PMD can push to device for LLQ */
        uint8_t tx_max_header_size;
        int configured;                 /* Non-zero once queue setup completed */

        /* Intermediate buffer for LLQ pushes — presumably a bounce buffer;
         * confirm usage in ena_ethdev.c.
         */
        uint8_t *push_buf_intermediate_buf;

        struct ena_adapter *adapter;    /* Back-pointer to the owning adapter */
        uint64_t offloads;              /* Offload flags requested at queue setup */
        u16 sgl_size;                   /* Max scatter-gather list length */

        bool disable_meta_caching;      /* Force Tx meta descriptors on every packet */

        union {
                struct ena_stats_rx rx_stats;
                struct ena_stats_tx tx_stats;
        };

        unsigned int numa_socket_id;    /* NUMA node for ring allocations */

        uint32_t missing_tx_completion_threshold;
} __rte_cache_aligned;
 185
/* Lifecycle states of the adapter (see 'state' in struct ena_adapter). */
enum ena_adapter_state {
        ENA_ADAPTER_STATE_FREE    = 0,
        ENA_ADAPTER_STATE_INIT    = 1,
        ENA_ADAPTER_STATE_RUNNING = 2,
        ENA_ADAPTER_STATE_STOPPED = 3,
        ENA_ADAPTER_STATE_CONFIG  = 4,
        ENA_ADAPTER_STATE_CLOSED  = 5,
};
 194
/* Driver-level error counters; atomics suggest updates from multiple
 * lcores — confirm against the data-path code.
 */
struct ena_driver_stats {
        rte_atomic64_t ierrors;         /* Rx error packets */
        rte_atomic64_t oerrors;         /* Tx error packets */
        rte_atomic64_t rx_nombuf;       /* Rx drops due to mbuf exhaustion */
        u64 rx_drops;                   /* Rx drops reported by the device */
};
 201
/* Device-level counters exposed as extended statistics. */
struct ena_stats_dev {
        u64 wd_expired;         /* Keep-alive watchdog expirations */
        u64 dev_start;          /* Number of dev_start calls */
        u64 dev_stop;           /* Number of dev_stop calls */
        /*
         * Tx drops cannot be reported as the driver statistic, because DPDK
         * rte_eth_stats structure isn't providing appropriate field for that.
         * As a workaround it is being published as an extended statistic.
         */
        u64 tx_drops;
};
 213
/* ENI (Elastic Network Interface) traffic-shaping counters read from
 * the device; each counts packets shaped by a specific allowance.
 */
struct ena_stats_eni {
        /*
         * The number of packets shaped due to inbound aggregate BW
         * allowance being exceeded
         */
        uint64_t bw_in_allowance_exceeded;
        /*
         * The number of packets shaped due to outbound aggregate BW
         * allowance being exceeded
         */
        uint64_t bw_out_allowance_exceeded;
        /* The number of packets shaped due to PPS allowance being exceeded */
        uint64_t pps_allowance_exceeded;
        /*
         * The number of packets shaped due to connection tracking
         * allowance being exceeded and leading to failure in establishment
         * of new connections
         */
        uint64_t conntrack_allowance_exceeded;
        /*
         * The number of packets shaped due to linklocal packet rate
         * allowance being exceeded
         */
        uint64_t linklocal_allowance_exceeded;
};
 239
/* Offload capability masks, split by traffic direction. */
struct ena_offloads {
        uint32_t tx_offloads;
        uint32_t rx_offloads;
};
 244
/* board specific private data structure */
struct ena_adapter {
        /* OS defined structs */
        struct rte_eth_dev_data *edev_data;

        /* ena_com HW-abstraction device handle */
        struct ena_com_dev ena_dev __rte_cache_aligned;

        /* TX */
        struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
        u32 max_tx_ring_size;
        u16 max_tx_sgl_size;

        /* RX */
        struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
        u32 max_rx_ring_size;
        u16 max_rx_sgl_size;

        u32 max_num_io_queues;
        u16 max_mtu;
        struct ena_offloads offloads;   /* Offload capabilities of the device */

        /* The admin queue isn't protected by the lock and is used to
         * retrieve statistics from the device. As there is no guarantee that
         * application won't try to get statistics from multiple threads, it is
         * safer to lock the queue to avoid admin queue failure.
         */
        rte_spinlock_t admin_lock;

        int id_number;                  /* Sequential adapter id */
        char name[ENA_NAME_MAX_LEN];
        u8 mac_addr[RTE_ETHER_ADDR_LEN];

        void *regs;             /* Mapped registers BAR — presumably ENA_REGS_BAR */
        void *dev_mem_base;     /* Mapped device memory BAR — presumably ENA_MEM_BAR */

        struct ena_driver_stats *drv_stats;     /* Allocated separately; see definition above */
        enum ena_adapter_state state;

        /* Offloads: what the HW supports vs what the app selected */
        uint64_t tx_supported_offloads;
        uint64_t tx_selected_offloads;
        uint64_t rx_supported_offloads;
        uint64_t rx_selected_offloads;

        bool link_status;

        enum ena_regs_reset_reason_types reset_reason;  /* Reason for device reset */

        /* Keep-alive watchdog state */
        struct rte_timer timer_wd;
        uint64_t timestamp_wd;          /* Last keep-alive event, in timer cycles */
        uint64_t keep_alive_timeout;    /* Allowed gap before watchdog expiry */

        struct ena_stats_dev dev_stats;
        struct ena_stats_eni eni_stats;

        bool trigger_reset;     /* Set to request a device reset */

        bool wd_state;          /* Watchdog active flag */

        bool use_large_llq_hdr; /* Use the large LLQ header size */

        /* Missing-Tx-completion detection state */
        uint32_t last_tx_comp_qid;              /* Next Tx queue to examine */
        uint64_t missing_tx_completion_to;      /* Completion timeout, timer cycles */
        uint64_t missing_tx_completion_budget;  /* Max completions checked per pass */
        uint64_t tx_cleanup_stall_delay;
};
 310
/* RSS ethdev callbacks and helpers; implemented outside this header.
 * All return 0 on success or a negative errno-style code on failure
 * (per the ethdev callback convention).
 */
int ena_rss_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size);
int ena_rss_reta_query(struct rte_eth_dev *dev,
                       struct rte_eth_rss_reta_entry64 *reta_conf,
                       uint16_t reta_size);
int ena_rss_hash_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_conf *rss_conf);
int ena_rss_hash_conf_get(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf);
int ena_rss_configure(struct ena_adapter *adapter);
 322
 323#endif /* _ENA_ETHDEV_H_ */
 324