dpdk/drivers/net/netvsc/hn_var.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2009-2018 Microsoft Corp.
 * Copyright (c) 2016 Brocade Communications Systems, Inc.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 */

#include <rte_eal_paging.h>

/*
 * Tunable ethdev params
 */
#define HN_MIN_RX_BUF_SIZE      1024
#define HN_MAX_XFER_LEN         2048
#define HN_MAX_MAC_ADDRS        1
#define HN_MAX_CHANNELS         64

/* Max MTU: the host claims to support 12232B, but the driver caps it at 9K */
#define HN_MTU_MAX              (9 * 1024)

/* Retry interval */
#define HN_CHAN_INTERVAL_US     100

/* Host monitor interval */
#define HN_CHAN_LATENCY_NS      50000

#define HN_TXCOPY_THRESHOLD     512
#define HN_RXCOPY_THRESHOLD     256

#define HN_RX_EXTMBUF_ENABLE    0

#ifndef PAGE_MASK
#define PAGE_MASK (rte_mem_page_size() - 1)
#endif
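
/*
 * Illustrative sketch, not part of the original header: with a
 * power-of-two page size, PAGE_MASK selects the in-page offset bits.
 * The helper below only demonstrates the typical use (splitting an
 * address into page base and in-page offset); its name is hypothetical.
 */
static inline uint32_t
hn_example_page_offset(const void *addr)
{
        /* low-order bits of the address are the offset within the page */
        return (uint32_t)((uintptr_t)addr & PAGE_MASK);
}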

struct hn_data;
struct hn_txdesc;

struct hn_stats {
        uint64_t        packets;
        uint64_t        bytes;
        uint64_t        errors;
        uint64_t        ring_full;
        uint64_t        channel_full;
        uint64_t        multicast;
        uint64_t        broadcast;
        /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
        uint64_t        size_bins[8];
};
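
/*
 * Illustrative sketch, not part of the original header: how a packet
 * length maps onto size_bins[] per the RFC 2819 layout noted above,
 * i.e. [0] undersized (<64), [1] 64, [2] 65-127, [3] 128-255,
 * [4] 256-511, [5] 512-1023, [6] 1024-1518, [7] oversized (>1518).
 * The real accounting is done in the Rx/Tx datapath; the helper name
 * below is hypothetical.
 */
static inline void
hn_example_count_size_bin(struct hn_stats *stats, uint32_t pkt_len)
{
        if (pkt_len < 64)
                stats->size_bins[0]++;  /* undersized */
        else if (pkt_len == 64)
                stats->size_bins[1]++;
        else if (pkt_len < 128)
                stats->size_bins[2]++;
        else if (pkt_len < 256)
                stats->size_bins[3]++;
        else if (pkt_len < 512)
                stats->size_bins[4]++;
        else if (pkt_len < 1024)
                stats->size_bins[5]++;
        else if (pkt_len <= 1518)
                stats->size_bins[6]++;
        else
                stats->size_bins[7]++;  /* oversized */
}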

struct hn_tx_queue {
        struct hn_data  *hv;
        struct vmbus_channel *chan;
        uint16_t        port_id;
        uint16_t        queue_id;
        uint32_t        free_thresh;
        struct rte_mempool *txdesc_pool;
        const struct rte_memzone *tx_rndis_mz;
        void            *tx_rndis;
        rte_iova_t      tx_rndis_iova;

        /* Applied packet transmission aggregation limits. */
        uint32_t        agg_szmax;
        uint32_t        agg_pktmax;
        uint32_t        agg_align;

        /* Packet transmission aggregation states */
        struct hn_txdesc *agg_txd;
        uint32_t        agg_pktleft;
        uint32_t        agg_szleft;
        struct rndis_packet_msg *agg_prevpkt;

        struct hn_stats stats;
};
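
/*
 * Illustrative sketch, not part of the original header: agg_szmax,
 * agg_pktmax and agg_align are the negotiated aggregation limits, while
 * agg_txd/agg_pktleft/agg_szleft/agg_prevpkt describe the RNDIS batch
 * currently being built.  A new packet can typically be appended only
 * while both the remaining packet count and the remaining byte budget
 * of the open batch allow it; otherwise the batch is flushed first.
 * The helper name and exact policy below are assumptions, the
 * authoritative logic lives in the Tx datapath.
 */
static inline bool
hn_example_agg_has_room(const struct hn_tx_queue *txq, uint32_t pkt_size)
{
        return txq->agg_txd != NULL &&
               txq->agg_pktleft > 0 &&
               txq->agg_szleft >= pkt_size;
}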

struct hn_rx_queue {
        struct hn_data  *hv;
        struct vmbus_channel *chan;
        struct rte_mempool *mb_pool;
        struct rte_ring *rx_ring;

        rte_spinlock_t ring_lock;
        uint32_t event_sz;
        uint16_t port_id;
        uint16_t queue_id;
        struct hn_stats stats;

        void *event_buf;
        struct hn_rx_bufinfo *rxbuf_info;
        rte_atomic32_t  rxbuf_outstanding;
};

/* multi-packet data from host */
struct hn_rx_bufinfo {
        struct vmbus_channel *chan;
        struct hn_rx_queue *rxq;
        uint64_t        xactid;
        struct rte_mbuf_ext_shared_info shinfo;
} __rte_cache_aligned;

#define HN_INVALID_PORT UINT16_MAX

enum vf_device_state {
        vf_unknown = 0,
        vf_removed,
        vf_configured,
        vf_started,
        vf_stopped,
};

struct hn_vf_ctx {
        uint16_t        vf_port;

        /* We have taken ownership of this VF port from DPDK */
        bool            vf_attached;

        /* VSC has requested to switch data path to VF */
        bool            vf_vsc_switched;

        /* VSP has reported the VF is present for this NIC */
        bool            vf_vsp_reported;

        enum vf_device_state    vf_state;
};
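
/*
 * Illustrative sketch, not part of the original header: the flags above
 * are consulted together, e.g. traffic is steered to the VF data path
 * only once the VF port has been attached and the VSC has switched the
 * data path over.  The helper name is hypothetical; the authoritative
 * checks live in the VF handling and datapath code.
 */
static inline bool
hn_example_use_vf_datapath(const struct hn_vf_ctx *ctx)
{
        return ctx->vf_attached && ctx->vf_vsc_switched;
}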

struct hn_data {
        struct rte_vmbus_device *vmbus;
        struct hn_rx_queue *primary;
        rte_rwlock_t    vf_lock;
        uint16_t        port_id;

        struct hn_vf_ctx        vf_ctx;

        uint8_t         closed;
        uint8_t         vlan_strip;

        uint32_t        link_status;
        uint32_t        link_speed;

        struct rte_mem_resource *rxbuf_res;     /* UIO resource for Rx */
        uint32_t        rxbuf_section_cnt;      /* # of Rx sections */
        uint32_t        rx_copybreak;
        uint32_t        rx_extmbuf_enable;
        uint16_t        max_queues;             /* Max available queues */
        uint16_t        num_queues;
        uint64_t        rss_offloads;

        rte_spinlock_t  chim_lock;
        struct rte_mem_resource *chim_res;      /* UIO resource for Tx */
        struct rte_bitmap *chim_bmap;           /* Send buffer map */
        void            *chim_bmem;
        uint32_t        tx_copybreak;
        uint32_t        chim_szmax;             /* Max size per buffer */
        uint32_t        chim_cnt;               /* Max packets per buffer */

        uint32_t        latency;
        uint32_t        nvs_ver;
        uint32_t        ndis_ver;
        uint32_t        rndis_agg_size;
        uint32_t        rndis_agg_pkts;
        uint32_t        rndis_agg_align;

        volatile uint32_t  rndis_pending;
        rte_atomic32_t  rndis_req_id;
        uint8_t         rndis_resp[256];

        uint32_t        rss_hash;
        uint8_t         rss_key[40];
        uint16_t        rss_ind[128];

        struct rte_eth_dev_owner owner;

        struct vmbus_channel *channels[HN_MAX_CHANNELS];

        struct rte_devargs devargs;
        int             eal_hot_plug_retry;
};
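
/*
 * Illustrative sketch, not part of the original header: chim_bmap
 * tracks which Tx send-buffer ("chimney") sections are free, guarded by
 * chim_lock.  An allocation is typically a scan-and-clear of the bitmap
 * along the lines below (assumes <rte_bitmap.h>, <rte_spinlock.h> and
 * <rte_common.h> are available to the includer).  The helper name and
 * the UINT32_MAX "no free section" value are placeholders; the driver's
 * real allocator lives in the Tx path.
 */
static inline uint32_t
hn_example_chim_alloc(struct hn_data *hv)
{
        uint32_t index = UINT32_MAX;    /* placeholder for "no free section" */
        uint64_t slab = 0;

        rte_spinlock_lock(&hv->chim_lock);
        if (rte_bitmap_scan(hv->chim_bmap, &index, &slab)) {
                index += rte_bsf64(slab);               /* first set bit within the slab */
                rte_bitmap_clear(hv->chim_bmap, index); /* mark the section as in use */
        }
        rte_spinlock_unlock(&hv->chim_lock);

        return index;
}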

static inline struct vmbus_channel *
hn_primary_chan(const struct hn_data *hv)
{
        return hv->channels[0];
}

uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
                       uint32_t tx_limit);

uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                      uint16_t nb_pkts);
uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                      uint16_t nb_pkts);

int     hn_chim_init(struct rte_eth_dev *dev);
void    hn_chim_uninit(struct rte_eth_dev *dev);
int     hn_dev_link_update(struct rte_eth_dev *dev, int wait);
int     hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                              uint16_t nb_desc, unsigned int socket_id,
                              const struct rte_eth_txconf *tx_conf);
void    hn_dev_tx_queue_release(void *arg);
void    hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
                             struct rte_eth_txq_info *qinfo);
int     hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
int     hn_dev_tx_descriptor_status(void *arg, uint16_t offset);

struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
                                      uint16_t queue_id,
                                      unsigned int socket_id);
int     hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
                              uint16_t queue_idx, uint16_t nb_desc,
                              unsigned int socket_id,
                              const struct rte_eth_rxconf *rx_conf,
                              struct rte_mempool *mp);
void    hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
                             struct rte_eth_rxq_info *qinfo);
void    hn_dev_rx_queue_release(void *arg);
uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
int     hn_dev_rx_queue_status(void *rxq, uint16_t offset);
void    hn_dev_free_queues(struct rte_eth_dev *dev);

/*
 * Get VF device for existing netvsc device
 * Assumes vf_lock is held.
 */
static inline struct rte_eth_dev *
hn_get_vf_dev(const struct hn_data *hv)
{
        if (hv->vf_ctx.vf_attached)
                return &rte_eth_devices[hv->vf_ctx.vf_port];
        else
                return NULL;
}
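
/*
 * Illustrative sketch, not part of the original header: callers take
 * vf_lock for reading around hn_get_vf_dev(), since VF hot plug and
 * removal can change the VF port underneath them (assumes
 * <rte_rwlock.h> is available to the includer).  The helper below is
 * hypothetical and only demonstrates the locking discipline.
 */
static inline bool
hn_example_vf_present(struct hn_data *hv)
{
        struct rte_eth_dev *vf_dev;
        bool present;

        rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        present = (vf_dev != NULL);
        rte_rwlock_read_unlock(&hv->vf_lock);

        return present;
}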

int     hn_vf_info_get(struct hn_data *hv,
                       struct rte_eth_dev_info *info);
int     hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
int     hn_vf_configure_locked(struct rte_eth_dev *dev,
                               const struct rte_eth_conf *dev_conf);
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
int     hn_vf_start(struct rte_eth_dev *dev);
void    hn_vf_reset(struct rte_eth_dev *dev);
int     hn_vf_close(struct rte_eth_dev *dev);
int     hn_vf_stop(struct rte_eth_dev *dev);

int     hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
int     hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
int     hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
int     hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
int     hn_vf_mc_addr_list(struct rte_eth_dev *dev,
                           struct rte_ether_addr *mc_addr_set,
                           uint32_t nb_mc_addr);

int     hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_txconf *tx_conf);
void    hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
int     hn_vf_tx_queue_status(struct hn_data *hv, uint16_t queue_id, uint16_t offset);

int     hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
                             uint16_t queue_idx, uint16_t nb_desc,
                             unsigned int socket_id,
                             const struct rte_eth_rxconf *rx_conf,
                             struct rte_mempool *mp);
void    hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);

int     hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int     hn_vf_stats_reset(struct rte_eth_dev *dev);
int     hn_vf_xstats_get_names(struct rte_eth_dev *dev,
                               struct rte_eth_xstat_name *xstats_names,
                               unsigned int size);
int     hn_vf_xstats_get(struct rte_eth_dev *dev,
                         struct rte_eth_xstat *xstats,
                         unsigned int offset, unsigned int n);
int     hn_vf_xstats_reset(struct rte_eth_dev *dev);
int     hn_vf_rss_hash_update(struct rte_eth_dev *dev,
                              struct rte_eth_rss_conf *rss_conf);
int     hn_vf_reta_hash_update(struct rte_eth_dev *dev,
                               struct rte_eth_rss_reta_entry64 *reta_conf,
                               uint16_t reta_size);
int     hn_eth_rmv_event_callback(uint16_t port_id,
                                  enum rte_eth_event_type event __rte_unused,
                                  void *cb_arg, void *out __rte_unused);