linux/drivers/net/ethernet/intel/ice/ice_txrx.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#define ICE_DFLT_IRQ_WORK       256
#define ICE_RXBUF_2048          2048
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD         8
#define ICE_MIN_TX_LEN          17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * To align with read requests, we round this value down to the nearest
 * 4K, which is our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE   4096
#define ICE_MAX_DATA_PER_TXD    (16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
        (~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
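/* Worked example (illustrative, not part of the upstream header):
 * ICE_MAX_DATA_PER_TXD is 0x3FFF and ~(4096 - 1) is ~0xFFF, so
 * ICE_MAX_DATA_PER_TXD_ALIGNED = 0x3FFF & ~0xFFF = 0x3000 = 12288,
 * i.e. each data descriptor carries at most 12K of payload.
 */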

#define ICE_RX_BUF_WRITE        16      /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG    128

/* We assume the cache line is always 64 bytes for ice. To make sure that
 * assumption holds, probe reads GLPCI_CNF2 and prints a warning if it
 * reports a 128-byte cache line. We do it this way because we do not want
 * to read the GLPCI_CNF2 register, or a variable holding its value, on
 * every pass through the Tx path.
 */
#define ICE_CACHE_LINE_BYTES            64
#define ICE_DESCS_PER_CACHE_LINE        (ICE_CACHE_LINE_BYTES / \
                                         sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC          1
#define ICE_DESCS_FOR_SKB_DATA_PTR      1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
                     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
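/* Worked example (illustrative): struct ice_tx_desc is 16 bytes (two
 * __le64 words), so ICE_DESCS_PER_CACHE_LINE is 64 / 16 = 4. With the
 * typical MAX_SKB_FRAGS of 17 this makes DESC_NEEDED
 * 17 + 1 + 4 + 1 = 23 descriptors in the worst case.
 */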
#define ICE_DESC_UNUSED(R)      \
        ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
        (R)->next_to_clean - (R)->next_to_use - 1)
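/* Worked example (illustrative): on a ring with count = 512,
 * next_to_clean = 5 and next_to_use = 10, next_to_clean is not greater
 * than next_to_use, so ICE_DESC_UNUSED evaluates to
 * 512 + 5 - 10 - 1 = 506. One slot is always left unused so that a full
 * ring can be distinguished from an empty one.
 */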

#define ICE_TX_FLAGS_TSO        BIT(0)
#define ICE_TX_FLAGS_HW_VLAN    BIT(1)
#define ICE_TX_FLAGS_SW_VLAN    BIT(2)
#define ICE_TX_FLAGS_VLAN_M     0xffff0000
#define ICE_TX_FLAGS_VLAN_S     16
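/* Usage sketch (illustrative): the VLAN tag lives in the upper 16 bits
 * of tx_flags and is recovered with the mask/shift pair above, e.g.
 *
 *      u16 vlan_tag = (tx_flags & ICE_TX_FLAGS_VLAN_M) >>
 *                     ICE_TX_FLAGS_VLAN_S;
 */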

struct ice_tx_buf {
        struct ice_tx_desc *next_to_watch;
        struct sk_buff *skb;
        unsigned int bytecount;
        unsigned short gso_segs;
        u32 tx_flags;
        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
};

struct ice_tx_offload_params {
        u8 header_len;
        u32 td_cmd;
        u32 td_offset;
        u32 td_l2tag1;
        u16 cd_l2tag2;
        u32 cd_tunnel_params;
        u64 cd_qw1;
        struct ice_ring *tx_ring;
};

struct ice_rx_buf {
        struct sk_buff *skb;
        dma_addr_t dma;
        struct page *page;
        unsigned int page_offset;
};

struct ice_q_stats {
        u64 pkts;
        u64 bytes;
};

struct ice_txq_stats {
        u64 restart_q;
        u64 tx_busy;
        u64 tx_linearize;
        int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
        u64 non_eop_descs;
        u64 alloc_page_failed;
        u64 alloc_buf_failed;
        u64 page_reuse_count;
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; it is instead a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
        ICE_IDX_ITR0 = 0,
        ICE_IDX_ITR1 = 1,
        ICE_IDX_ITR2 = 2,
        ICE_ITR_NONE = 3        /* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
        ICE_RX_DTYPE_NO_SPLIT           = 0,
        ICE_RX_DTYPE_HEADER_SPLIT       = 1,
        ICE_RX_DTYPE_SPLIT_ALWAYS       = 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR      ICE_IDX_ITR0
#define ICE_TX_ITR      ICE_IDX_ITR1
#define ICE_ITR_8K      124
#define ICE_ITR_20K     50
#define ICE_ITR_MAX     8160
#define ICE_DFLT_TX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
#define ICE_DFLT_RX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC)
#define ICE_ITR_DYNAMIC 0x8000  /* used as flag for itr_setting */
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting)     ((setting) & ~ICE_ITR_DYNAMIC)
#define ICE_ITR_GRAN_S          1       /* Assume ITR granularity is 2us */
#define ICE_ITR_MASK            0x1FFE  /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)  __ALIGN_MASK(setting, ~ICE_ITR_MASK)
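/* Worked example (illustrative): ICE_DFLT_TX_ITR is (50 | 0x8000) =
 * 0x8032, so ITR_IS_DYNAMIC() is true for it and ITR_TO_REG() strips the
 * flag to yield 50, i.e. a 50 usec interval (a 20K interrupts/sec rate).
 */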

#define ICE_DFLT_INTRL  0

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
#define ICE_TX_LEGACY   1

/* descriptor ring, associated with a VSI */
struct ice_ring {
        struct ice_ring *next;          /* pointer to next ring in q_vector */
        void *desc;                     /* Descriptor ring memory */
        struct device *dev;             /* Used for DMA mapping */
        struct net_device *netdev;      /* netdev ring maps to */
        struct ice_vsi *vsi;            /* Backreference to associated VSI */
        struct ice_q_vector *q_vector;  /* Backreference to associated vector */
        u8 __iomem *tail;
        union {
                struct ice_tx_buf *tx_buf;
                struct ice_rx_buf *rx_buf;
        };
        u16 q_index;                    /* Queue number of ring */
        u32 txq_teid;                   /* Added Tx queue TEID */

        u16 count;                      /* Number of descriptors */
        u16 reg_idx;                    /* HW register index of the ring */

        /* used in interrupt processing */
        u16 next_to_use;
        u16 next_to_clean;

        u8 ring_active;                 /* is ring online or not */

        /* stats structs */
        struct ice_q_stats      stats;
        struct u64_stats_sync syncp;
        union {
                struct ice_txq_stats tx_stats;
                struct ice_rxq_stats rx_stats;
        };

        unsigned int size;              /* length of descriptor ring in bytes */
        dma_addr_t dma;                 /* physical address of ring */
        struct rcu_head rcu;            /* to avoid race on free */
        u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;

enum ice_latency_range {
        ICE_LOWEST_LATENCY = 0,
        ICE_LOW_LATENCY = 1,
        ICE_BULK_LATENCY = 2,
        ICE_ULTRA_LATENCY = 3,
};

struct ice_ring_container {
        /* head of linked-list of rings */
        struct ice_ring *ring;
        unsigned long next_update;      /* jiffies value of next queue update */
        unsigned int total_bytes;       /* total bytes processed in this interrupt */
        unsigned int total_pkts;        /* total packets processed in this interrupt */
        enum ice_latency_range latency_range;
        int itr_idx;            /* index in the interrupt vector */
        u16 target_itr;         /* value in usecs divided by the hw->itr_gran */
        u16 current_itr;        /* value in usecs divided by the hw->itr_gran */
        /* the high bit set means dynamic ITR; the remaining bits store the
         * user-readable ITR value in usecs, which must be converted before
         * being programmed into a register.
         */
        u16 itr_setting;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
        for (pos = (head).ring; pos; pos = pos->next)
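/* Usage sketch (illustrative): walking every Tx ring hanging off a
 * vector's ring container, e.g. from the NAPI poll routine. The
 * clean_ring() helper here is hypothetical:
 *
 *      struct ice_ring *ring;
 *
 *      ice_for_each_ring(ring, q_vector->tx)
 *              clean_ring(ring);
 */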

bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
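/* Typical Rx ring lifecycle (illustrative sketch; assumes the caller has
 * filled in rx_ring->count and the backing dev/netdev fields):
 *
 *      if (!ice_setup_rx_ring(rx_ring))
 *              ice_alloc_rx_bufs(rx_ring, ICE_DESC_UNUSED(rx_ring));
 *      ...
 *      ice_clean_rx_ring(rx_ring);
 *      ice_free_rx_ring(rx_ring);
 */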

#endif /* _ICE_TXRX_H_ */