linux/drivers/net/ethernet/intel/i40e/i40e_txrx.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK      256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160usec, called out as 0xFF0 in register units at the 2usec
 * resolution (8160 itself is 0x1FE0 in hex). So instead of storing the
 * register value, which is the usec value divided by 2, let's use the actual
 * usec values and avoid an excessive amount of translation.
 */
#define I40E_ITR_DYNAMIC        0x8000  /* use top bit as a flag */
#define I40E_ITR_MASK           0x1FFE  /* mask for ITR register value */
#define I40E_MIN_ITR                 2  /* reg uses 2 usec resolution */
#define I40E_ITR_20K                50
#define I40E_ITR_8K                122
#define I40E_MAX_ITR              8160  /* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
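
/* Example: the default Rx/Tx setting below is (0x8000 | 50), for which
 * ITR_IS_DYNAMIC() reports true and ITR_TO_REG() yields 50 usec. An odd
 * user value is rounded up to the 2 usec register resolution by
 * ITR_REG_ALIGN(), e.g. ITR_REG_ALIGN(51) == 52.
 */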

#define I40E_ITR_RX_DEF         (I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF         (I40E_ITR_20K | I40E_ITR_DYNAMIC)

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit to the appropriate
 * register format expected by the firmware when setting interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
        if (intrl >> 2)
                return ((intrl >> 2) | INTRL_ENA);
        else
                return 0;
}
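
/* Example: i40e_intrl_usec_to_reg(40) returns (40 >> 2) | INTRL_ENA == 0x4A,
 * and INTRL_REG_TO_USEC(0x4A) recovers 40, so the conversion round-trips for
 * multiples of 4 usec. Values below 4 usec (e.g. 3) return 0, which leaves
 * rate limiting disabled.
 */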

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
        I40E_IDX_ITR0 = 0,
        I40E_IDX_ITR1 = 1,
        I40E_IDX_ITR2 = 2,
        I40E_ITR_NONE = 3       /* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR    I40E_IDX_ITR0
#define I40E_TX_ITR    I40E_IDX_ITR1

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
        BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
        (((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
          I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds another 384 bytes on top of
 * that, which adds up to 512 bytes of extra data, meaning the smallest
 * allocation we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_RX_DMA_ATTR \
        (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *       up negative.  In these cases we should fall back to the legacy
 *       receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
        int page_size, pad_size;

        page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
        pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

        return pad_size;
}

static inline int i40e_skb_pad(void)
{
        int rx_buf_len;

        /* If a 2K buffer cannot handle a standard Ethernet frame then
         * optimize padding for a 3K buffer instead of a 1.5K buffer.
         *
         * For a 3K buffer we need to add enough padding to allow for
         * tailroom due to NET_IP_ALIGN possibly shifting us out of
         * cache-line alignment.
         */
        if (I40E_2K_TOO_SMALL_WITH_PADDING)
                rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
        else
                rx_buf_len = I40E_RXBUFFER_1536;

        /* if needed make room for NET_IP_ALIGN */
        rx_buf_len -= NET_IP_ALIGN;

        return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
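
/* Rough worked example, assuming 4K pages, 64 byte cache lines,
 * NET_IP_ALIGN == 2 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * == 320 -- all of which are config and kernel-version dependent:
 * SKB_WITH_OVERHEAD(2048) is then 1728, so a padded 1536 byte frame fits
 * in a 2K buffer and I40E_2K_TOO_SMALL_WITH_PADDING is false.
 * i40e_skb_pad() then returns i40e_compute_pad(1534), i.e.
 * 1728 - 1534 == 194 bytes of headroom.
 */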

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
                                     const u64 stat_err_bits)
{
        return !!(rx_desc->wb.qword1.status_error_len &
                  cpu_to_le64(stat_err_bits));
}
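
/* Usage sketch: the Rx clean-up path tests descriptor status bits such as
 * end-of-frame with something like
 *
 *      if (i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))
 *              ... descriptor closes the current frame ...
 */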

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE    32      /* Must be power of 2 */

#define I40E_RX_NEXT_DESC(r, i, n)              \
        do {                                    \
                (i)++;                          \
                if ((i) == (r)->count)          \
                        i = 0;                  \
                (n) = I40E_RX_DESC((r), (i));   \
        } while (0)

#define I40E_MAX_BUFFER_TXD     8
#define I40E_MIN_TX_LEN         17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE          4096
#define I40E_MAX_DATA_PER_TXD           (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
        (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
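
/* That is, 16383 rounded down to a 4K boundary: 0x3FFF & ~0xFFF == 0x3000,
 * so each descriptor is assumed to carry at most 12288 (12K) bytes.
 */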

/**
 * i40e_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
        return ((size * 85) >> 20) + 1;
}
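
/* Worked examples: i40e_txd_use_count(1500) == (127500 >> 20) + 1 == 1;
 * i40e_txd_use_count(12288) == (1044480 >> 20) + 1 == 1, the underestimate
 * near 12K described above (the data still fits, since 12288 ==
 * I40E_MAX_DATA_PER_TXD_ALIGNED); i40e_txd_use_count(32768) ==
 * (2785280 >> 20) + 1 == 3.
 */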

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)

#define I40E_TX_FLAGS_HW_VLAN           BIT(1)
#define I40E_TX_FLAGS_SW_VLAN           BIT(2)
#define I40E_TX_FLAGS_TSO               BIT(3)
#define I40E_TX_FLAGS_IPV4              BIT(4)
#define I40E_TX_FLAGS_IPV6              BIT(5)
#define I40E_TX_FLAGS_TSYN              BIT(8)
#define I40E_TX_FLAGS_FD_SB             BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL        BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK         0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK    0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT   29
#define I40E_TX_FLAGS_VLAN_SHIFT        16

struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
        union {
                struct xdp_frame *xdpf;
                struct sk_buff *skb;
                void *raw_buf;
        };
        unsigned int bytecount;
        unsigned short gso_segs;

        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
};

struct i40e_rx_buffer {
        dma_addr_t dma;
        struct page *page;
        __u32 page_offset;
        __u16 pagecnt_bias;
};

struct i40e_queue_stats {
        u64 packets;
        u64 bytes;
};

struct i40e_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
        u64 tx_done_old;
        u64 tx_linearize;
        u64 tx_force_wb;
        int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
        u64 non_eop_descs;
        u64 alloc_page_failed;
        u64 alloc_buff_failed;
        u64 page_reuse_count;
        u64 realloc_count;
};

enum i40e_ring_state_t {
        __I40E_TX_FDIR_INIT_DONE,
        __I40E_TX_XPS_INIT_DONE,
        __I40E_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
        struct i40e_ring *next;         /* pointer to next ring in q_vector */
        void *desc;                     /* Descriptor ring memory */
        struct device *dev;             /* Used for DMA mapping */
        struct net_device *netdev;      /* netdev ring maps to */
        struct bpf_prog *xdp_prog;
        union {
                struct i40e_tx_buffer *tx_bi;
                struct i40e_rx_buffer *rx_bi;
                struct xdp_buff **rx_bi_zc;
        };
        DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
        u16 queue_index;                /* Queue number of ring */
        u8 dcb_tc;                      /* Traffic class of ring */
        u8 __iomem *tail;

        /* high bit set means dynamic, use accessor routines to read/write.
         * hardware only supports 2us resolution for the ITR registers.
         * these values always store the USER setting, and must be converted
         * before programming to a register.
         */
        u16 itr_setting;

        u16 count;                      /* Number of descriptors */
        u16 reg_idx;                    /* HW register index of the ring */
        u16 rx_buf_len;

        /* used in interrupt processing */
        u16 next_to_use;
        u16 next_to_clean;
        u16 xdp_tx_active;

        u8 atr_sample_rate;
        u8 atr_count;

        bool ring_active;               /* is ring online or not */
        bool arm_wb;            /* do something to arm write back */
        u8 packet_stride;

        u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR                BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED        BIT(1)
#define I40E_TXR_FLAGS_XDP                      BIT(2)

        /* stats structs */
        struct i40e_queue_stats stats;
        struct u64_stats_sync syncp;
        union {
                struct i40e_tx_queue_stats tx_stats;
                struct i40e_rx_queue_stats rx_stats;
        };

        unsigned int size;              /* length of descriptor ring in bytes */
        dma_addr_t dma;                 /* physical address of ring */

        struct i40e_vsi *vsi;           /* Backreference to associated VSI */
        struct i40e_q_vector *q_vector; /* Backreference to associated vector */

        struct rcu_head rcu;            /* to avoid race on free */
        u16 next_to_alloc;
        struct sk_buff *skb;            /* When i40e_clean_rx_ring_irq() must
                                         * return before it sees the EOP for
                                         * the current packet, we save that skb
                                         * here and resume receiving this
                                         * packet the next time
                                         * i40e_clean_rx_ring_irq() is called
                                         * for this ring.
                                         */

        struct i40e_channel *ch;
        struct xdp_rxq_info xdp_rxq;
        struct xdp_umem *xsk_umem;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
        return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
        ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
        ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
        return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
        ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC       0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS     0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS     0x007e
#define I40E_ITR_ADAPTIVE_LATENCY       0x8000
#define I40E_ITR_ADAPTIVE_BULK          0x0000

struct i40e_ring_container {
        struct i40e_ring *ring;         /* pointer to linked list of ring(s) */
        unsigned long next_update;      /* jiffies value of next update */
        unsigned int total_bytes;       /* total bytes processed this int */
        unsigned int total_packets;     /* total packets processed this int */
        u16 count;
        u16 target_itr;                 /* target ITR setting for ring(s) */
        u16 current_itr;                /* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
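
/* For instance, i40e_napi_poll() walks every Tx ring attached to a vector
 * roughly like this (sketch, simplified from i40e_txrx.c):
 *
 *      i40e_for_each_ring(ring, q_vector->tx)
 *              i40e_clean_tx_irq(vsi, ring, budget);
 */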

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
        if (ring->rx_buf_len > (PAGE_SIZE / 2))
                return 1;
#endif
        return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
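
/* Example: with 4K pages, a ring using I40E_RXBUFFER_3072 gets order-1
 * (8K) pages from i40e_rx_pg_size(), while I40E_RXBUFFER_2048 fits two
 * buffers per order-0 (4K) page.
 */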

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                  u32 flags);
int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring:  tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
        void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

        return le32_to_cpu(*(volatile __le32 *)head);
}
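
/* The write-back slot lives one entry past the last descriptor, which is
 * why i40e_setup_tx_descriptors() sizes the ring as count descriptors plus
 * an extra u32. Callers such as i40e_get_tx_pending() compare this value
 * against the tail to count descriptors still in flight.
 */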

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb. Since we
 * always need at least one descriptor, the returned count is never 0.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
        const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        int count = 0, size = skb_headlen(skb);

        for (;;) {
                count += i40e_txd_use_count(size);

                if (!nr_frags--)
                        break;

                size = skb_frag_size(frag++);
        }

        return count;
}
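
/* Example: a linear 1500 byte skb yields a count of 1, while an skb with a
 * 256 byte head and two 16K frags yields 1 + 2 + 2 == 5, since each 16K
 * frag needs two descriptors at 12K per descriptor.
 */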

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
        if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
                return 0;
        return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
        /* Both TSO and single send will work if count is less than 8 */
        if (likely(count < I40E_MAX_BUFFER_TXD))
                return false;

        if (skb_is_gso(skb))
                return __i40e_chk_linearize(skb);

        /* we can support up to 8 data buffers for a single send */
        return count != I40E_MAX_BUFFER_TXD;
}
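
/* Example: a non-GSO skb that maps to exactly 8 buffers is fine (the final
 * check returns false), one that maps to 9 or more must be linearized, and
 * a GSO skb defers to __i40e_chk_linearize() to inspect the frag layout
 * per segment.
 */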

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */