linux/drivers/net/ethernet/intel/iavf/iavf_txrx.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK      256

/* The datasheets for the X710 and XL710 indicate that the maximum value for
 * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual microsecond
 * values and avoid an excessive amount of translation.
 */
#define IAVF_ITR_DYNAMIC        0x8000  /* use top bit as a flag */
#define IAVF_ITR_MASK           0x1FFE  /* mask for ITR register value */
#define IAVF_MIN_ITR                 2  /* reg uses 2 usec resolution */
#define IAVF_ITR_100K               10  /* all values below must be even */
#define IAVF_ITR_50K                20
#define IAVF_ITR_20K                50
#define IAVF_ITR_18K                60
#define IAVF_ITR_8K                122
#define IAVF_MAX_ITR              8160  /* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))

#define IAVF_ITR_RX_DEF         (IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
#define IAVF_ITR_TX_DEF         (IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
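
/* Worked example (illustrative only, not part of the driver API): with the
 * defaults above, IAVF_ITR_RX_DEF == 50 | 0x8000 == 0x8032.  That encodes a
 * 50 usec interval (20K interrupts/sec) with adaptive (dynamic) moderation
 * enabled:
 *
 *     ITR_IS_DYNAMIC(IAVF_ITR_RX_DEF)  -> true  (top bit set)
 *     ITR_TO_REG(IAVF_ITR_RX_DEF)      -> 50    (flag bit stripped, usecs)
 *
 * Because the hardware register counts in 2 usec units (see IAVF_MIN_ITR),
 * the 50 usec value is halved when it is finally written to the register;
 * this header deliberately stores the user-visible microsecond value.
 */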

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define IAVF_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define IAVF_INTRL_8K              125     /* 8000 ints/sec */
#define IAVF_INTRL_62K             16      /* 62500 ints/sec */
#define IAVF_INTRL_83K             12      /* 83333 ints/sec */
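
/* Worked example (illustrative only): converting the 8K rate limit of
 * 125 usec to a register value and back shows the 4 usec resolution:
 *
 *     INTRL_USEC_TO_REG(125) == (125 >> 2) | INTRL_ENA == 31 | 0x40 == 0x5F
 *     INTRL_REG_TO_USEC(0x5F) == (0x5F & ~0x40) << 2 == 31 << 2 == 124
 *
 * The round trip loses the low two bits (125 -> 124), which is expected
 * since the register only counts in 4 usec units, and a zero setting maps
 * to 0 with the enable bit cleared.
 */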

#define IAVF_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * mentioning ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum iavf_dyn_idx_t {
        IAVF_IDX_ITR0 = 0,
        IAVF_IDX_ITR1 = 1,
        IAVF_IDX_ITR2 = 2,
        IAVF_ITR_NONE = 3       /* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define IAVF_RX_ITR    IAVF_IDX_ITR0
#define IAVF_TX_ITR    IAVF_IDX_ITR1
#define IAVF_PE_ITR    IAVF_IDX_ITR2

/* Supported RSS offloads */
#define IAVF_DEFAULT_RSS_HENA ( \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))

#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define IAVF_RXBUFFER_256   256
#define IAVF_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define IAVF_RXBUFFER_2048  2048
#define IAVF_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define IAVF_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes;
 * this adds up to 512 bytes of extra data, meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc

#define IAVF_RX_DMA_ATTR \
        (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *       up negative.  In these cases we should fall back to the legacy
 *       receive path.
 */
#if (PAGE_SIZE < 8192)
#define IAVF_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))

static inline int iavf_compute_pad(int rx_buf_len)
{
        int page_size, pad_size;

        page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
        pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

        return pad_size;
}

static inline int iavf_skb_pad(void)
{
        int rx_buf_len;

        /* If a 2K buffer cannot handle a standard Ethernet frame then
         * optimize padding for a 3K buffer instead of a 1.5K buffer.
         *
         * For a 3K buffer we need to add enough padding to allow for
         * tailroom due to NET_IP_ALIGN possibly shifting us out of
         * cache-line alignment.
         */
        if (IAVF_2K_TOO_SMALL_WITH_PADDING)
                rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
        else
                rx_buf_len = IAVF_RXBUFFER_1536;

        /* if needed make room for NET_IP_ALIGN */
        rx_buf_len -= NET_IP_ALIGN;

        return iavf_compute_pad(rx_buf_len);
}

#define IAVF_SKB_PAD iavf_skb_pad()
#else
#define IAVF_2K_TOO_SMALL_WITH_PADDING false
#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
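
/* Worked example (illustrative only; exact numbers depend on the kernel
 * configuration): on a 4K-page x86_64 build, NET_IP_ALIGN is 0, so
 * iavf_skb_pad() evaluates iavf_compute_pad(1536):
 *
 *     page_size = ALIGN(1536, 4096 / 2)            = 2048
 *     pad_size  = SKB_WITH_OVERHEAD(2048) - 1536
 *               = (2048 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 *                 - 1536
 *
 * Assuming the shared info overhead rounds to 320 bytes on that build, the
 * headroom reserved in front of each received frame comes out to 192 bytes.
 */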

/**
 * iavf_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
                                     const u64 stat_err_bits)
{
        return !!(rx_desc->wb.qword1.status_error_len &
                  cpu_to_le64(stat_err_bits));
}
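
/* Usage sketch (illustrative only): a caller in the Rx clean-up path can
 * test any status/error bit of the writeback descriptor by passing the
 * already-shifted bit mask, for example the "descriptor done" bit:
 *
 *     if (!iavf_test_staterr(rx_desc,
 *                            BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)))
 *             break;  // descriptor not yet written back by hardware
 *
 * IAVF_RX_DESC_STATUS_DD_SHIFT is assumed here to come from iavf_type.h;
 * the point is only that the mask is given in host bit positions and the
 * helper performs the endianness conversion internally.
 */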

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IAVF_RX_INCREMENT(r, i) \
        do {                                    \
                (i)++;                          \
                if ((i) == (r)->count)          \
                        (i) = 0;                \
                (r)->next_to_clean = (i);       \
        } while (0)

#define IAVF_RX_NEXT_DESC(r, i, n)              \
        do {                                    \
                (i)++;                          \
                if ((i) == (r)->count)          \
                        (i) = 0;                \
                (n) = IAVF_RX_DESC((r), (i));   \
        } while (0)

#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)             \
        do {                                            \
                IAVF_RX_NEXT_DESC((r), (i), (n));       \
                prefetch((n));                          \
        } while (0)

#define IAVF_MAX_BUFFER_TXD     8
#define IAVF_MIN_TX_LEN         17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IAVF_MAX_READ_REQ_SIZE          4096
#define IAVF_MAX_DATA_PER_TXD           (16 * 1024 - 1)
#define IAVF_MAX_DATA_PER_TXD_ALIGNED \
        (IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))
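
/* Worked values (illustrative only): IAVF_MAX_DATA_PER_TXD is
 * 16 * 1024 - 1 = 16383 (0x3FFF).  Masking off the low 4K's worth of bits
 * with ~(4096 - 1) = ~0xFFF gives 0x3000 = 12288, so
 * IAVF_MAX_DATA_PER_TXD_ALIGNED is 12K.  That 12K figure is what the
 * descriptor-count estimate below divides by.
 */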

/**
 * iavf_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int iavf_txd_use_count(unsigned int size)
{
        return ((size * 85) >> 20) + 1;
}
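
/* Worked example (illustrative only) of the shift-and-multiply estimate:
 *
 *     size = 1500  (one MTU-sized buffer)
 *         (1500 * 85) >> 20 = 127500 >> 20 = 0    ->  0 + 1 = 1 descriptor
 *
 *     size = 60000 (a large TSO payload chunk)
 *         (60000 * 85) >> 20 = 5100000 >> 20 = 4  ->  4 + 1 = 5 descriptors
 *
 * The second result matches the exact computation ceil(60000 / 12288) = 5,
 * showing that the approximation only starts to diverge well beyond any
 * realistic GSO size, as the comment above explains.
 */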

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING   4

#define IAVF_TX_FLAGS_HW_VLAN           BIT(1)
#define IAVF_TX_FLAGS_SW_VLAN           BIT(2)
#define IAVF_TX_FLAGS_TSO               BIT(3)
#define IAVF_TX_FLAGS_IPV4              BIT(4)
#define IAVF_TX_FLAGS_IPV6              BIT(5)
#define IAVF_TX_FLAGS_FCCRC             BIT(6)
#define IAVF_TX_FLAGS_FSO               BIT(7)
#define IAVF_TX_FLAGS_FD_SB             BIT(9)
#define IAVF_TX_FLAGS_VXLAN_TUNNEL      BIT(10)
#define IAVF_TX_FLAGS_VLAN_MASK         0xffff0000
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK    0xe0000000
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT   29
#define IAVF_TX_FLAGS_VLAN_SHIFT        16

struct iavf_tx_buffer {
        struct iavf_tx_desc *next_to_watch;
        union {
                struct sk_buff *skb;
                void *raw_buf;
        };
        unsigned int bytecount;
        unsigned short gso_segs;

        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
};

struct iavf_rx_buffer {
        dma_addr_t dma;
        struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
        __u32 page_offset;
#else
        __u16 page_offset;
#endif
        __u16 pagecnt_bias;
};

struct iavf_queue_stats {
        u64 packets;
        u64 bytes;
};

struct iavf_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
        u64 tx_done_old;
        u64 tx_linearize;
        u64 tx_force_wb;
        int prev_pkt_ctr;
        u64 tx_lost_interrupt;
};

struct iavf_rx_queue_stats {
        u64 non_eop_descs;
        u64 alloc_page_failed;
        u64 alloc_buff_failed;
        u64 page_reuse_count;
        u64 realloc_count;
};

enum iavf_ring_state_t {
        __IAVF_TX_FDIR_INIT_DONE,
        __IAVF_TX_XPS_INIT_DONE,
        __IAVF_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define IAVF_RX_DTYPE_NO_SPLIT      0
#define IAVF_RX_DTYPE_HEADER_SPLIT  1
#define IAVF_RX_DTYPE_SPLIT_ALWAYS  2
#define IAVF_RX_SPLIT_L2      0x1
#define IAVF_RX_SPLIT_IP      0x2
#define IAVF_RX_SPLIT_TCP_UDP 0x4
#define IAVF_RX_SPLIT_SCTP    0x8
/* struct that defines a descriptor ring, associated with a VSI */
struct iavf_ring {
        struct iavf_ring *next;         /* pointer to next ring in q_vector */
        void *desc;                     /* Descriptor ring memory */
        struct device *dev;             /* Used for DMA mapping */
        struct net_device *netdev;      /* netdev ring maps to */
        union {
                struct iavf_tx_buffer *tx_bi;
                struct iavf_rx_buffer *rx_bi;
        };
        DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
        u16 queue_index;                /* Queue number of ring */
        u8 dcb_tc;                      /* Traffic class of ring */
        u8 __iomem *tail;

        /* high bit set means dynamic, use accessor routines to read/write.
         * hardware only supports 2us resolution for the ITR registers.
         * these values always store the USER setting, and must be converted
         * before programming to a register.
         */
        u16 itr_setting;

        u16 count;                      /* Number of descriptors */
        u16 reg_idx;                    /* HW register index of the ring */
        u16 rx_buf_len;

        /* used in interrupt processing */
        u16 next_to_use;
        u16 next_to_clean;

        u8 atr_sample_rate;
        u8 atr_count;

        bool ring_active;               /* is ring online or not */
        bool arm_wb;            /* do something to arm write back */
        u8 packet_stride;

        u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR                BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED        BIT(1)

        /* stats structs */
        struct iavf_queue_stats stats;
        struct u64_stats_sync syncp;
        union {
                struct iavf_tx_queue_stats tx_stats;
                struct iavf_rx_queue_stats rx_stats;
        };

        unsigned int size;              /* length of descriptor ring in bytes */
        dma_addr_t dma;                 /* physical address of ring */

        struct iavf_vsi *vsi;           /* Backreference to associated VSI */
        struct iavf_q_vector *q_vector; /* Backreference to associated vector */

        struct rcu_head rcu;            /* to avoid race on free */
        u16 next_to_alloc;
        struct sk_buff *skb;            /* When iavf_clean_rx_ring_irq() must
                                         * return before it sees the EOP for
                                         * the current packet, we save that skb
                                         * here and resume receiving this
                                         * packet the next time
                                         * iavf_clean_rx_ring_irq() is called
                                         * for this ring.
                                         */
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct iavf_ring *ring)
{
        return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
{
        ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
{
        ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

#define IAVF_ITR_ADAPTIVE_MIN_INC       0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS     0x0002
#define IAVF_ITR_ADAPTIVE_MAX_USECS     0x007e
#define IAVF_ITR_ADAPTIVE_LATENCY       0x8000
#define IAVF_ITR_ADAPTIVE_BULK          0x0000
#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))

struct iavf_ring_container {
        struct iavf_ring *ring;         /* pointer to linked list of ring(s) */
        unsigned long next_update;      /* jiffies value of next update */
        unsigned int total_bytes;       /* total bytes processed this int */
        unsigned int total_packets;     /* total packets processed this int */
        u16 count;
        u16 target_itr;                 /* target ITR setting for ring(s) */
        u16 current_itr;                /* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define iavf_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
{
#if (PAGE_SIZE < 8192)
        if (ring->rx_buf_len > (PAGE_SIZE / 2))
                return 1;
#endif
        return 0;
}

#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
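
/* Worked example (illustrative only): with 4K pages, a ring configured for
 * IAVF_RXBUFFER_3072 no longer fits two buffers in one page
 * (3072 > 4096 / 2), so iavf_rx_pg_order() returns 1 and iavf_rx_pg_size()
 * becomes 8192, i.e. each Rx buffer is backed by an order-1 (two page)
 * allocation.  With IAVF_RXBUFFER_2048 the order stays 0 and two buffers
 * can share a single 4K page.
 */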

bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
void iavf_clean_rx_ring(struct iavf_ring *rx_ring);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);

/**
 * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb.  The result is
 * always at least one, since even a purely linear skb consumes a descriptor.
 **/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
        const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        int count = 0, size = skb_headlen(skb);

        for (;;) {
                count += iavf_txd_use_count(size);

                if (!nr_frags--)
                        break;

                size = skb_frag_size(frag++);
        }

        return count;
}

/**
 * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
        if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
                return 0;
        return __iavf_maybe_stop_tx(tx_ring, size);
}

/**
 * iavf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
        /* Both TSO and single send will work if count is less than 8 */
        if (likely(count < IAVF_MAX_BUFFER_TXD))
                return false;

        if (skb_is_gso(skb))
                return __iavf_chk_linearize(skb);

        /* we can support up to 8 data buffers for a single send */
        return count != IAVF_MAX_BUFFER_TXD;
}
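
/* Putting the Tx helpers together: a minimal sketch (illustrative only, not
 * the driver's actual transmit routine) of how a caller might combine
 * iavf_xmit_descriptor_count(), iavf_chk_linearize() and
 * iavf_maybe_stop_tx().  The number of extra descriptors reserved for
 * context/metadata (example_ctx_descs) is an assumption made up for this
 * example.
 */
static inline int iavf_example_tx_prepare(struct iavf_ring *tx_ring,
                                          struct sk_buff *skb)
{
        const int example_ctx_descs = 4;  /* assumed headroom, see above */
        int count = iavf_xmit_descriptor_count(skb);

        /* Too many fragments for the hardware's 8-buffer limit?  Flatten
         * the skb into a single linear buffer and recompute the estimate.
         */
        if (iavf_chk_linearize(skb, count)) {
                if (__skb_linearize(skb))
                        return -ENOMEM; /* caller should drop the skb */
                count = iavf_txd_use_count(skb->len);
        }

        /* Make sure the ring has room for the data plus context
         * descriptors; otherwise the queue gets stopped and the caller
         * should retry later.
         */
        if (iavf_maybe_stop_tx(tx_ring, count + example_ctx_descs))
                return -EBUSY;

        return count;
}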

/**
 * txring_txq - Find the netdev Tx queue that backs an iavf Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _IAVF_TXRX_H_ */