linux/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */

#define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
#define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
#define I40E_ITR_100K              0x0005
#define I40E_ITR_50K               0x000A
#define I40E_ITR_20K               0x0019
#define I40E_ITR_18K               0x001B
#define I40E_ITR_8K                0x003E
#define I40E_ITR_4K                0x007A
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF            I40E_ITR_20K
#define I40E_ITR_TX_DEF            I40E_ITR_20K
#define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
#define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK      256
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define I40E_INTRL_8K              125     /* 8000 ints/sec */
#define I40E_INTRL_62K             16      /* 62500 ints/sec */
#define I40E_INTRL_83K             12      /* 83333 ints/sec */
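
/* Worked example (illustrative, not part of the original driver): the user
 * visible ITR/INTRL settings are kept in microseconds, while the registers
 * use 2 usec (ITR) and 4 usec (INTRL) resolution, so the conversion macros
 * above reduce to simple shifts:
 *
 *   ITR_TO_REG(I40E_ITR_8K | I40E_ITR_DYNAMIC)
 *       = (0x803E & ~0x8000) >> 1 = 0x3E >> 1 = 0x1F
 *   ITR_REG_TO_USEC(0x1F)   = 0x1F << 1 = 62 usec
 *
 *   INTRL_USEC_TO_REG(I40E_INTRL_8K)
 *       = (125 >> 2) | INTRL_ENA = 0x1F | 0x40 = 0x5F
 *   INTRL_REG_TO_USEC(0x5F) = (0x5F & ~0x40) << 2 = 124 usec
 *
 * Note the INTRL round trip loses the low two bits (125 usec becomes
 * 124 usec) because the register only has 4 usec granularity.
 */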

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
        I40E_IDX_ITR0 = 0,
        I40E_IDX_ITR1 = 1,
        I40E_IDX_ITR2 = 2,
        I40E_ITR_NONE = 3       /* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR    I40E_IDX_ITR0
#define I40E_TX_ITR    I40E_IDX_ITR1
#define I40E_PE_ITR    I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
        BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
        (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
          I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072   /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096  4096
#define I40E_RXBUFFER_8192  8192
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
                                     const u64 stat_err_bits)
{
        return !!(rx_desc->wb.qword1.status_error_len &
                  cpu_to_le64(stat_err_bits));
}
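
/* Illustrative usage sketch (not part of the original driver): an Rx clean
 * routine would typically poll the descriptor done (DD) bit this way, with
 * the status bit definition assumed to come from i40e_type.h:
 *
 *      if (!i40e_test_staterr(rx_desc,
 *                             BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *              break;
 */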

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define I40E_RX_BUFFER_WRITE    16      /* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
        do {                                    \
                (i)++;                          \
                if ((i) == (r)->count)          \
                        i = 0;                  \
                r->next_to_clean = i;           \
        } while (0)

#define I40E_RX_NEXT_DESC(r, i, n)              \
        do {                                    \
                (i)++;                          \
                if ((i) == (r)->count)          \
                        i = 0;                  \
                (n) = I40E_RX_DESC((r), (i));   \
        } while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)             \
        do {                                            \
                I40E_RX_NEXT_DESC((r), (i), (n));       \
                prefetch((n));                          \
        } while (0)

#define I40E_MAX_BUFFER_TXD     8
#define I40E_MIN_TX_LEN         17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE          4096
#define I40E_MAX_DATA_PER_TXD           (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
        (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
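
/* For reference (illustrative note, not in the original source):
 * I40E_MAX_DATA_PER_TXD_ALIGNED evaluates to (16383 & ~4095) == 12288,
 * i.e. the 12K per-descriptor figure that i40e_txd_use_count() below
 * divides by.
 */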

/**
 * i40e_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
        return ((size * 85) >> 20) + 1;
}
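
/* Worked examples (illustrative, not part of the original driver):
 *
 *   i40e_txd_use_count(9000)  = ((9000  * 85) >> 20) + 1 = 0 + 1 = 1
 *   i40e_txd_use_count(32768) = ((32768 * 85) >> 20) + 1 = 2 + 1 = 3
 *
 * which matches a true divide-by-12K rounded up: a 9000 byte jumbo frame
 * fits in one descriptor, while 32K of data needs three 12K chunks.
 */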

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING   4

#define I40E_TX_FLAGS_HW_VLAN           BIT(1)
#define I40E_TX_FLAGS_SW_VLAN           BIT(2)
#define I40E_TX_FLAGS_TSO               BIT(3)
#define I40E_TX_FLAGS_IPV4              BIT(4)
#define I40E_TX_FLAGS_IPV6              BIT(5)
#define I40E_TX_FLAGS_FCCRC             BIT(6)
#define I40E_TX_FLAGS_FSO               BIT(7)
#define I40E_TX_FLAGS_FD_SB             BIT(9)
#define I40E_TX_FLAGS_VXLAN_TUNNEL      BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK         0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK    0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT   29
#define I40E_TX_FLAGS_VLAN_SHIFT        16

struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
        union {
                struct sk_buff *skb;
                void *raw_buf;
        };
        unsigned int bytecount;
        unsigned short gso_segs;

        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
};

struct i40e_rx_buffer {
        dma_addr_t dma;
        struct page *page;
        unsigned int page_offset;
};

struct i40e_queue_stats {
        u64 packets;
        u64 bytes;
};

struct i40e_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
        u64 tx_done_old;
        u64 tx_linearize;
        u64 tx_force_wb;
        u64 tx_lost_interrupt;
};

struct i40e_rx_queue_stats {
        u64 non_eop_descs;
        u64 alloc_page_failed;
        u64 alloc_buff_failed;
        u64 page_reuse_count;
        u64 realloc_count;
};

enum i40e_ring_state_t {
        __I40E_TX_FDIR_INIT_DONE,
        __I40E_TX_XPS_INIT_DONE,
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
        struct i40e_ring *next;         /* pointer to next ring in q_vector */
        void *desc;                     /* Descriptor ring memory */
        struct device *dev;             /* Used for DMA mapping */
        struct net_device *netdev;      /* netdev ring maps to */
        union {
                struct i40e_tx_buffer *tx_bi;
                struct i40e_rx_buffer *rx_bi;
        };
        unsigned long state;
        u16 queue_index;                /* Queue number of ring */
        u8 dcb_tc;                      /* Traffic class of ring */
        u8 __iomem *tail;
        /* high bit set means dynamic, use accessor routines to read/write.
         * hardware only supports 2us resolution for the ITR registers.
         * these values always store the USER setting, and must be converted
         * before programming to a register.
         */
        u16 rx_itr_setting;
        u16 tx_itr_setting;

        u16 count;                      /* Number of descriptors */
        u16 reg_idx;                    /* HW register index of the ring */
        u16 rx_buf_len;

        /* used in interrupt processing */
        u16 next_to_use;
        u16 next_to_clean;

        u8 atr_sample_rate;
        u8 atr_count;

        bool ring_active;               /* is ring online or not */
        bool arm_wb;            /* do something to arm write back */
        u8 packet_stride;

        u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR        BIT(0)

        /* stats structs */
        struct i40e_queue_stats stats;
        struct u64_stats_sync syncp;
        union {
                struct i40e_tx_queue_stats tx_stats;
                struct i40e_rx_queue_stats rx_stats;
        };

        unsigned int size;              /* length of descriptor ring in bytes */
        dma_addr_t dma;                 /* physical address of ring */

        struct i40e_vsi *vsi;           /* Backreference to associated VSI */
        struct i40e_q_vector *q_vector; /* Backreference to associated vector */

        struct rcu_head rcu;            /* to avoid race on free */
        u16 next_to_alloc;
        struct sk_buff *skb;            /* When i40evf_clean_rx_ring_irq() must
                                         * return before it sees the EOP for
                                         * the current packet, we save that skb
                                         * here and resume receiving this
                                         * packet the next time
                                         * i40evf_clean_rx_ring_irq() is called
                                         * for this ring.
                                         */
} ____cacheline_internodealigned_in_smp;

enum i40e_latency_range {
        I40E_LOWEST_LATENCY = 0,
        I40E_LOW_LATENCY = 1,
        I40E_BULK_LATENCY = 2,
        I40E_ULTRA_LATENCY = 3,
};

struct i40e_ring_container {
        /* array of pointers to rings */
        struct i40e_ring *ring;
        unsigned int total_bytes;       /* total bytes processed this int */
        unsigned int total_packets;     /* total packets processed this int */
        u16 count;
        enum i40e_latency_range latency_range;
        u16 itr;
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
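
/* Illustrative usage sketch (not part of the original driver): a NAPI poll
 * routine would typically walk every ring attached to a vector's Tx or Rx
 * container like this, where i40e_clean_tx_irq() stands in for whatever
 * per-ring clean routine the caller uses:
 *
 *      i40e_for_each_ring(ring, q_vector->tx)
 *              clean_complete &= i40e_clean_tx_irq(vsi, ring, budget);
 */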

bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: Tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
        void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

        return le32_to_cpu(*(volatile __le32 *)head);
}
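
/* Illustrative note (not part of the original driver): with head writeback
 * the hardware DMAs its current head index into the memory immediately past
 * the final descriptor, so the ring allocation is assumed to reserve an
 * extra u32 there. A clean routine could then derive the number of
 * completed descriptors as:
 *
 *      head = i40e_get_head(tx_ring);
 *      done = (head >= tx_ring->next_to_clean) ?
 *             head - tx_ring->next_to_clean :
 *             head + tx_ring->count - tx_ring->next_to_clean;
 */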

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns the number of data descriptors needed for this skb, counting the
 * linear head area and every fragment; the result is always at least one
 * descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
        const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        int count = 0, size = skb_headlen(skb);

        for (;;) {
                count += i40e_txd_use_count(size);

                if (!nr_frags--)
                        break;

                size = skb_frag_size(frag++);
        }

        return count;
}

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
        if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
                return 0;
        return __i40evf_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
        /* Both TSO and single send will work if count is less than 8 */
        if (likely(count < I40E_MAX_BUFFER_TXD))
                return false;

        if (skb_is_gso(skb))
                return __i40evf_chk_linearize(skb);

        /* we can support up to 8 data buffers for a single send */
        return count != I40E_MAX_BUFFER_TXD;
}
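
/* Illustrative usage sketch (not part of the original driver): a transmit
 * routine would typically combine the helpers above roughly like this, with
 * the extra descriptors on the stop check covering a gap behind the head
 * plus a possible context descriptor:
 *
 *      count = i40e_xmit_descriptor_count(skb);
 *      if (i40e_chk_linearize(skb, count)) {
 *              if (__skb_linearize(skb))
 *                      goto out_drop;
 *              count = i40e_txd_use_count(skb->len);
 *      }
 *      if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1))
 *              return NETDEV_TX_BUSY;
 */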

/**
 * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
 * @ptype: the packet type field from Rx descriptor write-back
 **/
static inline bool i40e_rx_is_fcoe(u16 ptype)
{
        return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
               (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */