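/* i40e_txrx.h - Tx/Rx ring, descriptor, and interrupt moderation
 * definitions for the i40evf (Intel XL710 family virtual function) driver.
 */
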
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

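/* Interrupt Throttling (ITR) definitions.  The ITR registers have a
 * 2 usec resolution: ITR_TO_REG() converts a driver setting (which may
 * carry the I40E_ITR_DYNAMIC flag in its top bit) to the register
 * encoding, and ITR_REG_TO_USEC() converts a register value back to
 * microseconds.
 */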
#define I40E_MAX_ITR 0x0FF0	/* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0001	/* reg uses 2 usec resolution */
#define I40E_ITR_100K 0x0005
#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019
#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B	/* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF I40E_ITR_20K
#define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000	/* use top bit as a flag */
#define I40E_MIN_INT_RATE 250
#define I40E_MAX_INT_RATE 500000
#define I40E_DEFAULT_IRQ_WORK 256
#define ITR_TO_REG(setting) (((setting) & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) ((itr_reg) << 1)
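
/* A minimal usage sketch of the conversion macros: a dynamic 50 usec
 * setting round-trips as
 *
 *	u16 setting = 50 | I40E_ITR_DYNAMIC;
 *
 *	ITR_IS_DYNAMIC(setting)	-> true
 *	ITR_TO_REG(setting)	-> 25 (register units of 2 usec)
 *	ITR_REG_TO_USEC(25)	-> 50
 */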

/* Interrupt rate limiting (INTRL): bit 6 of the register is the enable
 * bit and must be set whenever the rate limit is non-zero; the remaining
 * bits hold the limit interval in units of 4 usec.
 */
#define INTRL_ENA BIT(6)
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define I40E_INTRL_8K 125	/* 8000 ints/sec == 125 usec */
#define I40E_INTRL_62K 16	/* 62500 ints/sec == 16 usec */
#define I40E_INTRL_83K 12	/* 83333 ints/sec == 12 usec */

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* These values are indexes into the hardware ITR_INDX fields of the
 * dynamic interrupt control registers; I40E_ITR_NONE is a special value
 * meaning "don't update the ITR" and must not be used as a register index.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3
};

#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
#define I40E_PE_ITR I40E_IDX_ITR2

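/* Default set of packet classifier types (PCTYPEs) hashed by RSS.  The
 * _EXPANDED variant adds PCTYPEs that only some hardware (e.g. X722-class
 * parts) can classify.
 */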
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Supported Rx buffer sizes */
#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_1536 1536	/* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072	/* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER 9728	/* largest size for single descriptor */

/* I40E_RX_HDR_SIZE bounds how much of a small frame is copied into the
 * skb head; anything larger is attached to the skb as a page fragment.
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_32byte_rx_desc

/* Rx pages are mapped with these attributes: the driver syncs buffers
 * explicitly (DMA_ATTR_SKIP_CPU_SYNC) and tolerates weakly ordered
 * accesses (DMA_ATTR_WEAK_ORDERING).
 */
#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* With PAGE_SIZE < 8192 each page is split into two half-page buffers.
 * Once NET_SKB_PAD headroom and the skb_shared_info overhead are carved
 * out of a 2K buffer there may not be room left for a padded 1536 byte
 * frame; I40E_2K_TOO_SMALL_WITH_PADDING detects that case so the driver
 * can fall back to 3K buffers in an order-1 page instead.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}
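
/* Example with 4K pages: i40e_compute_pad(1536) rounds the buffer up to a
 * 2K half page, subtracts the skb_shared_info overhead counted by
 * SKB_WITH_OVERHEAD(), and returns whatever is left as usable headroom.
 */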

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
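
/* Typical use, assuming the Rx descriptor status bit definitions from
 * i40e_type.h, e.g. testing whether a descriptor is done:
 *
 *	if (i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		...
 */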

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16	/* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(r)->next_to_clean = (i);	\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE 4096
#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
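
/* I40E_MAX_DATA_PER_TXD is 16383 (0x3FFF); masking off the low 12 bits
 * yields I40E_MAX_DATA_PER_TXD_ALIGNED == 12288 (12K), the per-descriptor
 * payload limit the driver actually uses.
 */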

/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     to divide by 4K, shift right by 12 bits
 *     to divide by 3, multiply by 85, then divide by 256
 *     (divide by 256 is a shift right by 8 bits)
 * combining the two shifts gives the single ">> 20" below.  Finally, we
 * add one to round up.  Because 85/256 slightly underestimates 1/3, the
 * result can be one low just above each multiple of 12K, which is still
 * safe since a descriptor can really hold up to 16K - 1 bytes.
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
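
/* Example: a 64KB TSO payload needs ceil(65536 / 12288) == 6 descriptors;
 * the approximation agrees: ((65536 * 85) >> 20) + 1 == 5 + 1 == 6.
 */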

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4

#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 BIT(5)
#define I40E_TX_FLAGS_FCCRC BIT(6)
#define I40E_TX_FLAGS_FSO BIT(7)
#define I40E_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
};

/* Rx descriptor 'dtype' values and header split flags; header split is
 * only of interest to the virtchnl interface.
 */
#define I40E_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* descriptor ring memory */
	struct device *dev;		/* used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	unsigned long state;
	u16 queue_index;		/* queue number of ring */
	u8 dcb_tc;			/* traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)

	/* stats structs */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* when the Rx clean routine must
					 * return before it sees the EOP for
					 * the current packet, we save that
					 * skb here and resume receiving the
					 * packet on the next call for this
					 * ring.
					 */
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
};

struct i40e_ring_container {
	/* array of pointers to rings */
	struct i40e_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	unsigned long last_itr_update;	/* jiffies of last ITR update */
	u16 count;
	enum i40e_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
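
/* Example: walking every ring attached to a ring container (q_vector->tx
 * is assumed to be a struct i40e_ring_container field, as in the driver's
 * q_vector):
 *
 *	struct i40e_ring *ring;
 *
 *	i40e_for_each_ring(ring, q_vector->tx)
 *		i40evf_clean_tx_ring(ring);
 */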

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
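
/* With 4K pages a 3K Rx buffer no longer fits in a half page, so
 * i40e_rx_pg_order() returns 1 and i40e_rx_pg_size() becomes 8K; 2K
 * buffers keep using order-0 pages split in half.
 */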

bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);

/**
 * i40e_xmit_descriptor_count - calculate the number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb, counting
 * the linear data and every fragment.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40evf_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: the hardware can't scatter-gather more than 8 fragments to build
 * a packet on the wire, so we need to figure out the cases where we need
 * to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40evf_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}

/**
 * txring_txq - find the netdev queue for a given Tx ring
 * @ring: Tx ring to find the netdev queue for
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */