1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

/* Interrupt Throttling (ITR) settings.  The hardware register holds the
 * interval in 2 usec units (see ITR_REG_TO_USEC below), so each _NNK name
 * is the value giving roughly NN-thousand interrupts per second.
 */
#define I40E_MAX_ITR 0x0FF0
#define I40E_MIN_ITR 0x0001
#define I40E_ITR_100K 0x0005
#define I40E_ITR_50K 0x000A
#define I40E_ITR_20K 0x0019
#define I40E_ITR_18K 0x001B
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B	/* largest interrupt rate limit reg value */
#define I40E_ITR_RX_DEF I40E_ITR_20K
#define I40E_ITR_TX_DEF I40E_ITR_20K
#define I40E_ITR_DYNAMIC 0x8000	/* flag bit: ITR is adaptively tuned */
#define I40E_MIN_INT_RATE 250
#define I40E_MAX_INT_RATE 500000
#define I40E_DEFAULT_IRQ_WORK 256

/* An ITR "setting" carries the dynamic flag in bit 15 and the interval in
 * usecs in the low bits; the register value is that interval halved.
 */
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)

/* Interrupt rate limiting (INTRL) registers count in 4 usec units and are
 * gated by an enable bit.
 */
#define INTRL_ENA BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
61
62static inline u16 i40e_intrl_usec_to_reg(int intrl)
63{
64 if (intrl >> 2)
65 return ((intrl >> 2) | INTRL_ENA);
66 else
67 return 0;
68}
/* Canned interrupt rate limits, expressed in usecs; the suffix names the
 * resulting approximate interrupts/sec (e.g. 125 usecs ~= 8K ints/sec).
 */
#define I40E_INTRL_8K 125
#define I40E_INTRL_62K 16
#define I40E_INTRL_83K 12

/* Sentinel index marking the end of a hardware queue linked list */
#define I40E_QUEUE_END_OF_LIST 0x7FF
79
/* Indexes selecting one of the per-vector ITR registers (ITR0..ITR2);
 * I40E_ITR_NONE selects no throttling register.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3
};

/* Conventional assignment of the ITR indexes to traffic types */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
#define I40E_PE_ITR I40E_IDX_ITR2
91
92
/* Default set of packet classifier types (PCTYPEs) hashed for RSS */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

/* Additional PCTYPEs hashed when the device advertises the expanded
 * TCP/UDP RSS set (I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE).
 */
#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Pick the default HENA based on the PF's advertised PCTYPE support */
#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
117
118
/* Supported Rx buffer lengths, in bytes */
#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072
#define I40E_RXBUFFER_4096 4096
#define I40E_RXBUFFER_8192 8192
#define I40E_MAX_RXBUFFER 9728	/* largest frame in a single buffer */

/* Rx header buffer size (the smallest Rx buffer size above) */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
/* this driver uses the 32 byte Rx descriptor layout throughout */
#define i40e_rx_desc i40e_32byte_rx_desc
135
136
137
138
139
140
141
142
143
144
145
146static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
147 const u64 stat_err_bits)
148{
149 return !!(rx_desc->wb.qword1.status_error_len &
150 cpu_to_le64(stat_err_bits));
151}
152
/* Number of Rx buffers to bundle before a tail/writeback update */
#define I40E_RX_BUFFER_WRITE 16

/* Advance index @i on ring @r, wrapping at the ring size, and record the
 * new position in next_to_clean.
 *
 * Fix: every use of the @r and @i arguments is parenthesized; the
 * previous form expanded `r->next_to_clean = i` and `i = 0` unguarded,
 * which miscompiles when callers pass expressions such as `&ring`.
 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(r)->next_to_clean = (i);	\
	} while (0)

/* Advance @i with wrap-around and load the descriptor at the new index
 * into @n.
 */
#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

/* As I40E_RX_NEXT_DESC, but also prefetch the new descriptor */
#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)
176
#define I40E_MAX_BUFFER_TXD 8	/* max data descriptors for one frame */
#define I40E_MIN_TX_LEN 17	/* minimum Tx frame length, in bytes */

/* The per-descriptor data limit is 16K - 1 bytes; to keep buffers aligned
 * with the device's maximum read request size, round that limit down to a
 * 4K boundary (12K).
 */
#define I40E_MAX_READ_REQ_SIZE 4096
#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
/**
 * i40e_txd_use_count - estimate descriptors needed for a buffer chunk
 * @size: transmit buffer length in bytes
 *
 * Each data descriptor carries at most I40E_MAX_DATA_PER_TXD_ALIGNED
 * (12K) bytes.  Division is avoided: (size * 85) >> 20 approximates
 * size / 12288 (85 / 2^20 is slightly under 1/12288), and adding one
 * rounds the estimate up so even a zero-length chunk costs a descriptor.
 **/
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	unsigned int approx_quotient = (size * 85) >> 20;

	return approx_quotient + 1;
}
221
222
/* Worst-case descriptor budget for one skb: one per possible fragment
 * plus slack.  NOTE(review): the "+ 4" margin presumably covers context
 * descriptors and the head -- confirm against the xmit path.
 */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4

/* Tx flags; the upper 16 bits carry the VLAN tag (see the _VLAN_ masks
 * and shifts at the end of this list).
 */
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 BIT(5)
#define I40E_TX_FLAGS_FCCRC BIT(6)
#define I40E_TX_FLAGS_FSO BIT(7)
#define I40E_TX_FLAGS_TSYN BIT(8)
#define I40E_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16
240
/* Software bookkeeping for one Tx slot: the buffer being sent, its DMA
 * mapping, and the accounting consumed on completion.
 */
struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;	/* descriptor polled for completion */
	union {
		struct sk_buff *skb;	/* normal stack transmit */
		void *raw_buf;		/* non-skb buffer */
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);	/* DMA handle for unmap */
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;			/* I40E_TX_FLAGS_* */
};
254
/* Software state for one Rx ring slot: the DMA-mapped page backing it */
struct i40e_rx_buffer {
	dma_addr_t dma;			/* DMA address of the mapped page */
	struct page *page;
	unsigned int page_offset;	/* offset into @page of buffer data */
};
260
/* Packet/byte counters common to Tx and Rx rings */
struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

/* Tx-only counters (selected via the union in struct i40e_ring) */
struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
};

/* Rx-only counters (selected via the union in struct i40e_ring) */
struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

/* Bit numbers for the i40e_ring.state bitmap */
enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
};
287
288
289
290
/* Rx descriptor type values controlling header split behavior */
#define I40E_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
/* Bitmask selecting which header layers to split on */
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8
298
299
/* struct i40e_ring - one descriptor ring plus its software state; used
 * for both Tx and Rx (the buffer-info union selects the flavor).
 */
struct i40e_ring {
	struct i40e_ring *next;		/* next ring on the same vector */
	void *desc;			/* descriptor ring memory */
	struct device *dev;		/* used for DMA mapping */
	struct net_device *netdev;	/* netdev this ring belongs to */
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	unsigned long state;		/* __I40E_TX_* bits */
	u16 queue_index;		/* queue number of ring */
	u8 dcb_tc;			/* traffic class of ring */
	u8 __iomem *tail;		/* mapped tail register */

	/* ITR settings: high bit (I40E_ITR_DYNAMIC) means adaptive, the
	 * rest is the throttle interval in usecs.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* number of descriptors */
	u16 reg_idx;			/* hardware register index */
	u16 rx_buf_len;			/* Rx buffer length (I40E_RXBUFFER_*) */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;		/* flow director ATR sampling */
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* request a descriptor write-back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)

	/* stats structs */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;	/* protects the stats on 32-bit */
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* backreference to owning VSI */
	struct i40e_q_vector *q_vector;	/* backreference to owning vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	/* NOTE(review): @skb presumably holds a partially-received packet
	 * carried across napi polls -- confirm against the Rx clean path.
	 */
	struct sk_buff *skb;
} ____cacheline_internodealigned_in_smp;
365
/* Latency buckets used by the adaptive (dynamic) ITR algorithm */
enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
	I40E_ULTRA_LATENCY = 3,
};

/* A group of rings serviced together for interrupt moderation */
struct i40e_ring_container {
	struct i40e_ring *ring;		/* head of linked list of rings */
	unsigned int total_bytes;	/* accumulated between ITR updates */
	unsigned int total_packets;	/* accumulated between ITR updates */
	u16 count;			/* number of rings in the list */
	enum i40e_latency_range latency_range;
	u16 itr;			/* current ITR setting */
};
382
383
/* Iterate over every ring linked into a ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

/* Tx/Rx entry points implemented in the companion .c file */
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
#ifdef I40E_FCOE
/* FCoE builds expose some Tx internals to the FCoE transmit path */
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
		 struct i40e_tx_buffer *first, u32 tx_flags,
		 const u8 hdr_len, u32 td_cmd, u32 td_offset);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
			       struct i40e_ring *tx_ring, u32 *flags);
#endif
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
407
408
409
410
411
412
413
414
/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns the value of the Tx ring head based on the value stored in
 * the head write-back location.
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	/* the head write-back word lives just past the last descriptor;
	 * volatile because hardware updates it behind our back
	 */
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
421
422
423
424
425
426
427
428
429
430
431static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
432{
433 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
434 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
435 int count = 0, size = skb_headlen(skb);
436
437 for (;;) {
438 count += i40e_txd_use_count(size);
439
440 if (!nr_frags--)
441 break;
442
443 size = skb_frag_size(frag++);
444 }
445
446 return count;
447}
448
449
450
451
452
453
454
455
/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 when stopping is not needed; otherwise defers to the
 * out-of-line slow path.
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (unlikely(I40E_DESC_UNUSED(tx_ring) < size))
		return __i40e_maybe_stop_tx(tx_ring, size);

	return 0;
}
462
463
464
465
466
467
468
469
470
471
472static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
473{
474
475 if (likely(count < I40E_MAX_BUFFER_TXD))
476 return false;
477
478 if (skb_is_gso(skb))
479 return __i40e_chk_linearize(skb);
480
481
482 return count != I40E_MAX_BUFFER_TXD;
483}
484
485
486
487
488
489static inline bool i40e_rx_is_fcoe(u16 ptype)
490{
491 return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
492 (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
493}
494
495
496
497
498
499static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
500{
501 return netdev_get_tx_queue(ring->netdev, ring->queue_index);
502}
503#endif
504