1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#ifndef _I40E_TXRX_H_
28#define _I40E_TXRX_H_
29
30
31
32#define I40E_MAX_ITR 0x0FF0
33#define I40E_MIN_ITR 0x0001
34#define I40E_ITR_100K 0x0005
35#define I40E_ITR_50K 0x000A
36#define I40E_ITR_20K 0x0019
37#define I40E_ITR_18K 0x001B
38#define I40E_ITR_8K 0x003E
39#define I40E_ITR_4K 0x007A
40#define I40E_MAX_INTRL 0x3B
41#define I40E_ITR_RX_DEF I40E_ITR_20K
42#define I40E_ITR_TX_DEF I40E_ITR_20K
43#define I40E_ITR_DYNAMIC 0x8000
44#define I40E_MIN_INT_RATE 250
45#define I40E_MAX_INT_RATE 500000
46#define I40E_DEFAULT_IRQ_WORK 256
47#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
48#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
49#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
50
51
52
53#define INTRL_ENA BIT(6)
54#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
55#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
56#define I40E_INTRL_8K 125
57#define I40E_INTRL_62K 16
58#define I40E_INTRL_83K 12
59
60#define I40E_QUEUE_END_OF_LIST 0x7FF
61
62
63
64
65
66
/* ITR register indexes available per interrupt vector; I40E_ITR_NONE
 * is a sentinel meaning "no ITR index", not a usable register index.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* sentinel -- do not use as an index */
};

/* Fixed assignment of traffic classes to ITR indexes */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
#define I40E_PE_ITR I40E_IDX_ITR2
78
79
/* Default hash-enable set: packet classifier types (PCTYPEs) hashed by
 * RSS out of the box -- TCP/UDP/SCTP/other over IPv4 and IPv6, plus IP
 * fragments and raw L2 payloads.
 */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

/* Extra TCP-SYN and unicast/multicast-UDP PCTYPEs, used only when the
 * device reports the MULTIPLE_TCP_UDP_RSS_PCTYPE capability (see
 * i40e_pf_get_default_rss_hena below).
 */
#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Choose the expanded set when the PF flags advertise the extra PCTYPEs */
#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
104
105
/* Supported Rx buffer lengths (all multiples of 128 bytes) */
#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072
#define I40E_RXBUFFER_4096 4096
#define I40E_RXBUFFER_8192 8192
#define I40E_MAX_RXBUFFER 9728	/* upper bound on rx_buf_len */

/* Header buffer size -- presumably used when header split is enabled
 * (see I40E_RX_DTYPE_HEADER_SPLIT below); TODO confirm at call sites.
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
/* This driver uses the 32-byte Rx descriptor layout exclusively */
#define i40e_rx_desc i40e_32byte_rx_desc
122
123
124
125
126
127
128
129
130
131
132
133static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
134 const u64 stat_err_bits)
135{
136 return !!(rx_desc->wb.qword1.status_error_len &
137 cpu_to_le64(stat_err_bits));
138}
139
140
/* Batch threshold for Rx buffer allocation/writeback -- presumably must
 * remain a power of two; confirm against the alloc path before changing.
 */
#define I40E_RX_BUFFER_WRITE 16
/* Advance Rx index @i, wrapping at the ring size, and publish the new
 * value as the ring's next_to_clean.  @i must be an lvalue.  Every
 * argument use is parenthesized so expression arguments (e.g. &ring)
 * expand correctly.
 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(r)->next_to_clean = (i);	\
	} while (0)
149
/* Advance Rx index @i, wrapping at the ring size, and load the
 * descriptor at the new index into @n.  Every argument use is
 * parenthesized so expression arguments expand correctly.
 */
#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

/* Same as I40E_RX_NEXT_DESC, but also prefetch the new descriptor */
#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

/* Data-descriptor budget per packet before the Tx path must linearize
 * (checked by i40e_chk_linearize below).
 */
#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17	/* minimum Tx frame length in bytes */
166
167
168
169
170
/* A single Tx descriptor can carry at most 16KB - 1 bytes of data.
 * Round that down to a multiple of 4K (presumably the max PCIe read
 * request size) so large buffers are chunked on aligned boundaries:
 * 16383 & ~4095 == 12288 bytes per descriptor.
 */
#define I40E_MAX_READ_REQ_SIZE 4096
#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
/**
 * i40e_txd_use_count - descriptors needed for one data buffer
 * @size: buffer length in bytes
 *
 * (size * 85) >> 20 is a division-free approximation of
 * size / 12288 (I40E_MAX_DATA_PER_TXD_ALIGNED); the +1 rounds up and
 * guarantees at least one descriptor even for a zero-length buffer.
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	unsigned int scaled = size * 85;

	return (scaled >> 20) + 1;
}
208
209
/* Worst-case Tx descriptor need for one skb: one per page fragment plus
 * headroom -- presumably head + context descriptors; TODO confirm the
 * exact accounting against the transmit path.
 */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4

/* Flag bits carried in i40e_tx_buffer.tx_flags.  The upper 16 bits hold
 * the VLAN tag (see the MASK/SHIFT values); bits 0 and 8 are unused.
 */
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 BIT(5)
#define I40E_TX_FLAGS_FCCRC BIT(6)
#define I40E_TX_FLAGS_FSO BIT(7)
#define I40E_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_VXLAN_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16
226
/* Software bookkeeping kept alongside each Tx descriptor */
struct i40e_tx_buffer {
	/* last descriptor of this packet -- presumably polled for
	 * completion during Tx cleanup; TODO confirm.
	 */
	struct i40e_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;	/* normal transmit path */
		void *raw_buf;		/* non-skb payloads -- verify users */
	};
	unsigned int bytecount;		/* bytes attributed to this buffer */
	unsigned short gso_segs;	/* segment count for this buffer */

	DEFINE_DMA_UNMAP_ADDR(dma);	/* DMA address for later unmap */
	DEFINE_DMA_UNMAP_LEN(len);	/* DMA length for later unmap */
	u32 tx_flags;			/* I40E_TX_FLAGS_*; VLAN in upper 16 bits */
};

/* Software bookkeeping kept alongside each Rx descriptor */
struct i40e_rx_buffer {
	dma_addr_t dma;			/* DMA address of the mapped page */
	struct page *page;		/* backing page for the buffer */
	unsigned int page_offset;	/* offset of the buffer within the page */
};
246
/* Packet/byte counters common to Tx and Rx rings; updated under the
 * ring's u64_stats_sync (syncp) in struct i40e_ring.
 */
struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

/* Tx-ring-only event counters */
struct i40e_tx_queue_stats {
	u64 restart_queue;	/* queue restarted after being stopped */
	u64 tx_busy;		/* xmit aborted for lack of descriptors */
	u64 tx_done_old;	/* cleanup saw no progress -- TODO confirm */
	u64 tx_linearize;	/* skbs linearized before transmit */
	u64 tx_force_wb;	/* forced descriptor write-backs */
	u64 tx_lost_interrupt;	/* work found with no interrupt -- confirm */
};

/* Rx-ring-only event counters */
struct i40e_rx_queue_stats {
	u64 non_eop_descs;	/* descriptors without end-of-packet set */
	u64 alloc_page_failed;	/* page allocation failures */
	u64 alloc_buff_failed;	/* buffer allocation failures */
	u64 page_reuse_count;	/* pages recycled instead of freed */
	u64 realloc_count;	/* buffers that had to be reallocated */
};
268
/* Bit numbers for the atomic state word in struct i40e_ring */
enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,	/* Flow Director init completed */
	__I40E_TX_XPS_INIT_DONE,	/* XPS mapping init completed */
};

/* Rx descriptor split modes and, for header split, the selectors that
 * choose where the hardware splits the packet.
 */
#define I40E_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8
284
285
/* Descriptor ring -- used for both Tx and Rx queues; the tx_bi/rx_bi
 * and tx_stats/rx_stats unions select per-direction bookkeeping.
 */
struct i40e_ring {
	struct i40e_ring *next;		/* next ring in the q_vector's list
					 * (walked by i40e_for_each_ring)
					 */
	void *desc;			/* descriptor ring memory (CPU address) */
	struct device *dev;		/* device for DMA mapping */
	struct net_device *netdev;	/* owning net device */
	union {
		struct i40e_tx_buffer *tx_bi;	/* per-descriptor Tx info */
		struct i40e_rx_buffer *rx_bi;	/* per-descriptor Rx info */
	};
	unsigned long state;		/* enum i40e_ring_state_t bits */
	u16 queue_index;		/* queue number within the netdev */
	u8 dcb_tc;			/* DCB traffic class -- TODO confirm */
	u8 __iomem *tail;		/* mapped tail register for this ring */

	/* Interrupt moderation settings in I40E_ITR_* usec encoding,
	 * possibly with I40E_ITR_DYNAMIC set for adaptive tuning.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* number of descriptors in the ring */
	u16 reg_idx;			/* hardware ring index -- confirm */
	u16 rx_buf_len;			/* Rx buffer length (I40E_RXBUFFER_*) */

	/* ring position bookkeeping */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;		/* ATR sampling -- TODO confirm semantics */
	u8 atr_count;

	bool ring_active;		/* ring is online */
	bool arm_wb;			/* request a forced write-back -- confirm */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)

	/* stats: packets/bytes protected by syncp, plus per-direction
	 * event counters in the union
	 */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* DMA (bus) address of the ring */

	struct i40e_vsi *vsi;		/* backpointer to owning VSI */
	struct i40e_q_vector *q_vector;	/* backpointer to owning q_vector */

	struct rcu_head rcu;		/* presumably for deferred freeing -- confirm */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* Rx packet in progress -- presumably
					 * held across napi polls; confirm
					 */
} ____cacheline_internodealigned_in_smp;
351
/* Load buckets -- presumably used by the adaptive (dynamic) ITR
 * algorithm to classify recent traffic; TODO confirm in the poll path.
 */
enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
	I40E_ULTRA_LATENCY = 3,
};

/* One direction (Tx or Rx) of an interrupt vector: its ring list plus
 * accumulated totals and the current ITR setting.
 */
struct i40e_ring_container {
	struct i40e_ring *ring;		/* head of linked ring list */
	unsigned int total_bytes;	/* bytes processed this interval */
	unsigned int total_packets;	/* packets processed this interval */
	u16 count;			/* number of rings -- confirm */
	enum i40e_latency_range latency_range;
	u16 itr;			/* current ITR setting */
};
368
369
/* Iterate @pos over every ring linked off container @head */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

/* Tx/Rx fast-path and ring-lifecycle entry points (defined in the
 * corresponding .c file)
 */
bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
/* slow-path halves of the inline helpers below */
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);
386
387
388
389
390
391
392
393
394static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
395{
396 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
397
398 return le32_to_cpu(*(volatile __le32 *)head);
399}
400
401
402
403
404
405
406
407
408
409
410static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
411{
412 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
413 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
414 int count = 0, size = skb_headlen(skb);
415
416 for (;;) {
417 count += i40e_txd_use_count(size);
418
419 if (!nr_frags--)
420 break;
421
422 size = skb_frag_size(frag++);
423 }
424
425 return count;
426}
427
428
429
430
431
432
433
434
/**
 * i40e_maybe_stop_tx - first-level check for Tx stop conditions
 * @tx_ring: ring to check
 * @size: number of descriptors we want available
 *
 * Fast path stays inline; only when the ring lacks @size free
 * descriptors do we take the out-of-line slow path.
 *
 * Returns 0 when no stop is needed, otherwise the slow-path result.
 */
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (unlikely(I40E_DESC_UNUSED(tx_ring) < size))
		return __i40evf_maybe_stop_tx(tx_ring, size);

	return 0;
}
441
442
443
444
445
446
447
448
449
450
451static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
452{
453
454 if (likely(count < I40E_MAX_BUFFER_TXD))
455 return false;
456
457 if (skb_is_gso(skb))
458 return __i40evf_chk_linearize(skb);
459
460
461 return count != I40E_MAX_BUFFER_TXD;
462}
463
464
465
466
467
468static inline bool i40e_rx_is_fcoe(u16 ptype)
469{
470 return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
471 (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
472}
473
474
475
476
477
478static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
479{
480 return netdev_get_tx_queue(ring->netdev, ring->queue_index);
481}
482#endif
483