28#include <linux/prefetch.h>
29#include <net/busy_poll.h>
30#include <linux/bpf_trace.h>
31#include <net/xdp.h>
32#include "i40e.h"
33#include "i40e_trace.h"
34#include "i40e_prototype.h"
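
/* build_ctob - assemble the DTYPE, command, offset, buffer size and L2 tag
 * fields into the second quad word (cmd_type_offset_bsz) of a Tx data
 * descriptor.
 */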
36static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
37 u32 td_tag)
38{
39 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
40 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
41 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
42 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
43 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
44}
45
46#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
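
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 **/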
54static void i40e_fdir(struct i40e_ring *tx_ring,
55 struct i40e_fdir_filter *fdata, bool add)
56{
57 struct i40e_filter_program_desc *fdir_desc;
58 struct i40e_pf *pf = tx_ring->vsi->back;
59 u32 flex_ptype, dtype_cmd;
60 u16 i;
61
62
63 i = tx_ring->next_to_use;
64 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
65
66 i++;
67 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
68
69 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
70 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
71
72 flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
73 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
74
75 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
76 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
82 flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
83 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
84 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
85
86 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
87
88 dtype_cmd |= add ?
89 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
90 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
91 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
92 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
93
94 dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
95 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
96
97 dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
98 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
99
100 if (fdata->cnt_index) {
101 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
102 dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
103 ((u32)fdata->cnt_index <<
104 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
105 }
106
107 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
108 fdir_desc->rsvd = cpu_to_le32(0);
109 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
110 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
111}
112
113#define I40E_FD_CLEAN_DELAY 10
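
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 *
 * Returns 0 on success, negative on failure
 **/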
121static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
122 u8 *raw_packet, struct i40e_pf *pf,
123 bool add)
124{
125 struct i40e_tx_buffer *tx_buf, *first;
126 struct i40e_tx_desc *tx_desc;
127 struct i40e_ring *tx_ring;
128 struct i40e_vsi *vsi;
129 struct device *dev;
130 dma_addr_t dma;
131 u32 td_cmd = 0;
132 u16 i;
133
134
135 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
136 if (!vsi)
137 return -ENOENT;
138
139 tx_ring = vsi->tx_rings[0];
140 dev = tx_ring->dev;
141
142
143 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
144 if (!i)
145 return -EAGAIN;
146 msleep_interruptible(1);
147 }
148
149 dma = dma_map_single(dev, raw_packet,
150 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
151 if (dma_mapping_error(dev, dma))
152 goto dma_fail;
153
154
155 i = tx_ring->next_to_use;
156 first = &tx_ring->tx_bi[i];
157 i40e_fdir(tx_ring, fdir_data, add);
158
159
160 i = tx_ring->next_to_use;
161 tx_desc = I40E_TX_DESC(tx_ring, i);
162 tx_buf = &tx_ring->tx_bi[i];
163
164 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
165
166 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
167
168
169 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
170 dma_unmap_addr_set(tx_buf, dma, dma);
171
172 tx_desc->buffer_addr = cpu_to_le64(dma);
173 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
174
175 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
176 tx_buf->raw_buf = (void *)raw_packet;
177
178 tx_desc->cmd_type_offset_bsz =
179 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
180
181
182
183
184 wmb();
185
186
187 first->next_to_watch = tx_desc;
188
189 writel(tx_ring->next_to_use, tx_ring->tail);
190 return 0;
191
192dma_fail:
193 return -1;
194}
195
196#define IP_HEADER_OFFSET 14
197#define I40E_UDPIP_DUMMY_PACKET_LEN 42
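
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/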
206static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
207 struct i40e_fdir_filter *fd_data,
208 bool add)
209{
210 struct i40e_pf *pf = vsi->back;
211 struct udphdr *udp;
212 struct iphdr *ip;
213 u8 *raw_packet;
214 int ret;
215 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
216 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
217 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
218
219 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
220 if (!raw_packet)
221 return -ENOMEM;
222 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
223
224 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
225 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
226 + sizeof(struct iphdr));
227
228 ip->daddr = fd_data->dst_ip;
229 udp->dest = fd_data->dst_port;
230 ip->saddr = fd_data->src_ip;
231 udp->source = fd_data->src_port;
232
233 if (fd_data->flex_filter) {
234 u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
235 __be16 pattern = fd_data->flex_word;
236 u16 off = fd_data->flex_offset;
237
238 *((__force __be16 *)(payload + off)) = pattern;
239 }
240
241 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
242 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
243 if (ret) {
244 dev_info(&pf->pdev->dev,
245 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
246 fd_data->pctype, fd_data->fd_id, ret);
247
248 kfree(raw_packet);
249 return -EOPNOTSUPP;
250 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
251 if (add)
252 dev_info(&pf->pdev->dev,
253 "Filter OK for PCTYPE %d loc = %d\n",
254 fd_data->pctype, fd_data->fd_id);
255 else
256 dev_info(&pf->pdev->dev,
257 "Filter deleted for PCTYPE %d loc = %d\n",
258 fd_data->pctype, fd_data->fd_id);
259 }
260
261 if (add)
262 pf->fd_udp4_filter_cnt++;
263 else
264 pf->fd_udp4_filter_cnt--;
265
266 return 0;
267}
268
269#define I40E_TCPIP_DUMMY_PACKET_LEN 54
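
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/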
278static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
279 struct i40e_fdir_filter *fd_data,
280 bool add)
281{
282 struct i40e_pf *pf = vsi->back;
283 struct tcphdr *tcp;
284 struct iphdr *ip;
285 u8 *raw_packet;
286 int ret;
287
288 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
289 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
290 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
291 0x0, 0x72, 0, 0, 0, 0};
292
293 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
294 if (!raw_packet)
295 return -ENOMEM;
296 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
297
298 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
299 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
300 + sizeof(struct iphdr));
301
302 ip->daddr = fd_data->dst_ip;
303 tcp->dest = fd_data->dst_port;
304 ip->saddr = fd_data->src_ip;
305 tcp->source = fd_data->src_port;
306
307 if (fd_data->flex_filter) {
308 u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
309 __be16 pattern = fd_data->flex_word;
310 u16 off = fd_data->flex_offset;
311
312 *((__force __be16 *)(payload + off)) = pattern;
313 }
314
315 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
316 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
317 if (ret) {
318 dev_info(&pf->pdev->dev,
319 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
320 fd_data->pctype, fd_data->fd_id, ret);
321
322 kfree(raw_packet);
323 return -EOPNOTSUPP;
324 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
325 if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
327 fd_data->pctype, fd_data->fd_id);
328 else
329 dev_info(&pf->pdev->dev,
330 "Filter deleted for PCTYPE %d loc = %d\n",
331 fd_data->pctype, fd_data->fd_id);
332 }
333
334 if (add) {
335 pf->fd_tcp4_filter_cnt++;
336 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
337 I40E_DEBUG_FD & pf->hw.debug_mask)
338 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
339 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
340 } else {
341 pf->fd_tcp4_filter_cnt--;
342 }
343
344 return 0;
345}
346
347#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
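
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/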
357static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
358 struct i40e_fdir_filter *fd_data,
359 bool add)
360{
361 struct i40e_pf *pf = vsi->back;
362 struct sctphdr *sctp;
363 struct iphdr *ip;
364 u8 *raw_packet;
365 int ret;
366
367 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
368 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
369 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
370
371 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
372 if (!raw_packet)
373 return -ENOMEM;
374 memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
375
376 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
377 sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
378 + sizeof(struct iphdr));
379
380 ip->daddr = fd_data->dst_ip;
381 sctp->dest = fd_data->dst_port;
382 ip->saddr = fd_data->src_ip;
383 sctp->source = fd_data->src_port;
384
385 if (fd_data->flex_filter) {
386 u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
387 __be16 pattern = fd_data->flex_word;
388 u16 off = fd_data->flex_offset;
389
390 *((__force __be16 *)(payload + off)) = pattern;
391 }
392
393 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
394 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
395 if (ret) {
396 dev_info(&pf->pdev->dev,
397 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
398 fd_data->pctype, fd_data->fd_id, ret);
399
400 kfree(raw_packet);
401 return -EOPNOTSUPP;
402 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
403 if (add)
404 dev_info(&pf->pdev->dev,
405 "Filter OK for PCTYPE %d loc = %d\n",
406 fd_data->pctype, fd_data->fd_id);
407 else
408 dev_info(&pf->pdev->dev,
409 "Filter deleted for PCTYPE %d loc = %d\n",
410 fd_data->pctype, fd_data->fd_id);
411 }
412
413 if (add)
414 pf->fd_sctp4_filter_cnt++;
415 else
416 pf->fd_sctp4_filter_cnt--;
417
418 return 0;
419}
420
421#define I40E_IP_DUMMY_PACKET_LEN 34
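
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/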
431static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
432 struct i40e_fdir_filter *fd_data,
433 bool add)
434{
435 struct i40e_pf *pf = vsi->back;
436 struct iphdr *ip;
437 u8 *raw_packet;
438 int ret;
439 int i;
440 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
441 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
442 0, 0, 0, 0};
443
444 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
445 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
446 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
447 if (!raw_packet)
448 return -ENOMEM;
449 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
450 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
451
452 ip->saddr = fd_data->src_ip;
453 ip->daddr = fd_data->dst_ip;
454 ip->protocol = 0;
455
456 if (fd_data->flex_filter) {
457 u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
458 __be16 pattern = fd_data->flex_word;
459 u16 off = fd_data->flex_offset;
460
461 *((__force __be16 *)(payload + off)) = pattern;
462 }
463
464 fd_data->pctype = i;
465 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
466 if (ret) {
467 dev_info(&pf->pdev->dev,
468 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
469 fd_data->pctype, fd_data->fd_id, ret);
470
471
472
473 kfree(raw_packet);
474 return -EOPNOTSUPP;
475 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
476 if (add)
477 dev_info(&pf->pdev->dev,
478 "Filter OK for PCTYPE %d loc = %d\n",
479 fd_data->pctype, fd_data->fd_id);
480 else
481 dev_info(&pf->pdev->dev,
482 "Filter deleted for PCTYPE %d loc = %d\n",
483 fd_data->pctype, fd_data->fd_id);
484 }
485 }
486
487 if (add)
488 pf->fd_ip4_filter_cnt++;
489 else
490 pf->fd_ip4_filter_cnt--;
491
492 return 0;
493}
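
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: filter to add or delete
 * @add: true adds a filter, false removes it
 **/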
502int i40e_add_del_fdir(struct i40e_vsi *vsi,
503 struct i40e_fdir_filter *input, bool add)
504{
505 struct i40e_pf *pf = vsi->back;
506 int ret;
507
508 switch (input->flow_type & ~FLOW_EXT) {
509 case TCP_V4_FLOW:
510 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
511 break;
512 case UDP_V4_FLOW:
513 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
514 break;
515 case SCTP_V4_FLOW:
516 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
517 break;
518 case IP_USER_FLOW:
519 switch (input->ip4_proto) {
520 case IPPROTO_TCP:
521 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
522 break;
523 case IPPROTO_UDP:
524 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
525 break;
526 case IPPROTO_SCTP:
527 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
528 break;
529 case IPPROTO_IP:
530 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
531 break;
532 default:
533
534 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
535 input->ip4_proto);
536 return -EINVAL;
537 }
538 break;
539 default:
540 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
541 input->flow_type);
542 return -EINVAL;
543 }
544
545
546
547
548
549
550
551 return ret;
552}
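
/**
 * i40e_fd_handle_status - check the programming status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming status, not a packet descriptor
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/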
563static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
564 union i40e_rx_desc *rx_desc, u8 prog_id)
565{
566 struct i40e_pf *pf = rx_ring->vsi->back;
567 struct pci_dev *pdev = pf->pdev;
568 u32 fcnt_prog, fcnt_avail;
569 u32 error;
570 u64 qw;
571
572 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
573 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
574 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
575
576 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
577 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
578 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
579 (I40E_DEBUG_FD & pf->hw.debug_mask))
580 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
581 pf->fd_inv);
582
583
584
585
586
587
588
589 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
590 return;
591
592 pf->fd_add_err++;
593
594 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
595
596 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
597 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
598
599
600
601
602
603
604 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
605 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
606 }
607
608
609 fcnt_prog = i40e_get_global_fd_count(pf);
610 fcnt_avail = pf->fdir_pf_filter_count;
611
612
613
614
615 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
616 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
617 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
618 pf->state))
619 if (I40E_DEBUG_FD & pf->hw.debug_mask)
620 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
621 }
622 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
623 if (I40E_DEBUG_FD & pf->hw.debug_mask)
624 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
625 rx_desc->wb.qword0.hi_dword.fd_id);
626 }
627}
628
629
630
631
632
633
634static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
635 struct i40e_tx_buffer *tx_buffer)
636{
637 if (tx_buffer->skb) {
638 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
639 kfree(tx_buffer->raw_buf);
640 else if (ring_is_xdp(ring))
641 page_frag_free(tx_buffer->raw_buf);
642 else
643 dev_kfree_skb_any(tx_buffer->skb);
644 if (dma_unmap_len(tx_buffer, len))
645 dma_unmap_single(ring->dev,
646 dma_unmap_addr(tx_buffer, dma),
647 dma_unmap_len(tx_buffer, len),
648 DMA_TO_DEVICE);
649 } else if (dma_unmap_len(tx_buffer, len)) {
650 dma_unmap_page(ring->dev,
651 dma_unmap_addr(tx_buffer, dma),
652 dma_unmap_len(tx_buffer, len),
653 DMA_TO_DEVICE);
654 }
655
656 tx_buffer->next_to_watch = NULL;
657 tx_buffer->skb = NULL;
658 dma_unmap_len_set(tx_buffer, len, 0);
659
660}
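
/**
 * i40e_clean_tx_ring - Free any pending Tx buffers and reset the ring
 * @tx_ring: ring to be cleaned
 **/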
666void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
667{
668 unsigned long bi_size;
669 u16 i;
670
671
672 if (!tx_ring->tx_bi)
673 return;
674
675
676 for (i = 0; i < tx_ring->count; i++)
677 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
678
679 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
680 memset(tx_ring->tx_bi, 0, bi_size);
681
682
683 memset(tx_ring->desc, 0, tx_ring->size);
684
685 tx_ring->next_to_use = 0;
686 tx_ring->next_to_clean = 0;
687
688 if (!tx_ring->netdev)
689 return;
690
691
692 netdev_tx_reset_queue(txring_txq(tx_ring));
693}
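
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/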
701void i40e_free_tx_resources(struct i40e_ring *tx_ring)
702{
703 i40e_clean_tx_ring(tx_ring);
704 kfree(tx_ring->tx_bi);
705 tx_ring->tx_bi = NULL;
706
707 if (tx_ring->desc) {
708 dma_free_coherent(tx_ring->dev, tx_ring->size,
709 tx_ring->desc, tx_ring->dma);
710 tx_ring->desc = NULL;
711 }
712}
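
/**
 * i40e_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: use the software next_to_clean/next_to_use instead of the
 *         hardware head writeback and tail register
 *
 * Returns the number of descriptors queued but not yet cleaned.
 **/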
722u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
723{
724 u32 head, tail;
725
726 if (!in_sw) {
727 head = i40e_get_head(ring);
728 tail = readl(ring->tail);
729 } else {
730 head = ring->next_to_clean;
731 tail = ring->next_to_use;
732 }
733
734 if (head != tail)
735 return (head < tail) ?
736 tail - head : (tail + ring->count - head);
737
738 return 0;
739}
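
/**
 * i40e_detect_recover_hung - Function to detect and recover hung queues
 * @vsi: pointer to vsi struct with Tx queues
 *
 * Checks each Tx queue of the VSI; if a queue shows no packet progress
 * since the last check, recovery is triggered by issuing a software
 * interrupt to force a descriptor writeback.
 **/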
748void i40e_detect_recover_hung(struct i40e_vsi *vsi)
749{
750 struct i40e_ring *tx_ring = NULL;
751 struct net_device *netdev;
752 unsigned int i;
753 int packets;
754
755 if (!vsi)
756 return;
757
758 if (test_bit(__I40E_VSI_DOWN, vsi->state))
759 return;
760
761 netdev = vsi->netdev;
762 if (!netdev)
763 return;
764
765 if (!netif_carrier_ok(netdev))
766 return;
767
768 for (i = 0; i < vsi->num_queue_pairs; i++) {
769 tx_ring = vsi->tx_rings[i];
770 if (tx_ring && tx_ring->desc) {
771
772
773
774
775
776
777
778 packets = tx_ring->stats.packets & INT_MAX;
779 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
780 i40e_force_wb(vsi, tx_ring->q_vector);
781 continue;
782 }
783
784
785
786
787 smp_rmb();
788 tx_ring->tx_stats.prev_pkt_ctr =
789 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
790 }
791 }
792}
793
794#define WB_STRIDE 4
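
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/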
804static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
805 struct i40e_ring *tx_ring, int napi_budget)
806{
807 u16 i = tx_ring->next_to_clean;
808 struct i40e_tx_buffer *tx_buf;
809 struct i40e_tx_desc *tx_head;
810 struct i40e_tx_desc *tx_desc;
811 unsigned int total_bytes = 0, total_packets = 0;
812 unsigned int budget = vsi->work_limit;
813
814 tx_buf = &tx_ring->tx_bi[i];
815 tx_desc = I40E_TX_DESC(tx_ring, i);
816 i -= tx_ring->count;
817
818 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
819
820 do {
821 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
822
823
824 if (!eop_desc)
825 break;
826
827
828 smp_rmb();
829
830 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
831
832 if (tx_head == tx_desc)
833 break;
834
835
836 tx_buf->next_to_watch = NULL;
837
838
839 total_bytes += tx_buf->bytecount;
840 total_packets += tx_buf->gso_segs;
841
842
843 if (ring_is_xdp(tx_ring))
844 page_frag_free(tx_buf->raw_buf);
845 else
846 napi_consume_skb(tx_buf->skb, napi_budget);
847
848
849 dma_unmap_single(tx_ring->dev,
850 dma_unmap_addr(tx_buf, dma),
851 dma_unmap_len(tx_buf, len),
852 DMA_TO_DEVICE);
853
854
855 tx_buf->skb = NULL;
856 dma_unmap_len_set(tx_buf, len, 0);
857
858
859 while (tx_desc != eop_desc) {
860 i40e_trace(clean_tx_irq_unmap,
861 tx_ring, tx_desc, tx_buf);
862
863 tx_buf++;
864 tx_desc++;
865 i++;
866 if (unlikely(!i)) {
867 i -= tx_ring->count;
868 tx_buf = tx_ring->tx_bi;
869 tx_desc = I40E_TX_DESC(tx_ring, 0);
870 }
871
872
873 if (dma_unmap_len(tx_buf, len)) {
874 dma_unmap_page(tx_ring->dev,
875 dma_unmap_addr(tx_buf, dma),
876 dma_unmap_len(tx_buf, len),
877 DMA_TO_DEVICE);
878 dma_unmap_len_set(tx_buf, len, 0);
879 }
880 }
881
882
883 tx_buf++;
884 tx_desc++;
885 i++;
886 if (unlikely(!i)) {
887 i -= tx_ring->count;
888 tx_buf = tx_ring->tx_bi;
889 tx_desc = I40E_TX_DESC(tx_ring, 0);
890 }
891
892 prefetch(tx_desc);
893
894
895 budget--;
896 } while (likely(budget));
897
898 i += tx_ring->count;
899 tx_ring->next_to_clean = i;
900 u64_stats_update_begin(&tx_ring->syncp);
901 tx_ring->stats.bytes += total_bytes;
902 tx_ring->stats.packets += total_packets;
903 u64_stats_update_end(&tx_ring->syncp);
904 tx_ring->q_vector->tx.total_bytes += total_bytes;
905 tx_ring->q_vector->tx.total_packets += total_packets;
906
907 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
908
909
910
911
912
913 unsigned int j = i40e_get_tx_pending(tx_ring, false);
914
915 if (budget &&
916 ((j / WB_STRIDE) == 0) && (j > 0) &&
917 !test_bit(__I40E_VSI_DOWN, vsi->state) &&
918 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
919 tx_ring->arm_wb = true;
920 }
921
922 if (ring_is_xdp(tx_ring))
923 return !!budget;
924
925
926 netdev_tx_completed_queue(txring_txq(tx_ring),
927 total_packets, total_bytes);
928
929#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
930 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
931 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
932
933
934
935 smp_mb();
936 if (__netif_subqueue_stopped(tx_ring->netdev,
937 tx_ring->queue_index) &&
938 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
939 netif_wake_subqueue(tx_ring->netdev,
940 tx_ring->queue_index);
941 ++tx_ring->tx_stats.restart_queue;
942 }
943 }
944
945 return !!budget;
946}
947
948
949
950
951
952
953
954static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
955 struct i40e_q_vector *q_vector)
956{
957 u16 flags = q_vector->tx.ring[0].flags;
958 u32 val;
959
960 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
961 return;
962
963 if (q_vector->arm_wb_state)
964 return;
965
966 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
967 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
968 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK;
969
970 wr32(&vsi->back->hw,
971 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
972 val);
973 } else {
974 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
975 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK;
976
977 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
978 }
979 q_vector->arm_wb_state = true;
980}
981
982
983
984
985
986
987
988void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
989{
990 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
991 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
992 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
993 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
994 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
995
996
997 wr32(&vsi->back->hw,
998 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
999 } else {
1000 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1001 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
1002 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1003 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
1004
1005
1006 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1007 }
1008}
1009
1010static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
1011 struct i40e_ring_container *rc)
1012{
1013 return &q_vector->rx == rc;
1014}
1015
1016static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
1017{
1018 unsigned int divisor;
1019
1020 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
1021 case I40E_LINK_SPEED_40GB:
1022 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
1023 break;
1024 case I40E_LINK_SPEED_25GB:
1025 case I40E_LINK_SPEED_20GB:
1026 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
1027 break;
1028 default:
1029 case I40E_LINK_SPEED_10GB:
1030 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
1031 break;
1032 case I40E_LINK_SPEED_1GB:
1033 case I40E_LINK_SPEED_100MB:
1034 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
1035 break;
1036 }
1037
1038 return divisor;
1039}
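
/**
 * i40e_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packet and byte counts seen since the
 * last update.  Per-interrupt computation gives faster updates and a more
 * accurate ITR for the current traffic pattern.
 **/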
1054static void i40e_update_itr(struct i40e_q_vector *q_vector,
1055 struct i40e_ring_container *rc)
1056{
1057 unsigned int avg_wire_size, packets, bytes, itr;
1058 unsigned long next_update = jiffies;
1059
1060
1061
1062
1063 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1064 return;
1065
1066
1067
1068
1069 itr = i40e_container_is_rx(q_vector, rc) ?
1070 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1071 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1072
1073
1074
1075
1076
1077
1078 if (time_after(next_update, rc->next_update))
1079 goto clear_counts;
1080
1081
1082
1083
1084
1085
1086
1087 if (q_vector->itr_countdown) {
1088 itr = rc->target_itr;
1089 goto clear_counts;
1090 }
1091
1092 packets = rc->total_packets;
1093 bytes = rc->total_bytes;
1094
1095 if (i40e_container_is_rx(q_vector, rc)) {
1096
1097
1098
1099
1100
1101 if (packets && packets < 4 && bytes < 9000 &&
1102 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1103 itr = I40E_ITR_ADAPTIVE_LATENCY;
1104 goto adjust_by_size;
1105 }
1106 } else if (packets < 4) {
1107
1108
1109
1110
1111
1112 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1113 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1114 I40E_ITR_ADAPTIVE_MAX_USECS)
1115 goto clear_counts;
1116 } else if (packets > 32) {
1117
1118
1119
1120 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1121 }
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131 if (packets < 56) {
1132 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1133 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1134 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1135 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1136 }
1137 goto clear_counts;
1138 }
1139
1140 if (packets <= 256) {
1141 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1142 itr &= I40E_ITR_MASK;
1143
1144
1145
1146
1147
1148 if (packets <= 112)
1149 goto clear_counts;
1150
1151
1152
1153
1154
1155
1156 itr /= 2;
1157 itr &= I40E_ITR_MASK;
1158 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1159 itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1160
1161 goto clear_counts;
1162 }
1163
1164
1165
1166
1167
1168
1169
1170 itr = I40E_ITR_ADAPTIVE_BULK;
1171
1172adjust_by_size:
1173
1174
1175
1176
1177
1178 avg_wire_size = bytes / packets;
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195 if (avg_wire_size <= 60) {
1196
1197 avg_wire_size = 4096;
1198 } else if (avg_wire_size <= 380) {
1199
1200 avg_wire_size *= 40;
1201 avg_wire_size += 1696;
1202 } else if (avg_wire_size <= 1084) {
1203
1204 avg_wire_size *= 15;
1205 avg_wire_size += 11452;
1206 } else if (avg_wire_size <= 1980) {
1207
1208 avg_wire_size *= 5;
1209 avg_wire_size += 22420;
1210 } else {
1211
1212 avg_wire_size = 32256;
1213 }
1214
1215
1216
1217
1218 if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1219 avg_wire_size /= 2;
1220
1221
1222
1223
1224
1225
1226
1227
1228 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1229 I40E_ITR_ADAPTIVE_MIN_INC;
1230
1231 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1232 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1233 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1234 }
1235
1236clear_counts:
1237
1238 rc->target_itr = itr;
1239
1240
1241 rc->next_update = next_update + 1;
1242
1243 rc->total_bytes = 0;
1244 rc->total_packets = 0;
1245}
1246
1247
1248
1249
1250
1251
1252
1253
1254static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1255 struct i40e_rx_buffer *old_buff)
1256{
1257 struct i40e_rx_buffer *new_buff;
1258 u16 nta = rx_ring->next_to_alloc;
1259
1260 new_buff = &rx_ring->rx_bi[nta];
1261
1262
1263 nta++;
1264 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1265
1266
1267 new_buff->dma = old_buff->dma;
1268 new_buff->page = old_buff->page;
1269 new_buff->page_offset = old_buff->page_offset;
1270 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1271}
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282static inline bool i40e_rx_is_programming_status(u64 qw)
1283{
1284
1285
1286
1287
1288
1289 return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1290}
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1304 union i40e_rx_desc *rx_desc,
1305 u64 qw)
1306{
1307 struct i40e_rx_buffer *rx_buffer;
1308 u32 ntc = rx_ring->next_to_clean;
1309 u8 id;
1310
1311
1312 rx_buffer = &rx_ring->rx_bi[ntc++];
1313 ntc = (ntc < rx_ring->count) ? ntc : 0;
1314 rx_ring->next_to_clean = ntc;
1315
1316 prefetch(I40E_RX_DESC(rx_ring, ntc));
1317
1318
1319 i40e_reuse_rx_page(rx_ring, rx_buffer);
1320 rx_ring->rx_stats.page_reuse_count++;
1321
1322
1323 rx_buffer->page = NULL;
1324
1325 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1326 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1327
1328 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1329 i40e_fd_handle_status(rx_ring, rx_desc, id);
1330}
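
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 **/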
1338int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1339{
1340 struct device *dev = tx_ring->dev;
1341 int bi_size;
1342
1343 if (!dev)
1344 return -ENOMEM;
1345
1346
1347 WARN_ON(tx_ring->tx_bi);
1348 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1349 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1350 if (!tx_ring->tx_bi)
1351 goto err;
1352
1353 u64_stats_init(&tx_ring->syncp);
1354
1355
1356 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1357
1358
1359
1360 tx_ring->size += sizeof(u32);
1361 tx_ring->size = ALIGN(tx_ring->size, 4096);
1362 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1363 &tx_ring->dma, GFP_KERNEL);
1364 if (!tx_ring->desc) {
1365 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1366 tx_ring->size);
1367 goto err;
1368 }
1369
1370 tx_ring->next_to_use = 0;
1371 tx_ring->next_to_clean = 0;
1372 tx_ring->tx_stats.prev_pkt_ctr = -1;
1373 return 0;
1374
1375err:
1376 kfree(tx_ring->tx_bi);
1377 tx_ring->tx_bi = NULL;
1378 return -ENOMEM;
1379}
1380
1381
1382
1383
1384
1385void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1386{
1387 unsigned long bi_size;
1388 u16 i;
1389
1390
1391 if (!rx_ring->rx_bi)
1392 return;
1393
1394 if (rx_ring->skb) {
1395 dev_kfree_skb(rx_ring->skb);
1396 rx_ring->skb = NULL;
1397 }
1398
1399
1400 for (i = 0; i < rx_ring->count; i++) {
1401 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1402
1403 if (!rx_bi->page)
1404 continue;
1405
1406
1407
1408
1409 dma_sync_single_range_for_cpu(rx_ring->dev,
1410 rx_bi->dma,
1411 rx_bi->page_offset,
1412 rx_ring->rx_buf_len,
1413 DMA_FROM_DEVICE);
1414
1415
1416 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1417 i40e_rx_pg_size(rx_ring),
1418 DMA_FROM_DEVICE,
1419 I40E_RX_DMA_ATTR);
1420
1421 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1422
1423 rx_bi->page = NULL;
1424 rx_bi->page_offset = 0;
1425 }
1426
1427 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1428 memset(rx_ring->rx_bi, 0, bi_size);
1429
1430
1431 memset(rx_ring->desc, 0, rx_ring->size);
1432
1433 rx_ring->next_to_alloc = 0;
1434 rx_ring->next_to_clean = 0;
1435 rx_ring->next_to_use = 0;
1436}
1437
1438
1439
1440
1441
1442
1443
1444void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1445{
1446 i40e_clean_rx_ring(rx_ring);
1447 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1448 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1449 rx_ring->xdp_prog = NULL;
1450 kfree(rx_ring->rx_bi);
1451 rx_ring->rx_bi = NULL;
1452
1453 if (rx_ring->desc) {
1454 dma_free_coherent(rx_ring->dev, rx_ring->size,
1455 rx_ring->desc, rx_ring->dma);
1456 rx_ring->desc = NULL;
1457 }
1458}
1459
1460
1461
1462
1463
1464
1465
1466int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1467{
1468 struct device *dev = rx_ring->dev;
1469 int err = -ENOMEM;
1470 int bi_size;
1471
1472
1473 WARN_ON(rx_ring->rx_bi);
1474 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1475 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1476 if (!rx_ring->rx_bi)
1477 goto err;
1478
1479 u64_stats_init(&rx_ring->syncp);
1480
1481
1482 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1483 rx_ring->size = ALIGN(rx_ring->size, 4096);
1484 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1485 &rx_ring->dma, GFP_KERNEL);
1486
1487 if (!rx_ring->desc) {
1488 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1489 rx_ring->size);
1490 goto err;
1491 }
1492
1493 rx_ring->next_to_alloc = 0;
1494 rx_ring->next_to_clean = 0;
1495 rx_ring->next_to_use = 0;
1496
1497
1498 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1499 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1500 rx_ring->queue_index);
1501 if (err < 0)
1502 goto err;
1503 }
1504
1505 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1506
1507 return 0;
1508err:
1509 kfree(rx_ring->rx_bi);
1510 rx_ring->rx_bi = NULL;
1511 return err;
1512}
1513
1514
1515
1516
1517
1518
1519static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1520{
1521 rx_ring->next_to_use = val;
1522
1523
1524 rx_ring->next_to_alloc = val;
1525
1526
1527
1528
1529
1530
1531 wmb();
1532 writel(val, rx_ring->tail);
1533}
1534
1535
1536
1537
1538
1539
1540
1541static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1542{
1543 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1544}
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1555 struct i40e_rx_buffer *bi)
1556{
1557 struct page *page = bi->page;
1558 dma_addr_t dma;
1559
1560
1561 if (likely(page)) {
1562 rx_ring->rx_stats.page_reuse_count++;
1563 return true;
1564 }
1565
1566
1567 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1568 if (unlikely(!page)) {
1569 rx_ring->rx_stats.alloc_page_failed++;
1570 return false;
1571 }
1572
1573
1574 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1575 i40e_rx_pg_size(rx_ring),
1576 DMA_FROM_DEVICE,
1577 I40E_RX_DMA_ATTR);
1578
1579
1580
1581
1582 if (dma_mapping_error(rx_ring->dev, dma)) {
1583 __free_pages(page, i40e_rx_pg_order(rx_ring));
1584 rx_ring->rx_stats.alloc_page_failed++;
1585 return false;
1586 }
1587
1588 bi->dma = dma;
1589 bi->page = page;
1590 bi->page_offset = i40e_rx_offset(rx_ring);
1591 page_ref_add(page, USHRT_MAX - 1);
1592 bi->pagecnt_bias = USHRT_MAX;
1593
1594 return true;
1595}
1596
1597
1598
1599
1600
1601
1602
1603static void i40e_receive_skb(struct i40e_ring *rx_ring,
1604 struct sk_buff *skb, u16 vlan_tag)
1605{
1606 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1607
1608 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1609 (vlan_tag & VLAN_VID_MASK))
1610 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1611
1612 napi_gro_receive(&q_vector->napi, skb);
1613}
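
/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/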
1622bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1623{
1624 u16 ntu = rx_ring->next_to_use;
1625 union i40e_rx_desc *rx_desc;
1626 struct i40e_rx_buffer *bi;
1627
1628
1629 if (!rx_ring->netdev || !cleaned_count)
1630 return false;
1631
1632 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1633 bi = &rx_ring->rx_bi[ntu];
1634
1635 do {
1636 if (!i40e_alloc_mapped_page(rx_ring, bi))
1637 goto no_buffers;
1638
1639
1640 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1641 bi->page_offset,
1642 rx_ring->rx_buf_len,
1643 DMA_FROM_DEVICE);
1644
1645
1646
1647
1648 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1649
1650 rx_desc++;
1651 bi++;
1652 ntu++;
1653 if (unlikely(ntu == rx_ring->count)) {
1654 rx_desc = I40E_RX_DESC(rx_ring, 0);
1655 bi = rx_ring->rx_bi;
1656 ntu = 0;
1657 }
1658
1659
1660 rx_desc->wb.qword1.status_error_len = 0;
1661
1662 cleaned_count--;
1663 } while (cleaned_count);
1664
1665 if (rx_ring->next_to_use != ntu)
1666 i40e_release_rx_desc(rx_ring, ntu);
1667
1668 return false;
1669
1670no_buffers:
1671 if (rx_ring->next_to_use != ntu)
1672 i40e_release_rx_desc(rx_ring, ntu);
1673
1674
1675
1676
1677 return true;
1678}
1679
1680
1681
1682
1683
1684
1685
1686static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1687 struct sk_buff *skb,
1688 union i40e_rx_desc *rx_desc)
1689{
1690 struct i40e_rx_ptype_decoded decoded;
1691 u32 rx_error, rx_status;
1692 bool ipv4, ipv6;
1693 u8 ptype;
1694 u64 qword;
1695
1696 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1697 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1698 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1699 I40E_RXD_QW1_ERROR_SHIFT;
1700 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1701 I40E_RXD_QW1_STATUS_SHIFT;
1702 decoded = decode_rx_desc_ptype(ptype);
1703
1704 skb->ip_summed = CHECKSUM_NONE;
1705
1706 skb_checksum_none_assert(skb);
1707
1708
1709 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1710 return;
1711
1712
1713 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1714 return;
1715
1716
1717 if (!(decoded.known && decoded.outer_ip))
1718 return;
1719
1720 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1721 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1722 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1723 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1724
1725 if (ipv4 &&
1726 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1727 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1728 goto checksum_fail;
1729
1730
1731 if (ipv6 &&
1732 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1733
1734 return;
1735
1736
1737 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1738 goto checksum_fail;
1739
1740
1741
1742
1743
1744 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1745 return;
1746
1747
1748
1749
1750
1751 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1752 skb->csum_level = 1;
1753
1754
1755 switch (decoded.inner_prot) {
1756 case I40E_RX_PTYPE_INNER_PROT_TCP:
1757 case I40E_RX_PTYPE_INNER_PROT_UDP:
1758 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1759 skb->ip_summed = CHECKSUM_UNNECESSARY;
1760
1761 default:
1762 break;
1763 }
1764
1765 return;
1766
1767checksum_fail:
1768 vsi->back->hw_csum_rx_error++;
1769}
1770
1771
1772
1773
1774
1775
1776
1777static inline int i40e_ptype_to_htype(u8 ptype)
1778{
1779 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1780
1781 if (!decoded.known)
1782 return PKT_HASH_TYPE_NONE;
1783
1784 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1785 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1786 return PKT_HASH_TYPE_L4;
1787 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1788 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1789 return PKT_HASH_TYPE_L3;
1790 else
1791 return PKT_HASH_TYPE_L2;
1792}
1793
1794
1795
1796
1797
1798
1799static inline void i40e_rx_hash(struct i40e_ring *ring,
1800 union i40e_rx_desc *rx_desc,
1801 struct sk_buff *skb,
1802 u8 rx_ptype)
1803{
1804 u32 hash;
1805 const __le64 rss_mask =
1806 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1807 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1808
1809 if (!(ring->netdev->features & NETIF_F_RXHASH))
1810 return;
1811
1812 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1813 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1814 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1815 }
1816}
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829static inline
1830void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1831 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1832 u8 rx_ptype)
1833{
1834 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1835 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1836 I40E_RXD_QW1_STATUS_SHIFT;
1837 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1838 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1839 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1840
1841 if (unlikely(tsynvalid))
1842 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1843
1844 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1845
1846 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1847
1848 skb_record_rx_queue(skb, rx_ring->queue_index);
1849
1850
1851 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1852}
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1869 union i40e_rx_desc *rx_desc)
1870
1871{
1872
1873 if (IS_ERR(skb))
1874 return true;
1875
1876
1877
1878
1879
1880
1881 if (unlikely(i40e_test_staterr(rx_desc,
1882 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1883 dev_kfree_skb_any(skb);
1884 return true;
1885 }
1886
1887
1888 if (eth_skb_pad(skb))
1889 return true;
1890
1891 return false;
1892}
1893
1894
1895
1896
1897
1898
1899
1900
1901static inline bool i40e_page_is_reusable(struct page *page)
1902{
1903 return (page_to_nid(page) == numa_mem_id()) &&
1904 !page_is_pfmemalloc(page);
1905}
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1935{
1936 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1937 struct page *page = rx_buffer->page;
1938
1939
1940 if (unlikely(!i40e_page_is_reusable(page)))
1941 return false;
1942
1943#if (PAGE_SIZE < 8192)
1944
1945 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1946 return false;
1947#else
1948#define I40E_LAST_OFFSET \
1949 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1950 if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1951 return false;
1952#endif
1953
1954
1955
1956
1957
1958 if (unlikely(pagecnt_bias == 1)) {
1959 page_ref_add(page, USHRT_MAX - 1);
1960 rx_buffer->pagecnt_bias = USHRT_MAX;
1961 }
1962
1963 return true;
1964}
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1979 struct i40e_rx_buffer *rx_buffer,
1980 struct sk_buff *skb,
1981 unsigned int size)
1982{
1983#if (PAGE_SIZE < 8192)
1984 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1985#else
1986 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1987#endif
1988
1989 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1990 rx_buffer->page_offset, size, truesize);
1991
1992
1993#if (PAGE_SIZE < 8192)
1994 rx_buffer->page_offset ^= truesize;
1995#else
1996 rx_buffer->page_offset += truesize;
1997#endif
1998}
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
2009 const unsigned int size)
2010{
2011 struct i40e_rx_buffer *rx_buffer;
2012
2013 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
2014 prefetchw(rx_buffer->page);
2015
2016
2017 dma_sync_single_range_for_cpu(rx_ring->dev,
2018 rx_buffer->dma,
2019 rx_buffer->page_offset,
2020 size,
2021 DMA_FROM_DEVICE);
2022
2023
2024 rx_buffer->pagecnt_bias--;
2025
2026 return rx_buffer;
2027}
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2040 struct i40e_rx_buffer *rx_buffer,
2041 struct xdp_buff *xdp)
2042{
2043 unsigned int size = xdp->data_end - xdp->data;
2044#if (PAGE_SIZE < 8192)
2045 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2046#else
2047 unsigned int truesize = SKB_DATA_ALIGN(size);
2048#endif
2049 unsigned int headlen;
2050 struct sk_buff *skb;
2051
2052
2053 prefetch(xdp->data);
2054#if L1_CACHE_BYTES < 128
2055 prefetch(xdp->data + L1_CACHE_BYTES);
2056#endif
2057
2058
2059 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2060 I40E_RX_HDR_SIZE,
2061 GFP_ATOMIC | __GFP_NOWARN);
2062 if (unlikely(!skb))
2063 return NULL;
2064
2065
2066 headlen = size;
2067 if (headlen > I40E_RX_HDR_SIZE)
2068 headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
2069
2070
2071 memcpy(__skb_put(skb, headlen), xdp->data,
2072 ALIGN(headlen, sizeof(long)));
2073
2074
2075 size -= headlen;
2076 if (size) {
2077 skb_add_rx_frag(skb, 0, rx_buffer->page,
2078 rx_buffer->page_offset + headlen,
2079 size, truesize);
2080
2081
2082#if (PAGE_SIZE < 8192)
2083 rx_buffer->page_offset ^= truesize;
2084#else
2085 rx_buffer->page_offset += truesize;
2086#endif
2087 } else {
2088
2089 rx_buffer->pagecnt_bias++;
2090 }
2091
2092 return skb;
2093}
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2105 struct i40e_rx_buffer *rx_buffer,
2106 struct xdp_buff *xdp)
2107{
2108 unsigned int size = xdp->data_end - xdp->data;
2109#if (PAGE_SIZE < 8192)
2110 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2111#else
2112 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2113 SKB_DATA_ALIGN(I40E_SKB_PAD + size);
2114#endif
2115 struct sk_buff *skb;
2116
2117
2118 prefetch(xdp->data);
2119#if L1_CACHE_BYTES < 128
2120 prefetch(xdp->data + L1_CACHE_BYTES);
2121#endif
2122
2123 skb = build_skb(xdp->data_hard_start, truesize);
2124 if (unlikely(!skb))
2125 return NULL;
2126
2127
2128 skb_reserve(skb, I40E_SKB_PAD);
2129 __skb_put(skb, size);
2130
2131
2132#if (PAGE_SIZE < 8192)
2133 rx_buffer->page_offset ^= truesize;
2134#else
2135 rx_buffer->page_offset += truesize;
2136#endif
2137
2138 return skb;
2139}
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2150 struct i40e_rx_buffer *rx_buffer)
2151{
2152 if (i40e_can_reuse_rx_page(rx_buffer)) {
2153
2154 i40e_reuse_rx_page(rx_ring, rx_buffer);
2155 rx_ring->rx_stats.page_reuse_count++;
2156 } else {
2157
2158 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2159 i40e_rx_pg_size(rx_ring),
2160 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2161 __page_frag_cache_drain(rx_buffer->page,
2162 rx_buffer->pagecnt_bias);
2163 }
2164
2165
2166 rx_buffer->page = NULL;
2167}
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2181 union i40e_rx_desc *rx_desc,
2182 struct sk_buff *skb)
2183{
2184 u32 ntc = rx_ring->next_to_clean + 1;
2185
2186
2187 ntc = (ntc < rx_ring->count) ? ntc : 0;
2188 rx_ring->next_to_clean = ntc;
2189
2190 prefetch(I40E_RX_DESC(rx_ring, ntc));
2191
2192
2193#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2194 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2195 return false;
2196
2197 rx_ring->rx_stats.non_eop_descs++;
2198
2199 return true;
2200}
2201
2202#define I40E_XDP_PASS 0
2203#define I40E_XDP_CONSUMED 1
2204#define I40E_XDP_TX 2
2205
2206static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
2207 struct i40e_ring *xdp_ring);
2208
2209
2210
2211
2212
2213
2214static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2215 struct xdp_buff *xdp)
2216{
2217 int err, result = I40E_XDP_PASS;
2218 struct i40e_ring *xdp_ring;
2219 struct bpf_prog *xdp_prog;
2220 u32 act;
2221
2222 rcu_read_lock();
2223 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2224
2225 if (!xdp_prog)
2226 goto xdp_out;
2227
2228 act = bpf_prog_run_xdp(xdp_prog, xdp);
2229 switch (act) {
2230 case XDP_PASS:
2231 break;
2232 case XDP_TX:
2233 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2234 result = i40e_xmit_xdp_ring(xdp, xdp_ring);
2235 break;
2236 case XDP_REDIRECT:
2237 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2238 result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
2239 break;
2240 default:
2241 bpf_warn_invalid_xdp_action(act);
2242 case XDP_ABORTED:
2243 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2244
2245 case XDP_DROP:
2246 result = I40E_XDP_CONSUMED;
2247 break;
2248 }
2249xdp_out:
2250 rcu_read_unlock();
2251 return ERR_PTR(-result);
2252}
2253
2254
2255
2256
2257
2258
2259
2260static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2261 struct i40e_rx_buffer *rx_buffer,
2262 unsigned int size)
2263{
2264#if (PAGE_SIZE < 8192)
2265 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2266
2267 rx_buffer->page_offset ^= truesize;
2268#else
2269 unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2270
2271 rx_buffer->page_offset += truesize;
2272#endif
2273}
2274
2275static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2276{
2277
2278
2279
2280 wmb();
2281 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2282}
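
/**
 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/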
2296static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2297{
2298 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2299 struct sk_buff *skb = rx_ring->skb;
2300 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2301 bool failure = false, xdp_xmit = false;
2302 struct xdp_buff xdp;
2303
2304 xdp.rxq = &rx_ring->xdp_rxq;
2305
2306 while (likely(total_rx_packets < (unsigned int)budget)) {
2307 struct i40e_rx_buffer *rx_buffer;
2308 union i40e_rx_desc *rx_desc;
2309 unsigned int size;
2310 u16 vlan_tag;
2311 u8 rx_ptype;
2312 u64 qword;
2313
2314
2315 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2316 failure = failure ||
2317 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2318 cleaned_count = 0;
2319 }
2320
2321 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2322
2323
2324
2325
2326
2327
2328 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2329
2330
2331
2332
2333
2334 dma_rmb();
2335
2336 if (unlikely(i40e_rx_is_programming_status(qword))) {
2337 i40e_clean_programming_status(rx_ring, rx_desc, qword);
2338 cleaned_count++;
2339 continue;
2340 }
2341 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2342 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2343 if (!size)
2344 break;
2345
2346 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2347 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2348
2349
2350 if (!skb) {
2351 xdp.data = page_address(rx_buffer->page) +
2352 rx_buffer->page_offset;
2353 xdp_set_data_meta_invalid(&xdp);
2354 xdp.data_hard_start = xdp.data -
2355 i40e_rx_offset(rx_ring);
2356 xdp.data_end = xdp.data + size;
2357
2358 skb = i40e_run_xdp(rx_ring, &xdp);
2359 }
2360
2361 if (IS_ERR(skb)) {
2362 if (PTR_ERR(skb) == -I40E_XDP_TX) {
2363 xdp_xmit = true;
2364 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2365 } else {
2366 rx_buffer->pagecnt_bias++;
2367 }
2368 total_rx_bytes += size;
2369 total_rx_packets++;
2370 } else if (skb) {
2371 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2372 } else if (ring_uses_build_skb(rx_ring)) {
2373 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2374 } else {
2375 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2376 }
2377
2378
2379 if (!skb) {
2380 rx_ring->rx_stats.alloc_buff_failed++;
2381 rx_buffer->pagecnt_bias++;
2382 break;
2383 }
2384
2385 i40e_put_rx_buffer(rx_ring, rx_buffer);
2386 cleaned_count++;
2387
2388 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2389 continue;
2390
2391 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2392 skb = NULL;
2393 continue;
2394 }
2395
2396
2397 total_rx_bytes += skb->len;
2398
2399 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2400 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2401 I40E_RXD_QW1_PTYPE_SHIFT;
2402
2403
2404 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
2405
2406 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2407 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2408
2409 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2410 i40e_receive_skb(rx_ring, skb, vlan_tag);
2411 skb = NULL;
2412
2413
2414 total_rx_packets++;
2415 }
2416
2417 if (xdp_xmit) {
2418 struct i40e_ring *xdp_ring =
2419 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2420
2421 i40e_xdp_ring_update_tail(xdp_ring);
2422 xdp_do_flush_map();
2423 }
2424
2425 rx_ring->skb = skb;
2426
2427 u64_stats_update_begin(&rx_ring->syncp);
2428 rx_ring->stats.packets += total_rx_packets;
2429 rx_ring->stats.bytes += total_rx_bytes;
2430 u64_stats_update_end(&rx_ring->syncp);
2431 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2432 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2433
2434
2435 return failure ? budget : (int)total_rx_packets;
2436}
2437
2438static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2439{
2440 u32 val;
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457 itr &= I40E_ITR_MASK;
2458
2459 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2460 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2461 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2462
2463 return val;
2464}
2465
2466
2467#define INTREG I40E_PFINT_DYN_CTLN
2468
2469
2470
2471
2472
2473
2474
2475
2476#define ITR_COUNTDOWN_START 3
2477
2478
2479
2480
2481
2482
2483
2484static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2485 struct i40e_q_vector *q_vector)
2486{
2487 struct i40e_hw *hw = &vsi->back->hw;
2488 u32 intval;
2489
2490
2491 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2492 i40e_irq_dynamic_enable_icr0(vsi->back);
2493 return;
2494 }
2495
2496
2497 i40e_update_itr(q_vector, &q_vector->tx);
2498 i40e_update_itr(q_vector, &q_vector->rx);
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2509
2510 intval = i40e_buildreg_itr(I40E_RX_ITR,
2511 q_vector->rx.target_itr);
2512 q_vector->rx.current_itr = q_vector->rx.target_itr;
2513 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2514 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2515 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2516 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2517
2518
2519
2520 intval = i40e_buildreg_itr(I40E_TX_ITR,
2521 q_vector->tx.target_itr);
2522 q_vector->tx.current_itr = q_vector->tx.target_itr;
2523 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2524 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2525
2526 intval = i40e_buildreg_itr(I40E_RX_ITR,
2527 q_vector->rx.target_itr);
2528 q_vector->rx.current_itr = q_vector->rx.target_itr;
2529 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2530 } else {
2531
2532 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2533 if (q_vector->itr_countdown)
2534 q_vector->itr_countdown--;
2535 }
2536
2537 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2538 wr32(hw, INTREG(q_vector->reg_idx), intval);
2539}
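
/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/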
2550int i40e_napi_poll(struct napi_struct *napi, int budget)
2551{
2552 struct i40e_q_vector *q_vector =
2553 container_of(napi, struct i40e_q_vector, napi);
2554 struct i40e_vsi *vsi = q_vector->vsi;
2555 struct i40e_ring *ring;
2556 bool clean_complete = true;
2557 bool arm_wb = false;
2558 int budget_per_ring;
2559 int work_done = 0;
2560
2561 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2562 napi_complete(napi);
2563 return 0;
2564 }
2565
2566
2567
2568
2569 i40e_for_each_ring(ring, q_vector->tx) {
2570 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
2571 clean_complete = false;
2572 continue;
2573 }
2574 arm_wb |= ring->arm_wb;
2575 ring->arm_wb = false;
2576 }
2577
2578
2579 if (budget <= 0)
2580 goto tx_only;
2581
2582
2583
2584
2585 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2586
2587 i40e_for_each_ring(ring, q_vector->rx) {
2588 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2589
2590 work_done += cleaned;
2591
2592 if (cleaned >= budget_per_ring)
2593 clean_complete = false;
2594 }
2595
2596
2597 if (!clean_complete) {
2598 int cpu_id = smp_processor_id();
2599
2600
2601
2602
2603
2604
2605
2606
2607 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2608
2609 napi_complete_done(napi, work_done);
2610
2611
2612 i40e_force_wb(vsi, q_vector);
2613
2614
2615 return budget - 1;
2616 }
2617tx_only:
2618 if (arm_wb) {
2619 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2620 i40e_enable_wb_on_itr(vsi, q_vector);
2621 }
2622 return budget;
2623 }
2624
2625 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2626 q_vector->arm_wb_state = false;
2627
2628
2629 napi_complete_done(napi, work_done);
2630
2631 i40e_update_enable_itr(vsi, q_vector);
2632
2633 return min(work_done, budget - 1);
2634}
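
/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring: ring to add programming descriptor to
 * @skb: send buffer
 * @tx_flags: send Tx flags
 **/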
2642static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2643 u32 tx_flags)
2644{
2645 struct i40e_filter_program_desc *fdir_desc;
2646 struct i40e_pf *pf = tx_ring->vsi->back;
2647 union {
2648 unsigned char *network;
2649 struct iphdr *ipv4;
2650 struct ipv6hdr *ipv6;
2651 } hdr;
2652 struct tcphdr *th;
2653 unsigned int hlen;
2654 u32 flex_ptype, dtype_cmd;
2655 int l4_proto;
2656 u16 i;
2657
2658
2659 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2660 return;
2661
2662 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2663 return;
2664
2665
2666 if (!tx_ring->atr_sample_rate)
2667 return;
2668
2669
2670 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2671 return;
2672
2673
2674 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2675 skb_inner_network_header(skb) : skb_network_header(skb);
2676
2677
2678
2679
2680 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2681
2682 hlen = (hdr.network[0] & 0x0F) << 2;
2683 l4_proto = hdr.ipv4->protocol;
2684 } else {
2685
2686 unsigned int inner_hlen = hdr.network - skb->data;
2687 unsigned int h_offset = inner_hlen;
2688
2689
2690 l4_proto =
2691 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2692
2693 hlen = h_offset - inner_hlen;
2694 }
2695
2696 if (l4_proto != IPPROTO_TCP)
2697 return;
2698
2699 th = (struct tcphdr *)(hdr.network + hlen);
2700
2701
2702 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2703 return;
2704 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
		/* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
2708 if (th->fin || th->rst)
2709 return;
2710 }
2711
2712 tx_ring->atr_count++;
2713
	/* sample on all syn/fin/rst packets or once every atr sample rate */
2715 if (!th->fin &&
2716 !th->syn &&
2717 !th->rst &&
2718 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2719 return;
2720
2721 tx_ring->atr_count = 0;
2722
2723
2724 i = tx_ring->next_to_use;
2725 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2726
2727 i++;
2728 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2729
2730 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2731 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2732 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2733 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2734 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2735 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2736 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2737
2738 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2739
2740 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2741
2742 dtype_cmd |= (th->fin || th->rst) ?
2743 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2744 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2745 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2746 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2747
2748 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2749 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2750
2751 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2752 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2753
2754 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2755 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2756 dtype_cmd |=
2757 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2758 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2759 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2760 else
2761 dtype_cmd |=
2762 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2763 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2764 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2765
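	/* let the hardware know this is an ATR filter it may evict on its
	 * own when HW eviction is enabled
	 */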
2766 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2767 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2768
2769 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2770 fdir_desc->rsvd = cpu_to_le32(0);
2771 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2772 fdir_desc->fd_id = cpu_to_le32(0);
2773}
2774
/**
 * i40e_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the tx flags to be set
 *
 * Checks the skb and sets up the generic transmit flags related to VLAN
 * tagging for the HW (VLAN insertion, DCB priority, etc.).
 *
 * Returns an error code if the frame should be dropped, otherwise 0 once
 * the flags have been set properly.
 **/
2787static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2788 struct i40e_ring *tx_ring,
2789 u32 *flags)
2790{
2791 __be16 protocol = skb->protocol;
2792 u32 tx_flags = 0;
2793
2794 if (protocol == htons(ETH_P_8021Q) &&
2795 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
2803 skb->protocol = vlan_get_protocol(skb);
2804 goto out;
2805 }
2806
	/* if we have a HW VLAN tag being added, default to the HW one */
2808 if (skb_vlan_tag_present(skb)) {
2809 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2810 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
2812 } else if (protocol == htons(ETH_P_8021Q)) {
2813 struct vlan_hdr *vhdr, _vhdr;
2814
2815 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2816 if (!vhdr)
2817 return -EINVAL;
2818
2819 protocol = vhdr->h_vlan_encapsulated_proto;
2820 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2821 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2822 }
2823
2824 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2825 goto out;
2826
	/* Insert 802.1p priority into VLAN header */
2828 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2829 (skb->priority != TC_PRIO_CONTROL)) {
2830 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2831 tx_flags |= (skb->priority & 0x7) <<
2832 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2833 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2834 struct vlan_ethhdr *vhdr;
2835 int rc;
2836
2837 rc = skb_cow_head(skb, 0);
2838 if (rc < 0)
2839 return rc;
2840 vhdr = (struct vlan_ethhdr *)skb->data;
2841 vhdr->h_vlan_TCI = htons(tx_flags >>
2842 I40E_TX_FLAGS_VLAN_SHIFT);
2843 } else {
2844 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2845 }
2846 }
2847
2848out:
2849 *flags = tx_flags;
2850 return 0;
2851}
2852
/**
 * i40e_tso - set up the TSO context descriptor
 * @first:    pointer to first Tx buffer for xmit
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if TSO is going, or error
 **/
2861static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2862 u64 *cd_type_cmd_tso_mss)
2863{
2864 struct sk_buff *skb = first->skb;
2865 u64 cd_cmd, cd_tso_len, cd_mss;
2866 union {
2867 struct iphdr *v4;
2868 struct ipv6hdr *v6;
2869 unsigned char *hdr;
2870 } ip;
2871 union {
2872 struct tcphdr *tcp;
2873 struct udphdr *udp;
2874 unsigned char *hdr;
2875 } l4;
2876 u32 paylen, l4_offset;
2877 u16 gso_segs, gso_size;
2878 int err;
2879
2880 if (skb->ip_summed != CHECKSUM_PARTIAL)
2881 return 0;
2882
2883 if (!skb_is_gso(skb))
2884 return 0;
2885
2886 err = skb_cow_head(skb, 0);
2887 if (err < 0)
2888 return err;
2889
2890 ip.hdr = skb_network_header(skb);
2891 l4.hdr = skb_transport_header(skb);
2892
2893
2894 if (ip.v4->version == 4) {
2895 ip.v4->tot_len = 0;
2896 ip.v4->check = 0;
2897 } else {
2898 ip.v6->payload_len = 0;
2899 }
2900
2901 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2902 SKB_GSO_GRE_CSUM |
2903 SKB_GSO_IPXIP4 |
2904 SKB_GSO_IPXIP6 |
2905 SKB_GSO_UDP_TUNNEL |
2906 SKB_GSO_UDP_TUNNEL_CSUM)) {
2907 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2908 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2909 l4.udp->len = 0;
2910
2911
2912 l4_offset = l4.hdr - skb->data;
2913
2914
2915 paylen = skb->len - l4_offset;
2916 csum_replace_by_diff(&l4.udp->check,
2917 (__force __wsum)htonl(paylen));
2918 }
2919
2920
2921 ip.hdr = skb_inner_network_header(skb);
2922 l4.hdr = skb_inner_transport_header(skb);
2923
2924
2925 if (ip.v4->version == 4) {
2926 ip.v4->tot_len = 0;
2927 ip.v4->check = 0;
2928 } else {
2929 ip.v6->payload_len = 0;
2930 }
2931 }
2932
2933
2934 l4_offset = l4.hdr - skb->data;
2935
2936
2937 paylen = skb->len - l4_offset;
2938 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2939
2940
2941 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2942
2943
2944 gso_size = skb_shinfo(skb)->gso_size;
2945 gso_segs = skb_shinfo(skb)->gso_segs;
2946
2947
2948 first->gso_segs = gso_segs;
2949 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2950
2951
2952 cd_cmd = I40E_TX_CTX_DESC_TSO;
2953 cd_tso_len = skb->len - *hdr_len;
2954 cd_mss = gso_size;
2955 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2956 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2957 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2958 return 1;
2959}
2960
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
2970static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2971 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2972{
2973 struct i40e_pf *pf;
2974
2975 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2976 return 0;
2977
	/* Tx timestamps cannot be sampled when doing TSO */
2979 if (tx_flags & I40E_TX_FLAGS_TSO)
2980 return 0;
2981
	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
2985 pf = i40e_netdev_to_pf(tx_ring->netdev);
2986 if (!(pf->flags & I40E_FLAG_PTP))
2987 return 0;
2988
2989 if (pf->ptp_tx &&
2990 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
2991 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2992 pf->ptp_tx_start = jiffies;
2993 pf->ptp_tx_skb = skb_get(skb);
2994 } else {
2995 pf->tx_hwtstamp_skipped++;
2996 return 0;
2997 }
2998
2999 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3000 I40E_TXD_CTX_QW1_CMD_SHIFT;
3001
3002 return 1;
3003}
3004
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
3014static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3015 u32 *td_cmd, u32 *td_offset,
3016 struct i40e_ring *tx_ring,
3017 u32 *cd_tunneling)
3018{
3019 union {
3020 struct iphdr *v4;
3021 struct ipv6hdr *v6;
3022 unsigned char *hdr;
3023 } ip;
3024 union {
3025 struct tcphdr *tcp;
3026 struct udphdr *udp;
3027 unsigned char *hdr;
3028 } l4;
3029 unsigned char *exthdr;
3030 u32 offset, cmd = 0;
3031 __be16 frag_off;
3032 u8 l4_proto = 0;
3033
3034 if (skb->ip_summed != CHECKSUM_PARTIAL)
3035 return 0;
3036
3037 ip.hdr = skb_network_header(skb);
3038 l4.hdr = skb_transport_header(skb);
3039
3040
3041 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3042
3043 if (skb->encapsulation) {
3044 u32 tunnel = 0;
3045
3046 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3047 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3048 I40E_TX_CTX_EXT_IP_IPV4 :
3049 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3050
3051 l4_proto = ip.v4->protocol;
3052 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3053 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3054
3055 exthdr = ip.hdr + sizeof(*ip.v6);
3056 l4_proto = ip.v6->nexthdr;
3057 if (l4.hdr != exthdr)
3058 ipv6_skip_exthdr(skb, exthdr - skb->data,
3059 &l4_proto, &frag_off);
3060 }
3061
3062
3063 switch (l4_proto) {
3064 case IPPROTO_UDP:
3065 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3066 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3067 break;
3068 case IPPROTO_GRE:
3069 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3070 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3071 break;
3072 case IPPROTO_IPIP:
3073 case IPPROTO_IPV6:
3074 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3075 l4.hdr = skb_inner_network_header(skb);
3076 break;
3077 default:
3078 if (*tx_flags & I40E_TX_FLAGS_TSO)
3079 return -1;
3080
3081 skb_checksum_help(skb);
3082 return 0;
3083 }
3084
3085
3086 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3087 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3088
3089
3090 ip.hdr = skb_inner_network_header(skb);
3091
3092
3093 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3094 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3095
3096
3097 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3098 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3099 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3100 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3101
3102
3103 *cd_tunneling |= tunnel;
3104
3105
3106 l4.hdr = skb_inner_transport_header(skb);
3107 l4_proto = 0;
3108
3109
3110 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3111 if (ip.v4->version == 4)
3112 *tx_flags |= I40E_TX_FLAGS_IPV4;
3113 if (ip.v6->version == 6)
3114 *tx_flags |= I40E_TX_FLAGS_IPV6;
3115 }
3116
3117
3118 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3119 l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
3123 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3124 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3125 I40E_TX_DESC_CMD_IIPT_IPV4;
3126 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3127 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3128
3129 exthdr = ip.hdr + sizeof(*ip.v6);
3130 l4_proto = ip.v6->nexthdr;
3131 if (l4.hdr != exthdr)
3132 ipv6_skip_exthdr(skb, exthdr - skb->data,
3133 &l4_proto, &frag_off);
3134 }
3135
3136
3137 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3138
3139
3140 switch (l4_proto) {
3141 case IPPROTO_TCP:
3142
3143 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3144 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3145 break;
3146 case IPPROTO_SCTP:
3147
3148 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3149 offset |= (sizeof(struct sctphdr) >> 2) <<
3150 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3151 break;
3152 case IPPROTO_UDP:
3153
3154 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3155 offset |= (sizeof(struct udphdr) >> 2) <<
3156 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3157 break;
3158 default:
3159 if (*tx_flags & I40E_TX_FLAGS_TSO)
3160 return -1;
3161 skb_checksum_help(skb);
3162 return 0;
3163 }
3164
3165 *td_cmd |= cmd;
3166 *td_offset |= offset;
3167
3168 return 1;
3169}
3170
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
3178static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3179 const u64 cd_type_cmd_tso_mss,
3180 const u32 cd_tunneling, const u32 cd_l2tag2)
3181{
3182 struct i40e_tx_context_desc *context_desc;
3183 int i = tx_ring->next_to_use;
3184
3185 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3186 !cd_tunneling && !cd_l2tag2)
3187 return;
3188
3189
3190 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3191
3192 i++;
3193 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3194
3195
3196 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3197 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3198 context_desc->rsvd = cpu_to_le16(0);
3199 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3200}
3201
/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
3209int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3210{
3211 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
3213 smp_mb();
3214
	/* Check again in a case another CPU has just made room available. */
3216 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3217 return -EBUSY;
3218
	/* A reprieve! - use start_queue because it doesn't call schedule */
3220 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3221 ++tx_ring->tx_stats.restart_queue;
3222 return 0;
3223}
3224
/**
 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb:      send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
3238bool __i40e_chk_linearize(struct sk_buff *skb)
3239{
3240 const struct skb_frag_struct *frag, *stale;
3241 int nr_frags, sum;
3242
3243
3244 nr_frags = skb_shinfo(skb)->nr_frags;
3245 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3246 return false;
3247
	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
3251 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3252 frag = &skb_shinfo(skb)->frags[0];
3253
	/* Initialize size to the negative value of gso_size minus 1.  We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as we need 7 for the skb head
	 */
3260 sum = 1 - skb_shinfo(skb)->gso_size;
3261
3262
3263 sum += skb_frag_size(frag++);
3264 sum += skb_frag_size(frag++);
3265 sum += skb_frag_size(frag++);
3266 sum += skb_frag_size(frag++);
3267 sum += skb_frag_size(frag++);
3268
	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
3272 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3273 int stale_size = skb_frag_size(stale);
3274
3275 sum += skb_frag_size(frag++);
3276
		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
3283 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3284 int align_pad = -(stale->page_offset) &
3285 (I40E_MAX_READ_REQ_SIZE - 1);
3286
3287 sum -= align_pad;
3288 stale_size -= align_pad;
3289
3290 do {
3291 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3292 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3293 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3294 }
3295
		/* if sum is negative we failed to make sufficient progress */
3297 if (sum < 0)
3298 return true;
3299
3300 if (!nr_frags--)
3301 break;
3302
3303 sum -= stale_size;
3304 }
3305
3306 return false;
3307}
3308
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:   ring to send buffer on
 * @skb:       send buffer
 * @first:     first buffer info buffer to use
 * @tx_flags:  collected send information
 * @hdr_len:   size of the packet header
 * @td_cmd:    the command field in the descriptor
 * @td_offset: offset for checksum or crc
 *
 * Returns 0 on success, -1 on failure to DMA
 **/
3321static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3322 struct i40e_tx_buffer *first, u32 tx_flags,
3323 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3324{
3325 unsigned int data_len = skb->data_len;
3326 unsigned int size = skb_headlen(skb);
3327 struct skb_frag_struct *frag;
3328 struct i40e_tx_buffer *tx_bi;
3329 struct i40e_tx_desc *tx_desc;
3330 u16 i = tx_ring->next_to_use;
3331 u32 td_tag = 0;
3332 dma_addr_t dma;
3333 u16 desc_count = 1;
3334
3335 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3336 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3337 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3338 I40E_TX_FLAGS_VLAN_SHIFT;
3339 }
3340
3341 first->tx_flags = tx_flags;
3342
3343 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3344
3345 tx_desc = I40E_TX_DESC(tx_ring, i);
3346 tx_bi = first;
3347
3348 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3349 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3350
3351 if (dma_mapping_error(tx_ring->dev, dma))
3352 goto dma_error;
3353
3354
3355 dma_unmap_len_set(tx_bi, len, size);
3356 dma_unmap_addr_set(tx_bi, dma, dma);
3357
3358
3359 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3360 tx_desc->buffer_addr = cpu_to_le64(dma);
3361
3362 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3363 tx_desc->cmd_type_offset_bsz =
3364 build_ctob(td_cmd, td_offset,
3365 max_data, td_tag);
3366
3367 tx_desc++;
3368 i++;
3369 desc_count++;
3370
3371 if (i == tx_ring->count) {
3372 tx_desc = I40E_TX_DESC(tx_ring, 0);
3373 i = 0;
3374 }
3375
3376 dma += max_data;
3377 size -= max_data;
3378
3379 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3380 tx_desc->buffer_addr = cpu_to_le64(dma);
3381 }
3382
3383 if (likely(!data_len))
3384 break;
3385
3386 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3387 size, td_tag);
3388
3389 tx_desc++;
3390 i++;
3391 desc_count++;
3392
3393 if (i == tx_ring->count) {
3394 tx_desc = I40E_TX_DESC(tx_ring, 0);
3395 i = 0;
3396 }
3397
3398 size = skb_frag_size(frag);
3399 data_len -= size;
3400
3401 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3402 DMA_TO_DEVICE);
3403
3404 tx_bi = &tx_ring->tx_bi[i];
3405 }
3406
3407 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3408
3409 i++;
3410 if (i == tx_ring->count)
3411 i = 0;
3412
3413 tx_ring->next_to_use = i;
3414
3415 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3416
	/* write last descriptor with EOP bit */
3418 td_cmd |= I40E_TX_DESC_CMD_EOP;
3419
	/* We OR these values together to check both against 4 (WB_STRIDE)
	 * below. This is safe since we don't re-use desc_count afterwards.
	 */
3423 desc_count |= ++tx_ring->packet_stride;
3424
3425 if (desc_count >= WB_STRIDE) {
		/* write last descriptor with RS bit set */
3427 td_cmd |= I40E_TX_DESC_CMD_RS;
3428 tx_ring->packet_stride = 0;
3429 }
3430
3431 tx_desc->cmd_type_offset_bsz =
3432 build_ctob(td_cmd, td_offset, size, td_tag);
3433
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
3440 wmb();
3441
	/* set next_to_watch to next free descriptor */
3443 first->next_to_watch = tx_desc;
3444
3445
3446 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
3447 writel(i, tx_ring->tail);
3448
		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
3452 mmiowb();
3453 }
3454
3455 return 0;
3456
3457dma_error:
3458 dev_info(tx_ring->dev, "TX DMA map failed\n");
3459
3460
3461 for (;;) {
3462 tx_bi = &tx_ring->tx_bi[i];
3463 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3464 if (tx_bi == first)
3465 break;
3466 if (i == 0)
3467 i = tx_ring->count;
3468 i--;
3469 }
3470
3471 tx_ring->next_to_use = i;
3472
3473 return -1;
3474}
3475
/**
 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdp:      data to transmit
 * @xdp_ring: XDP Tx ring
 **/
3481static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
3482 struct i40e_ring *xdp_ring)
3483{
3484 u32 size = xdp->data_end - xdp->data;
3485 u16 i = xdp_ring->next_to_use;
3486 struct i40e_tx_buffer *tx_bi;
3487 struct i40e_tx_desc *tx_desc;
3488 dma_addr_t dma;
3489
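	/* bail out if the ring has no free descriptors left */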
3490 if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
3491 xdp_ring->tx_stats.tx_busy++;
3492 return I40E_XDP_CONSUMED;
3493 }
3494
3495 dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
3496 if (dma_mapping_error(xdp_ring->dev, dma))
3497 return I40E_XDP_CONSUMED;
3498
3499 tx_bi = &xdp_ring->tx_bi[i];
3500 tx_bi->bytecount = size;
3501 tx_bi->gso_segs = 1;
3502 tx_bi->raw_buf = xdp->data;
3503
3504
3505 dma_unmap_len_set(tx_bi, len, size);
3506 dma_unmap_addr_set(tx_bi, dma, dma);
3507
3508 tx_desc = I40E_TX_DESC(xdp_ring, i);
3509 tx_desc->buffer_addr = cpu_to_le64(dma);
3510 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3511 | I40E_TXD_CMD,
3512 0, size, 0);
3513
	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
3517 smp_wmb();
3518
3519 i++;
3520 if (i == xdp_ring->count)
3521 i = 0;
3522
3523 tx_bi->next_to_watch = tx_desc;
3524 xdp_ring->next_to_use = i;
3525
3526 return I40E_XDP_TX;
3527}
3528
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
3536static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3537 struct i40e_ring *tx_ring)
3538{
3539 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3540 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3541 struct i40e_tx_buffer *first;
3542 u32 td_offset = 0;
3543 u32 tx_flags = 0;
3544 __be16 protocol;
3545 u32 td_cmd = 0;
3546 u8 hdr_len = 0;
3547 int tso, count;
3548 int tsyn;
3549
3550
3551 prefetch(skb->data);
3552
3553 i40e_trace(xmit_frame_ring, skb, tx_ring);
3554
3555 count = i40e_xmit_descriptor_count(skb);
3556 if (i40e_chk_linearize(skb, count)) {
3557 if (__skb_linearize(skb)) {
3558 dev_kfree_skb_any(skb);
3559 return NETDEV_TX_OK;
3560 }
3561 count = i40e_txd_use_count(skb->len);
3562 tx_ring->tx_stats.tx_linearize++;
3563 }
3564
	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
3571 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3572 tx_ring->tx_stats.tx_busy++;
3573 return NETDEV_TX_BUSY;
3574 }
3575
3576
3577 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3578 first->skb = skb;
3579 first->bytecount = skb->len;
3580 first->gso_segs = 1;
3581
3582
3583 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3584 goto out_drop;
3585
3586
3587 protocol = vlan_get_protocol(skb);
3588
3589
3590 if (protocol == htons(ETH_P_IP))
3591 tx_flags |= I40E_TX_FLAGS_IPV4;
3592 else if (protocol == htons(ETH_P_IPV6))
3593 tx_flags |= I40E_TX_FLAGS_IPV6;
3594
3595 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3596
3597 if (tso < 0)
3598 goto out_drop;
3599 else if (tso)
3600 tx_flags |= I40E_TX_FLAGS_TSO;
3601
	/* Always offload the checksum, since it's in the data descriptor */
3603 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3604 tx_ring, &cd_tunneling);
3605 if (tso < 0)
3606 goto out_drop;
3607
3608 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3609
3610 if (tsyn)
3611 tx_flags |= I40E_TX_FLAGS_TSYN;
3612
3613 skb_tx_timestamp(skb);
3614
3615
3616 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3617
3618 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3619 cd_tunneling, cd_l2tag2);
3620
	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
3625 i40e_atr(tx_ring, skb, tx_flags);
3626
3627 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3628 td_cmd, td_offset))
3629 goto cleanup_tx_tstamp;
3630
3631 return NETDEV_TX_OK;
3632
3633out_drop:
3634 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3635 dev_kfree_skb_any(first->skb);
3636 first->skb = NULL;
3637cleanup_tx_tstamp:
3638 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3639 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3640
3641 dev_kfree_skb_any(pf->ptp_tx_skb);
3642 pf->ptp_tx_skb = NULL;
3643 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3644 }
3645
3646 return NETDEV_TX_OK;
3647}
3648
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
3656netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3657{
3658 struct i40e_netdev_priv *np = netdev_priv(netdev);
3659 struct i40e_vsi *vsi = np->vsi;
3660 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3661
	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
3665 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3666 return NETDEV_TX_OK;
3667
3668 return i40e_xmit_frame_ring(skb, tx_ring);
3669}
3670
/**
 * i40e_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @xdp: XDP buffer
 *
 * Returns zero if sent, else an error code
 **/
3678int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
3679{
3680 struct i40e_netdev_priv *np = netdev_priv(dev);
3681 unsigned int queue_index = smp_processor_id();
3682 struct i40e_vsi *vsi = np->vsi;
3683 int err;
3684
3685 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3686 return -ENETDOWN;
3687
3688 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
3689 return -ENXIO;
3690
3691 err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
3692 if (err != I40E_XDP_TX)
3693 return -ENOSPC;
3694
3695 return 0;
3696}
3697
/**
 * i40e_xdp_flush - Implements ndo_xdp_flush
 * @dev: netdev
 **/
3702void i40e_xdp_flush(struct net_device *dev)
3703{
3704 struct i40e_netdev_priv *np = netdev_priv(dev);
3705 unsigned int queue_index = smp_processor_id();
3706 struct i40e_vsi *vsi = np->vsi;
3707
3708 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3709 return;
3710
3711 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
3712 return;
3713
3714 i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
3715}
3716