// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#include <linux/prefetch.h>
5#include <linux/bpf_trace.h>
6#include <net/xdp.h>
7#include "i40e.h"
8#include "i40e_trace.h"
9#include "i40e_prototype.h"
10#include "i40e_txrx_common.h"
11#include "i40e_xsk.h"
12
13#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
14
15
16
17
18
19
20
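/**
 * i40e_fdir - Generate a Flow Director filter programming descriptor
 * @tx_ring: Tx ring to place the programming descriptor on
 * @fdata: Flow Director filter data
 * @add: true to add/update the rule, false to remove it
 **/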
21static void i40e_fdir(struct i40e_ring *tx_ring,
22 struct i40e_fdir_filter *fdata, bool add)
23{
24 struct i40e_filter_program_desc *fdir_desc;
25 struct i40e_pf *pf = tx_ring->vsi->back;
26 u32 flex_ptype, dtype_cmd;
27 u16 i;
28
29
30 i = tx_ring->next_to_use;
31 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
32
33 i++;
34 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
35
36 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
37 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
38
39 flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
40 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
41
42 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
43 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
48
49 flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
50 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
51 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
52
53 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
54
55 dtype_cmd |= add ?
56 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
57 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
58 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
59 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
60
61 dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
62 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
63
64 dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
65 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
66
67 if (fdata->cnt_index) {
68 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
69 dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
70 ((u32)fdata->cnt_index <<
71 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
72 }
73
74 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
75 fdir_desc->rsvd = cpu_to_le32(0);
76 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
77 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
78}
79
80#define I40E_FD_CLEAN_DELAY 10
81
82
83
84
85
86
87
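/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: filter parameters for the programming descriptor
 * @raw_packet: pre-allocated dummy packet buffer for the filter
 * @pf: the PF pointer
 * @add: true for add/update, false for remove
 **/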
88static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
89 u8 *raw_packet, struct i40e_pf *pf,
90 bool add)
91{
92 struct i40e_tx_buffer *tx_buf, *first;
93 struct i40e_tx_desc *tx_desc;
94 struct i40e_ring *tx_ring;
95 struct i40e_vsi *vsi;
96 struct device *dev;
97 dma_addr_t dma;
98 u32 td_cmd = 0;
99 u16 i;
100
101
102 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
103 if (!vsi)
104 return -ENOENT;
105
106 tx_ring = vsi->tx_rings[0];
107 dev = tx_ring->dev;
108
109
110 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
111 if (!i)
112 return -EAGAIN;
113 msleep_interruptible(1);
114 }
115
116 dma = dma_map_single(dev, raw_packet,
117 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
118 if (dma_mapping_error(dev, dma))
119 goto dma_fail;
120
121
122 i = tx_ring->next_to_use;
123 first = &tx_ring->tx_bi[i];
124 i40e_fdir(tx_ring, fdir_data, add);
125
126
127 i = tx_ring->next_to_use;
128 tx_desc = I40E_TX_DESC(tx_ring, i);
129 tx_buf = &tx_ring->tx_bi[i];
130
131 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
132
133 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
134
135
136 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
137 dma_unmap_addr_set(tx_buf, dma, dma);
138
139 tx_desc->buffer_addr = cpu_to_le64(dma);
140 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
141
142 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
143 tx_buf->raw_buf = (void *)raw_packet;
144
145 tx_desc->cmd_type_offset_bsz =
146 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
147
148
149
150
151 wmb();
152
153
154 first->next_to_watch = tx_desc;
155
156 writel(tx_ring->next_to_use, tx_ring->tail);
157 return 0;
158
159dma_fail:
160 return -1;
161}
162
163#define IP_HEADER_OFFSET 14
164#define I40E_UDPIP_DUMMY_PACKET_LEN 42
165
166
167
168
169
170
171
172
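/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filter was successfully added or removed
 **/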
173static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
174 struct i40e_fdir_filter *fd_data,
175 bool add)
176{
177 struct i40e_pf *pf = vsi->back;
178 struct udphdr *udp;
179 struct iphdr *ip;
180 u8 *raw_packet;
181 int ret;
182 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
183 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
184 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
185
186 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
187 if (!raw_packet)
188 return -ENOMEM;
189 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
190
191 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
192 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
193 + sizeof(struct iphdr));
194
195 ip->daddr = fd_data->dst_ip;
196 udp->dest = fd_data->dst_port;
197 ip->saddr = fd_data->src_ip;
198 udp->source = fd_data->src_port;
199
200 if (fd_data->flex_filter) {
201 u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
202 __be16 pattern = fd_data->flex_word;
203 u16 off = fd_data->flex_offset;
204
205 *((__force __be16 *)(payload + off)) = pattern;
206 }
207
208 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
209 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
210 if (ret) {
211 dev_info(&pf->pdev->dev,
212 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
213 fd_data->pctype, fd_data->fd_id, ret);
214
215 kfree(raw_packet);
216 return -EOPNOTSUPP;
217 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
218 if (add)
219 dev_info(&pf->pdev->dev,
220 "Filter OK for PCTYPE %d loc = %d\n",
221 fd_data->pctype, fd_data->fd_id);
222 else
223 dev_info(&pf->pdev->dev,
224 "Filter deleted for PCTYPE %d loc = %d\n",
225 fd_data->pctype, fd_data->fd_id);
226 }
227
228 if (add)
229 pf->fd_udp4_filter_cnt++;
230 else
231 pf->fd_udp4_filter_cnt--;
232
233 return 0;
234}
235
236#define I40E_TCPIP_DUMMY_PACKET_LEN 54
237
238
239
240
241
242
243
244
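/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filter was successfully added or removed
 **/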
245static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
246 struct i40e_fdir_filter *fd_data,
247 bool add)
248{
249 struct i40e_pf *pf = vsi->back;
250 struct tcphdr *tcp;
251 struct iphdr *ip;
252 u8 *raw_packet;
253 int ret;
254
255 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
256 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
257 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
258 0x0, 0x72, 0, 0, 0, 0};
259
260 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
261 if (!raw_packet)
262 return -ENOMEM;
263 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
264
265 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
266 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
267 + sizeof(struct iphdr));
268
269 ip->daddr = fd_data->dst_ip;
270 tcp->dest = fd_data->dst_port;
271 ip->saddr = fd_data->src_ip;
272 tcp->source = fd_data->src_port;
273
274 if (fd_data->flex_filter) {
275 u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
276 __be16 pattern = fd_data->flex_word;
277 u16 off = fd_data->flex_offset;
278
279 *((__force __be16 *)(payload + off)) = pattern;
280 }
281
282 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
283 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
284 if (ret) {
285 dev_info(&pf->pdev->dev,
286 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
287 fd_data->pctype, fd_data->fd_id, ret);
288
289 kfree(raw_packet);
290 return -EOPNOTSUPP;
291 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
292 if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
294 fd_data->pctype, fd_data->fd_id);
295 else
296 dev_info(&pf->pdev->dev,
297 "Filter deleted for PCTYPE %d loc = %d\n",
298 fd_data->pctype, fd_data->fd_id);
299 }
300
301 if (add) {
302 pf->fd_tcp4_filter_cnt++;
303 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
304 I40E_DEBUG_FD & pf->hw.debug_mask)
305 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
306 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
307 } else {
308 pf->fd_tcp4_filter_cnt--;
309 }
310
311 return 0;
312}
313
314#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
315
316
317
318
319
320
321
322
323
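/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filter was successfully added or removed
 **/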
324static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
325 struct i40e_fdir_filter *fd_data,
326 bool add)
327{
328 struct i40e_pf *pf = vsi->back;
329 struct sctphdr *sctp;
330 struct iphdr *ip;
331 u8 *raw_packet;
332 int ret;
333
334 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
335 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
336 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
337
338 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
339 if (!raw_packet)
340 return -ENOMEM;
341 memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
342
343 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
344 sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
345 + sizeof(struct iphdr));
346
347 ip->daddr = fd_data->dst_ip;
348 sctp->dest = fd_data->dst_port;
349 ip->saddr = fd_data->src_ip;
350 sctp->source = fd_data->src_port;
351
352 if (fd_data->flex_filter) {
353 u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
354 __be16 pattern = fd_data->flex_word;
355 u16 off = fd_data->flex_offset;
356
357 *((__force __be16 *)(payload + off)) = pattern;
358 }
359
360 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
361 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
362 if (ret) {
363 dev_info(&pf->pdev->dev,
364 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
365 fd_data->pctype, fd_data->fd_id, ret);
366
367 kfree(raw_packet);
368 return -EOPNOTSUPP;
369 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
370 if (add)
371 dev_info(&pf->pdev->dev,
372 "Filter OK for PCTYPE %d loc = %d\n",
373 fd_data->pctype, fd_data->fd_id);
374 else
375 dev_info(&pf->pdev->dev,
376 "Filter deleted for PCTYPE %d loc = %d\n",
377 fd_data->pctype, fd_data->fd_id);
378 }
379
380 if (add)
381 pf->fd_sctp4_filter_cnt++;
382 else
383 pf->fd_sctp4_filter_cnt--;
384
385 return 0;
386}
387
388#define I40E_IP_DUMMY_PACKET_LEN 34
389
390
391
392
393
394
395
396
397
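/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Programs one filter per matching PCTYPE, from IPV4_OTHER through
 * FRAG_IPV4.  Returns 0 if the filters were successfully added or removed
 **/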
398static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
399 struct i40e_fdir_filter *fd_data,
400 bool add)
401{
402 struct i40e_pf *pf = vsi->back;
403 struct iphdr *ip;
404 u8 *raw_packet;
405 int ret;
406 int i;
407 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
408 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
409 0, 0, 0, 0};
410
411 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
412 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
413 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
414 if (!raw_packet)
415 return -ENOMEM;
416 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
417 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
418
419 ip->saddr = fd_data->src_ip;
420 ip->daddr = fd_data->dst_ip;
421 ip->protocol = 0;
422
423 if (fd_data->flex_filter) {
424 u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
425 __be16 pattern = fd_data->flex_word;
426 u16 off = fd_data->flex_offset;
427
428 *((__force __be16 *)(payload + off)) = pattern;
429 }
430
431 fd_data->pctype = i;
432 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
433 if (ret) {
434 dev_info(&pf->pdev->dev,
435 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
436 fd_data->pctype, fd_data->fd_id, ret);
437
438
439
440 kfree(raw_packet);
441 return -EOPNOTSUPP;
442 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
443 if (add)
444 dev_info(&pf->pdev->dev,
445 "Filter OK for PCTYPE %d loc = %d\n",
446 fd_data->pctype, fd_data->fd_id);
447 else
448 dev_info(&pf->pdev->dev,
449 "Filter deleted for PCTYPE %d loc = %d\n",
450 fd_data->pctype, fd_data->fd_id);
451 }
452 }
453
454 if (add)
455 pf->fd_ip4_filter_cnt++;
456 else
457 pf->fd_ip4_filter_cnt--;
458
459 return 0;
460}
461
462
463
464
465
466
467
468
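/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: filter to add or delete
 * @add: true adds a filter, false removes it
 **/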
469int i40e_add_del_fdir(struct i40e_vsi *vsi,
470 struct i40e_fdir_filter *input, bool add)
471{
472 struct i40e_pf *pf = vsi->back;
473 int ret;
474
475 switch (input->flow_type & ~FLOW_EXT) {
476 case TCP_V4_FLOW:
477 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
478 break;
479 case UDP_V4_FLOW:
480 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
481 break;
482 case SCTP_V4_FLOW:
483 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
484 break;
485 case IP_USER_FLOW:
486 switch (input->ip4_proto) {
487 case IPPROTO_TCP:
488 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
489 break;
490 case IPPROTO_UDP:
491 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
492 break;
493 case IPPROTO_SCTP:
494 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
495 break;
496 case IPPROTO_IP:
497 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
498 break;
499 default:
500
501 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
502 input->ip4_proto);
503 return -EINVAL;
504 }
505 break;
506 default:
507 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
508 input->flow_type);
509 return -EINVAL;
510 }
511
512
513
514
515
516
517
518 return ret;
519}
520
521
522
523
524
525
526
527
528
529
530
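/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 after le_to_cpu
 * @prog_id: the id originally used for programming
 *
 * Verifies whether the Flow Director programming or invalidation requested
 * by software completed in hardware, and takes action on failures.
 **/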
531static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
532 u64 qword1, u8 prog_id)
533{
534 struct i40e_pf *pf = rx_ring->vsi->back;
535 struct pci_dev *pdev = pf->pdev;
536 struct i40e_32b_rx_wb_qw0 *qw0;
537 u32 fcnt_prog, fcnt_avail;
538 u32 error;
539
540 qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;
541 error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
542 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
543
544 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
545 pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
546 if (qw0->hi_dword.fd_id != 0 ||
547 (I40E_DEBUG_FD & pf->hw.debug_mask))
548 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
549 pf->fd_inv);
550
551
552
553
554
555
556
557 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
558 return;
559
560 pf->fd_add_err++;
561
562 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
563
564 if (qw0->hi_dword.fd_id == 0 &&
565 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
566
567
568
569
570
571
572 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
573 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
574 }
575
576
577 fcnt_prog = i40e_get_global_fd_count(pf);
578 fcnt_avail = pf->fdir_pf_filter_count;
579
580
581
582
583 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
584 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
585 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
586 pf->state))
587 if (I40E_DEBUG_FD & pf->hw.debug_mask)
588 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
589 }
590 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
591 if (I40E_DEBUG_FD & pf->hw.debug_mask)
592 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
593 qw0->hi_dword.fd_id);
594 }
595}
596
597
598
599
600
601
602static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
603 struct i40e_tx_buffer *tx_buffer)
604{
605 if (tx_buffer->skb) {
606 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
607 kfree(tx_buffer->raw_buf);
608 else if (ring_is_xdp(ring))
609 xdp_return_frame(tx_buffer->xdpf);
610 else
611 dev_kfree_skb_any(tx_buffer->skb);
612 if (dma_unmap_len(tx_buffer, len))
613 dma_unmap_single(ring->dev,
614 dma_unmap_addr(tx_buffer, dma),
615 dma_unmap_len(tx_buffer, len),
616 DMA_TO_DEVICE);
617 } else if (dma_unmap_len(tx_buffer, len)) {
618 dma_unmap_page(ring->dev,
619 dma_unmap_addr(tx_buffer, dma),
620 dma_unmap_len(tx_buffer, len),
621 DMA_TO_DEVICE);
622 }
623
624 tx_buffer->next_to_watch = NULL;
625 tx_buffer->skb = NULL;
626 dma_unmap_len_set(tx_buffer, len, 0);
627
628}
629
630
631
632
633
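/**
 * i40e_clean_tx_ring - Free any pending Tx buffers
 * @tx_ring: ring to be cleaned
 **/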
634void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
635{
636 unsigned long bi_size;
637 u16 i;
638
639 if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
640 i40e_xsk_clean_tx_ring(tx_ring);
641 } else {
642
643 if (!tx_ring->tx_bi)
644 return;
645
646
647 for (i = 0; i < tx_ring->count; i++)
648 i40e_unmap_and_free_tx_resource(tx_ring,
649 &tx_ring->tx_bi[i]);
650 }
651
652 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
653 memset(tx_ring->tx_bi, 0, bi_size);
654
655
656 memset(tx_ring->desc, 0, tx_ring->size);
657
658 tx_ring->next_to_use = 0;
659 tx_ring->next_to_clean = 0;
660
661 if (!tx_ring->netdev)
662 return;
663
664
665 netdev_tx_reset_queue(txring_txq(tx_ring));
666}
667
668
669
670
671
672
673
674void i40e_free_tx_resources(struct i40e_ring *tx_ring)
675{
676 i40e_clean_tx_ring(tx_ring);
677 kfree(tx_ring->tx_bi);
678 tx_ring->tx_bi = NULL;
679
680 if (tx_ring->desc) {
681 dma_free_coherent(tx_ring->dev, tx_ring->size,
682 tx_ring->desc, tx_ring->dma);
683 tx_ring->desc = NULL;
684 }
685}
686
687
688
689
690
691
692
693
694
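/**
 * i40e_get_tx_pending - how many Tx descriptors are not yet processed
 * @ring: the ring of descriptors
 * @in_sw: use the software (next_to_clean/next_to_use) copies instead of
 *         the hardware head/tail values
 **/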
695u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
696{
697 u32 head, tail;
698
699 if (!in_sw) {
700 head = i40e_get_head(ring);
701 tail = readl(ring->tail);
702 } else {
703 head = ring->next_to_clean;
704 tail = ring->next_to_use;
705 }
706
707 if (head != tail)
708 return (head < tail) ?
709 tail - head : (tail + ring->count - head);
710
711 return 0;
712}
713
714
715
716
717
718
719
720
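/**
 * i40e_detect_recover_hung - Function to detect and recover hung queues
 * @vsi: pointer to vsi struct with Tx queues
 *
 * Checks each Tx queue of the VSI for progress; if a queue appears hung,
 * recovery is triggered by forcing a write-back/software interrupt.
 **/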
721void i40e_detect_recover_hung(struct i40e_vsi *vsi)
722{
723 struct i40e_ring *tx_ring = NULL;
724 struct net_device *netdev;
725 unsigned int i;
726 int packets;
727
728 if (!vsi)
729 return;
730
731 if (test_bit(__I40E_VSI_DOWN, vsi->state))
732 return;
733
734 netdev = vsi->netdev;
735 if (!netdev)
736 return;
737
738 if (!netif_carrier_ok(netdev))
739 return;
740
741 for (i = 0; i < vsi->num_queue_pairs; i++) {
742 tx_ring = vsi->tx_rings[i];
743 if (tx_ring && tx_ring->desc) {
744
745
746
747
748
749
750
751 packets = tx_ring->stats.packets & INT_MAX;
752 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
753 i40e_force_wb(vsi, tx_ring->q_vector);
754 continue;
755 }
756
757
758
759
760 smp_rmb();
761 tx_ring->tx_stats.prev_pkt_ctr =
762 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
763 }
764 }
765}
766
767
768
769
770
771
772
773
774
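/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: used when freeing skbs under NAPI
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/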
775static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
776 struct i40e_ring *tx_ring, int napi_budget)
777{
778 int i = tx_ring->next_to_clean;
779 struct i40e_tx_buffer *tx_buf;
780 struct i40e_tx_desc *tx_head;
781 struct i40e_tx_desc *tx_desc;
782 unsigned int total_bytes = 0, total_packets = 0;
783 unsigned int budget = vsi->work_limit;
784
785 tx_buf = &tx_ring->tx_bi[i];
786 tx_desc = I40E_TX_DESC(tx_ring, i);
787 i -= tx_ring->count;
788
789 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
790
791 do {
792 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
793
794
795 if (!eop_desc)
796 break;
797
798
799 smp_rmb();
800
801 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
802
803 if (tx_head == tx_desc)
804 break;
805
806
807 tx_buf->next_to_watch = NULL;
808
809
810 total_bytes += tx_buf->bytecount;
811 total_packets += tx_buf->gso_segs;
812
813
814 if (ring_is_xdp(tx_ring))
815 xdp_return_frame(tx_buf->xdpf);
816 else
817 napi_consume_skb(tx_buf->skb, napi_budget);
818
819
820 dma_unmap_single(tx_ring->dev,
821 dma_unmap_addr(tx_buf, dma),
822 dma_unmap_len(tx_buf, len),
823 DMA_TO_DEVICE);
824
825
826 tx_buf->skb = NULL;
827 dma_unmap_len_set(tx_buf, len, 0);
828
829
830 while (tx_desc != eop_desc) {
831 i40e_trace(clean_tx_irq_unmap,
832 tx_ring, tx_desc, tx_buf);
833
834 tx_buf++;
835 tx_desc++;
836 i++;
837 if (unlikely(!i)) {
838 i -= tx_ring->count;
839 tx_buf = tx_ring->tx_bi;
840 tx_desc = I40E_TX_DESC(tx_ring, 0);
841 }
842
843
844 if (dma_unmap_len(tx_buf, len)) {
845 dma_unmap_page(tx_ring->dev,
846 dma_unmap_addr(tx_buf, dma),
847 dma_unmap_len(tx_buf, len),
848 DMA_TO_DEVICE);
849 dma_unmap_len_set(tx_buf, len, 0);
850 }
851 }
852
853
854 tx_buf++;
855 tx_desc++;
856 i++;
857 if (unlikely(!i)) {
858 i -= tx_ring->count;
859 tx_buf = tx_ring->tx_bi;
860 tx_desc = I40E_TX_DESC(tx_ring, 0);
861 }
862
863 prefetch(tx_desc);
864
865
866 budget--;
867 } while (likely(budget));
868
869 i += tx_ring->count;
870 tx_ring->next_to_clean = i;
871 i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
872 i40e_arm_wb(tx_ring, vsi, budget);
873
874 if (ring_is_xdp(tx_ring))
875 return !!budget;
876
877
878 netdev_tx_completed_queue(txring_txq(tx_ring),
879 total_packets, total_bytes);
880
881#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
882 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
883 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
884
885
886
887 smp_mb();
888 if (__netif_subqueue_stopped(tx_ring->netdev,
889 tx_ring->queue_index) &&
890 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
891 netif_wake_subqueue(tx_ring->netdev,
892 tx_ring->queue_index);
893 ++tx_ring->tx_stats.restart_queue;
894 }
895 }
896
897 return !!budget;
898}
899
900
901
902
903
904
905
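/**
 * i40e_enable_wb_on_itr - Arm hardware to do a write-back on the next ITR
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 * Interrupts are not enabled by this routine.
 **/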
906static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
907 struct i40e_q_vector *q_vector)
908{
909 u16 flags = q_vector->tx.ring[0].flags;
910 u32 val;
911
912 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
913 return;
914
915 if (q_vector->arm_wb_state)
916 return;
917
918 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
919 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
920 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK;
921
922 wr32(&vsi->back->hw,
923 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
924 val);
925 } else {
926 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
927 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK;
928
929 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
930 }
931 q_vector->arm_wb_state = true;
932}
933
934
935
936
937
938
939
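/**
 * i40e_force_wb - Issue a software interrupt so hardware does a write-back
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/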
940void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
941{
942 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
943 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
944 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
945 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
946 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
947
948
949 wr32(&vsi->back->hw,
950 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
951 } else {
952 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
953 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
954 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
955 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
956
957
958 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
959 }
960}
961
962static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
963 struct i40e_ring_container *rc)
964{
965 return &q_vector->rx == rc;
966}
967
968static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
969{
970 unsigned int divisor;
971
972 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
973 case I40E_LINK_SPEED_40GB:
974 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
975 break;
976 case I40E_LINK_SPEED_25GB:
977 case I40E_LINK_SPEED_20GB:
978 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
979 break;
980 default:
981 case I40E_LINK_SPEED_10GB:
982 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
983 break;
984 case I40E_LINK_SPEED_1GB:
985 case I40E_LINK_SPEED_100MB:
986 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
987 break;
988 }
989
990 return divisor;
991}
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
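/**
 * i40e_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on the packet and byte counts seen since
 * the last update, biasing Rx toward low latency and Tx toward bulk
 * throughput, then scaling by the average frame size and link speed.
 **/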
1006static void i40e_update_itr(struct i40e_q_vector *q_vector,
1007 struct i40e_ring_container *rc)
1008{
1009 unsigned int avg_wire_size, packets, bytes, itr;
1010 unsigned long next_update = jiffies;
1011
1012
1013
1014
1015 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1016 return;
1017
1018
1019
1020
1021 itr = i40e_container_is_rx(q_vector, rc) ?
1022 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1023 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1024
1025
1026
1027
1028
1029
1030 if (time_after(next_update, rc->next_update))
1031 goto clear_counts;
1032
1033
1034
1035
1036
1037
1038
1039 if (q_vector->itr_countdown) {
1040 itr = rc->target_itr;
1041 goto clear_counts;
1042 }
1043
1044 packets = rc->total_packets;
1045 bytes = rc->total_bytes;
1046
1047 if (i40e_container_is_rx(q_vector, rc)) {
1048
1049
1050
1051
1052
1053 if (packets && packets < 4 && bytes < 9000 &&
1054 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1055 itr = I40E_ITR_ADAPTIVE_LATENCY;
1056 goto adjust_by_size;
1057 }
1058 } else if (packets < 4) {
1059
1060
1061
1062
1063
1064 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1065 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1066 I40E_ITR_ADAPTIVE_MAX_USECS)
1067 goto clear_counts;
1068 } else if (packets > 32) {
1069
1070
1071
1072 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1073 }
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083 if (packets < 56) {
1084 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1085 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1086 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1087 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1088 }
1089 goto clear_counts;
1090 }
1091
1092 if (packets <= 256) {
1093 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1094 itr &= I40E_ITR_MASK;
1095
1096
1097
1098
1099
1100 if (packets <= 112)
1101 goto clear_counts;
1102
1103
1104
1105
1106
1107
1108 itr /= 2;
1109 itr &= I40E_ITR_MASK;
1110 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1111 itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1112
1113 goto clear_counts;
1114 }
1115
1116
1117
1118
1119
1120
1121
1122 itr = I40E_ITR_ADAPTIVE_BULK;
1123
1124adjust_by_size:
1125
1126
1127
1128
1129
1130 avg_wire_size = bytes / packets;
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147 if (avg_wire_size <= 60) {
1148
1149 avg_wire_size = 4096;
1150 } else if (avg_wire_size <= 380) {
1151
1152 avg_wire_size *= 40;
1153 avg_wire_size += 1696;
1154 } else if (avg_wire_size <= 1084) {
1155
1156 avg_wire_size *= 15;
1157 avg_wire_size += 11452;
1158 } else if (avg_wire_size <= 1980) {
1159
1160 avg_wire_size *= 5;
1161 avg_wire_size += 22420;
1162 } else {
1163
1164 avg_wire_size = 32256;
1165 }
1166
1167
1168
1169
1170 if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1171 avg_wire_size /= 2;
1172
1173
1174
1175
1176
1177
1178
1179
1180 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1181 I40E_ITR_ADAPTIVE_MIN_INC;
1182
1183 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1184 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1185 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1186 }
1187
1188clear_counts:
1189
1190 rc->target_itr = itr;
1191
1192
1193 rc->next_update = next_update + 1;
1194
1195 rc->total_bytes = 0;
1196 rc->total_packets = 0;
1197}
1198
1199static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
1200{
1201 return &rx_ring->rx_bi[idx];
1202}
1203
1204
1205
1206
1207
1208
1209
1210
1211static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1212 struct i40e_rx_buffer *old_buff)
1213{
1214 struct i40e_rx_buffer *new_buff;
1215 u16 nta = rx_ring->next_to_alloc;
1216
1217 new_buff = i40e_rx_bi(rx_ring, nta);
1218
1219
1220 nta++;
1221 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1222
1223
1224 new_buff->dma = old_buff->dma;
1225 new_buff->page = old_buff->page;
1226 new_buff->page_offset = old_buff->page_offset;
1227 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1228
1229 rx_ring->rx_stats.page_reuse_count++;
1230
1231
1232 old_buff->page = NULL;
1233}
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
1248 u64 qword1)
1249{
1250 u8 id;
1251
1252 id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1253 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1254
1255 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1256 i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
1257}
1258
1259
1260
1261
1262
1263
1264
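/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 **/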
1265int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1266{
1267 struct device *dev = tx_ring->dev;
1268 int bi_size;
1269
1270 if (!dev)
1271 return -ENOMEM;
1272
1273
1274 WARN_ON(tx_ring->tx_bi);
1275 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1276 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1277 if (!tx_ring->tx_bi)
1278 goto err;
1279
1280 u64_stats_init(&tx_ring->syncp);
1281
1282
1283 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1284
1285
1286
1287 tx_ring->size += sizeof(u32);
1288 tx_ring->size = ALIGN(tx_ring->size, 4096);
1289 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1290 &tx_ring->dma, GFP_KERNEL);
1291 if (!tx_ring->desc) {
1292 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1293 tx_ring->size);
1294 goto err;
1295 }
1296
1297 tx_ring->next_to_use = 0;
1298 tx_ring->next_to_clean = 0;
1299 tx_ring->tx_stats.prev_pkt_ctr = -1;
1300 return 0;
1301
1302err:
1303 kfree(tx_ring->tx_bi);
1304 tx_ring->tx_bi = NULL;
1305 return -ENOMEM;
1306}
1307
1308int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
1309{
1310 unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
1311
1312 rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
1313 return rx_ring->rx_bi ? 0 : -ENOMEM;
1314}
1315
1316static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
1317{
1318 memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
1319}
1320
1321
1322
1323
1324
1325void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1326{
1327 u16 i;
1328
1329
1330 if (!rx_ring->rx_bi)
1331 return;
1332
1333 if (rx_ring->skb) {
1334 dev_kfree_skb(rx_ring->skb);
1335 rx_ring->skb = NULL;
1336 }
1337
1338 if (rx_ring->xsk_umem) {
1339 i40e_xsk_clean_rx_ring(rx_ring);
1340 goto skip_free;
1341 }
1342
1343
1344 for (i = 0; i < rx_ring->count; i++) {
1345 struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
1346
1347 if (!rx_bi->page)
1348 continue;
1349
1350
1351
1352
1353 dma_sync_single_range_for_cpu(rx_ring->dev,
1354 rx_bi->dma,
1355 rx_bi->page_offset,
1356 rx_ring->rx_buf_len,
1357 DMA_FROM_DEVICE);
1358
1359
1360 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1361 i40e_rx_pg_size(rx_ring),
1362 DMA_FROM_DEVICE,
1363 I40E_RX_DMA_ATTR);
1364
1365 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1366
1367 rx_bi->page = NULL;
1368 rx_bi->page_offset = 0;
1369 }
1370
1371skip_free:
1372 if (rx_ring->xsk_umem)
1373 i40e_clear_rx_bi_zc(rx_ring);
1374 else
1375 i40e_clear_rx_bi(rx_ring);
1376
1377
1378 memset(rx_ring->desc, 0, rx_ring->size);
1379
1380 rx_ring->next_to_alloc = 0;
1381 rx_ring->next_to_clean = 0;
1382 rx_ring->next_to_use = 0;
1383}
1384
1385
1386
1387
1388
1389
1390
1391void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1392{
1393 i40e_clean_rx_ring(rx_ring);
1394 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1395 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1396 rx_ring->xdp_prog = NULL;
1397 kfree(rx_ring->rx_bi);
1398 rx_ring->rx_bi = NULL;
1399
1400 if (rx_ring->desc) {
1401 dma_free_coherent(rx_ring->dev, rx_ring->size,
1402 rx_ring->desc, rx_ring->dma);
1403 rx_ring->desc = NULL;
1404 }
1405}
1406
1407
1408
1409
1410
1411
1412
1413int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1414{
1415 struct device *dev = rx_ring->dev;
1416 int err;
1417
1418 u64_stats_init(&rx_ring->syncp);
1419
1420
1421 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1422 rx_ring->size = ALIGN(rx_ring->size, 4096);
1423 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1424 &rx_ring->dma, GFP_KERNEL);
1425
1426 if (!rx_ring->desc) {
1427 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1428 rx_ring->size);
1429 return -ENOMEM;
1430 }
1431
1432 rx_ring->next_to_alloc = 0;
1433 rx_ring->next_to_clean = 0;
1434 rx_ring->next_to_use = 0;
1435
1436
1437 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1438 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1439 rx_ring->queue_index);
1440 if (err < 0)
1441 return err;
1442 }
1443
1444 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1445
1446 return 0;
1447}
1448
1449
1450
1451
1452
1453
1454void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1455{
1456 rx_ring->next_to_use = val;
1457
1458
1459 rx_ring->next_to_alloc = val;
1460
1461
1462
1463
1464
1465
1466 wmb();
1467 writel(val, rx_ring->tail);
1468}
1469
1470
1471
1472
1473
1474
1475
1476static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1477{
1478 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1479}
1480
1481static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
1482 unsigned int size)
1483{
1484 unsigned int truesize;
1485
1486#if (PAGE_SIZE < 8192)
1487 truesize = i40e_rx_pg_size(rx_ring) / 2;
1488#else
1489 truesize = i40e_rx_offset(rx_ring) ?
1490 SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
1491 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
1492 SKB_DATA_ALIGN(size);
1493#endif
1494 return truesize;
1495}
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1506 struct i40e_rx_buffer *bi)
1507{
1508 struct page *page = bi->page;
1509 dma_addr_t dma;
1510
1511
1512 if (likely(page)) {
1513 rx_ring->rx_stats.page_reuse_count++;
1514 return true;
1515 }
1516
1517
1518 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1519 if (unlikely(!page)) {
1520 rx_ring->rx_stats.alloc_page_failed++;
1521 return false;
1522 }
1523
1524
1525 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1526 i40e_rx_pg_size(rx_ring),
1527 DMA_FROM_DEVICE,
1528 I40E_RX_DMA_ATTR);
1529
1530
1531
1532
1533 if (dma_mapping_error(rx_ring->dev, dma)) {
1534 __free_pages(page, i40e_rx_pg_order(rx_ring));
1535 rx_ring->rx_stats.alloc_page_failed++;
1536 return false;
1537 }
1538
1539 bi->dma = dma;
1540 bi->page = page;
1541 bi->page_offset = i40e_rx_offset(rx_ring);
1542 page_ref_add(page, USHRT_MAX - 1);
1543 bi->pagecnt_bias = USHRT_MAX;
1544
1545 return true;
1546}
1547
1548
1549
1550
1551
1552
1553
1554
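/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any failed
 **/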
1555bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1556{
1557 u16 ntu = rx_ring->next_to_use;
1558 union i40e_rx_desc *rx_desc;
1559 struct i40e_rx_buffer *bi;
1560
1561
1562 if (!rx_ring->netdev || !cleaned_count)
1563 return false;
1564
1565 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1566 bi = i40e_rx_bi(rx_ring, ntu);
1567
1568 do {
1569 if (!i40e_alloc_mapped_page(rx_ring, bi))
1570 goto no_buffers;
1571
1572
1573 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1574 bi->page_offset,
1575 rx_ring->rx_buf_len,
1576 DMA_FROM_DEVICE);
1577
1578
1579
1580
1581 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1582
1583 rx_desc++;
1584 bi++;
1585 ntu++;
1586 if (unlikely(ntu == rx_ring->count)) {
1587 rx_desc = I40E_RX_DESC(rx_ring, 0);
1588 bi = i40e_rx_bi(rx_ring, 0);
1589 ntu = 0;
1590 }
1591
1592
1593 rx_desc->wb.qword1.status_error_len = 0;
1594
1595 cleaned_count--;
1596 } while (cleaned_count);
1597
1598 if (rx_ring->next_to_use != ntu)
1599 i40e_release_rx_desc(rx_ring, ntu);
1600
1601 return false;
1602
1603no_buffers:
1604 if (rx_ring->next_to_use != ntu)
1605 i40e_release_rx_desc(rx_ring, ntu);
1606
1607
1608
1609
1610 return true;
1611}
1612
1613
1614
1615
1616
1617
1618
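/**
 * i40e_rx_checksum - Indicate in skb if hardware validated the checksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/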
1619static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1620 struct sk_buff *skb,
1621 union i40e_rx_desc *rx_desc)
1622{
1623 struct i40e_rx_ptype_decoded decoded;
1624 u32 rx_error, rx_status;
1625 bool ipv4, ipv6;
1626 u8 ptype;
1627 u64 qword;
1628
1629 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1630 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1631 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1632 I40E_RXD_QW1_ERROR_SHIFT;
1633 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1634 I40E_RXD_QW1_STATUS_SHIFT;
1635 decoded = decode_rx_desc_ptype(ptype);
1636
1637 skb->ip_summed = CHECKSUM_NONE;
1638
1639 skb_checksum_none_assert(skb);
1640
1641
1642 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1643 return;
1644
1645
1646 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1647 return;
1648
1649
1650 if (!(decoded.known && decoded.outer_ip))
1651 return;
1652
1653 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1654 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1655 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1656 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1657
1658 if (ipv4 &&
1659 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1660 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1661 goto checksum_fail;
1662
1663
1664 if (ipv6 &&
1665 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1666
1667 return;
1668
1669
1670 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1671 goto checksum_fail;
1672
1673
1674
1675
1676
1677 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1678 return;
1679
1680
1681
1682
1683
1684 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1685 skb->csum_level = 1;
1686
1687
1688 switch (decoded.inner_prot) {
1689 case I40E_RX_PTYPE_INNER_PROT_TCP:
1690 case I40E_RX_PTYPE_INNER_PROT_UDP:
1691 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1692 skb->ip_summed = CHECKSUM_UNNECESSARY;
1693 fallthrough;
1694 default:
1695 break;
1696 }
1697
1698 return;
1699
1700checksum_fail:
1701 vsi->back->hw_csum_rx_error++;
1702}
1703
1704
1705
1706
1707
1708
1709
1710static inline int i40e_ptype_to_htype(u8 ptype)
1711{
1712 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1713
1714 if (!decoded.known)
1715 return PKT_HASH_TYPE_NONE;
1716
1717 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1718 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1719 return PKT_HASH_TYPE_L4;
1720 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1721 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1722 return PKT_HASH_TYPE_L3;
1723 else
1724 return PKT_HASH_TYPE_L2;
1725}
1726
1727
1728
1729
1730
1731
1732
1733
1734static inline void i40e_rx_hash(struct i40e_ring *ring,
1735 union i40e_rx_desc *rx_desc,
1736 struct sk_buff *skb,
1737 u8 rx_ptype)
1738{
1739 u32 hash;
1740 const __le64 rss_mask =
1741 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1742 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1743
1744 if (!(ring->netdev->features & NETIF_F_RXHASH))
1745 return;
1746
1747 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1748 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1749 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1750 }
1751}
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
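/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring the packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to the current skb being populated
 *
 * Checks the ring, descriptor, and packet information in order to populate
 * the timestamp, hash, checksum, VLAN, and protocol fields within the skb.
 **/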
1764void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1765 union i40e_rx_desc *rx_desc, struct sk_buff *skb)
1766{
1767 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1768 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1769 I40E_RXD_QW1_STATUS_SHIFT;
1770 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1771 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1772 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1773 u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1774 I40E_RXD_QW1_PTYPE_SHIFT;
1775
1776 if (unlikely(tsynvalid))
1777 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1778
1779 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1780
1781 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1782
1783 skb_record_rx_queue(skb, rx_ring->queue_index);
1784
1785 if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1786 u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
1787
1788 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1789 le16_to_cpu(vlan_tag));
1790 }
1791
1792
1793 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1794}
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1811 union i40e_rx_desc *rx_desc)
1812
1813{
1814
1815 if (IS_ERR(skb))
1816 return true;
1817
1818
1819
1820
1821
1822
1823 if (unlikely(i40e_test_staterr(rx_desc,
1824 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1825 dev_kfree_skb_any(skb);
1826 return true;
1827 }
1828
1829
1830 if (eth_skb_pad(skb))
1831 return true;
1832
1833 return false;
1834}
1835
1836
1837
1838
1839
1840
1841
1842
1843static inline bool i40e_page_is_reusable(struct page *page)
1844{
1845 return (page_to_nid(page) == numa_mem_id()) &&
1846 !page_is_pfmemalloc(page);
1847}
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
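/**
 * i40e_can_reuse_rx_page - Determine if a page can be reused for another Rx
 * @rx_buffer: buffer containing the page
 *
 * A page is not reusable if it was allocated under memory pressure, if it
 * came from a different NUMA node than the CPU, or if something other than
 * the driver still holds a reference to it (or, for large pages, if there
 * is no unused region left).
 **/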
1876static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1877{
1878 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1879 struct page *page = rx_buffer->page;
1880
1881
1882 if (unlikely(!i40e_page_is_reusable(page)))
1883 return false;
1884
1885#if (PAGE_SIZE < 8192)
1886
1887 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1888 return false;
1889#else
1890#define I40E_LAST_OFFSET \
1891 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1892 if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1893 return false;
1894#endif
1895
1896
1897
1898
1899
1900 if (unlikely(pagecnt_bias == 1)) {
1901 page_ref_add(page, USHRT_MAX - 1);
1902 rx_buffer->pagecnt_bias = USHRT_MAX;
1903 }
1904
1905 return true;
1906}
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1921 struct i40e_rx_buffer *rx_buffer,
1922 struct sk_buff *skb,
1923 unsigned int size)
1924{
1925#if (PAGE_SIZE < 8192)
1926 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1927#else
1928 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1929#endif
1930
1931 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1932 rx_buffer->page_offset, size, truesize);
1933
1934
1935#if (PAGE_SIZE < 8192)
1936 rx_buffer->page_offset ^= truesize;
1937#else
1938 rx_buffer->page_offset += truesize;
1939#endif
1940}
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
1951 const unsigned int size)
1952{
1953 struct i40e_rx_buffer *rx_buffer;
1954
1955 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
1956 prefetchw(rx_buffer->page);
1957
1958
1959 dma_sync_single_range_for_cpu(rx_ring->dev,
1960 rx_buffer->dma,
1961 rx_buffer->page_offset,
1962 size,
1963 DMA_FROM_DEVICE);
1964
1965
1966 rx_buffer->pagecnt_bias--;
1967
1968 return rx_buffer;
1969}
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
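/**
 * i40e_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * Allocates an skb, copies the headers out of the page buffer, and attaches
 * the remainder of the page as a fragment.
 **/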
1981static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
1982 struct i40e_rx_buffer *rx_buffer,
1983 struct xdp_buff *xdp)
1984{
1985 unsigned int size = xdp->data_end - xdp->data;
1986#if (PAGE_SIZE < 8192)
1987 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1988#else
1989 unsigned int truesize = SKB_DATA_ALIGN(size);
1990#endif
1991 unsigned int headlen;
1992 struct sk_buff *skb;
1993
1994
1995 prefetch(xdp->data);
1996#if L1_CACHE_BYTES < 128
1997 prefetch(xdp->data + L1_CACHE_BYTES);
1998#endif
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2017 I40E_RX_HDR_SIZE,
2018 GFP_ATOMIC | __GFP_NOWARN);
2019 if (unlikely(!skb))
2020 return NULL;
2021
2022
2023 headlen = size;
2024 if (headlen > I40E_RX_HDR_SIZE)
2025 headlen = eth_get_headlen(skb->dev, xdp->data,
2026 I40E_RX_HDR_SIZE);
2027
2028
2029 memcpy(__skb_put(skb, headlen), xdp->data,
2030 ALIGN(headlen, sizeof(long)));
2031
2032
2033 size -= headlen;
2034 if (size) {
2035 skb_add_rx_frag(skb, 0, rx_buffer->page,
2036 rx_buffer->page_offset + headlen,
2037 size, truesize);
2038
2039
2040#if (PAGE_SIZE < 8192)
2041 rx_buffer->page_offset ^= truesize;
2042#else
2043 rx_buffer->page_offset += truesize;
2044#endif
2045 } else {
2046
2047 rx_buffer->pagecnt_bias++;
2048 }
2049
2050 return skb;
2051}
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
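/**
 * i40e_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * Builds an skb directly around the existing Rx buffer, avoiding a copy.
 **/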
2062static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2063 struct i40e_rx_buffer *rx_buffer,
2064 struct xdp_buff *xdp)
2065{
2066 unsigned int metasize = xdp->data - xdp->data_meta;
2067#if (PAGE_SIZE < 8192)
2068 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2069#else
2070 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2071 SKB_DATA_ALIGN(xdp->data_end -
2072 xdp->data_hard_start);
2073#endif
2074 struct sk_buff *skb;
2075
2076
2077
2078
2079
2080
2081 prefetch(xdp->data_meta);
2082#if L1_CACHE_BYTES < 128
2083 prefetch(xdp->data_meta + L1_CACHE_BYTES);
2084#endif
2085
2086 skb = build_skb(xdp->data_hard_start, truesize);
2087 if (unlikely(!skb))
2088 return NULL;
2089
2090
2091 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2092 __skb_put(skb, xdp->data_end - xdp->data);
2093 if (metasize)
2094 skb_metadata_set(skb, metasize);
2095
2096
2097#if (PAGE_SIZE < 8192)
2098 rx_buffer->page_offset ^= truesize;
2099#else
2100 rx_buffer->page_offset += truesize;
2101#endif
2102
2103 return skb;
2104}
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2115 struct i40e_rx_buffer *rx_buffer)
2116{
2117 if (i40e_can_reuse_rx_page(rx_buffer)) {
2118
2119 i40e_reuse_rx_page(rx_ring, rx_buffer);
2120 } else {
2121
2122 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2123 i40e_rx_pg_size(rx_ring),
2124 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2125 __page_frag_cache_drain(rx_buffer->page,
2126 rx_buffer->pagecnt_bias);
2127
2128 rx_buffer->page = NULL;
2129 }
2130}
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
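/**
 * i40e_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for the current buffer
 * @skb: current socket buffer containing the frame in progress
 *
 * Advances next_to_clean.  Returns false if this is an EOP buffer,
 * otherwise returns true to indicate more buffers belong to this frame.
 **/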
2143static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2144 union i40e_rx_desc *rx_desc,
2145 struct sk_buff *skb)
2146{
2147 u32 ntc = rx_ring->next_to_clean + 1;
2148
2149
2150 ntc = (ntc < rx_ring->count) ? ntc : 0;
2151 rx_ring->next_to_clean = ntc;
2152
2153 prefetch(I40E_RX_DESC(rx_ring, ntc));
2154
2155
2156#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2157 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2158 return false;
2159
2160 rx_ring->rx_stats.non_eop_descs++;
2161
2162 return true;
2163}
2164
2165static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2166 struct i40e_ring *xdp_ring);
2167
2168int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2169{
2170 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2171
2172 if (unlikely(!xdpf))
2173 return I40E_XDP_CONSUMED;
2174
2175 return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2176}
2177
2178
2179
2180
2181
2182
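/**
 * i40e_run_xdp - run the attached XDP program on a received buffer
 * @rx_ring: Rx ring being processed
 * @xdp: XDP buffer describing the frame
 **/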
2183static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2184 struct xdp_buff *xdp)
2185{
2186 int err, result = I40E_XDP_PASS;
2187 struct i40e_ring *xdp_ring;
2188 struct bpf_prog *xdp_prog;
2189 u32 act;
2190
2191 rcu_read_lock();
2192 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2193
2194 if (!xdp_prog)
2195 goto xdp_out;
2196
2197 prefetchw(xdp->data_hard_start);
2198
2199 act = bpf_prog_run_xdp(xdp_prog, xdp);
2200 switch (act) {
2201 case XDP_PASS:
2202 break;
2203 case XDP_TX:
2204 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2205 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2206 break;
2207 case XDP_REDIRECT:
2208 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2209 result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
2210 break;
2211 default:
2212 bpf_warn_invalid_xdp_action(act);
2213 fallthrough;
2214 case XDP_ABORTED:
2215 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2216 fallthrough;
2217 case XDP_DROP:
2218 result = I40E_XDP_CONSUMED;
2219 break;
2220 }
2221xdp_out:
2222 rcu_read_unlock();
2223 return ERR_PTR(-result);
2224}
2225
2226
2227
2228
2229
2230
2231
2232static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2233 struct i40e_rx_buffer *rx_buffer,
2234 unsigned int size)
2235{
2236 unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);
2237
2238#if (PAGE_SIZE < 8192)
2239 rx_buffer->page_offset ^= truesize;
2240#else
2241 rx_buffer->page_offset += truesize;
2242#endif
2243}
2244
2245
2246
2247
2248
2249
2250
2251void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2252{
2253
2254
2255
2256 wmb();
2257 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2258}
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2269 unsigned int total_rx_bytes,
2270 unsigned int total_rx_packets)
2271{
2272 u64_stats_update_begin(&rx_ring->syncp);
2273 rx_ring->stats.packets += total_rx_packets;
2274 rx_ring->stats.bytes += total_rx_bytes;
2275 u64_stats_update_end(&rx_ring->syncp);
2276 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2277 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2278}
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2290{
2291 if (xdp_res & I40E_XDP_REDIR)
2292 xdp_do_flush_map();
2293
2294 if (xdp_res & I40E_XDP_TX) {
2295 struct i40e_ring *xdp_ring =
2296 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2297
2298 i40e_xdp_ring_update_tail(xdp_ring);
2299 }
2300}
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
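/**
 * i40e_clean_rx_irq - Clean completed descriptors from the Rx ring
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: total limit on the number of packets to process
 *
 * Processes completed descriptors, runs XDP, builds skbs, and hands them
 * to the stack, recycling page buffers where possible.
 *
 * Returns the amount of work completed, or the budget value on failure.
 **/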
2314static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2315{
2316 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2317 struct sk_buff *skb = rx_ring->skb;
2318 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2319 unsigned int xdp_xmit = 0;
2320 bool failure = false;
2321 struct xdp_buff xdp;
2322
2323#if (PAGE_SIZE < 8192)
2324 xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
2325#endif
2326 xdp.rxq = &rx_ring->xdp_rxq;
2327
2328 while (likely(total_rx_packets < (unsigned int)budget)) {
2329 struct i40e_rx_buffer *rx_buffer;
2330 union i40e_rx_desc *rx_desc;
2331 unsigned int size;
2332 u64 qword;
2333
2334
2335 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2336 failure = failure ||
2337 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2338 cleaned_count = 0;
2339 }
2340
2341 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2342
2343
2344
2345
2346
2347
2348 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2349
2350
2351
2352
2353
2354 dma_rmb();
2355
2356 if (i40e_rx_is_programming_status(qword)) {
2357 i40e_clean_programming_status(rx_ring,
2358 rx_desc->raw.qword[0],
2359 qword);
2360 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2361 i40e_inc_ntc(rx_ring);
2362 i40e_reuse_rx_page(rx_ring, rx_buffer);
2363 cleaned_count++;
2364 continue;
2365 }
2366
2367 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2368 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2369 if (!size)
2370 break;
2371
2372 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2373 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2374
2375
2376 if (!skb) {
2377 xdp.data = page_address(rx_buffer->page) +
2378 rx_buffer->page_offset;
2379 xdp.data_meta = xdp.data;
2380 xdp.data_hard_start = xdp.data -
2381 i40e_rx_offset(rx_ring);
2382 xdp.data_end = xdp.data + size;
2383#if (PAGE_SIZE > 4096)
2384
2385 xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
2386#endif
2387 skb = i40e_run_xdp(rx_ring, &xdp);
2388 }
2389
2390 if (IS_ERR(skb)) {
2391 unsigned int xdp_res = -PTR_ERR(skb);
2392
2393 if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2394 xdp_xmit |= xdp_res;
2395 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2396 } else {
2397 rx_buffer->pagecnt_bias++;
2398 }
2399 total_rx_bytes += size;
2400 total_rx_packets++;
2401 } else if (skb) {
2402 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2403 } else if (ring_uses_build_skb(rx_ring)) {
2404 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2405 } else {
2406 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2407 }
2408
2409
2410 if (!skb) {
2411 rx_ring->rx_stats.alloc_buff_failed++;
2412 rx_buffer->pagecnt_bias++;
2413 break;
2414 }
2415
2416 i40e_put_rx_buffer(rx_ring, rx_buffer);
2417 cleaned_count++;
2418
2419 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2420 continue;
2421
2422 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2423 skb = NULL;
2424 continue;
2425 }
2426
2427
2428 total_rx_bytes += skb->len;
2429
2430
2431 i40e_process_skb_fields(rx_ring, rx_desc, skb);
2432
2433 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2434 napi_gro_receive(&rx_ring->q_vector->napi, skb);
2435 skb = NULL;
2436
2437
2438 total_rx_packets++;
2439 }
2440
2441 i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2442 rx_ring->skb = skb;
2443
2444 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2445
2446
2447 return failure ? budget : (int)total_rx_packets;
2448}
2449
2450static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2451{
2452 u32 val;
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469 itr &= I40E_ITR_MASK;
2470
2471 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2472 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2473 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2474
2475 return val;
2476}
2477
2478
2479#define INTREG I40E_PFINT_DYN_CTLN
2480
2481
2482
2483
2484
2485
2486
2487
2488#define ITR_COUNTDOWN_START 3
2489
2490
2491
2492
2493
2494
2495
2496static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2497 struct i40e_q_vector *q_vector)
2498{
2499 struct i40e_hw *hw = &vsi->back->hw;
2500 u32 intval;
2501
2502
2503 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2504 i40e_irq_dynamic_enable_icr0(vsi->back);
2505 return;
2506 }
2507
2508
2509 i40e_update_itr(q_vector, &q_vector->tx);
2510 i40e_update_itr(q_vector, &q_vector->rx);
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2521
2522 intval = i40e_buildreg_itr(I40E_RX_ITR,
2523 q_vector->rx.target_itr);
2524 q_vector->rx.current_itr = q_vector->rx.target_itr;
2525 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2526 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2527 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2528 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2529
2530
2531
2532 intval = i40e_buildreg_itr(I40E_TX_ITR,
2533 q_vector->tx.target_itr);
2534 q_vector->tx.current_itr = q_vector->tx.target_itr;
2535 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2536 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2537
2538 intval = i40e_buildreg_itr(I40E_RX_ITR,
2539 q_vector->rx.target_itr);
2540 q_vector->rx.current_itr = q_vector->rx.target_itr;
2541 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2542 } else {
2543
2544 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2545 if (q_vector->itr_countdown)
2546 q_vector->itr_countdown--;
2547 }
2548
2549 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2550 wr32(hw, INTREG(q_vector->reg_idx), intval);
2551}
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
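/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * Cleans all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/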
2562int i40e_napi_poll(struct napi_struct *napi, int budget)
2563{
2564 struct i40e_q_vector *q_vector =
2565 container_of(napi, struct i40e_q_vector, napi);
2566 struct i40e_vsi *vsi = q_vector->vsi;
2567 struct i40e_ring *ring;
2568 bool clean_complete = true;
2569 bool arm_wb = false;
2570 int budget_per_ring;
2571 int work_done = 0;
2572
2573 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2574 napi_complete(napi);
2575 return 0;
2576 }
2577
2578
2579
2580
2581 i40e_for_each_ring(ring, q_vector->tx) {
2582 bool wd = ring->xsk_umem ?
2583 i40e_clean_xdp_tx_irq(vsi, ring) :
2584 i40e_clean_tx_irq(vsi, ring, budget);
2585
2586 if (!wd) {
2587 clean_complete = false;
2588 continue;
2589 }
2590 arm_wb |= ring->arm_wb;
2591 ring->arm_wb = false;
2592 }
2593
2594
2595 if (budget <= 0)
2596 goto tx_only;
2597
2598
2599 if (unlikely(q_vector->num_ringpairs > 1))
2600
2601
2602
2603
2604 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
2605 else
2606
2607 budget_per_ring = budget;
2608
2609 i40e_for_each_ring(ring, q_vector->rx) {
2610 int cleaned = ring->xsk_umem ?
2611 i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2612 i40e_clean_rx_irq(ring, budget_per_ring);
2613
2614 work_done += cleaned;
2615
2616 if (cleaned >= budget_per_ring)
2617 clean_complete = false;
2618 }
2619
2620
2621 if (!clean_complete) {
2622 int cpu_id = smp_processor_id();
2623
2624
2625
2626
2627
2628
2629
2630
2631 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2632
2633 napi_complete_done(napi, work_done);
2634
2635
2636 i40e_force_wb(vsi, q_vector);
2637
2638
2639 return budget - 1;
2640 }
2641tx_only:
2642 if (arm_wb) {
2643 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2644 i40e_enable_wb_on_itr(vsi, q_vector);
2645 }
2646 return budget;
2647 }
2648
2649 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2650 q_vector->arm_wb_state = false;
2651
2652
2653
2654
2655 if (likely(napi_complete_done(napi, work_done)))
2656 i40e_update_enable_itr(vsi, q_vector);
2657
2658 return min(work_done, budget - 1);
2659}
2660
2661
2662
2663
2664
2665
2666
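/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring: ring to add the programming descriptor to
 * @skb: send buffer
 * @tx_flags: send Tx flags
 **/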
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	int l4_proto;
	u16 i;

	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		return;

	if (!tx_ring->atr_sample_rate)
		return;

	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;
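
	/* snag network header to get L4 type and address */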
	hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
		      skb_inner_network_header(skb) : skb_network_header(skb);

	if (tx_flags & I40E_TX_FLAGS_IPV4) {
		hlen = (hdr.network[0] & 0x0F) << 2;
		l4_proto = hdr.ipv4->protocol;
	} else {
		unsigned int inner_hlen = hdr.network - skb->data;
		unsigned int h_offset = inner_hlen;

		l4_proto =
			ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
		hlen = h_offset - inner_hlen;
	}

	if (l4_proto != IPPROTO_TCP)
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		return;
	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
		if (th->fin || th->rst)
			return;
	}

	tx_ring->atr_count++;
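
	/* sample on all syn/fin/rst packets or once every atr sample count */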
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
		dtype_cmd |=
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	else
		dtype_cmd |=
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}
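
/**
 * i40e_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the tx flags to be set
 *
 * Checks the skb and sets up the corresponding generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code if the frame should be dropped, otherwise returns 0
 * to indicate the flags have been set properly.
 **/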
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct i40e_ring *tx_ring,
					     u32 *flags)
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
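		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */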
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}
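
/**
 * i40e_tso - set up the tso context descriptor
 * @first:    pointer to first Tx buffer for xmit
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/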
static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
		    u64 *cd_type_cmd_tso_mss)
{
	struct sk_buff *skb = first->skb;
	u64 cd_cmd, cd_tso_len, cd_mss;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	u16 gso_segs, gso_size;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);
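
	/* initialize outer IP header fields */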
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			l4_offset = l4.hdr - skb->data;

			paylen = skb->len - l4_offset;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}
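
		/* reset pointers to inner headers */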
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	l4_offset = l4.hdr - skb->data;

	paylen = skb->len - l4_offset;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
		*hdr_len = sizeof(*l4.udp) + l4_offset;
	} else {
		csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
	}

	gso_size = skb_shinfo(skb)->gso_size;
	gso_segs = skb_shinfo(skb)->gso_segs;

	first->gso_segs = gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = gso_size;
	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
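
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/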
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_start = jiffies;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		pf->tx_hwtstamp_skipped++;
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}
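
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/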
static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
			       u32 *td_cmd, u32 *td_offset,
			       struct i40e_ring *tx_ring,
			       u32 *cd_tunneling)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	__be16 frag_off;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);
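
	/* compute outer L2 header size */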
	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	if (skb->encapsulation) {
		u32 tunnel = 0;

		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
				  I40E_TX_CTX_EXT_IP_IPV4 :
				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;

			l4_proto = ip.v4->protocol;
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;

			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		}
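
		/* define outer transport */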
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;

		ip.hdr = skb_inner_network_header(skb);

		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;

		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;

		*cd_tunneling |= tunnel;

		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			*tx_flags |= I40E_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			*tx_flags |= I40E_TX_FLAGS_IPV6;
	}
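
	/* Enable IP checksum offloads */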
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;

		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
		       I40E_TX_DESC_CMD_IIPT_IPV4;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;

		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
	}

	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
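
	/* Enable L4 checksum offloads */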
	switch (l4_proto) {
	case IPPROTO_TCP:
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & I40E_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	*td_cmd |= cmd;
	*td_offset |= offset;

	return 1;
}
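
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/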
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
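
/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/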
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	smp_mb();
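
	/* Check again in case another CPU has just made room available. */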
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
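
/**
 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb:      send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions per single TSO segment.
 **/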
bool __i40e_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
		return false;

	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];
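
	/* Initialize size to the negative value of gso_size minus 1.  We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as we have 2 descriptors for
	 * the remainder of the packet.
	 */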
	sum = 1 - skb_shinfo(skb)->gso_size;

	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
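
	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */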
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		if (stale_size > I40E_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(I40E_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > I40E_MAX_DATA_PER_TXD);
		}

		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
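
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 *
 * Returns 0 on success, -1 on failure to DMA
 **/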
static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			      struct i40e_tx_buffer *first, u32 tx_flags,
			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	skb_frag_t *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 desc_count = 1;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   max_data, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	td_cmd |= I40E_TX_DESC_CMD_EOP;

	desc_count |= ++tx_ring->packet_stride;

	if (desc_count >= WB_STRIDE) {
		td_cmd |= I40E_TX_DESC_CMD_RS;
		tx_ring->packet_stride = 0;
	}

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	skb_tx_timestamp(skb);
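
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */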
	wmb();

	first->next_to_watch = tx_desc;
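
	/* notify HW of packet */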
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return 0;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;

	return -1;
}
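
/**
 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdpf: frame data to transmit
 * @xdp_ring: XDP Tx ring
 **/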
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
			      struct i40e_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	void *data = xdpf->data;
	u32 size = xdpf->len;
	dma_addr_t dma;

	if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return I40E_XDP_CONSUMED;
	}
	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return I40E_XDP_CONSUMED;

	tx_bi = &xdp_ring->tx_bi[i];
	tx_bi->bytecount = size;
	tx_bi->gso_segs = 1;
	tx_bi->xdpf = xdpf;

	dma_unmap_len_set(tx_bi, len, size);
	dma_unmap_addr_set(tx_bi, dma, dma);

	tx_desc = I40E_TX_DESC(xdp_ring, i);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
						  | I40E_TXD_CMD,
						  0, size, 0);
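
	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */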
	smp_wmb();

	xdp_ring->xdp_tx_active++;
	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_bi->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return I40E_XDP_TX;
}
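
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/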
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;
	int tsyn;

	prefetch(skb->data);

	i40e_trace(xmit_frame_ring, skb, tx_ring);

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}
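
	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */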
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	protocol = vlan_get_protocol(skb);

	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);
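
	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */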
	i40e_atr(tx_ring, skb, tx_flags);

	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
			td_cmd, td_offset))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);

		dev_kfree_skb_any(pf->ptp_tx_skb);
		pf->ptp_tx_skb = NULL;
		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
	}

	return NETDEV_TX_OK;
}
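
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/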
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
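
	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */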
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}
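
/**
 * i40e_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of frames
 * @frames: array of XDP buffer pointers
 * @flags: XDP extra info
 *
 * Returns number of frames successfully sent. Frames that fail are
 * free'ed via XDP return API.
 *
 * For error cases, a negative errno code is returned and no-frames
 * are transmitted (caller must handle freeing frames).
 **/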
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *xdp_ring;
	int drops = 0;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
		if (err != I40E_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		i40e_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}