1
2
3
4#include <linux/prefetch.h>
5#include <linux/bpf_trace.h>
6#include <net/xdp.h>
7#include "i40e.h"
8#include "i40e_trace.h"
9#include "i40e_prototype.h"
10#include "i40e_txrx_common.h"
11#include "i40e_xsk.h"
12
13#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
14
15
16
17
18
19
20
21static void i40e_fdir(struct i40e_ring *tx_ring,
22 struct i40e_fdir_filter *fdata, bool add)
23{
24 struct i40e_filter_program_desc *fdir_desc;
25 struct i40e_pf *pf = tx_ring->vsi->back;
26 u32 flex_ptype, dtype_cmd;
27 u16 i;
28
29
30 i = tx_ring->next_to_use;
31 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
32
33 i++;
34 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
35
36 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
37 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
38
39 flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
40 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
41
42 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
43 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
44
45 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
46 (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
47
48
49 flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
50 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
51 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
52
53 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
54
55 dtype_cmd |= add ?
56 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
57 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
58 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
59 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
60
61 dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
62 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
63
64 dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
65 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
66
67 if (fdata->cnt_index) {
68 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
69 dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
70 ((u32)fdata->cnt_index <<
71 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
72 }
73
74 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
75 fdir_desc->rsvd = cpu_to_le32(0);
76 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
77 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
78}
79
80#define I40E_FD_CLEAN_DELAY 10
81
82
83
84
85
86
87
88static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
89 u8 *raw_packet, struct i40e_pf *pf,
90 bool add)
91{
92 struct i40e_tx_buffer *tx_buf, *first;
93 struct i40e_tx_desc *tx_desc;
94 struct i40e_ring *tx_ring;
95 struct i40e_vsi *vsi;
96 struct device *dev;
97 dma_addr_t dma;
98 u32 td_cmd = 0;
99 u16 i;
100
101
102 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
103 if (!vsi)
104 return -ENOENT;
105
106 tx_ring = vsi->tx_rings[0];
107 dev = tx_ring->dev;
108
109
110 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
111 if (!i)
112 return -EAGAIN;
113 msleep_interruptible(1);
114 }
115
116 dma = dma_map_single(dev, raw_packet,
117 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
118 if (dma_mapping_error(dev, dma))
119 goto dma_fail;
120
121
122 i = tx_ring->next_to_use;
123 first = &tx_ring->tx_bi[i];
124 i40e_fdir(tx_ring, fdir_data, add);
125
126
127 i = tx_ring->next_to_use;
128 tx_desc = I40E_TX_DESC(tx_ring, i);
129 tx_buf = &tx_ring->tx_bi[i];
130
131 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
132
133 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
134
135
136 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
137 dma_unmap_addr_set(tx_buf, dma, dma);
138
139 tx_desc->buffer_addr = cpu_to_le64(dma);
140 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
141
142 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
143 tx_buf->raw_buf = (void *)raw_packet;
144
145 tx_desc->cmd_type_offset_bsz =
146 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
147
148
149
150
151 wmb();
152
153
154 first->next_to_watch = tx_desc;
155
156 writel(tx_ring->next_to_use, tx_ring->tail);
157 return 0;
158
159dma_fail:
160 return -1;
161}
162
163#define IP_HEADER_OFFSET 14
164#define I40E_UDPIP_DUMMY_PACKET_LEN 42
165
166
167
168
169
170
171
172
173static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
174 struct i40e_fdir_filter *fd_data,
175 bool add)
176{
177 struct i40e_pf *pf = vsi->back;
178 struct udphdr *udp;
179 struct iphdr *ip;
180 u8 *raw_packet;
181 int ret;
182 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
183 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
184 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
185
186 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
187 if (!raw_packet)
188 return -ENOMEM;
189 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
190
191 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
192 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
193 + sizeof(struct iphdr));
194
195 ip->daddr = fd_data->dst_ip;
196 udp->dest = fd_data->dst_port;
197 ip->saddr = fd_data->src_ip;
198 udp->source = fd_data->src_port;
199
200 if (fd_data->flex_filter) {
201 u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
202 __be16 pattern = fd_data->flex_word;
203 u16 off = fd_data->flex_offset;
204
205 *((__force __be16 *)(payload + off)) = pattern;
206 }
207
208 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
209 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
210 if (ret) {
211 dev_info(&pf->pdev->dev,
212 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
213 fd_data->pctype, fd_data->fd_id, ret);
214
215 kfree(raw_packet);
216 return -EOPNOTSUPP;
217 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
218 if (add)
219 dev_info(&pf->pdev->dev,
220 "Filter OK for PCTYPE %d loc = %d\n",
221 fd_data->pctype, fd_data->fd_id);
222 else
223 dev_info(&pf->pdev->dev,
224 "Filter deleted for PCTYPE %d loc = %d\n",
225 fd_data->pctype, fd_data->fd_id);
226 }
227
228 if (add)
229 pf->fd_udp4_filter_cnt++;
230 else
231 pf->fd_udp4_filter_cnt--;
232
233 return 0;
234}
235
236#define I40E_TCPIP_DUMMY_PACKET_LEN 54
237
238
239
240
241
242
243
244
245static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
246 struct i40e_fdir_filter *fd_data,
247 bool add)
248{
249 struct i40e_pf *pf = vsi->back;
250 struct tcphdr *tcp;
251 struct iphdr *ip;
252 u8 *raw_packet;
253 int ret;
254
255 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
256 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
257 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
258 0x0, 0x72, 0, 0, 0, 0};
259
260 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
261 if (!raw_packet)
262 return -ENOMEM;
263 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
264
265 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
266 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
267 + sizeof(struct iphdr));
268
269 ip->daddr = fd_data->dst_ip;
270 tcp->dest = fd_data->dst_port;
271 ip->saddr = fd_data->src_ip;
272 tcp->source = fd_data->src_port;
273
274 if (fd_data->flex_filter) {
275 u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
276 __be16 pattern = fd_data->flex_word;
277 u16 off = fd_data->flex_offset;
278
279 *((__force __be16 *)(payload + off)) = pattern;
280 }
281
282 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
283 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
284 if (ret) {
285 dev_info(&pf->pdev->dev,
286 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
287 fd_data->pctype, fd_data->fd_id, ret);
288
289 kfree(raw_packet);
290 return -EOPNOTSUPP;
291 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
292 if (add)
293 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
294 fd_data->pctype, fd_data->fd_id);
295 else
296 dev_info(&pf->pdev->dev,
297 "Filter deleted for PCTYPE %d loc = %d\n",
298 fd_data->pctype, fd_data->fd_id);
299 }
300
301 if (add) {
302 pf->fd_tcp4_filter_cnt++;
303 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
304 I40E_DEBUG_FD & pf->hw.debug_mask)
305 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
306 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
307 } else {
308 pf->fd_tcp4_filter_cnt--;
309 }
310
311 return 0;
312}
313
314#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
315
316
317
318
319
320
321
322
323
324static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
325 struct i40e_fdir_filter *fd_data,
326 bool add)
327{
328 struct i40e_pf *pf = vsi->back;
329 struct sctphdr *sctp;
330 struct iphdr *ip;
331 u8 *raw_packet;
332 int ret;
333
334 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
335 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
336 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
337
338 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
339 if (!raw_packet)
340 return -ENOMEM;
341 memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
342
343 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
344 sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
345 + sizeof(struct iphdr));
346
347 ip->daddr = fd_data->dst_ip;
348 sctp->dest = fd_data->dst_port;
349 ip->saddr = fd_data->src_ip;
350 sctp->source = fd_data->src_port;
351
352 if (fd_data->flex_filter) {
353 u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
354 __be16 pattern = fd_data->flex_word;
355 u16 off = fd_data->flex_offset;
356
357 *((__force __be16 *)(payload + off)) = pattern;
358 }
359
360 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
361 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
362 if (ret) {
363 dev_info(&pf->pdev->dev,
364 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
365 fd_data->pctype, fd_data->fd_id, ret);
366
367 kfree(raw_packet);
368 return -EOPNOTSUPP;
369 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
370 if (add)
371 dev_info(&pf->pdev->dev,
372 "Filter OK for PCTYPE %d loc = %d\n",
373 fd_data->pctype, fd_data->fd_id);
374 else
375 dev_info(&pf->pdev->dev,
376 "Filter deleted for PCTYPE %d loc = %d\n",
377 fd_data->pctype, fd_data->fd_id);
378 }
379
380 if (add)
381 pf->fd_sctp4_filter_cnt++;
382 else
383 pf->fd_sctp4_filter_cnt--;
384
385 return 0;
386}
387
388#define I40E_IP_DUMMY_PACKET_LEN 34
389
390
391
392
393
394
395
396
397
398static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
399 struct i40e_fdir_filter *fd_data,
400 bool add)
401{
402 struct i40e_pf *pf = vsi->back;
403 struct iphdr *ip;
404 u8 *raw_packet;
405 int ret;
406 int i;
407 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
408 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
409 0, 0, 0, 0};
410
411 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
412 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
413 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
414 if (!raw_packet)
415 return -ENOMEM;
416 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
417 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
418
419 ip->saddr = fd_data->src_ip;
420 ip->daddr = fd_data->dst_ip;
421 ip->protocol = 0;
422
423 if (fd_data->flex_filter) {
424 u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
425 __be16 pattern = fd_data->flex_word;
426 u16 off = fd_data->flex_offset;
427
428 *((__force __be16 *)(payload + off)) = pattern;
429 }
430
431 fd_data->pctype = i;
432 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
433 if (ret) {
434 dev_info(&pf->pdev->dev,
435 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
436 fd_data->pctype, fd_data->fd_id, ret);
437
438
439
440 kfree(raw_packet);
441 return -EOPNOTSUPP;
442 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
443 if (add)
444 dev_info(&pf->pdev->dev,
445 "Filter OK for PCTYPE %d loc = %d\n",
446 fd_data->pctype, fd_data->fd_id);
447 else
448 dev_info(&pf->pdev->dev,
449 "Filter deleted for PCTYPE %d loc = %d\n",
450 fd_data->pctype, fd_data->fd_id);
451 }
452 }
453
454 if (add)
455 pf->fd_ip4_filter_cnt++;
456 else
457 pf->fd_ip4_filter_cnt--;
458
459 return 0;
460}
461
462
463
464
465
466
467
468
469int i40e_add_del_fdir(struct i40e_vsi *vsi,
470 struct i40e_fdir_filter *input, bool add)
471{
472 struct i40e_pf *pf = vsi->back;
473 int ret;
474
475 switch (input->flow_type & ~FLOW_EXT) {
476 case TCP_V4_FLOW:
477 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
478 break;
479 case UDP_V4_FLOW:
480 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
481 break;
482 case SCTP_V4_FLOW:
483 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
484 break;
485 case IP_USER_FLOW:
486 switch (input->ip4_proto) {
487 case IPPROTO_TCP:
488 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
489 break;
490 case IPPROTO_UDP:
491 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
492 break;
493 case IPPROTO_SCTP:
494 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
495 break;
496 case IPPROTO_IP:
497 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
498 break;
499 default:
500
501 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
502 input->ip4_proto);
503 return -EINVAL;
504 }
505 break;
506 default:
507 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
508 input->flow_type);
509 return -EINVAL;
510 }
511
512
513
514
515
516
517
518 return ret;
519}
520
521
522
523
524
525
526
527
528
529
530void i40e_fd_handle_status(struct i40e_ring *rx_ring,
531 union i40e_rx_desc *rx_desc, u8 prog_id)
532{
533 struct i40e_pf *pf = rx_ring->vsi->back;
534 struct pci_dev *pdev = pf->pdev;
535 u32 fcnt_prog, fcnt_avail;
536 u32 error;
537 u64 qw;
538
539 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
540 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
541 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
542
543 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
544 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
545 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
546 (I40E_DEBUG_FD & pf->hw.debug_mask))
547 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
548 pf->fd_inv);
549
550
551
552
553
554
555
556 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
557 return;
558
559 pf->fd_add_err++;
560
561 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
562
563 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
564 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
565
566
567
568
569
570
571 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
572 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
573 }
574
575
576 fcnt_prog = i40e_get_global_fd_count(pf);
577 fcnt_avail = pf->fdir_pf_filter_count;
578
579
580
581
582 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
583 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
584 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
585 pf->state))
586 if (I40E_DEBUG_FD & pf->hw.debug_mask)
587 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
588 }
589 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
590 if (I40E_DEBUG_FD & pf->hw.debug_mask)
591 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
592 rx_desc->wb.qword0.hi_dword.fd_id);
593 }
594}
595
596
597
598
599
600
601static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
602 struct i40e_tx_buffer *tx_buffer)
603{
604 if (tx_buffer->skb) {
605 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
606 kfree(tx_buffer->raw_buf);
607 else if (ring_is_xdp(ring))
608 xdp_return_frame(tx_buffer->xdpf);
609 else
610 dev_kfree_skb_any(tx_buffer->skb);
611 if (dma_unmap_len(tx_buffer, len))
612 dma_unmap_single(ring->dev,
613 dma_unmap_addr(tx_buffer, dma),
614 dma_unmap_len(tx_buffer, len),
615 DMA_TO_DEVICE);
616 } else if (dma_unmap_len(tx_buffer, len)) {
617 dma_unmap_page(ring->dev,
618 dma_unmap_addr(tx_buffer, dma),
619 dma_unmap_len(tx_buffer, len),
620 DMA_TO_DEVICE);
621 }
622
623 tx_buffer->next_to_watch = NULL;
624 tx_buffer->skb = NULL;
625 dma_unmap_len_set(tx_buffer, len, 0);
626
627}
628
629
630
631
632
633void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
634{
635 unsigned long bi_size;
636 u16 i;
637
638 if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
639 i40e_xsk_clean_tx_ring(tx_ring);
640 } else {
641
642 if (!tx_ring->tx_bi)
643 return;
644
645
646 for (i = 0; i < tx_ring->count; i++)
647 i40e_unmap_and_free_tx_resource(tx_ring,
648 &tx_ring->tx_bi[i]);
649 }
650
651 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
652 memset(tx_ring->tx_bi, 0, bi_size);
653
654
655 memset(tx_ring->desc, 0, tx_ring->size);
656
657 tx_ring->next_to_use = 0;
658 tx_ring->next_to_clean = 0;
659
660 if (!tx_ring->netdev)
661 return;
662
663
664 netdev_tx_reset_queue(txring_txq(tx_ring));
665}
666
667
668
669
670
671
672
673void i40e_free_tx_resources(struct i40e_ring *tx_ring)
674{
675 i40e_clean_tx_ring(tx_ring);
676 kfree(tx_ring->tx_bi);
677 tx_ring->tx_bi = NULL;
678
679 if (tx_ring->desc) {
680 dma_free_coherent(tx_ring->dev, tx_ring->size,
681 tx_ring->desc, tx_ring->dma);
682 tx_ring->desc = NULL;
683 }
684}
685
686
687
688
689
690
691
692
693
694u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
695{
696 u32 head, tail;
697
698 if (!in_sw) {
699 head = i40e_get_head(ring);
700 tail = readl(ring->tail);
701 } else {
702 head = ring->next_to_clean;
703 tail = ring->next_to_use;
704 }
705
706 if (head != tail)
707 return (head < tail) ?
708 tail - head : (tail + ring->count - head);
709
710 return 0;
711}
712
713
714
715
716
717
718
719
720void i40e_detect_recover_hung(struct i40e_vsi *vsi)
721{
722 struct i40e_ring *tx_ring = NULL;
723 struct net_device *netdev;
724 unsigned int i;
725 int packets;
726
727 if (!vsi)
728 return;
729
730 if (test_bit(__I40E_VSI_DOWN, vsi->state))
731 return;
732
733 netdev = vsi->netdev;
734 if (!netdev)
735 return;
736
737 if (!netif_carrier_ok(netdev))
738 return;
739
740 for (i = 0; i < vsi->num_queue_pairs; i++) {
741 tx_ring = vsi->tx_rings[i];
742 if (tx_ring && tx_ring->desc) {
743
744
745
746
747
748
749
750 packets = tx_ring->stats.packets & INT_MAX;
751 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
752 i40e_force_wb(vsi, tx_ring->q_vector);
753 continue;
754 }
755
756
757
758
759 smp_rmb();
760 tx_ring->tx_stats.prev_pkt_ctr =
761 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
762 }
763 }
764}
765
766
767
768
769
770
771
772
773
774static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
775 struct i40e_ring *tx_ring, int napi_budget)
776{
777 u16 i = tx_ring->next_to_clean;
778 struct i40e_tx_buffer *tx_buf;
779 struct i40e_tx_desc *tx_head;
780 struct i40e_tx_desc *tx_desc;
781 unsigned int total_bytes = 0, total_packets = 0;
782 unsigned int budget = vsi->work_limit;
783
784 tx_buf = &tx_ring->tx_bi[i];
785 tx_desc = I40E_TX_DESC(tx_ring, i);
786 i -= tx_ring->count;
787
788 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
789
790 do {
791 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
792
793
794 if (!eop_desc)
795 break;
796
797
798 smp_rmb();
799
800 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
801
802 if (tx_head == tx_desc)
803 break;
804
805
806 tx_buf->next_to_watch = NULL;
807
808
809 total_bytes += tx_buf->bytecount;
810 total_packets += tx_buf->gso_segs;
811
812
813 if (ring_is_xdp(tx_ring))
814 xdp_return_frame(tx_buf->xdpf);
815 else
816 napi_consume_skb(tx_buf->skb, napi_budget);
817
818
819 dma_unmap_single(tx_ring->dev,
820 dma_unmap_addr(tx_buf, dma),
821 dma_unmap_len(tx_buf, len),
822 DMA_TO_DEVICE);
823
824
825 tx_buf->skb = NULL;
826 dma_unmap_len_set(tx_buf, len, 0);
827
828
829 while (tx_desc != eop_desc) {
830 i40e_trace(clean_tx_irq_unmap,
831 tx_ring, tx_desc, tx_buf);
832
833 tx_buf++;
834 tx_desc++;
835 i++;
836 if (unlikely(!i)) {
837 i -= tx_ring->count;
838 tx_buf = tx_ring->tx_bi;
839 tx_desc = I40E_TX_DESC(tx_ring, 0);
840 }
841
842
843 if (dma_unmap_len(tx_buf, len)) {
844 dma_unmap_page(tx_ring->dev,
845 dma_unmap_addr(tx_buf, dma),
846 dma_unmap_len(tx_buf, len),
847 DMA_TO_DEVICE);
848 dma_unmap_len_set(tx_buf, len, 0);
849 }
850 }
851
852
853 tx_buf++;
854 tx_desc++;
855 i++;
856 if (unlikely(!i)) {
857 i -= tx_ring->count;
858 tx_buf = tx_ring->tx_bi;
859 tx_desc = I40E_TX_DESC(tx_ring, 0);
860 }
861
862 prefetch(tx_desc);
863
864
865 budget--;
866 } while (likely(budget));
867
868 i += tx_ring->count;
869 tx_ring->next_to_clean = i;
870 i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
871 i40e_arm_wb(tx_ring, vsi, budget);
872
873 if (ring_is_xdp(tx_ring))
874 return !!budget;
875
876
877 netdev_tx_completed_queue(txring_txq(tx_ring),
878 total_packets, total_bytes);
879
880#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
881 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
882 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
883
884
885
886 smp_mb();
887 if (__netif_subqueue_stopped(tx_ring->netdev,
888 tx_ring->queue_index) &&
889 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
890 netif_wake_subqueue(tx_ring->netdev,
891 tx_ring->queue_index);
892 ++tx_ring->tx_stats.restart_queue;
893 }
894 }
895
896 return !!budget;
897}
898
899
900
901
902
903
904
905static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
906 struct i40e_q_vector *q_vector)
907{
908 u16 flags = q_vector->tx.ring[0].flags;
909 u32 val;
910
911 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
912 return;
913
914 if (q_vector->arm_wb_state)
915 return;
916
917 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
918 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
919 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK;
920
921 wr32(&vsi->back->hw,
922 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
923 val);
924 } else {
925 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
926 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK;
927
928 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
929 }
930 q_vector->arm_wb_state = true;
931}
932
933
934
935
936
937
938
939void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
940{
941 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
942 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
943 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
944 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
945 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
946
947
948 wr32(&vsi->back->hw,
949 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
950 } else {
951 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
952 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
953 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
954 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
955
956
957 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
958 }
959}
960
961static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
962 struct i40e_ring_container *rc)
963{
964 return &q_vector->rx == rc;
965}
966
967static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
968{
969 unsigned int divisor;
970
971 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
972 case I40E_LINK_SPEED_40GB:
973 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
974 break;
975 case I40E_LINK_SPEED_25GB:
976 case I40E_LINK_SPEED_20GB:
977 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
978 break;
979 default:
980 case I40E_LINK_SPEED_10GB:
981 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
982 break;
983 case I40E_LINK_SPEED_1GB:
984 case I40E_LINK_SPEED_100MB:
985 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
986 break;
987 }
988
989 return divisor;
990}
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005static void i40e_update_itr(struct i40e_q_vector *q_vector,
1006 struct i40e_ring_container *rc)
1007{
1008 unsigned int avg_wire_size, packets, bytes, itr;
1009 unsigned long next_update = jiffies;
1010
1011
1012
1013
1014 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1015 return;
1016
1017
1018
1019
1020 itr = i40e_container_is_rx(q_vector, rc) ?
1021 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1022 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1023
1024
1025
1026
1027
1028
1029 if (time_after(next_update, rc->next_update))
1030 goto clear_counts;
1031
1032
1033
1034
1035
1036
1037
1038 if (q_vector->itr_countdown) {
1039 itr = rc->target_itr;
1040 goto clear_counts;
1041 }
1042
1043 packets = rc->total_packets;
1044 bytes = rc->total_bytes;
1045
1046 if (i40e_container_is_rx(q_vector, rc)) {
1047
1048
1049
1050
1051
1052 if (packets && packets < 4 && bytes < 9000 &&
1053 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1054 itr = I40E_ITR_ADAPTIVE_LATENCY;
1055 goto adjust_by_size;
1056 }
1057 } else if (packets < 4) {
1058
1059
1060
1061
1062
1063 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1064 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1065 I40E_ITR_ADAPTIVE_MAX_USECS)
1066 goto clear_counts;
1067 } else if (packets > 32) {
1068
1069
1070
1071 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1072 }
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082 if (packets < 56) {
1083 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1084 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1085 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1086 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1087 }
1088 goto clear_counts;
1089 }
1090
1091 if (packets <= 256) {
1092 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1093 itr &= I40E_ITR_MASK;
1094
1095
1096
1097
1098
1099 if (packets <= 112)
1100 goto clear_counts;
1101
1102
1103
1104
1105
1106
1107 itr /= 2;
1108 itr &= I40E_ITR_MASK;
1109 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1110 itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1111
1112 goto clear_counts;
1113 }
1114
1115
1116
1117
1118
1119
1120
1121 itr = I40E_ITR_ADAPTIVE_BULK;
1122
1123adjust_by_size:
1124
1125
1126
1127
1128
1129 avg_wire_size = bytes / packets;
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146 if (avg_wire_size <= 60) {
1147
1148 avg_wire_size = 4096;
1149 } else if (avg_wire_size <= 380) {
1150
1151 avg_wire_size *= 40;
1152 avg_wire_size += 1696;
1153 } else if (avg_wire_size <= 1084) {
1154
1155 avg_wire_size *= 15;
1156 avg_wire_size += 11452;
1157 } else if (avg_wire_size <= 1980) {
1158
1159 avg_wire_size *= 5;
1160 avg_wire_size += 22420;
1161 } else {
1162
1163 avg_wire_size = 32256;
1164 }
1165
1166
1167
1168
1169 if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1170 avg_wire_size /= 2;
1171
1172
1173
1174
1175
1176
1177
1178
1179 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1180 I40E_ITR_ADAPTIVE_MIN_INC;
1181
1182 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1183 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1184 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1185 }
1186
1187clear_counts:
1188
1189 rc->target_itr = itr;
1190
1191
1192 rc->next_update = next_update + 1;
1193
1194 rc->total_bytes = 0;
1195 rc->total_packets = 0;
1196}
1197
1198
1199
1200
1201
1202
1203
1204
1205static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1206 struct i40e_rx_buffer *old_buff)
1207{
1208 struct i40e_rx_buffer *new_buff;
1209 u16 nta = rx_ring->next_to_alloc;
1210
1211 new_buff = &rx_ring->rx_bi[nta];
1212
1213
1214 nta++;
1215 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1216
1217
1218 new_buff->dma = old_buff->dma;
1219 new_buff->page = old_buff->page;
1220 new_buff->page_offset = old_buff->page_offset;
1221 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1222
1223 rx_ring->rx_stats.page_reuse_count++;
1224
1225
1226 old_buff->page = NULL;
1227}
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238static inline bool i40e_rx_is_programming_status(u64 qw)
1239{
1240
1241
1242
1243
1244
1245 return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1246}
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260struct i40e_rx_buffer *i40e_clean_programming_status(
1261 struct i40e_ring *rx_ring,
1262 union i40e_rx_desc *rx_desc,
1263 u64 qw)
1264{
1265 struct i40e_rx_buffer *rx_buffer;
1266 u32 ntc;
1267 u8 id;
1268
1269 if (!i40e_rx_is_programming_status(qw))
1270 return NULL;
1271
1272 ntc = rx_ring->next_to_clean;
1273
1274
1275 rx_buffer = &rx_ring->rx_bi[ntc++];
1276 ntc = (ntc < rx_ring->count) ? ntc : 0;
1277 rx_ring->next_to_clean = ntc;
1278
1279 prefetch(I40E_RX_DESC(rx_ring, ntc));
1280
1281 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1282 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1283
1284 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1285 i40e_fd_handle_status(rx_ring, rx_desc, id);
1286
1287 return rx_buffer;
1288}
1289
1290
1291
1292
1293
1294
1295
1296int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1297{
1298 struct device *dev = tx_ring->dev;
1299 int bi_size;
1300
1301 if (!dev)
1302 return -ENOMEM;
1303
1304
1305 WARN_ON(tx_ring->tx_bi);
1306 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1307 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1308 if (!tx_ring->tx_bi)
1309 goto err;
1310
1311 u64_stats_init(&tx_ring->syncp);
1312
1313
1314 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1315
1316
1317
1318 tx_ring->size += sizeof(u32);
1319 tx_ring->size = ALIGN(tx_ring->size, 4096);
1320 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1321 &tx_ring->dma, GFP_KERNEL);
1322 if (!tx_ring->desc) {
1323 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1324 tx_ring->size);
1325 goto err;
1326 }
1327
1328 tx_ring->next_to_use = 0;
1329 tx_ring->next_to_clean = 0;
1330 tx_ring->tx_stats.prev_pkt_ctr = -1;
1331 return 0;
1332
1333err:
1334 kfree(tx_ring->tx_bi);
1335 tx_ring->tx_bi = NULL;
1336 return -ENOMEM;
1337}
1338
1339
1340
1341
1342
1343void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1344{
1345 unsigned long bi_size;
1346 u16 i;
1347
1348
1349 if (!rx_ring->rx_bi)
1350 return;
1351
1352 if (rx_ring->skb) {
1353 dev_kfree_skb(rx_ring->skb);
1354 rx_ring->skb = NULL;
1355 }
1356
1357 if (rx_ring->xsk_umem) {
1358 i40e_xsk_clean_rx_ring(rx_ring);
1359 goto skip_free;
1360 }
1361
1362
1363 for (i = 0; i < rx_ring->count; i++) {
1364 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1365
1366 if (!rx_bi->page)
1367 continue;
1368
1369
1370
1371
1372 dma_sync_single_range_for_cpu(rx_ring->dev,
1373 rx_bi->dma,
1374 rx_bi->page_offset,
1375 rx_ring->rx_buf_len,
1376 DMA_FROM_DEVICE);
1377
1378
1379 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1380 i40e_rx_pg_size(rx_ring),
1381 DMA_FROM_DEVICE,
1382 I40E_RX_DMA_ATTR);
1383
1384 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1385
1386 rx_bi->page = NULL;
1387 rx_bi->page_offset = 0;
1388 }
1389
1390skip_free:
1391 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1392 memset(rx_ring->rx_bi, 0, bi_size);
1393
1394
1395 memset(rx_ring->desc, 0, rx_ring->size);
1396
1397 rx_ring->next_to_alloc = 0;
1398 rx_ring->next_to_clean = 0;
1399 rx_ring->next_to_use = 0;
1400}
1401
1402
1403
1404
1405
1406
1407
1408void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1409{
1410 i40e_clean_rx_ring(rx_ring);
1411 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1412 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1413 rx_ring->xdp_prog = NULL;
1414 kfree(rx_ring->rx_bi);
1415 rx_ring->rx_bi = NULL;
1416
1417 if (rx_ring->desc) {
1418 dma_free_coherent(rx_ring->dev, rx_ring->size,
1419 rx_ring->desc, rx_ring->dma);
1420 rx_ring->desc = NULL;
1421 }
1422}
1423
1424
1425
1426
1427
1428
1429
1430int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1431{
1432 struct device *dev = rx_ring->dev;
1433 int err = -ENOMEM;
1434 int bi_size;
1435
1436
1437 WARN_ON(rx_ring->rx_bi);
1438 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1439 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1440 if (!rx_ring->rx_bi)
1441 goto err;
1442
1443 u64_stats_init(&rx_ring->syncp);
1444
1445
1446 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1447 rx_ring->size = ALIGN(rx_ring->size, 4096);
1448 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1449 &rx_ring->dma, GFP_KERNEL);
1450
1451 if (!rx_ring->desc) {
1452 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1453 rx_ring->size);
1454 goto err;
1455 }
1456
1457 rx_ring->next_to_alloc = 0;
1458 rx_ring->next_to_clean = 0;
1459 rx_ring->next_to_use = 0;
1460
1461
1462 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1463 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1464 rx_ring->queue_index);
1465 if (err < 0)
1466 goto err;
1467 }
1468
1469 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1470
1471 return 0;
1472err:
1473 kfree(rx_ring->rx_bi);
1474 rx_ring->rx_bi = NULL;
1475 return err;
1476}
1477
1478
1479
1480
1481
1482
1483void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1484{
1485 rx_ring->next_to_use = val;
1486
1487
1488 rx_ring->next_to_alloc = val;
1489
1490
1491
1492
1493
1494
1495 wmb();
1496 writel(val, rx_ring->tail);
1497}
1498
1499
1500
1501
1502
1503
1504
1505static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1506{
1507 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1508}
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1519 struct i40e_rx_buffer *bi)
1520{
1521 struct page *page = bi->page;
1522 dma_addr_t dma;
1523
1524
1525 if (likely(page)) {
1526 rx_ring->rx_stats.page_reuse_count++;
1527 return true;
1528 }
1529
1530
1531 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1532 if (unlikely(!page)) {
1533 rx_ring->rx_stats.alloc_page_failed++;
1534 return false;
1535 }
1536
1537
1538 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1539 i40e_rx_pg_size(rx_ring),
1540 DMA_FROM_DEVICE,
1541 I40E_RX_DMA_ATTR);
1542
1543
1544
1545
1546 if (dma_mapping_error(rx_ring->dev, dma)) {
1547 __free_pages(page, i40e_rx_pg_order(rx_ring));
1548 rx_ring->rx_stats.alloc_page_failed++;
1549 return false;
1550 }
1551
1552 bi->dma = dma;
1553 bi->page = page;
1554 bi->page_offset = i40e_rx_offset(rx_ring);
1555 page_ref_add(page, USHRT_MAX - 1);
1556 bi->pagecnt_bias = USHRT_MAX;
1557
1558 return true;
1559}
1560
1561
1562
1563
1564
1565
1566
1567
1568bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1569{
1570 u16 ntu = rx_ring->next_to_use;
1571 union i40e_rx_desc *rx_desc;
1572 struct i40e_rx_buffer *bi;
1573
1574
1575 if (!rx_ring->netdev || !cleaned_count)
1576 return false;
1577
1578 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1579 bi = &rx_ring->rx_bi[ntu];
1580
1581 do {
1582 if (!i40e_alloc_mapped_page(rx_ring, bi))
1583 goto no_buffers;
1584
1585
1586 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1587 bi->page_offset,
1588 rx_ring->rx_buf_len,
1589 DMA_FROM_DEVICE);
1590
1591
1592
1593
1594 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1595
1596 rx_desc++;
1597 bi++;
1598 ntu++;
1599 if (unlikely(ntu == rx_ring->count)) {
1600 rx_desc = I40E_RX_DESC(rx_ring, 0);
1601 bi = rx_ring->rx_bi;
1602 ntu = 0;
1603 }
1604
1605
1606 rx_desc->wb.qword1.status_error_len = 0;
1607
1608 cleaned_count--;
1609 } while (cleaned_count);
1610
1611 if (rx_ring->next_to_use != ntu)
1612 i40e_release_rx_desc(rx_ring, ntu);
1613
1614 return false;
1615
1616no_buffers:
1617 if (rx_ring->next_to_use != ntu)
1618 i40e_release_rx_desc(rx_ring, ntu);
1619
1620
1621
1622
1623 return true;
1624}
1625
1626
1627
1628
1629
1630
1631
1632static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1633 struct sk_buff *skb,
1634 union i40e_rx_desc *rx_desc)
1635{
1636 struct i40e_rx_ptype_decoded decoded;
1637 u32 rx_error, rx_status;
1638 bool ipv4, ipv6;
1639 u8 ptype;
1640 u64 qword;
1641
1642 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1643 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1644 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1645 I40E_RXD_QW1_ERROR_SHIFT;
1646 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1647 I40E_RXD_QW1_STATUS_SHIFT;
1648 decoded = decode_rx_desc_ptype(ptype);
1649
1650 skb->ip_summed = CHECKSUM_NONE;
1651
1652 skb_checksum_none_assert(skb);
1653
1654
1655 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1656 return;
1657
1658
1659 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1660 return;
1661
1662
1663 if (!(decoded.known && decoded.outer_ip))
1664 return;
1665
1666 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1667 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1668 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1669 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1670
1671 if (ipv4 &&
1672 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1673 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1674 goto checksum_fail;
1675
1676
1677 if (ipv6 &&
1678 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1679
1680 return;
1681
1682
1683 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1684 goto checksum_fail;
1685
1686
1687
1688
1689
1690 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1691 return;
1692
1693
1694
1695
1696
1697 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1698 skb->csum_level = 1;
1699
1700
1701 switch (decoded.inner_prot) {
1702 case I40E_RX_PTYPE_INNER_PROT_TCP:
1703 case I40E_RX_PTYPE_INNER_PROT_UDP:
1704 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1705 skb->ip_summed = CHECKSUM_UNNECESSARY;
1706
1707 default:
1708 break;
1709 }
1710
1711 return;
1712
1713checksum_fail:
1714 vsi->back->hw_csum_rx_error++;
1715}
1716
1717
1718
1719
1720
1721
1722
1723static inline int i40e_ptype_to_htype(u8 ptype)
1724{
1725 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1726
1727 if (!decoded.known)
1728 return PKT_HASH_TYPE_NONE;
1729
1730 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1731 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1732 return PKT_HASH_TYPE_L4;
1733 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1734 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1735 return PKT_HASH_TYPE_L3;
1736 else
1737 return PKT_HASH_TYPE_L2;
1738}
1739
1740
1741
1742
1743
1744
1745
1746
1747static inline void i40e_rx_hash(struct i40e_ring *ring,
1748 union i40e_rx_desc *rx_desc,
1749 struct sk_buff *skb,
1750 u8 rx_ptype)
1751{
1752 u32 hash;
1753 const __le64 rss_mask =
1754 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1755 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1756
1757 if (!(ring->netdev->features & NETIF_F_RXHASH))
1758 return;
1759
1760 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1761 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1762 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1763 }
1764}
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1778 union i40e_rx_desc *rx_desc, struct sk_buff *skb)
1779{
1780 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1781 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1782 I40E_RXD_QW1_STATUS_SHIFT;
1783 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1784 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1785 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1786 u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1787 I40E_RXD_QW1_PTYPE_SHIFT;
1788
1789 if (unlikely(tsynvalid))
1790 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1791
1792 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1793
1794 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1795
1796 skb_record_rx_queue(skb, rx_ring->queue_index);
1797
1798 if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1799 u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
1800
1801 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1802 le16_to_cpu(vlan_tag));
1803 }
1804
1805
1806 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1807}
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1824 union i40e_rx_desc *rx_desc)
1825
1826{
1827
1828 if (IS_ERR(skb))
1829 return true;
1830
1831
1832
1833
1834
1835
1836 if (unlikely(i40e_test_staterr(rx_desc,
1837 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1838 dev_kfree_skb_any(skb);
1839 return true;
1840 }
1841
1842
1843 if (eth_skb_pad(skb))
1844 return true;
1845
1846 return false;
1847}
1848
1849
1850
1851
1852
1853
1854
1855
1856static inline bool i40e_page_is_reusable(struct page *page)
1857{
1858 return (page_to_nid(page) == numa_mem_id()) &&
1859 !page_is_pfmemalloc(page);
1860}
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1890{
1891 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1892 struct page *page = rx_buffer->page;
1893
1894
1895 if (unlikely(!i40e_page_is_reusable(page)))
1896 return false;
1897
1898#if (PAGE_SIZE < 8192)
1899
1900 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1901 return false;
1902#else
1903#define I40E_LAST_OFFSET \
1904 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1905 if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1906 return false;
1907#endif
1908
1909
1910
1911
1912
1913 if (unlikely(pagecnt_bias == 1)) {
1914 page_ref_add(page, USHRT_MAX - 1);
1915 rx_buffer->pagecnt_bias = USHRT_MAX;
1916 }
1917
1918 return true;
1919}
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1934 struct i40e_rx_buffer *rx_buffer,
1935 struct sk_buff *skb,
1936 unsigned int size)
1937{
1938#if (PAGE_SIZE < 8192)
1939 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1940#else
1941 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1942#endif
1943
1944 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1945 rx_buffer->page_offset, size, truesize);
1946
1947
1948#if (PAGE_SIZE < 8192)
1949 rx_buffer->page_offset ^= truesize;
1950#else
1951 rx_buffer->page_offset += truesize;
1952#endif
1953}
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
1964 const unsigned int size)
1965{
1966 struct i40e_rx_buffer *rx_buffer;
1967
1968 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1969 prefetchw(rx_buffer->page);
1970
1971
1972 dma_sync_single_range_for_cpu(rx_ring->dev,
1973 rx_buffer->dma,
1974 rx_buffer->page_offset,
1975 size,
1976 DMA_FROM_DEVICE);
1977
1978
1979 rx_buffer->pagecnt_bias--;
1980
1981 return rx_buffer;
1982}
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
1995 struct i40e_rx_buffer *rx_buffer,
1996 struct xdp_buff *xdp)
1997{
1998 unsigned int size = xdp->data_end - xdp->data;
1999#if (PAGE_SIZE < 8192)
2000 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2001#else
2002 unsigned int truesize = SKB_DATA_ALIGN(size);
2003#endif
2004 unsigned int headlen;
2005 struct sk_buff *skb;
2006
2007
2008 prefetch(xdp->data);
2009#if L1_CACHE_BYTES < 128
2010 prefetch(xdp->data + L1_CACHE_BYTES);
2011#endif
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2030 I40E_RX_HDR_SIZE,
2031 GFP_ATOMIC | __GFP_NOWARN);
2032 if (unlikely(!skb))
2033 return NULL;
2034
2035
2036 headlen = size;
2037 if (headlen > I40E_RX_HDR_SIZE)
2038 headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
2039
2040
2041 memcpy(__skb_put(skb, headlen), xdp->data,
2042 ALIGN(headlen, sizeof(long)));
2043
2044
2045 size -= headlen;
2046 if (size) {
2047 skb_add_rx_frag(skb, 0, rx_buffer->page,
2048 rx_buffer->page_offset + headlen,
2049 size, truesize);
2050
2051
2052#if (PAGE_SIZE < 8192)
2053 rx_buffer->page_offset ^= truesize;
2054#else
2055 rx_buffer->page_offset += truesize;
2056#endif
2057 } else {
2058
2059 rx_buffer->pagecnt_bias++;
2060 }
2061
2062 return skb;
2063}
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2075 struct i40e_rx_buffer *rx_buffer,
2076 struct xdp_buff *xdp)
2077{
2078 unsigned int metasize = xdp->data - xdp->data_meta;
2079#if (PAGE_SIZE < 8192)
2080 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2081#else
2082 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2083 SKB_DATA_ALIGN(xdp->data_end -
2084 xdp->data_hard_start);
2085#endif
2086 struct sk_buff *skb;
2087
2088
2089
2090
2091
2092
2093 prefetch(xdp->data_meta);
2094#if L1_CACHE_BYTES < 128
2095 prefetch(xdp->data_meta + L1_CACHE_BYTES);
2096#endif
2097
2098 skb = build_skb(xdp->data_hard_start, truesize);
2099 if (unlikely(!skb))
2100 return NULL;
2101
2102
2103 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2104 __skb_put(skb, xdp->data_end - xdp->data);
2105 if (metasize)
2106 skb_metadata_set(skb, metasize);
2107
2108
2109#if (PAGE_SIZE < 8192)
2110 rx_buffer->page_offset ^= truesize;
2111#else
2112 rx_buffer->page_offset += truesize;
2113#endif
2114
2115 return skb;
2116}
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2127 struct i40e_rx_buffer *rx_buffer)
2128{
2129 if (i40e_can_reuse_rx_page(rx_buffer)) {
2130
2131 i40e_reuse_rx_page(rx_ring, rx_buffer);
2132 } else {
2133
2134 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2135 i40e_rx_pg_size(rx_ring),
2136 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2137 __page_frag_cache_drain(rx_buffer->page,
2138 rx_buffer->pagecnt_bias);
2139
2140 rx_buffer->page = NULL;
2141 }
2142}
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2156 union i40e_rx_desc *rx_desc,
2157 struct sk_buff *skb)
2158{
2159 u32 ntc = rx_ring->next_to_clean + 1;
2160
2161
2162 ntc = (ntc < rx_ring->count) ? ntc : 0;
2163 rx_ring->next_to_clean = ntc;
2164
2165 prefetch(I40E_RX_DESC(rx_ring, ntc));
2166
2167
2168#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2169 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2170 return false;
2171
2172 rx_ring->rx_stats.non_eop_descs++;
2173
2174 return true;
2175}
2176
2177static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2178 struct i40e_ring *xdp_ring);
2179
2180int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2181{
2182 struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
2183
2184 if (unlikely(!xdpf))
2185 return I40E_XDP_CONSUMED;
2186
2187 return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2188}
2189
2190
2191
2192
2193
2194
2195static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2196 struct xdp_buff *xdp)
2197{
2198 int err, result = I40E_XDP_PASS;
2199 struct i40e_ring *xdp_ring;
2200 struct bpf_prog *xdp_prog;
2201 u32 act;
2202
2203 rcu_read_lock();
2204 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2205
2206 if (!xdp_prog)
2207 goto xdp_out;
2208
2209 prefetchw(xdp->data_hard_start);
2210
2211 act = bpf_prog_run_xdp(xdp_prog, xdp);
2212 switch (act) {
2213 case XDP_PASS:
2214 break;
2215 case XDP_TX:
2216 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2217 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2218 break;
2219 case XDP_REDIRECT:
2220 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2221 result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
2222 break;
2223 default:
2224 bpf_warn_invalid_xdp_action(act);
2225
2226 case XDP_ABORTED:
2227 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2228
2229 case XDP_DROP:
2230 result = I40E_XDP_CONSUMED;
2231 break;
2232 }
2233xdp_out:
2234 rcu_read_unlock();
2235 return ERR_PTR(-result);
2236}
2237
2238
2239
2240
2241
2242
2243
2244static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2245 struct i40e_rx_buffer *rx_buffer,
2246 unsigned int size)
2247{
2248#if (PAGE_SIZE < 8192)
2249 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2250
2251 rx_buffer->page_offset ^= truesize;
2252#else
2253 unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2254
2255 rx_buffer->page_offset += truesize;
2256#endif
2257}
2258
2259
2260
2261
2262
2263
2264
2265void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2266{
2267
2268
2269
2270 wmb();
2271 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2272}
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2283 unsigned int total_rx_bytes,
2284 unsigned int total_rx_packets)
2285{
2286 u64_stats_update_begin(&rx_ring->syncp);
2287 rx_ring->stats.packets += total_rx_packets;
2288 rx_ring->stats.bytes += total_rx_bytes;
2289 u64_stats_update_end(&rx_ring->syncp);
2290 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2291 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2292}
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2304{
2305 if (xdp_res & I40E_XDP_REDIR)
2306 xdp_do_flush_map();
2307
2308 if (xdp_res & I40E_XDP_TX) {
2309 struct i40e_ring *xdp_ring =
2310 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2311
2312 i40e_xdp_ring_update_tail(xdp_ring);
2313 }
2314}
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2329{
2330 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2331 struct sk_buff *skb = rx_ring->skb;
2332 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2333 unsigned int xdp_xmit = 0;
2334 bool failure = false;
2335 struct xdp_buff xdp;
2336
2337 xdp.rxq = &rx_ring->xdp_rxq;
2338
2339 while (likely(total_rx_packets < (unsigned int)budget)) {
2340 struct i40e_rx_buffer *rx_buffer;
2341 union i40e_rx_desc *rx_desc;
2342 unsigned int size;
2343 u64 qword;
2344
2345
2346 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2347 failure = failure ||
2348 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2349 cleaned_count = 0;
2350 }
2351
2352 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2353
2354
2355
2356
2357
2358
2359 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2360
2361
2362
2363
2364
2365 dma_rmb();
2366
2367 rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc,
2368 qword);
2369 if (unlikely(rx_buffer)) {
2370 i40e_reuse_rx_page(rx_ring, rx_buffer);
2371 cleaned_count++;
2372 continue;
2373 }
2374
2375 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2376 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2377 if (!size)
2378 break;
2379
2380 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2381 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2382
2383
2384 if (!skb) {
2385 xdp.data = page_address(rx_buffer->page) +
2386 rx_buffer->page_offset;
2387 xdp.data_meta = xdp.data;
2388 xdp.data_hard_start = xdp.data -
2389 i40e_rx_offset(rx_ring);
2390 xdp.data_end = xdp.data + size;
2391
2392 skb = i40e_run_xdp(rx_ring, &xdp);
2393 }
2394
2395 if (IS_ERR(skb)) {
2396 unsigned int xdp_res = -PTR_ERR(skb);
2397
2398 if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2399 xdp_xmit |= xdp_res;
2400 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2401 } else {
2402 rx_buffer->pagecnt_bias++;
2403 }
2404 total_rx_bytes += size;
2405 total_rx_packets++;
2406 } else if (skb) {
2407 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2408 } else if (ring_uses_build_skb(rx_ring)) {
2409 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2410 } else {
2411 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2412 }
2413
2414
2415 if (!skb) {
2416 rx_ring->rx_stats.alloc_buff_failed++;
2417 rx_buffer->pagecnt_bias++;
2418 break;
2419 }
2420
2421 i40e_put_rx_buffer(rx_ring, rx_buffer);
2422 cleaned_count++;
2423
2424 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2425 continue;
2426
2427 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2428 skb = NULL;
2429 continue;
2430 }
2431
2432
2433 total_rx_bytes += skb->len;
2434
2435
2436 i40e_process_skb_fields(rx_ring, rx_desc, skb);
2437
2438 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2439 napi_gro_receive(&rx_ring->q_vector->napi, skb);
2440 skb = NULL;
2441
2442
2443 total_rx_packets++;
2444 }
2445
2446 i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2447 rx_ring->skb = skb;
2448
2449 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2450
2451
2452 return failure ? budget : (int)total_rx_packets;
2453}
2454
2455static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2456{
2457 u32 val;
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474 itr &= I40E_ITR_MASK;
2475
2476 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2477 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2478 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2479
2480 return val;
2481}
2482
2483
2484#define INTREG I40E_PFINT_DYN_CTLN
2485
2486
2487
2488
2489
2490
2491
2492
2493#define ITR_COUNTDOWN_START 3
2494
2495
2496
2497
2498
2499
2500
2501static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2502 struct i40e_q_vector *q_vector)
2503{
2504 struct i40e_hw *hw = &vsi->back->hw;
2505 u32 intval;
2506
2507
2508 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2509 i40e_irq_dynamic_enable_icr0(vsi->back);
2510 return;
2511 }
2512
2513
2514 i40e_update_itr(q_vector, &q_vector->tx);
2515 i40e_update_itr(q_vector, &q_vector->rx);
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2526
2527 intval = i40e_buildreg_itr(I40E_RX_ITR,
2528 q_vector->rx.target_itr);
2529 q_vector->rx.current_itr = q_vector->rx.target_itr;
2530 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2531 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2532 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2533 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2534
2535
2536
2537 intval = i40e_buildreg_itr(I40E_TX_ITR,
2538 q_vector->tx.target_itr);
2539 q_vector->tx.current_itr = q_vector->tx.target_itr;
2540 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2541 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2542
2543 intval = i40e_buildreg_itr(I40E_RX_ITR,
2544 q_vector->rx.target_itr);
2545 q_vector->rx.current_itr = q_vector->rx.target_itr;
2546 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2547 } else {
2548
2549 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2550 if (q_vector->itr_countdown)
2551 q_vector->itr_countdown--;
2552 }
2553
2554 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2555 wr32(hw, INTREG(q_vector->reg_idx), intval);
2556}
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567int i40e_napi_poll(struct napi_struct *napi, int budget)
2568{
2569 struct i40e_q_vector *q_vector =
2570 container_of(napi, struct i40e_q_vector, napi);
2571 struct i40e_vsi *vsi = q_vector->vsi;
2572 struct i40e_ring *ring;
2573 bool clean_complete = true;
2574 bool arm_wb = false;
2575 int budget_per_ring;
2576 int work_done = 0;
2577
2578 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2579 napi_complete(napi);
2580 return 0;
2581 }
2582
2583
2584
2585
2586 i40e_for_each_ring(ring, q_vector->tx) {
2587 bool wd = ring->xsk_umem ?
2588 i40e_clean_xdp_tx_irq(vsi, ring, budget) :
2589 i40e_clean_tx_irq(vsi, ring, budget);
2590
2591 if (!wd) {
2592 clean_complete = false;
2593 continue;
2594 }
2595 arm_wb |= ring->arm_wb;
2596 ring->arm_wb = false;
2597 }
2598
2599
2600 if (budget <= 0)
2601 goto tx_only;
2602
2603
2604
2605
2606 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2607
2608 i40e_for_each_ring(ring, q_vector->rx) {
2609 int cleaned = ring->xsk_umem ?
2610 i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2611 i40e_clean_rx_irq(ring, budget_per_ring);
2612
2613 work_done += cleaned;
2614
2615 if (cleaned >= budget_per_ring)
2616 clean_complete = false;
2617 }
2618
2619
2620 if (!clean_complete) {
2621 int cpu_id = smp_processor_id();
2622
2623
2624
2625
2626
2627
2628
2629
2630 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2631
2632 napi_complete_done(napi, work_done);
2633
2634
2635 i40e_force_wb(vsi, q_vector);
2636
2637
2638 return budget - 1;
2639 }
2640tx_only:
2641 if (arm_wb) {
2642 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2643 i40e_enable_wb_on_itr(vsi, q_vector);
2644 }
2645 return budget;
2646 }
2647
2648 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2649 q_vector->arm_wb_state = false;
2650
2651
2652
2653
2654 if (likely(napi_complete_done(napi, work_done)))
2655 i40e_update_enable_itr(vsi, q_vector);
2656
2657 return min(work_done, budget - 1);
2658}
2659
2660
2661
2662
2663
2664
2665
2666static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2667 u32 tx_flags)
2668{
2669 struct i40e_filter_program_desc *fdir_desc;
2670 struct i40e_pf *pf = tx_ring->vsi->back;
2671 union {
2672 unsigned char *network;
2673 struct iphdr *ipv4;
2674 struct ipv6hdr *ipv6;
2675 } hdr;
2676 struct tcphdr *th;
2677 unsigned int hlen;
2678 u32 flex_ptype, dtype_cmd;
2679 int l4_proto;
2680 u16 i;
2681
2682
2683 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2684 return;
2685
2686 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2687 return;
2688
2689
2690 if (!tx_ring->atr_sample_rate)
2691 return;
2692
2693
2694 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2695 return;
2696
2697
2698 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2699 skb_inner_network_header(skb) : skb_network_header(skb);
2700
2701
2702
2703
2704 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2705
2706 hlen = (hdr.network[0] & 0x0F) << 2;
2707 l4_proto = hdr.ipv4->protocol;
2708 } else {
2709
2710 unsigned int inner_hlen = hdr.network - skb->data;
2711 unsigned int h_offset = inner_hlen;
2712
2713
2714 l4_proto =
2715 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2716
2717 hlen = h_offset - inner_hlen;
2718 }
2719
2720 if (l4_proto != IPPROTO_TCP)
2721 return;
2722
2723 th = (struct tcphdr *)(hdr.network + hlen);
2724
2725
2726 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2727 return;
2728 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2729
2730
2731
2732 if (th->fin || th->rst)
2733 return;
2734 }
2735
2736 tx_ring->atr_count++;
2737
2738
2739 if (!th->fin &&
2740 !th->syn &&
2741 !th->rst &&
2742 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2743 return;
2744
2745 tx_ring->atr_count = 0;
2746
2747
2748 i = tx_ring->next_to_use;
2749 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2750
2751 i++;
2752 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2753
2754 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2755 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2756 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2757 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2758 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2759 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2760 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2761
2762 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2763
2764 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2765
2766 dtype_cmd |= (th->fin || th->rst) ?
2767 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2768 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2769 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2770 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2771
2772 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2773 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2774
2775 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2776 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2777
2778 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2779 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2780 dtype_cmd |=
2781 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2782 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2783 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2784 else
2785 dtype_cmd |=
2786 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2787 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2788 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2789
2790 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2791 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2792
2793 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2794 fdir_desc->rsvd = cpu_to_le32(0);
2795 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2796 fdir_desc->fd_id = cpu_to_le32(0);
2797}
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2812 struct i40e_ring *tx_ring,
2813 u32 *flags)
2814{
2815 __be16 protocol = skb->protocol;
2816 u32 tx_flags = 0;
2817
2818 if (protocol == htons(ETH_P_8021Q) &&
2819 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2820
2821
2822
2823
2824
2825
2826
2827 skb->protocol = vlan_get_protocol(skb);
2828 goto out;
2829 }
2830
2831
2832 if (skb_vlan_tag_present(skb)) {
2833 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2834 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2835
2836 } else if (protocol == htons(ETH_P_8021Q)) {
2837 struct vlan_hdr *vhdr, _vhdr;
2838
2839 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2840 if (!vhdr)
2841 return -EINVAL;
2842
2843 protocol = vhdr->h_vlan_encapsulated_proto;
2844 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2845 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2846 }
2847
2848 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2849 goto out;
2850
2851
2852 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2853 (skb->priority != TC_PRIO_CONTROL)) {
2854 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2855 tx_flags |= (skb->priority & 0x7) <<
2856 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2857 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2858 struct vlan_ethhdr *vhdr;
2859 int rc;
2860
2861 rc = skb_cow_head(skb, 0);
2862 if (rc < 0)
2863 return rc;
2864 vhdr = (struct vlan_ethhdr *)skb->data;
2865 vhdr->h_vlan_TCI = htons(tx_flags >>
2866 I40E_TX_FLAGS_VLAN_SHIFT);
2867 } else {
2868 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2869 }
2870 }
2871
2872out:
2873 *flags = tx_flags;
2874 return 0;
2875}
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2886 u64 *cd_type_cmd_tso_mss)
2887{
2888 struct sk_buff *skb = first->skb;
2889 u64 cd_cmd, cd_tso_len, cd_mss;
2890 union {
2891 struct iphdr *v4;
2892 struct ipv6hdr *v6;
2893 unsigned char *hdr;
2894 } ip;
2895 union {
2896 struct tcphdr *tcp;
2897 struct udphdr *udp;
2898 unsigned char *hdr;
2899 } l4;
2900 u32 paylen, l4_offset;
2901 u16 gso_segs, gso_size;
2902 int err;
2903
2904 if (skb->ip_summed != CHECKSUM_PARTIAL)
2905 return 0;
2906
2907 if (!skb_is_gso(skb))
2908 return 0;
2909
2910 err = skb_cow_head(skb, 0);
2911 if (err < 0)
2912 return err;
2913
2914 ip.hdr = skb_network_header(skb);
2915 l4.hdr = skb_transport_header(skb);
2916
2917
2918 if (ip.v4->version == 4) {
2919 ip.v4->tot_len = 0;
2920 ip.v4->check = 0;
2921 } else {
2922 ip.v6->payload_len = 0;
2923 }
2924
2925 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2926 SKB_GSO_GRE_CSUM |
2927 SKB_GSO_IPXIP4 |
2928 SKB_GSO_IPXIP6 |
2929 SKB_GSO_UDP_TUNNEL |
2930 SKB_GSO_UDP_TUNNEL_CSUM)) {
2931 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2932 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2933 l4.udp->len = 0;
2934
2935
2936 l4_offset = l4.hdr - skb->data;
2937
2938
2939 paylen = skb->len - l4_offset;
2940 csum_replace_by_diff(&l4.udp->check,
2941 (__force __wsum)htonl(paylen));
2942 }
2943
2944
2945 ip.hdr = skb_inner_network_header(skb);
2946 l4.hdr = skb_inner_transport_header(skb);
2947
2948
2949 if (ip.v4->version == 4) {
2950 ip.v4->tot_len = 0;
2951 ip.v4->check = 0;
2952 } else {
2953 ip.v6->payload_len = 0;
2954 }
2955 }
2956
2957
2958 l4_offset = l4.hdr - skb->data;
2959
2960
2961 paylen = skb->len - l4_offset;
2962 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2963
2964
2965 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2966
2967
2968 gso_size = skb_shinfo(skb)->gso_size;
2969 gso_segs = skb_shinfo(skb)->gso_segs;
2970
2971
2972 first->gso_segs = gso_segs;
2973 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2974
2975
2976 cd_cmd = I40E_TX_CTX_DESC_TSO;
2977 cd_tso_len = skb->len - *hdr_len;
2978 cd_mss = gso_size;
2979 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2980 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2981 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2982 return 1;
2983}
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2995 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2996{
2997 struct i40e_pf *pf;
2998
2999 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
3000 return 0;
3001
3002
3003 if (tx_flags & I40E_TX_FLAGS_TSO)
3004 return 0;
3005
3006
3007
3008
3009 pf = i40e_netdev_to_pf(tx_ring->netdev);
3010 if (!(pf->flags & I40E_FLAG_PTP))
3011 return 0;
3012
3013 if (pf->ptp_tx &&
3014 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3015 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3016 pf->ptp_tx_start = jiffies;
3017 pf->ptp_tx_skb = skb_get(skb);
3018 } else {
3019 pf->tx_hwtstamp_skipped++;
3020 return 0;
3021 }
3022
3023 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3024 I40E_TXD_CTX_QW1_CMD_SHIFT;
3025
3026 return 1;
3027}
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3039 u32 *td_cmd, u32 *td_offset,
3040 struct i40e_ring *tx_ring,
3041 u32 *cd_tunneling)
3042{
3043 union {
3044 struct iphdr *v4;
3045 struct ipv6hdr *v6;
3046 unsigned char *hdr;
3047 } ip;
3048 union {
3049 struct tcphdr *tcp;
3050 struct udphdr *udp;
3051 unsigned char *hdr;
3052 } l4;
3053 unsigned char *exthdr;
3054 u32 offset, cmd = 0;
3055 __be16 frag_off;
3056 u8 l4_proto = 0;
3057
3058 if (skb->ip_summed != CHECKSUM_PARTIAL)
3059 return 0;
3060
3061 ip.hdr = skb_network_header(skb);
3062 l4.hdr = skb_transport_header(skb);
3063
3064
3065 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3066
3067 if (skb->encapsulation) {
3068 u32 tunnel = 0;
3069
3070 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3071 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3072 I40E_TX_CTX_EXT_IP_IPV4 :
3073 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3074
3075 l4_proto = ip.v4->protocol;
3076 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3077 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3078
3079 exthdr = ip.hdr + sizeof(*ip.v6);
3080 l4_proto = ip.v6->nexthdr;
3081 if (l4.hdr != exthdr)
3082 ipv6_skip_exthdr(skb, exthdr - skb->data,
3083 &l4_proto, &frag_off);
3084 }
3085
3086
3087 switch (l4_proto) {
3088 case IPPROTO_UDP:
3089 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3090 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3091 break;
3092 case IPPROTO_GRE:
3093 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3094 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3095 break;
3096 case IPPROTO_IPIP:
3097 case IPPROTO_IPV6:
3098 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3099 l4.hdr = skb_inner_network_header(skb);
3100 break;
3101 default:
3102 if (*tx_flags & I40E_TX_FLAGS_TSO)
3103 return -1;
3104
3105 skb_checksum_help(skb);
3106 return 0;
3107 }
3108
3109
3110 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3111 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3112
3113
3114 ip.hdr = skb_inner_network_header(skb);
3115
3116
3117 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3118 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3119
3120
3121 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3122 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3123 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3124 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3125
3126
3127 *cd_tunneling |= tunnel;
3128
3129
3130 l4.hdr = skb_inner_transport_header(skb);
3131 l4_proto = 0;
3132
3133
3134 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3135 if (ip.v4->version == 4)
3136 *tx_flags |= I40E_TX_FLAGS_IPV4;
3137 if (ip.v6->version == 6)
3138 *tx_flags |= I40E_TX_FLAGS_IPV6;
3139 }
3140
3141
3142 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3143 l4_proto = ip.v4->protocol;
3144
3145
3146
3147 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3148 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3149 I40E_TX_DESC_CMD_IIPT_IPV4;
3150 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3151 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3152
3153 exthdr = ip.hdr + sizeof(*ip.v6);
3154 l4_proto = ip.v6->nexthdr;
3155 if (l4.hdr != exthdr)
3156 ipv6_skip_exthdr(skb, exthdr - skb->data,
3157 &l4_proto, &frag_off);
3158 }
3159
3160
3161 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3162
3163
3164 switch (l4_proto) {
3165 case IPPROTO_TCP:
3166
3167 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3168 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3169 break;
3170 case IPPROTO_SCTP:
3171
3172 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3173 offset |= (sizeof(struct sctphdr) >> 2) <<
3174 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3175 break;
3176 case IPPROTO_UDP:
3177
3178 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3179 offset |= (sizeof(struct udphdr) >> 2) <<
3180 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3181 break;
3182 default:
3183 if (*tx_flags & I40E_TX_FLAGS_TSO)
3184 return -1;
3185 skb_checksum_help(skb);
3186 return 0;
3187 }
3188
3189 *td_cmd |= cmd;
3190 *td_offset |= offset;
3191
3192 return 1;
3193}
3194
3195
3196
3197
3198
3199
3200
3201
3202static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3203 const u64 cd_type_cmd_tso_mss,
3204 const u32 cd_tunneling, const u32 cd_l2tag2)
3205{
3206 struct i40e_tx_context_desc *context_desc;
3207 int i = tx_ring->next_to_use;
3208
3209 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3210 !cd_tunneling && !cd_l2tag2)
3211 return;
3212
3213
3214 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3215
3216 i++;
3217 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3218
3219
3220 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3221 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3222 context_desc->rsvd = cpu_to_le16(0);
3223 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3224}
3225
3226
3227
3228
3229
3230
3231
3232
3233int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3234{
3235 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3236
3237 smp_mb();
3238
3239
3240 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3241 return -EBUSY;
3242
3243
3244 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3245 ++tx_ring->tx_stats.restart_queue;
3246 return 0;
3247}
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262bool __i40e_chk_linearize(struct sk_buff *skb)
3263{
3264 const struct skb_frag_struct *frag, *stale;
3265 int nr_frags, sum;
3266
3267
3268 nr_frags = skb_shinfo(skb)->nr_frags;
3269 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3270 return false;
3271
3272
3273
3274
3275 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3276 frag = &skb_shinfo(skb)->frags[0];
3277
3278
3279
3280
3281
3282
3283
3284 sum = 1 - skb_shinfo(skb)->gso_size;
3285
3286
3287 sum += skb_frag_size(frag++);
3288 sum += skb_frag_size(frag++);
3289 sum += skb_frag_size(frag++);
3290 sum += skb_frag_size(frag++);
3291 sum += skb_frag_size(frag++);
3292
3293
3294
3295
3296 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3297 int stale_size = skb_frag_size(stale);
3298
3299 sum += skb_frag_size(frag++);
3300
3301
3302
3303
3304
3305
3306
3307 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3308 int align_pad = -(stale->page_offset) &
3309 (I40E_MAX_READ_REQ_SIZE - 1);
3310
3311 sum -= align_pad;
3312 stale_size -= align_pad;
3313
3314 do {
3315 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3316 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3317 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3318 }
3319
3320
3321 if (sum < 0)
3322 return true;
3323
3324 if (!nr_frags--)
3325 break;
3326
3327 sum -= stale_size;
3328 }
3329
3330 return false;
3331}
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3346 struct i40e_tx_buffer *first, u32 tx_flags,
3347 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3348{
3349 unsigned int data_len = skb->data_len;
3350 unsigned int size = skb_headlen(skb);
3351 struct skb_frag_struct *frag;
3352 struct i40e_tx_buffer *tx_bi;
3353 struct i40e_tx_desc *tx_desc;
3354 u16 i = tx_ring->next_to_use;
3355 u32 td_tag = 0;
3356 dma_addr_t dma;
3357 u16 desc_count = 1;
3358
3359 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3360 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3361 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3362 I40E_TX_FLAGS_VLAN_SHIFT;
3363 }
3364
3365 first->tx_flags = tx_flags;
3366
3367 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3368
3369 tx_desc = I40E_TX_DESC(tx_ring, i);
3370 tx_bi = first;
3371
3372 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3373 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3374
3375 if (dma_mapping_error(tx_ring->dev, dma))
3376 goto dma_error;
3377
3378
3379 dma_unmap_len_set(tx_bi, len, size);
3380 dma_unmap_addr_set(tx_bi, dma, dma);
3381
3382
3383 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3384 tx_desc->buffer_addr = cpu_to_le64(dma);
3385
3386 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3387 tx_desc->cmd_type_offset_bsz =
3388 build_ctob(td_cmd, td_offset,
3389 max_data, td_tag);
3390
3391 tx_desc++;
3392 i++;
3393 desc_count++;
3394
3395 if (i == tx_ring->count) {
3396 tx_desc = I40E_TX_DESC(tx_ring, 0);
3397 i = 0;
3398 }
3399
3400 dma += max_data;
3401 size -= max_data;
3402
3403 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3404 tx_desc->buffer_addr = cpu_to_le64(dma);
3405 }
3406
3407 if (likely(!data_len))
3408 break;
3409
3410 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3411 size, td_tag);
3412
3413 tx_desc++;
3414 i++;
3415 desc_count++;
3416
3417 if (i == tx_ring->count) {
3418 tx_desc = I40E_TX_DESC(tx_ring, 0);
3419 i = 0;
3420 }
3421
3422 size = skb_frag_size(frag);
3423 data_len -= size;
3424
3425 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3426 DMA_TO_DEVICE);
3427
3428 tx_bi = &tx_ring->tx_bi[i];
3429 }
3430
3431 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3432
3433 i++;
3434 if (i == tx_ring->count)
3435 i = 0;
3436
3437 tx_ring->next_to_use = i;
3438
3439 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3440
3441
3442 td_cmd |= I40E_TX_DESC_CMD_EOP;
3443
3444
3445
3446
3447 desc_count |= ++tx_ring->packet_stride;
3448
3449 if (desc_count >= WB_STRIDE) {
3450
3451 td_cmd |= I40E_TX_DESC_CMD_RS;
3452 tx_ring->packet_stride = 0;
3453 }
3454
3455 tx_desc->cmd_type_offset_bsz =
3456 build_ctob(td_cmd, td_offset, size, td_tag);
3457
3458 skb_tx_timestamp(skb);
3459
3460
3461
3462
3463
3464
3465
3466 wmb();
3467
3468
3469 first->next_to_watch = tx_desc;
3470
3471
3472 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
3473 writel(i, tx_ring->tail);
3474
3475
3476
3477
3478 mmiowb();
3479 }
3480
3481 return 0;
3482
3483dma_error:
3484 dev_info(tx_ring->dev, "TX DMA map failed\n");
3485
3486
3487 for (;;) {
3488 tx_bi = &tx_ring->tx_bi[i];
3489 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3490 if (tx_bi == first)
3491 break;
3492 if (i == 0)
3493 i = tx_ring->count;
3494 i--;
3495 }
3496
3497 tx_ring->next_to_use = i;
3498
3499 return -1;
3500}
3501
3502
3503
3504
3505
3506
3507static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3508 struct i40e_ring *xdp_ring)
3509{
3510 u16 i = xdp_ring->next_to_use;
3511 struct i40e_tx_buffer *tx_bi;
3512 struct i40e_tx_desc *tx_desc;
3513 void *data = xdpf->data;
3514 u32 size = xdpf->len;
3515 dma_addr_t dma;
3516
3517 if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
3518 xdp_ring->tx_stats.tx_busy++;
3519 return I40E_XDP_CONSUMED;
3520 }
3521 dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
3522 if (dma_mapping_error(xdp_ring->dev, dma))
3523 return I40E_XDP_CONSUMED;
3524
3525 tx_bi = &xdp_ring->tx_bi[i];
3526 tx_bi->bytecount = size;
3527 tx_bi->gso_segs = 1;
3528 tx_bi->xdpf = xdpf;
3529
3530
3531 dma_unmap_len_set(tx_bi, len, size);
3532 dma_unmap_addr_set(tx_bi, dma, dma);
3533
3534 tx_desc = I40E_TX_DESC(xdp_ring, i);
3535 tx_desc->buffer_addr = cpu_to_le64(dma);
3536 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3537 | I40E_TXD_CMD,
3538 0, size, 0);
3539
3540
3541
3542
3543 smp_wmb();
3544
3545 i++;
3546 if (i == xdp_ring->count)
3547 i = 0;
3548
3549 tx_bi->next_to_watch = tx_desc;
3550 xdp_ring->next_to_use = i;
3551
3552 return I40E_XDP_TX;
3553}
3554
3555
3556
3557
3558
3559
3560
3561
3562static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3563 struct i40e_ring *tx_ring)
3564{
3565 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3566 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3567 struct i40e_tx_buffer *first;
3568 u32 td_offset = 0;
3569 u32 tx_flags = 0;
3570 __be16 protocol;
3571 u32 td_cmd = 0;
3572 u8 hdr_len = 0;
3573 int tso, count;
3574 int tsyn;
3575
3576
3577 prefetch(skb->data);
3578
3579 i40e_trace(xmit_frame_ring, skb, tx_ring);
3580
3581 count = i40e_xmit_descriptor_count(skb);
3582 if (i40e_chk_linearize(skb, count)) {
3583 if (__skb_linearize(skb)) {
3584 dev_kfree_skb_any(skb);
3585 return NETDEV_TX_OK;
3586 }
3587 count = i40e_txd_use_count(skb->len);
3588 tx_ring->tx_stats.tx_linearize++;
3589 }
3590
3591
3592
3593
3594
3595
3596
3597 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3598 tx_ring->tx_stats.tx_busy++;
3599 return NETDEV_TX_BUSY;
3600 }
3601
3602
3603 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3604 first->skb = skb;
3605 first->bytecount = skb->len;
3606 first->gso_segs = 1;
3607
3608
3609 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3610 goto out_drop;
3611
3612
3613 protocol = vlan_get_protocol(skb);
3614
3615
3616 if (protocol == htons(ETH_P_IP))
3617 tx_flags |= I40E_TX_FLAGS_IPV4;
3618 else if (protocol == htons(ETH_P_IPV6))
3619 tx_flags |= I40E_TX_FLAGS_IPV6;
3620
3621 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3622
3623 if (tso < 0)
3624 goto out_drop;
3625 else if (tso)
3626 tx_flags |= I40E_TX_FLAGS_TSO;
3627
3628
3629 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3630 tx_ring, &cd_tunneling);
3631 if (tso < 0)
3632 goto out_drop;
3633
3634 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3635
3636 if (tsyn)
3637 tx_flags |= I40E_TX_FLAGS_TSYN;
3638
3639
3640 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3641
3642 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3643 cd_tunneling, cd_l2tag2);
3644
3645
3646
3647
3648
3649 i40e_atr(tx_ring, skb, tx_flags);
3650
3651 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3652 td_cmd, td_offset))
3653 goto cleanup_tx_tstamp;
3654
3655 return NETDEV_TX_OK;
3656
3657out_drop:
3658 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3659 dev_kfree_skb_any(first->skb);
3660 first->skb = NULL;
3661cleanup_tx_tstamp:
3662 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3663 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3664
3665 dev_kfree_skb_any(pf->ptp_tx_skb);
3666 pf->ptp_tx_skb = NULL;
3667 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3668 }
3669
3670 return NETDEV_TX_OK;
3671}
3672
3673
3674
3675
3676
3677
3678
3679
3680netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3681{
3682 struct i40e_netdev_priv *np = netdev_priv(netdev);
3683 struct i40e_vsi *vsi = np->vsi;
3684 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3685
3686
3687
3688
3689 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3690 return NETDEV_TX_OK;
3691
3692 return i40e_xmit_frame_ring(skb, tx_ring);
3693}
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3707 u32 flags)
3708{
3709 struct i40e_netdev_priv *np = netdev_priv(dev);
3710 unsigned int queue_index = smp_processor_id();
3711 struct i40e_vsi *vsi = np->vsi;
3712 struct i40e_pf *pf = vsi->back;
3713 struct i40e_ring *xdp_ring;
3714 int drops = 0;
3715 int i;
3716
3717 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3718 return -ENETDOWN;
3719
3720 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
3721 test_bit(__I40E_CONFIG_BUSY, pf->state))
3722 return -ENXIO;
3723
3724 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3725 return -EINVAL;
3726
3727 xdp_ring = vsi->xdp_rings[queue_index];
3728
3729 for (i = 0; i < n; i++) {
3730 struct xdp_frame *xdpf = frames[i];
3731 int err;
3732
3733 err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
3734 if (err != I40E_XDP_TX) {
3735 xdp_return_frame_rx_napi(xdpf);
3736 drops++;
3737 }
3738 }
3739
3740 if (unlikely(flags & XDP_XMIT_FLUSH))
3741 i40e_xdp_ring_update_tail(xdp_ring);
3742
3743 return n - drops;
3744}
3745