// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
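
/**
 * i40e_xsk_umem_dma_map - DMA map the pages of a UMEM for the netdev
 * @vsi: Current VSI
 * @umem: UMEM to DMA map
 *
 * Returns 0 on success, <0 on failure
 **/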
static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
{
	struct i40e_pf *pf = vsi->back;
	struct device *dev;
	unsigned int i, j;
	dma_addr_t dma;

	dev = &pf->pdev->dev;
	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))
			goto out_unmap;

		umem->pages[i].dma = dma;
	}

	return 0;

out_unmap:
	/* Unwind only the mappings that succeeded (0..i-1). */
	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
	}

	return -1;
}
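
/**
 * i40e_xsk_umem_dma_unmap - DMA unmap the pages of a UMEM for the netdev
 * @vsi: Current VSI
 * @umem: UMEM to DMA unmap
 **/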
static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
{
	struct i40e_pf *pf = vsi->back;
	struct device *dev;
	unsigned int i;

	dev = &pf->pdev->dev;

	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);

		umem->pages[i].dma = 0;
	}
}
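
/**
 * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
 * @vsi: Current VSI
 * @umem: UMEM to enable/associate
 * @qid: Rx ring to associate UMEM to
 *
 * Returns 0 on success, <0 on failure
 **/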
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xdp_umem_fq_reuse *reuseq;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
	if (!reuseq)
		return -ENOMEM;

	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	err = i40e_xsk_umem_dma_map(vsi, umem);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_async_xmit(vsi->netdev, qid);
		if (err)
			return err;
	}

	return 0;
}
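
/**
 * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate UMEM from
 *
 * Returns 0 on success, <0 on failure
 **/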
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xdp_umem *umem;
	bool if_running;
	int err;

	umem = xdp_get_umem_from_qid(netdev, qid);
	if (!umem)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	i40e_xsk_umem_dma_unmap(vsi, umem);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}
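
/**
 * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
 * @vsi: Current VSI
 * @umem: UMEM to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate UMEM (from)to
 *
 * Returns 0 on success, <0 on failure
 **/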
int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
			u16 qid)
{
	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
		i40e_xsk_umem_disable(vsi, qid);
}
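
/**
 * i40e_run_xdp_zc - Run an XDP program on an xdp_buff in zero-copy mode
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/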
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	xdp->handle += xdp->data - xdp->data_hard_start;
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fall through -- handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}
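
/**
 * i40e_alloc_buffer_zc - Allocate an Rx buffer from the UMEM fill queue
 * @rx_ring: Rx ring
 * @bi: Rx buffer to populate
 *
 * The buffer is either taken over from the recycle path (bi->addr is
 * already set) or pulled from the UMEM fill queue.
 *
 * Returns true for a successful allocation, false otherwise
 **/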
static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
				 struct i40e_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = bi->addr;
	u64 handle, hr;

	if (addr) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr(umem);
	return true;
}
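
/**
 * i40e_alloc_buffer_slow_zc - Allocate an Rx buffer, reuse queue aware
 * @rx_ring: Rx ring
 * @bi: Rx buffer to populate
 *
 * Like i40e_alloc_buffer_zc(), but consults the fill queue's reuse queue
 * as well (xsk_umem_peek_addr_rq()).
 *
 * Returns true for a successful allocation, false otherwise
 **/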
static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	u64 handle, hr;

	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	handle &= rx_ring->xsk_umem->chunk_mask;

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr_rq(umem);
	return true;
}
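
/**
 * __i40e_alloc_rx_buffers_zc - Allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 * @alloc: The allocation callback to use per buffer
 *
 * Returns true for a successful allocation, false otherwise
 **/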
static __always_inline bool
__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
			   bool alloc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi))
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];
	do {
		if (!alloc(rx_ring, bi)) {
			ok = false;
			goto no_buffers;
		}

		dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		rx_desc->wb.qword1.status_error_len = 0;
		count--;
	} while (count);

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return ok;
}
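
/**
 * i40e_alloc_rx_buffers_zc - Allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Reuse-queue-aware variant, used outside the hot data path.
 *
 * Returns true for a successful allocation, false otherwise
 **/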
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_slow_zc);
}
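
/**
 * i40e_alloc_rx_buffers_fast_zc - Allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Hot-path variant that pulls straight from the fill queue.
 *
 * Returns true for a successful allocation, false otherwise
 **/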
static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
{
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_zc);
}
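
/**
 * i40e_get_rx_buffer_zc - Fetch the current Rx buffer
 * @rx_ring: Rx ring
 * @size: The size of the received data
 *
 * Returns the received Rx buffer, synced for CPU use
 **/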
static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
						    const unsigned int size)
{
	struct i40e_rx_buffer *bi;

	bi = &rx_ring->rx_bi[rx_ring->next_to_clean];

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      bi->dma, 0,
				      size,
				      DMA_BIDIRECTIONAL);

	return bi;
}
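
/**
 * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer onto the recycle queue
 * @rx_ring: Rx ring
 * @old_bi: The Rx buffer to recycle
 **/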
static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
				    struct i40e_rx_buffer *old_bi)
{
	struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	u16 nta = rx_ring->next_to_alloc;

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_bi->dma = old_bi->dma & mask;
	new_bi->dma += hr;

	new_bi->addr = (void *)((unsigned long)old_bi->addr & mask);
	new_bi->addr += hr;

	new_bi->handle = old_bi->handle & mask;
	new_bi->handle += rx_ring->xsk_umem->headroom;

	old_bi->addr = NULL;
}
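
/**
 * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
 * @alloc: Zero-copy allocator
 * @handle: Buffer handle
 **/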
void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
	struct i40e_rx_buffer *bi;
	struct i40e_ring *rx_ring;
	u64 hr, mask;
	u16 nta;

	rx_ring = container_of(alloc, struct i40e_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_bi[nta];

	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	handle &= mask;

	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
	bi->addr += hr;

	bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
}
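
/**
 * i40e_construct_skb_zc - Create an skb from a zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @bi: The Rx buffer the data was received on
 * @xdp: xdp_buff describing the received data
 *
 * Returns the skb, or NULL on failure
 **/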
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct i40e_rx_buffer *bi,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	i40e_reuse_rx_buffer_zc(rx_ring, bi);
	return skb;
}
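
/**
 * i40e_inc_ntc - Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/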
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(I40E_RX_DESC(rx_ring, ntc));
}
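
/**
 * i40e_clean_rx_irq_zc - Consume Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns the amount of work completed
 **/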
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct i40e_rx_buffer *bi;
		union i40e_rx_desc *rx_desc;
		unsigned int size;
		u64 qword;

		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  !i40e_alloc_rx_buffers_fast_zc(rx_ring,
								 cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		bi = i40e_clean_programming_status(rx_ring, rx_desc,
						   qword);
		if (unlikely(bi)) {
			i40e_reuse_rx_buffer_zc(rx_ring, bi);
			cleaned_count++;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = i40e_get_rx_buffer_zc(rx_ring, size);
		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
		if (xdp_res) {
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
			} else {
				i40e_reuse_rx_buffer_zc(rx_ring, bi);
			}

			total_rx_bytes += size;
			total_rx_packets++;

			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */

		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		cleaned_count++;
		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	}

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
	return failure ? budget : (int)total_rx_packets;
}
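
/**
 * i40e_xmit_zc - Perform zero-copy AF_XDP Tx
 * @xdp_ring: XDP Tx ring
 * @budget: Max number of frames to transmit
 *
 * Returns true if the work is finished
 **/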
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct i40e_tx_desc *tx_desc = NULL;
	struct i40e_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
			break;

		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);

		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;

		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC
				   | I40E_TX_DESC_CMD_EOP,
				   0, desc.len, 0);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		/* Request an interrupt for the last frame and bump tail ptr. */
		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
						 I40E_TXD_QW1_CMD_SHIFT);
		i40e_xdp_ring_update_tail(xdp_ring);

		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}
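
/**
 * i40e_clean_xdp_tx_buffer - Free and unmap an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer to clean
 **/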
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}
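
/**
 * i40e_clean_xdp_tx_irq - Complete AF_XDP entries and clean XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 * @napi_budget: NAPI budget (completion work is bounded by vsi->work_limit
 *		 here, not by @napi_budget)
 *
 * Returns true if cleanup/transmission is done
 **/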
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
			   struct i40e_ring *tx_ring, int napi_budget)
{
	unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
	u32 i, completed_frames, frames_ready, xsk_frames = 0;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	u32 head_idx = i40e_get_head(tx_ring);
	bool work_done = true, xmit_done;
	struct i40e_tx_buffer *tx_bi;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	frames_ready = head_idx - tx_ring->next_to_clean;

	if (frames_ready == 0) {
		goto out_xmit;
	} else if (frames_ready > budget) {
		completed_frames = budget;
		work_done = false;
	} else {
		completed_frames = frames_ready;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;
		total_bytes += tx_bi->bytecount;

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, budget);
	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);

out_xmit:
	xmit_done = i40e_xmit_zc(tx_ring, budget);

	return work_done && xmit_done;
}
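
/**
 * i40e_xsk_async_xmit - Implementation of the ndo_xsk_async_xmit netdev op
 * @dev: the netdevice
 * @queue_id: queue to wake up
 *
 * Returns <0 for errors, 0 otherwise
 **/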
int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *ring;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_umem)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}
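
/**
 * i40e_xsk_clean_rx_ring - Return the Rx buffers to the UMEM reuse queue
 * @rx_ring: Rx ring to clean on shutdown
 **/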
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->addr)
			continue;

		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
		rx_bi->addr = NULL;
	}
}
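
/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/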
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}
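
/**
 * i40e_xsk_any_rx_ring_enabled - Check if any Rx ring has an AF_XDP UMEM
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP UMEM attached
 **/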
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xdp_get_umem_from_qid(netdev, i))
			return true;
	}

	return false;
}