// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

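/* Return the UMEM attached to the given ring's queue, or NULL if zero-copy
 * AF_XDP is not active for that queue (no XDP program loaded, no UMEM array,
 * or no UMEM registered for this queue id).
 */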
struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
	    !adapter->xsk_umems[qid])
		return NULL;

	return adapter->xsk_umems[qid];
}

static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)
{
	if (adapter->xsk_umems)
		return 0;

	adapter->num_xsk_umems_used = 0;
	adapter->num_xsk_umems = adapter->num_rx_queues;
	adapter->xsk_umems = kcalloc(adapter->num_xsk_umems,
				     sizeof(*adapter->xsk_umems),
				     GFP_KERNEL);
	if (!adapter->xsk_umems) {
		adapter->num_xsk_umems = 0;
		return -ENOMEM;
	}

	return 0;
}

static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,
			      struct xdp_umem *umem,
			      u16 qid)
{
	int err;

	err = ixgbe_alloc_xsk_umems(adapter);
	if (err)
		return err;

	adapter->xsk_umems[qid] = umem;
	adapter->num_xsk_umems_used++;

	return 0;
}

static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)
{
	adapter->xsk_umems[qid] = NULL;
	adapter->num_xsk_umems_used--;

	/* Free the UMEM array once the last UMEM has been removed. */
	if (adapter->num_xsk_umems_used == 0) {
		kfree(adapter->xsk_umems);
		adapter->xsk_umems = NULL;
		adapter->num_xsk_umems = 0;
	}
}

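/* DMA-map every page of the UMEM so the device can read and write frame data
 * directly into user-space memory (the zero-copy path).
 */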
static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
				  struct xdp_umem *umem)
{
	struct device *dev = &adapter->pdev->dev;
	unsigned int i, j;
	dma_addr_t dma;

	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))
			goto out_unmap;

		umem->pages[i].dma = dma;
	}

	return 0;

out_unmap:
	/* Unwind the mappings that succeeded before the failure. */
	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
	}

	return -ENOMEM;
}

static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
				     struct xdp_umem *umem)
{
	struct device *dev = &adapter->pdev->dev;
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);

		umem->pages[i].dma = 0;
	}
}

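/* Attach a UMEM to a queue pair: set up the fill-queue reuse buffer,
 * DMA-map the UMEM, and restart the rings if the interface is running so
 * the zero-copy setup takes effect immediately.
 */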
static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
				 struct xdp_umem *umem,
				 u16 qid)
{
	struct xdp_umem_fq_reuse *reuseq;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (adapter->xsk_umems) {
		if (qid >= adapter->num_xsk_umems)
			return -EINVAL;
		if (adapter->xsk_umems[qid])
			return -EBUSY;
	}

	reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
	if (!reuseq)
		return -ENOMEM;

	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	err = ixgbe_xsk_umem_dma_map(adapter, umem);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     READ_ONCE(adapter->xdp_prog);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	err = ixgbe_add_xsk_umem(adapter, umem, qid);
	if (err)
		return err;

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
		if (err)
			return err;
	}

	return 0;
}

static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	bool if_running;

	if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
	    !adapter->xsk_umems[qid])
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     READ_ONCE(adapter->xdp_prog);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);
	ixgbe_remove_xsk_umem(adapter, qid);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

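/* Enable (umem != NULL) or disable (umem == NULL) zero-copy AF_XDP support
 * for the given queue id.
 */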
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
			 u16 qid)
{
	return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
		      ixgbe_xsk_umem_disable(adapter, qid);
}

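/* Run the XDP program on a received zero-copy buffer and map the verdict to
 * the driver's internal result codes (PASS, CONSUMED, TX, REDIR).
 */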
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	/* The program may have adjusted xdp->data; move the umem handle
	 * along with it.
	 */
	xdp->handle += xdp->data - xdp->data_hard_start;
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf)) {
			result = IXGBE_XDP_CONSUMED;
			break;
		}
		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fall through -- handle aborts by dropping the frame */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

static struct ixgbe_rx_buffer *
ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring, unsigned int size)
{
	struct ixgbe_rx_buffer *bi;

	bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      bi->dma, 0,
				      size,
				      DMA_BIDIRECTIONAL);

	return bi;
}

static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
				     struct ixgbe_rx_buffer *obi)
{
	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	u16 nta = rx_ring->next_to_alloc;
	struct ixgbe_rx_buffer *nbi;

	nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer the old buffer's umem chunk to the next-to-alloc slot */
	nbi->dma = obi->dma & mask;
	nbi->dma += hr;

	nbi->addr = (void *)((unsigned long)obi->addr & mask);
	nbi->addr += hr;

	nbi->handle = obi->handle & mask;
	nbi->handle += rx_ring->xsk_umem->headroom;

	obi->addr = NULL;
	obi->skb = NULL;
}

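/* Zero-copy allocator callback: a buffer handed off via XDP is being
 * returned, so recycle its umem chunk into the ring's next-to-alloc slot.
 */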
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
	struct ixgbe_rx_buffer *bi;
	struct ixgbe_ring *rx_ring;
	u64 hr, mask;
	u16 nta;

	rx_ring = container_of(alloc, struct ixgbe_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_buffer_info[nta];

	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	handle &= mask;

	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
	bi->addr += hr;

	bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
}

static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
				  struct ixgbe_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = bi->addr;
	u64 handle, hr;

	if (addr)
		return true;

	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr(umem);
	return true;
}

static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	u64 handle, hr;

	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	handle &= rx_ring->xsk_umem->chunk_mask;

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr_rq(umem);
	return true;
}

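/* Fill the Rx ring with buffers taken from the umem. The fast allocator
 * peeks only the fill queue, the slow one also checks the umem's internal
 * reuse queue; the allocator is passed in so both variants share this loop.
 */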
static __always_inline bool
__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
			    bool alloc(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *bi))
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	bool ok = true;

	/* nothing to do */
	if (!cleaned_count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!alloc(rx_ring, bi)) {
			ok = false;
			break;
		}

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	__ixgbe_alloc_rx_buffers_zc(rx_ring, count,
				    ixgbe_alloc_buffer_slow_zc);
}

static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
					   u16 count)
{
	return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
					   ixgbe_alloc_buffer_zc);
}

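/* Copy a received zero-copy buffer into a freshly allocated skb for the
 * XDP_PASS path, then recycle the umem buffer back to the ring.
 */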
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      struct ixgbe_rx_buffer *bi,
					      struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frame data */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
	return skb;
}

static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

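/* Zero-copy Rx poll loop: replenish buffers, run XDP on each completed
 * descriptor, and either recycle the buffer, hand it to the XDP Tx/redirect
 * paths, or build an skb for the network stack on XDP_PASS.
 */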
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
								  cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back.
		 */
		dma_rmb();

		bi = ixgbe_get_rx_buffer_zc(rx_ring, size);

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			/* Frames spanning multiple buffers are not supported
			 * in zero-copy mode; drop the fragments.
			 */
			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			next_bi =
				&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->skb = ERR_PTR(-EINVAL);
			continue;
		}

		if (unlikely(bi->skb)) {
			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
				bi->skb = NULL;
			} else {
				ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			}
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : (int)total_rx_packets;
}

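/* Release all buffers between next_to_clean and next_to_alloc back to the
 * umem's fill-queue reuse cache when the Rx ring is torn down.
 */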
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;
	struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];

	while (i != rx_ring->next_to_alloc) {
		xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
		i++;
		bi++;
		if (i == rx_ring->count) {
			i = 0;
			bi = rx_ring->rx_buffer_info;
		}
	}
}

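/* Pull frames from the AF_XDP Tx queue and post them to the XDP ring,
 * stopping when the budget, the ring space, or the user queue is exhausted.
 */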
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	u32 len, cmd_type;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
		    !netif_carrier_ok(xdp_ring->netdev)) {
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
			break;

		dma_sync_single_for_device(xdp_ring->dev, dma, len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = len;
		tx_bi->xdpf = NULL;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}

static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

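/* Reclaim completed descriptors on the XDP Tx ring: return XDP_TX frames,
 * count AF_XDP frames for completion, then try to send more from the
 * user-space Tx queue.
 */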
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	unsigned int total_packets = 0, total_bytes = 0;
	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
	unsigned int budget = q_vector->tx.work_limit;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	bool xmit_done;

	tx_bi = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
	return budget > 0 && xmit_done;
}

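/* Called from the AF_XDP sendmsg path: wake the queue's NAPI context so
 * pending Tx descriptors get processed even when no interrupt is due.
 */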
int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
		return -ENXIO;

	ring = adapter->xdp_ring[qid];
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

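/* Clean up any Tx entries left on the XDP ring at teardown and report the
 * outstanding AF_XDP frames as completed.
 */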
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}