// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

#define ICE_RX_HDR_SIZE 256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10
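
/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send the dummy packet on
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer holding the dummy packet
 */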
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}
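
/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */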
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}
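
/**
 * txring_txq - Look up the netdev Tx queue that backs an ice Tx ring
 * @ring: Tx ring to find the netdev queue for
 */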
static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}
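
/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */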
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}
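
/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */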
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
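
/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */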
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}
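
/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */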
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}
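
/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */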
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_umem) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
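
/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */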
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
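
/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */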
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}
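
/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */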
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}
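
/**
 * ice_rx_frame_truesize - Returns the actual in-memory size of an Rx frame
 * @rx_ring: Rx ring the frame arrives on
 * @size: packet length from the Rx descriptor
 *
 * Accounts for page size, headroom, and skb_shared_info alignment.
 */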
static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* must be power-of-2 */
#else
	truesize = ice_rx_offset(rx_ring) ?
		SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}
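
/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */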
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	int err, result = ICE_XDP_PASS;
	struct ice_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	return result;
}
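
/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Frames that fail are
 * freed via the XDP return API. For error cases, a negative errno code is
 * returned and no frames are transmitted (caller must handle freeing frames).
 */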
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int drops = 0, i;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}
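
/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */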
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ice_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
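
/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Tail
 * is bumped at most once per call so that multiple buffers can be posted
 * with a single doorbell write.
 */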
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}
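
/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 *
 * Returns true if the page cannot be reused: either it lives on a remote
 * NUMA node or it was allocated from the pfmemalloc reserves.
 */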
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
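
/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that the Rx buf will be ready to be
 * reused. For systems with PAGE_SIZE < 8192 this function will flip the page
 * offset so the second half of the page assigned to the Rx buffer will be
 * used, otherwise the offset is moved by "size" bytes.
 */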
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}
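
/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */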
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
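
/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */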
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}
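
/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */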
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
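
/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */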
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}
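
/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */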
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly at xdp->data, otherwise we
	 * likely have a consumer accessing the first few bytes of meta
	 * data, and then the actual data.
	 */
	prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must record the Rx queue, otherwise OS features such as
	 * symmetric queues won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}
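
/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */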
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(xdp->data);
#if L1_CACHE_BYTES < 128
	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
#endif /* L1_CACHE_BYTES */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}
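
/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */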
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	u16 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}
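
/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */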
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
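
/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */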
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	xdp.rxq = &rx_ring->xdp_rxq;
	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
			ice_put_rx_buf(rx_ring, NULL);
			cleaned_count++;
			continue;
		}

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);

		if (!size) {
			xdp.data = NULL;
			xdp.data_end = NULL;
			xdp.data_hard_start = NULL;
			xdp.data_meta = NULL;
			goto construct_skb;
		}

		xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
		xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
		xdp.data_meta = xdp.data;
		xdp.data_end = xdp.data + size;
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depends on len size */
		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif

		rcu_read_lock();
		xdp_prog = READ_ONCE(rx_ring->xdp_prog);
		if (!xdp_prog) {
			rcu_read_unlock();
			goto construct_skb;
		}

		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
		rcu_read_unlock();
		if (!xdp_res)
			goto construct_skb;
		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			xdp_xmit |= xdp_res;
			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
		} else {
			rx_buf->pagecnt_bias++;
		}
		total_rx_bytes += size;
		total_rx_pkts++;

		cleaned_count++;
		ice_put_rx_buf(rx_ring, rx_buf);
		continue;
construct_skb:
		if (skb) {
			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
		} else if (likely(xdp.data)) {
			if (ice_ring_uses_build_skb(rx_ring))
				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
			else
				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
		}
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	if (xdp_prog)
		ice_finalize_xdp_rx(rx_ring, xdp_xmit);

	ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}
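
/**
 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
 * @port_info: port_info structure containing the current link speed
 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
 * @itr: ITR value to update
 *
 * Calculate how big of an increment should be applied to the ITR value passed
 * in based on the current link speed and average packet size. The per-speed
 * multipliers below (17 for 100GB up to 170 for 10GB) scale the increment
 * inversely with link rate, and the (avg_pkt_size + 24) / (avg_pkt_size + 640)
 * factor accounts for per-packet protocol and socket buffer overhead.
 */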
static unsigned int
ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
				 unsigned int avg_pkt_size,
				 unsigned int itr)
{
	switch (port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_10GB:
	default:
		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	}

	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

	return itr;
}
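
/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt. The advantage of per-interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern. The thresholds
 * below aim to minimize response time while increasing bulk throughput.
 */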
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned long next_update = jiffies;
	unsigned int packets, bytes, itr;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * For Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	prefetch(q_vector->vsi->port_info);

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size_and_speed;
		}
	} else if (packets < 4) {
		/* Tx is already at the max ITR and Rx is maxed as well, so
		 * there is nothing to gain from changing anything; keep the
		 * current value.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* Between 4 and 56 packets we can assume that our current interrupt
	 * delay is only slightly too low. As such we should increase it by
	 * a small fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size_and_speed:

	/* based on checks above packets cannot be 0 so division is safe */
	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
					       bytes / packets, itr);

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}
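
/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */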
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
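
/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */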
#define ITR_COUNTDOWN_START 3
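
/**
 * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 */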
static void ice_update_ena_itr(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;
	struct ice_vsi *vsi = q_vector->vsi;
	u32 itr_val;

	/* when exiting WB_ON_ITR lets set a low ITR value and trigger
	 * interrupts to expire right away in case we have more work ready
	 * to go already
	 */
	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
		itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
		/* set target back to last user set value */
		rx->target_itr = rx->itr_setting;
		/* set current to what we just wrote and dynamic if needed */
		rx->current_itr = ICE_WB_ON_ITR_USECS |
			(rx->itr_setting & ICE_ITR_DYNAMIC);
		/* allow normal interrupt flow to start */
		q_vector->itr_countdown = 0;
		return;
	}

	/* This will do nothing if dynamic updates are not enabled */
	ice_update_itr(q_vector, tx);
	ice_update_itr(q_vector, rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (rx->target_itr < rx->current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((tx->target_itr < tx->current_itr) ||
		   ((rx->target_itr - rx->current_itr) <
		    (tx->target_itr - tx->current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
		tx->current_itr = tx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (rx->current_itr != rx->target_itr) {
		/* Rx ITR needs to be increased, third priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* Still have to re-enable the interrupts */
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
		wr32(&q_vector->vsi->back->hw,
		     GLINT_DYN_CTL(q_vector->reg_idx),
		     itr_val);
}
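
/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write back completed descriptors even when
 * interrupts are disabled. Without WB_ON_ITR, descriptors may not be written
 * back if they don't fill a cache line until the next interrupt. This sets
 * the write-back frequency to whatever was set previously for the ITR
 * indices.
 */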
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in WB_ON_ITR mode, no need to change it */
	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
		return;

	if (q_vector->num_ring_rx)
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
						 ICE_RX_ITR));

	if (q_vector->num_ring_tx)
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
						 ICE_TX_ITR));

	q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
}
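
/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */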
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
				container_of(napi, struct ice_q_vector, napi);
	bool clean_complete = true;
	struct ice_ring *ring;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx) {
		bool wd = ring->xsk_umem ?
			  ice_clean_tx_irq_zc(ring, budget) :
			  ice_clean_tx_irq(ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = ring->xsk_umem ?
			  ice_clean_rx_irq_zc(ring, budget_per_ring) :
			  ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;

		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		ice_update_ena_itr(q_vector);
	else
		ice_set_wb_on_itr(q_vector);

	return min_t(int, work_done, budget - 1);
}
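
/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */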
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}
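
/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */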
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}
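
/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */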
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			  ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
			ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
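
/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */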
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	protocol = vlan_get_protocol(skb);

	if (protocol == htons(ETH_P_IP))
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		first->tx_flags |= ICE_TX_FLAGS_IPV6;

	if (skb->encapsulation) {
		bool gso_ena = false;
		u32 tunnel = 0;

		/* define outer network header type */
		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
				  ICE_TX_CTX_EIPT_IPV4 :
				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
			l4_proto = ip.v4->protocol;
		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
			tunnel |= ICE_TX_CTX_EIPT_IPV6;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (first->tx_flags & ICE_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  ICE_TXD_CTX_QW0_EIPLEN_S;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  ICE_TXD_CTX_QW0_NATLEN_S;

		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
		/* indicate if we need to offload outer UDP header */
		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

		/* record tunnel offload values */
		off->cd_tunnel_params |= tunnel;

		/* record that a Tx context descriptor is needed to carry
		 * the tunnel parameters
		 */
		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			first->tx_flags |= ICE_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}
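
/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 */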
static void
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
	 * insertion for 802.1AD is not supported
	 */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
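
/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */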
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_start = (u8)(l4.hdr - skb->data);

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_start;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header */
		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}
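
/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K, but division is slow. Instead, the
 * (size * 85) >> 20 below decomposes the division into a cheap multiply
 * and shift: divide by 4K (shift right by 12 bits), then divide by 3
 * (multiply by 85, shift right by 8 bits). Because 256 isn't an exact
 * multiple of 3, the result slightly underestimates near each multiple of
 * 12K, which is acceptable given the 4K - 1 bytes of slack per descriptor;
 * in the worst case we over-provision by one descriptor.
 */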
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
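
/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */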
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
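
/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions: 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */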
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte, which is why we are limited to 6
	 * descriptors for a single transmit as we need to verify the
	 * first 6 fragments.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
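
/**
 * ice_chk_linearize - Check if the number of buffers/fragments exceeds limits
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */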
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}
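
/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */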
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	unsigned int count;
	int tso, csum;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	if (unlikely(skb->priority == TC_PRIO_CONTROL &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
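
/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */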
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}
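
/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: Tx ring to clean
 */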
void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* clear next_to_watch to prevent false hangs */
		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}
2549