// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10
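
/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */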
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* we need a valid control VSI Tx ring with a descriptor area */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}
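
/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */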
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}
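
/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */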
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}
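
/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */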
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
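
/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */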
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}
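
/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */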
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}
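
/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */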
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
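
/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */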
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
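
/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */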
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}
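
/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */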
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}
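
/**
 * ice_rx_frame_truesize - Returns an actual size of Rx frame in memory
 * @rx_ring: Rx ring we are requesting the frame size of
 * @size: Packet length from rx_desc
 *
 * Returns an actual size of Rx frame in memory, considering the page size
 * of the underlying arch
 */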
static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	truesize = ice_rx_offset(rx_ring) ?
		SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}
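
/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */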
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	int err, result = ICE_XDP_PASS;
	struct ice_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	return result;
}
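
/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Frames that fail are
 * free'ed via XDP return API.
 * For error cases, a negative errno code is returned and no-frames
 * are transmitted (caller must handle freeing frames).
 */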
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int drops = 0, i;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}
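
/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */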
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ice_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
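
/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple calls to kernel_fpu_begin/end or tail bumps in each allocation.
 */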
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}
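
/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */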
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
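
/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */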
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}
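
/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */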
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
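
/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */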
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}
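
/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */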
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
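
/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */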
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size, int *rx_buf_pgcnt)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}
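
/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */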
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must to record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}
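
/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */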
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}
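
/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */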
static void
ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	       int rx_buf_pgcnt)
{
	u16 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}
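
/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */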
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
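
/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */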
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	xdp.rxq = &rx_ring->xdp_rxq;
	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		int rx_buf_pgcnt;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
			ice_put_rx_buf(rx_ring, NULL, 0);
			cleaned_count++;
			continue;
		}

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);

		if (!size) {
			xdp.data = NULL;
			xdp.data_end = NULL;
			xdp.data_hard_start = NULL;
			xdp.data_meta = NULL;
			goto construct_skb;
		}

		xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
		xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
		xdp.data_meta = xdp.data;
		xdp.data_end = xdp.data + size;
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depend on len size */
		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif

		rcu_read_lock();
		xdp_prog = READ_ONCE(rx_ring->xdp_prog);
		if (!xdp_prog) {
			rcu_read_unlock();
			goto construct_skb;
		}

		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
		rcu_read_unlock();
		if (!xdp_res)
			goto construct_skb;
		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			xdp_xmit |= xdp_res;
			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
		} else {
			rx_buf->pagecnt_bias++;
		}
		total_rx_bytes += size;
		total_rx_pkts++;

		cleaned_count++;
		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
		continue;
construct_skb:
		if (skb) {
			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
		} else if (likely(xdp.data)) {
			if (ice_ring_uses_build_skb(rx_ring))
				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
			else
				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
		}
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
		cleaned_count++;

		/* skip if it is non-EOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	if (xdp_prog)
		ice_finalize_xdp_rx(rx_ring, xdp_xmit);

	ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}
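
/**
 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
 * @port_info: port_info structure containing the current link speed
 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
 * @itr: ITR value to update
 *
 * Calculate an ITR increment from the average packet size and the current
 * link speed; the faster the link and the smaller the packets, the smaller
 * the increment. The result is capped at the adaptive maximum.
 */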
static unsigned int
ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
				 unsigned int avg_pkt_size,
				 unsigned int itr)
{
	switch (port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_10GB:
	default:
		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	}

	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

	return itr;
}
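
/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */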
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned long next_update = jiffies;
	unsigned int packets, bytes, itr;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	prefetch(q_vector->vsi->port_info);

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size_and_speed;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * bail out to keep the ITR maxed and let Rx relax.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* Between 4 and 56 packets we can assume that our current interrupt
	 * delay is only slightly too low. As such we should increase it by
	 * a small fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size_and_speed:

	/* based on checks above packets cannot be 0 so division is safe */
	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
					       bytes / packets, itr);

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}
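
/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */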
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
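
/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */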
#define ITR_COUNTDOWN_START 3
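
/**
 * ice_update_ena_itr - Update ITR and re-enable MSI-X interrupt
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 */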
static void ice_update_ena_itr(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;
	struct ice_vsi *vsi = q_vector->vsi;
	u32 itr_val;

	/* when exiting WB_ON_ITR lets set a low ITR value and trigger
	 * interrupts to expire right away in case we have more work ready to go
	 * already
	 */
	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
		itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
		/* set target back to last user set value */
		rx->target_itr = rx->itr_setting;
		/* set current to what we just wrote and dynamic if needed */
		rx->current_itr = ICE_WB_ON_ITR_USECS |
			(rx->itr_setting & ICE_ITR_DYNAMIC);
		/* allow normal interrupt flow to start */
		q_vector->itr_countdown = 0;
		return;
	}

	/* This will do nothing if dynamic updates are not enabled */
	ice_update_itr(q_vector, tx);
	ice_update_itr(q_vector, rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (rx->target_itr < rx->current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((tx->target_itr < tx->current_itr) ||
		   ((rx->target_itr - rx->current_itr) <
		    (tx->target_itr - tx->current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
		tx->current_itr = tx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (rx->current_itr != rx->target_itr) {
		/* Rx ITR needs to be increased, third priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* Still have to re-enable the interrupts */
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
		wr32(&q_vector->vsi->back->hw,
		     GLINT_DYN_CTL(q_vector->reg_idx),
		     itr_val);
}
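
/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */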
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in wb_on_itr mode no need to change it */
	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
		return;

	if (q_vector->num_ring_rx)
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
						 ICE_RX_ITR));

	if (q_vector->num_ring_tx)
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
						 ICE_TX_ITR));

	q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
}
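
/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */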
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	bool clean_complete = true;
	struct ice_ring *ring;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx) {
		bool wd = ring->xsk_pool ?
			  ice_clean_tx_irq_zc(ring, budget) :
			  ice_clean_tx_irq(ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = ring->xsk_pool ?
			  ice_clean_rx_irq_zc(ring, budget_per_ring) :
			  ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;

		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		ice_update_ena_itr(q_vector);
	else
		ice_set_wb_on_itr(q_vector);

	return min_t(int, work_done, budget - 1);
}
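
/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */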
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}
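
/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */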
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}
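
/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */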
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			  ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
			ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
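
/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */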
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	protocol = vlan_get_protocol(skb);

	if (protocol == htons(ETH_P_IP))
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		first->tx_flags |= ICE_TX_FLAGS_IPV6;

	if (skb->encapsulation) {
		bool gso_ena = false;
		u32 tunnel = 0;

		/* define outer network header type */
		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
				  ICE_TX_CTX_EIPT_IPV4 :
				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
			l4_proto = ip.v4->protocol;
		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
			int ret;

			tunnel |= ICE_TX_CTX_EIPT_IPV6;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
					       &l4_proto, &frag_off);
			if (ret < 0)
				return -1;
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (first->tx_flags & ICE_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  ICE_TXD_CTX_QW0_EIPLEN_S;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			   ICE_TXD_CTX_QW0_NATLEN_S;

		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
		/* indicate if we need to offload outer UDP header */
		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

		/* record tunnel offload values */
		off->cd_tunnel_params |= tunnel;

		/* set DTYP=1 to indicate that it's a Tx context descriptor
		 * in IPsec tunnel mode with Tx offloads in Quad word 1
		 */
		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			first->tx_flags |= ICE_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}
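
/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 */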
static void
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
	 * insertion for 802.1AD is not supported
	 */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
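
/**
 * ice_tso - computes mss and TSO length to enable TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */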
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_start = (u8)(l4.hdr - skb->data);

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_start;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header */
		off->header_len = (u8)sizeof(l4.udp) + l4_start;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}
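
/**
 * ice_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one descriptor for the skb head data pointer
 * (ICE_DESCS_FOR_SKB_DATA_PTR), which also rounds the result up.
 */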
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
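
/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */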
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
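
/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */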
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We use
	 * this as the worst case scenario in which the frag ahead of us only
	 * provides one byte which is why we are limited to 6 descriptors for
	 * a single transmit as the header and previous fragment are already
	 * consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
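
/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */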
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}
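
/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */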
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	unsigned int count;
	int tso, csum;

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	if (unlikely(skb->priority == TC_PRIO_CONTROL &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
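
/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */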
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}
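
/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */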
void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* clear next_to_watch to prevent false hangs */
		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}