// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring must be configured before a filter can be pushed */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we need two descriptors; wait briefly for them to become free */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
	tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	/* get the bql data ready */
	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prefetch the skb refcount we will need when freeing it */
		prefetchw(&tx_buf->skb->users);

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_tx_wake_queue(txring_txq(tx_ring));
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
383void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
384{
385 struct device *dev = rx_ring->dev;
386 u32 size;
387 u16 i;
388
389
390 if (!rx_ring->rx_buf)
391 return;
392
393 if (rx_ring->skb) {
394 dev_kfree_skb(rx_ring->skb);
395 rx_ring->skb = NULL;
396 }
397
398 if (rx_ring->xsk_pool) {
399 ice_xsk_clean_rx_ring(rx_ring);
400 goto rx_skip_free;
401 }
402
403
404 for (i = 0; i < rx_ring->count; i++) {
405 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
406
407 if (!rx_buf->page)
408 continue;
409
410
411
412
413 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
414 rx_buf->page_offset,
415 rx_ring->rx_buf_len,
416 DMA_FROM_DEVICE);
417
418
419 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
420 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
421 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
422
423 rx_buf->page = NULL;
424 rx_buf->page_offset = 0;
425 }
426
427rx_skip_free:
428 if (rx_ring->xsk_pool)
429 memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
430 else
431 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
432
433
434 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
435 PAGE_SIZE);
436 memset(rx_ring->desc, 0, size);
437
438 rx_ring->next_to_alloc = 0;
439 rx_ring->next_to_clean = 0;
440 rx_ring->next_to_use = 0;
441}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
449void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
450{
451 u32 size;
452
453 ice_clean_rx_ring(rx_ring);
454 if (rx_ring->vsi->type == ICE_VSI_PF)
455 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
456 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
457 rx_ring->xdp_prog = NULL;
458 if (rx_ring->xsk_pool) {
459 kfree(rx_ring->xdp_buf);
460 rx_ring->xdp_buf = NULL;
461 } else {
462 kfree(rx_ring->rx_buf);
463 rx_ring->rx_buf = NULL;
464 }
465
466 if (rx_ring->desc) {
467 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
468 PAGE_SIZE);
469 dmam_free_coherent(rx_ring->dev, size,
470 rx_ring->desc, rx_ring->dma);
471 rx_ring->desc = NULL;
472 }
473}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
481int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
482{
483 struct device *dev = rx_ring->dev;
484 u32 size;
485
486 if (!dev)
487 return -ENOMEM;
488
489
490 WARN_ON(rx_ring->rx_buf);
491 rx_ring->rx_buf =
492 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
493 if (!rx_ring->rx_buf)
494 return -ENOMEM;
495
496
497 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
498 PAGE_SIZE);
499 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
500 GFP_KERNEL);
501 if (!rx_ring->desc) {
502 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
503 size);
504 goto err;
505 }
506
507 rx_ring->next_to_use = 0;
508 rx_ring->next_to_clean = 0;
509
510 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
511 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
512
513 if (rx_ring->vsi->type == ICE_VSI_PF &&
514 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
515 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
516 rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
517 goto err;
518 return 0;
519
520err:
521 kfree(rx_ring->rx_buf);
522 rx_ring->rx_buf = NULL;
523 return -ENOMEM;
524}

/**
 * ice_rx_frame_truesize - Returns an actual size of Rx frame in memory
 * @rx_ring: Rx ring we are requesting the frame size of
 * @size: Packet length from rx_desc
 *
 * Returns an actual size of Rx frame in memory, considering page size
 * and SKB data alignment.
 */
526static unsigned int
527ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
528{
529 unsigned int truesize;
530
531#if (PAGE_SIZE < 8192)
532 truesize = ice_rx_pg_size(rx_ring) / 2;
533#else
534 truesize = rx_ring->rx_offset ?
535 SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
536 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
537 SKB_DATA_ALIGN(size);
538#endif
539 return truesize;
540}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
551static int
552ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
553 struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
554{
555 int err;
556 u32 act;
557
558 act = bpf_prog_run_xdp(xdp_prog, xdp);
559 switch (act) {
560 case XDP_PASS:
561 return ICE_XDP_PASS;
562 case XDP_TX:
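		/* XDP Tx rings can be shared between CPUs when there are
		 * fewer XDP rings than CPUs; in that case the static key is
		 * enabled and the ring must be protected with its lock.
		 */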
563 if (static_branch_unlikely(&ice_xdp_locking_key))
564 spin_lock(&xdp_ring->tx_lock);
565 err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
566 if (static_branch_unlikely(&ice_xdp_locking_key))
567 spin_unlock(&xdp_ring->tx_lock);
568 if (err == ICE_XDP_CONSUMED)
569 goto out_failure;
570 return err;
571 case XDP_REDIRECT:
572 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
573 if (err)
574 goto out_failure;
575 return ICE_XDP_REDIR;
576 default:
577 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
578 fallthrough;
579 case XDP_ABORTED:
580out_failure:
581 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
582 fallthrough;
583 case XDP_DROP:
584 return ICE_XDP_CONSUMED;
585 }
586}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns the number of frames successfully sent; failed frames are
 * freed by the XDP core. For error cases, a negative errno code is
 * returned and no frames are transmitted.
 */
600int
601ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
602 u32 flags)
603{
604 struct ice_netdev_priv *np = netdev_priv(dev);
605 unsigned int queue_index = smp_processor_id();
606 struct ice_vsi *vsi = np->vsi;
607 struct ice_tx_ring *xdp_ring;
608 int nxmit = 0, i;
609
610 if (test_bit(ICE_VSI_DOWN, vsi->state))
611 return -ENETDOWN;
612
613 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
614 return -ENXIO;
615
616 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
617 return -EINVAL;
618
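	/* When XDP Tx rings are shared, map this CPU onto a ring and take
	 * the ring lock so concurrent senders don't corrupt the ring.
	 */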
619 if (static_branch_unlikely(&ice_xdp_locking_key)) {
620 queue_index %= vsi->num_xdp_txq;
621 xdp_ring = vsi->xdp_rings[queue_index];
622 spin_lock(&xdp_ring->tx_lock);
623 } else {
624 xdp_ring = vsi->xdp_rings[queue_index];
625 }
626
627 for (i = 0; i < n; i++) {
628 struct xdp_frame *xdpf = frames[i];
629 int err;
630
631 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
632 if (err != ICE_XDP_TX)
633 break;
634 nxmit++;
635 }
636
637 if (unlikely(flags & XDP_XMIT_FLUSH))
638 ice_xdp_ring_update_tail(xdp_ring);
639
640 if (static_branch_unlikely(&ice_xdp_locking_key))
641 spin_unlock(&xdp_ring->tx_lock);
642
643 return nxmit;
644}

/**
 * ice_alloc_mapped_page - recycle or allocate and map a new Rx page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or reused.
 */
654static bool
655ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
656{
657 struct page *page = bi->page;
658 dma_addr_t dma;
659
660
661 if (likely(page))
662 return true;
663
664
665 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
666 if (unlikely(!page)) {
667 rx_ring->rx_stats.alloc_page_failed++;
668 return false;
669 }
670
671
672 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
673 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
674
675
676
677
678 if (dma_mapping_error(rx_ring->dev, dma)) {
679 __free_pages(page, ice_rx_pg_order(rx_ring));
680 rx_ring->rx_stats.alloc_page_failed++;
681 return false;
682 }
683
684 bi->dma = dma;
685 bi->page = page;
686 bi->page_offset = rx_ring->rx_offset;
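	/* Take a large up-front page reference and track our share of it in
	 * pagecnt_bias so the hot path can recycle the page without atomic
	 * refcount updates on every frame.
	 */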
687 page_ref_add(page, USHRT_MAX - 1);
688 bi->pagecnt_bias = USHRT_MAX;
689
690 return true;
691}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple memory barriers per refill.
 */
706bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
707{
708 union ice_32b_rx_flex_desc *rx_desc;
709 u16 ntu = rx_ring->next_to_use;
710 struct ice_rx_buf *bi;
711
712
713 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
714 !cleaned_count)
715 return false;
716
717
718 rx_desc = ICE_RX_DESC(rx_ring, ntu);
719 bi = &rx_ring->rx_buf[ntu];
720
721 do {
722
723 if (!ice_alloc_mapped_page(rx_ring, bi))
724 break;
725
726
727 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
728 bi->page_offset,
729 rx_ring->rx_buf_len,
730 DMA_FROM_DEVICE);
731
732
733
734
735 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
736
737 rx_desc++;
738 bi++;
739 ntu++;
740 if (unlikely(ntu == rx_ring->count)) {
741 rx_desc = ICE_RX_DESC(rx_ring, 0);
742 bi = rx_ring->rx_buf;
743 ntu = 0;
744 }
745
746
747 rx_desc->wb.status_error0 = 0;
748
749 cleaned_count--;
750 } while (cleaned_count);
751
752 if (rx_ring->next_to_use != ntu)
753 ice_release_rx_desc(rx_ring, ntu);
754
755 return !!cleaned_count;
756}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that the Rx buffer will be ready to be
 * reused. For systems with PAGE_SIZE < 8192 this flips to the other half of
 * the page; otherwise the offset simply moves forward by "size" bytes.
 */
768static void
769ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
770{
771#if (PAGE_SIZE < 8192)
772
773 rx_buf->page_offset ^= size;
774#else
775
776 rx_buf->page_offset += size;
777#endif
778}

/**
 * ice_can_reuse_rx_page - Determine if the page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If the page is reusable, it is handed back to the ring via
 * ice_reuse_rx_page(); otherwise the DMA mapping needs to be destroyed and
 * the page freed.
 */
790static bool
791ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
792{
793 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
794 struct page *page = rx_buf->page;
795
796
797 if (!dev_page_is_reusable(page))
798 return false;
799
800#if (PAGE_SIZE < 8192)
801
802 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
803 return false;
804#else
805#define ICE_LAST_OFFSET \
806 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
807 if (rx_buf->page_offset > ICE_LAST_OFFSET)
808 return false;
809#endif
810
811
812
813
814
815 if (unlikely(pagecnt_bias == 1)) {
816 page_ref_add(page, USHRT_MAX - 1);
817 rx_buf->pagecnt_bias = USHRT_MAX;
818 }
819
820 return true;
821}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb
 * as a fragment and then update the page offset for reuse.
 */
834static void
835ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
836 struct sk_buff *skb, unsigned int size)
837{
838#if (PAGE_SIZE >= 8192)
839 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
840#else
841 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
842#endif
843
844 if (!size)
845 return;
846 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
847 rx_buf->page_offset, size, truesize);
848
849
850 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
851}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
860static void
861ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
862{
863 u16 nta = rx_ring->next_to_alloc;
864 struct ice_rx_buf *new_buf;
865
866 new_buf = &rx_ring->rx_buf[nta];
867
868
869 nta++;
870 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
871
872
873
874
875
876 new_buf->dma = old_buf->dma;
877 new_buf->page = old_buf->page;
878 new_buf->page_offset = old_buf->page_offset;
879 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
880}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
891static struct ice_rx_buf *
892ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
893 int *rx_buf_pgcnt)
894{
895 struct ice_rx_buf *rx_buf;
896
897 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
898 *rx_buf_pgcnt =
899#if (PAGE_SIZE < 8192)
900 page_count(rx_buf->page);
901#else
902 0;
903#endif
904 prefetchw(rx_buf->page);
905
906 if (!size)
907 return rx_buf;
908
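	/* sync the received portion of the buffer for CPU use */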
909 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
910 rx_buf->page_offset, size,
911 DMA_FROM_DEVICE);
912
913
914 rx_buf->pagecnt_bias--;
915
916 return rx_buf;
917}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * Build an skb around an existing Rx buffer, avoiding a memcpy of the
 * packet data.
 */
928static struct sk_buff *
929ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
930 struct xdp_buff *xdp)
931{
932 u8 metasize = xdp->data - xdp->data_meta;
933#if (PAGE_SIZE < 8192)
934 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
935#else
936 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
937 SKB_DATA_ALIGN(xdp->data_end -
938 xdp->data_hard_start);
939#endif
940 struct sk_buff *skb;
941
942
943
944
945
946
947 net_prefetch(xdp->data_meta);
948
949 skb = napi_build_skb(xdp->data_hard_start, truesize);
950 if (unlikely(!skb))
951 return NULL;
952
953
954
955
956 skb_record_rx_queue(skb, rx_ring->q_index);
957
958
959 skb_reserve(skb, xdp->data - xdp->data_hard_start);
960 __skb_put(skb, xdp->data_end - xdp->data);
961 if (metasize)
962 skb_metadata_set(skb, metasize);
963
964
965 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
966
967 return skb;
968}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * Allocate an skb, copy the packet headers into it, and attach the rest
 * of the buffer as a page fragment.
 */
980static struct sk_buff *
981ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
982 struct xdp_buff *xdp)
983{
984 unsigned int metasize = xdp->data - xdp->data_meta;
985 unsigned int size = xdp->data_end - xdp->data;
986 unsigned int headlen;
987 struct sk_buff *skb;
988
989
990 net_prefetch(xdp->data_meta);
991
992
993 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
994 ICE_RX_HDR_SIZE + metasize,
995 GFP_ATOMIC | __GFP_NOWARN);
996 if (unlikely(!skb))
997 return NULL;
998
999 skb_record_rx_queue(skb, rx_ring->q_index);
1000
1001 headlen = size;
1002 if (headlen > ICE_RX_HDR_SIZE)
1003 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
1004
1005
1006 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
1007 ALIGN(headlen + metasize, sizeof(long)));
1008
1009 if (metasize) {
1010 skb_metadata_set(skb, metasize);
1011 __skb_pull(skb, metasize);
1012 }
1013
1014
1015 size -= headlen;
1016 if (size) {
1017#if (PAGE_SIZE >= 8192)
1018 unsigned int truesize = SKB_DATA_ALIGN(size);
1019#else
1020 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
1021#endif
1022 skb_add_rx_frag(skb, 0, rx_buf->page,
1023 rx_buf->page_offset + headlen, size, truesize);
1024
1025 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
1026 } else {
1027
1028
1029
1030
1031 rx_buf->pagecnt_bias++;
1032 }
1033
1034 return skb;
1035}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
1047static void
1048ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
1049 int rx_buf_pgcnt)
1050{
1051 u16 ntc = rx_ring->next_to_clean + 1;
1052
1053
1054 ntc = (ntc < rx_ring->count) ? ntc : 0;
1055 rx_ring->next_to_clean = ntc;
1056
1057 if (!rx_buf)
1058 return;
1059
1060 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1061
1062 ice_reuse_rx_page(rx_ring, rx_buf);
1063 } else {
1064
1065 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1066 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1067 ICE_RX_DMA_ATTR);
1068 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1069 }
1070
1071
1072 rx_buf->page = NULL;
1073}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
1083static bool
1084ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1085{
1086
1087#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1088 if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
1089 return false;
1090
1091 rx_ring->rx_stats.non_eop_descs++;
1092
1093 return true;
1094}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
1108int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
1109{
1110 unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1111 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1112 unsigned int offset = rx_ring->rx_offset;
1113 struct ice_tx_ring *xdp_ring = NULL;
1114 unsigned int xdp_res, xdp_xmit = 0;
1115 struct sk_buff *skb = rx_ring->skb;
1116 struct bpf_prog *xdp_prog = NULL;
1117 struct xdp_buff xdp;
1118 bool failure;
1119
1120
1121#if (PAGE_SIZE < 8192)
1122 frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1123#endif
1124 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1125
1126 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1127 if (xdp_prog)
1128 xdp_ring = rx_ring->xdp_ring;
1129
1130
1131 while (likely(total_rx_pkts < (unsigned int)budget)) {
1132 union ice_32b_rx_flex_desc *rx_desc;
1133 struct ice_rx_buf *rx_buf;
1134 unsigned char *hard_start;
1135 unsigned int size;
1136 u16 stat_err_bits;
1137 int rx_buf_pgcnt;
1138 u16 vlan_tag = 0;
1139 u16 rx_ptype;
1140
1141
1142 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1143
1144
1145
1146
1147
1148
1149 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1150 if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
1151 break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
1157 dma_rmb();
1158
1159 ice_trace(clean_rx_irq, rx_ring, rx_desc);
1160 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1161 struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1162
1163 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1164 ctrl_vsi->vf)
1165 ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1166 ice_put_rx_buf(rx_ring, NULL, 0);
1167 cleaned_count++;
1168 continue;
1169 }
1170
1171 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1172 ICE_RX_FLX_DESC_PKT_LEN_M;
1173
1174
1175 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1176
1177 if (!size) {
1178 xdp.data = NULL;
1179 xdp.data_end = NULL;
1180 xdp.data_hard_start = NULL;
1181 xdp.data_meta = NULL;
1182 goto construct_skb;
1183 }
1184
1185 hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1186 offset;
1187 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1188#if (PAGE_SIZE > 4096)
1189
1190 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1191#endif
1192
1193 if (!xdp_prog)
1194 goto construct_skb;
1195
1196 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
1197 if (!xdp_res)
1198 goto construct_skb;
1199 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1200 xdp_xmit |= xdp_res;
1201 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1202 } else {
1203 rx_buf->pagecnt_bias++;
1204 }
1205 total_rx_bytes += size;
1206 total_rx_pkts++;
1207
1208 cleaned_count++;
1209 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1210 continue;
1211construct_skb:
1212 if (skb) {
1213 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1214 } else if (likely(xdp.data)) {
1215 if (ice_ring_uses_build_skb(rx_ring))
1216 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1217 else
1218 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1219 }
1220
1221 if (!skb) {
1222 rx_ring->rx_stats.alloc_buf_failed++;
1223 if (rx_buf)
1224 rx_buf->pagecnt_bias++;
1225 break;
1226 }
1227
1228 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1229 cleaned_count++;
1230
1231
1232 if (ice_is_non_eop(rx_ring, rx_desc))
1233 continue;
1234
1235 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1236 if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
1237 stat_err_bits))) {
1238 dev_kfree_skb_any(skb);
1239 continue;
1240 }
1241
1242 vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
1243
1244
1245 if (eth_skb_pad(skb)) {
1246 skb = NULL;
1247 continue;
1248 }
1249
1250
1251 total_rx_bytes += skb->len;
1252
1253
1254 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1255 ICE_RX_FLEX_DESC_PTYPE_M;
1256
1257 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1258
1259 ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1260
1261 ice_receive_skb(rx_ring, skb, vlan_tag);
1262 skb = NULL;
1263
1264
1265 total_rx_pkts++;
1266 }
1267
1268
1269 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1270
1271 if (xdp_prog)
1272 ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
1273 rx_ring->skb = skb;
1274
1275 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1276
1277
1278 return failure ? budget : (int)total_rx_pkts;
1279}

/**
 * __ice_update_sample - update dim sample with packet and byte counts
 * @q_vector: the q_vector the ring container belongs to
 * @rc: the ring container to pull stats from
 * @sample: dim sample to fill in
 * @is_tx: true if the container holds Tx rings, false for Rx rings
 */
1281static void __ice_update_sample(struct ice_q_vector *q_vector,
1282 struct ice_ring_container *rc,
1283 struct dim_sample *sample,
1284 bool is_tx)
1285{
1286 u64 packets = 0, bytes = 0;
1287
1288 if (is_tx) {
1289 struct ice_tx_ring *tx_ring;
1290
1291 ice_for_each_tx_ring(tx_ring, *rc) {
1292 packets += tx_ring->stats.pkts;
1293 bytes += tx_ring->stats.bytes;
1294 }
1295 } else {
1296 struct ice_rx_ring *rx_ring;
1297
1298 ice_for_each_rx_ring(rx_ring, *rc) {
1299 packets += rx_ring->stats.pkts;
1300 bytes += rx_ring->stats.bytes;
1301 }
1302 }
1303
1304 dim_update_sample(q_vector->total_events, packets, bytes, sample);
1305 sample->comp_ctr = 0;
1306
1307
1308
1309
1310
1311
1312 if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
1313 rc->dim.state = DIM_START_MEASURE;
1314}

/**
 * ice_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR value based on incoming packets, bytes, and interrupts.
 *
 * This function is a no-op if the ring is not configured to dynamic ITR.
 */
1325static void ice_net_dim(struct ice_q_vector *q_vector)
1326{
1327 struct ice_ring_container *tx = &q_vector->tx;
1328 struct ice_ring_container *rx = &q_vector->rx;
1329
1330 if (ITR_IS_DYNAMIC(tx)) {
1331 struct dim_sample dim_sample;
1332
1333 __ice_update_sample(q_vector, tx, &dim_sample, true);
1334 net_dim(&tx->dim, dim_sample);
1335 }
1336
1337 if (ITR_IS_DYNAMIC(rx)) {
1338 struct dim_sample dim_sample;
1339
1340 __ice_update_sample(q_vector, rx, &dim_sample, false);
1341 net_dim(&rx->dim, dim_sample);
1342 }
1343}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
1350static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1351{
	/* Mask off reserved bits, then place the ITR value (in usecs) into
	 * the INTERVAL field, adjusting the shift by the hardware ITR
	 * granularity (ICE_ITR_GRAN_S).
	 */
1359 itr &= ICE_ITR_MASK;
1360
1361 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1362 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1363 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1364}

/**
 * ice_enable_interrupt - re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 *
 * If the VSI is down, the interrupt will not be re-enabled. Also, when
 * re-enabling after WB_ON_ITR was set, a software interrupt is triggered
 * so any pending write-backs get processed.
 */
1374static void ice_enable_interrupt(struct ice_q_vector *q_vector)
1375{
1376 struct ice_vsi *vsi = q_vector->vsi;
1377 bool wb_en = q_vector->wb_on_itr;
1378 u32 itr_val;
1379
1380 if (test_bit(ICE_DOWN, vsi->state))
1381 return;
1382
1383
1384
1385
1386
1387
1388 if (!wb_en) {
1389 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1390 } else {
1391 q_vector->wb_on_itr = false;
1392
1393
1394
1395
1396
1397
1398
1399 itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
1400 itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1401 ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
1402 GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1403 }
1404 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1405}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */
1421static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1422{
1423 struct ice_vsi *vsi = q_vector->vsi;
1424
1425
1426 if (q_vector->wb_on_itr)
1427 return;
1428
1429
1430
1431
1432
1433 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1434 ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1435 GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1436 GLINT_DYN_CTL_WB_ON_ITR_M);
1437
1438 q_vector->wb_on_itr = true;
1439}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
1450int ice_napi_poll(struct napi_struct *napi, int budget)
1451{
1452 struct ice_q_vector *q_vector =
1453 container_of(napi, struct ice_q_vector, napi);
1454 struct ice_tx_ring *tx_ring;
1455 struct ice_rx_ring *rx_ring;
1456 bool clean_complete = true;
1457 int budget_per_ring;
1458 int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
1463 ice_for_each_tx_ring(tx_ring, q_vector->tx) {
1464 bool wd;
1465
1466 if (tx_ring->xsk_pool)
1467 wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
1468 else if (ice_ring_is_xdp(tx_ring))
1469 wd = true;
1470 else
1471 wd = ice_clean_tx_irq(tx_ring, budget);
1472
1473 if (!wd)
1474 clean_complete = false;
1475 }
1476
1477
1478 if (unlikely(budget <= 0))
1479 return budget;
1480
	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;
1491
1492 ice_for_each_rx_ring(rx_ring, q_vector->rx) {
1493 int cleaned;
1494
1495
1496
1497
1498
1499 cleaned = rx_ring->xsk_pool ?
1500 ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
1501 ice_clean_rx_irq(rx_ring, budget_per_ring);
1502 work_done += cleaned;
1503
1504 if (cleaned >= budget_per_ring)
1505 clean_complete = false;
1506 }
1507
1508
1509 if (!clean_complete) {
1510
1511
1512
1513 ice_set_wb_on_itr(q_vector);
1514 return budget;
1515 }
1516
1517
1518
1519
1520 if (napi_complete_done(napi, work_done)) {
1521 ice_net_dim(q_vector);
1522 ice_enable_interrupt(q_vector);
1523 } else {
1524 ice_set_wb_on_itr(q_vector);
1525 }
1526
1527 return min_t(int, work_done, budget - 1);
1528}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
1537static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1538{
1539 netif_tx_stop_queue(txring_txq(tx_ring));
1540
1541 smp_mb();
1542
1543
1544 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1545 return -EBUSY;
1546
1547
1548 netif_tx_start_queue(txring_txq(tx_ring));
1549 ++tx_ring->tx_stats.restart_q;
1550 return 0;
1551}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
1560static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1561{
1562 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1563 return 0;
1564
1565 return __ice_maybe_stop_tx(tx_ring, size);
1566}

/**
 * ice_tx_map - Build the Tx descriptors
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
1578static void
1579ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
1580 struct ice_tx_offload_params *off)
1581{
1582 u64 td_offset, td_tag, td_cmd;
1583 u16 i = tx_ring->next_to_use;
1584 unsigned int data_len, size;
1585 struct ice_tx_desc *tx_desc;
1586 struct ice_tx_buf *tx_buf;
1587 struct sk_buff *skb;
1588 skb_frag_t *frag;
1589 dma_addr_t dma;
1590 bool kick;
1591
1592 td_tag = off->td_l2tag1;
1593 td_cmd = off->td_cmd;
1594 td_offset = off->td_offset;
1595 skb = first->skb;
1596
1597 data_len = skb->data_len;
1598 size = skb_headlen(skb);
1599
1600 tx_desc = ICE_TX_DESC(tx_ring, i);
1601
1602 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1603 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1604 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1605 ICE_TX_FLAGS_VLAN_S;
1606 }
1607
1608 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1609
1610 tx_buf = first;
1611
1612 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1613 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1614
1615 if (dma_mapping_error(tx_ring->dev, dma))
1616 goto dma_error;
1617
1618
1619 dma_unmap_len_set(tx_buf, len, size);
1620 dma_unmap_addr_set(tx_buf, dma, dma);
1621
1622
1623 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1624 tx_desc->buf_addr = cpu_to_le64(dma);
1625
1626
1627
1628
1629 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1630 tx_desc->cmd_type_offset_bsz =
1631 ice_build_ctob(td_cmd, td_offset, max_data,
1632 td_tag);
1633
1634 tx_desc++;
1635 i++;
1636
1637 if (i == tx_ring->count) {
1638 tx_desc = ICE_TX_DESC(tx_ring, 0);
1639 i = 0;
1640 }
1641
1642 dma += max_data;
1643 size -= max_data;
1644
1645 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1646 tx_desc->buf_addr = cpu_to_le64(dma);
1647 }
1648
1649 if (likely(!data_len))
1650 break;
1651
1652 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1653 size, td_tag);
1654
1655 tx_desc++;
1656 i++;
1657
1658 if (i == tx_ring->count) {
1659 tx_desc = ICE_TX_DESC(tx_ring, 0);
1660 i = 0;
1661 }
1662
1663 size = skb_frag_size(frag);
1664 data_len -= size;
1665
1666 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1667 DMA_TO_DEVICE);
1668
1669 tx_buf = &tx_ring->tx_buf[i];
1670 }
1671
1672
1673 skb_tx_timestamp(first->skb);
1674
1675 i++;
1676 if (i == tx_ring->count)
1677 i = 0;
1678
1679
1680 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1681 tx_desc->cmd_type_offset_bsz =
1682 ice_build_ctob(td_cmd, td_offset, size, td_tag);
1683
1684
1685
1686
1687
1688
1689
1690 wmb();
1691
1692
1693 first->next_to_watch = tx_desc;
1694
1695 tx_ring->next_to_use = i;
1696
1697 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1698
1699
1700 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
1701 netdev_xmit_more());
1702 if (kick)
1703
1704 writel(i, tx_ring->tail);
1705
1706 return;
1707
1708dma_error:
1709
1710 for (;;) {
1711 tx_buf = &tx_ring->tx_buf[i];
1712 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1713 if (tx_buf == first)
1714 break;
1715 if (i == 0)
1716 i = tx_ring->count;
1717 i--;
1718 }
1719
1720 tx_ring->next_to_use = i;
1721}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload cannot be executed, 1
 * otherwise.
 */
1730static
1731int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1732{
1733 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1734 struct sk_buff *skb = first->skb;
1735 union {
1736 struct iphdr *v4;
1737 struct ipv6hdr *v6;
1738 unsigned char *hdr;
1739 } ip;
1740 union {
1741 struct tcphdr *tcp;
1742 unsigned char *hdr;
1743 } l4;
1744 __be16 frag_off, protocol;
1745 unsigned char *exthdr;
1746 u32 offset, cmd = 0;
1747 u8 l4_proto = 0;
1748
1749 if (skb->ip_summed != CHECKSUM_PARTIAL)
1750 return 0;
1751
1752 protocol = vlan_get_protocol(skb);
1753
1754 if (eth_p_mpls(protocol)) {
1755 ip.hdr = skb_inner_network_header(skb);
1756 l4.hdr = skb_checksum_start(skb);
1757 } else {
1758 ip.hdr = skb_network_header(skb);
1759 l4.hdr = skb_transport_header(skb);
1760 }
1761
1762
1763 l2_len = ip.hdr - skb->data;
1764 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1765
1766
1767
1768
1769 if (ip.v4->version == 4)
1770 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1771 else if (ip.v6->version == 6)
1772 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1773
1774 if (skb->encapsulation) {
1775 bool gso_ena = false;
1776 u32 tunnel = 0;
1777
1778
1779 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1780 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1781 ICE_TX_CTX_EIPT_IPV4 :
1782 ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1783 l4_proto = ip.v4->protocol;
1784 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1785 int ret;
1786
1787 tunnel |= ICE_TX_CTX_EIPT_IPV6;
1788 exthdr = ip.hdr + sizeof(*ip.v6);
1789 l4_proto = ip.v6->nexthdr;
1790 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1791 &l4_proto, &frag_off);
1792 if (ret < 0)
1793 return -1;
1794 }
1795
1796
1797 switch (l4_proto) {
1798 case IPPROTO_UDP:
1799 tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1800 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1801 break;
1802 case IPPROTO_GRE:
1803 tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1804 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1805 break;
1806 case IPPROTO_IPIP:
1807 case IPPROTO_IPV6:
1808 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1809 l4.hdr = skb_inner_network_header(skb);
1810 break;
1811 default:
1812 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1813 return -1;
1814
1815 skb_checksum_help(skb);
1816 return 0;
1817 }
1818
1819
1820 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1821 ICE_TXD_CTX_QW0_EIPLEN_S;
1822
1823
1824 ip.hdr = skb_inner_network_header(skb);
1825
1826
1827 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1828 ICE_TXD_CTX_QW0_NATLEN_S;
1829
1830 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1831
1832 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1833 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1834 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1835
1836
1837 off->cd_tunnel_params |= tunnel;
1838
1839
1840
1841
1842 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1843
1844
1845 l4.hdr = skb_inner_transport_header(skb);
1846 l4_proto = 0;
1847
1848
1849 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1850 if (ip.v4->version == 4)
1851 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1852 if (ip.v6->version == 6)
1853 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1854 }
1855
1856
1857 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1858 l4_proto = ip.v4->protocol;
1859
1860
1861
1862 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1863 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1864 else
1865 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1866
1867 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1868 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1869 exthdr = ip.hdr + sizeof(*ip.v6);
1870 l4_proto = ip.v6->nexthdr;
1871 if (l4.hdr != exthdr)
1872 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1873 &frag_off);
1874 } else {
1875 return -1;
1876 }
1877
1878
1879 l3_len = l4.hdr - ip.hdr;
1880 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1881
1882
1883 switch (l4_proto) {
1884 case IPPROTO_TCP:
1885
1886 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1887 l4_len = l4.tcp->doff;
1888 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1889 break;
1890 case IPPROTO_UDP:
1891
1892 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1893 l4_len = (sizeof(struct udphdr) >> 2);
1894 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1895 break;
1896 case IPPROTO_SCTP:
1897
1898 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1899 l4_len = sizeof(struct sctphdr) >> 2;
1900 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1901 break;
1902
1903 default:
1904 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1905 return -1;
1906 skb_checksum_help(skb);
1907 return 0;
1908 }
1909
1910 off->td_cmd |= cmd;
1911 off->td_offset |= offset;
1912 return 1;
1913}

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 */
1923static void
1924ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
1925{
1926 struct sk_buff *skb = first->skb;
1927
1928
1929 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
1930 return;
1931
1932
1933
1934
1935
1936 if (skb_vlan_tag_present(skb)) {
1937 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1938 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
1939 first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
1940 else
1941 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1942 }
1943
1944 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1945}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
1954static
1955int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1956{
1957 struct sk_buff *skb = first->skb;
1958 union {
1959 struct iphdr *v4;
1960 struct ipv6hdr *v6;
1961 unsigned char *hdr;
1962 } ip;
1963 union {
1964 struct tcphdr *tcp;
1965 struct udphdr *udp;
1966 unsigned char *hdr;
1967 } l4;
1968 u64 cd_mss, cd_tso_len;
1969 __be16 protocol;
1970 u32 paylen;
1971 u8 l4_start;
1972 int err;
1973
1974 if (skb->ip_summed != CHECKSUM_PARTIAL)
1975 return 0;
1976
1977 if (!skb_is_gso(skb))
1978 return 0;
1979
1980 err = skb_cow_head(skb, 0);
1981 if (err < 0)
1982 return err;
1983
1984
1985 protocol = vlan_get_protocol(skb);
1986
1987 if (eth_p_mpls(protocol))
1988 ip.hdr = skb_inner_network_header(skb);
1989 else
1990 ip.hdr = skb_network_header(skb);
1991 l4.hdr = skb_checksum_start(skb);
1992
1993
1994 if (ip.v4->version == 4) {
1995 ip.v4->tot_len = 0;
1996 ip.v4->check = 0;
1997 } else {
1998 ip.v6->payload_len = 0;
1999 }
2000
2001 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2002 SKB_GSO_GRE_CSUM |
2003 SKB_GSO_IPXIP4 |
2004 SKB_GSO_IPXIP6 |
2005 SKB_GSO_UDP_TUNNEL |
2006 SKB_GSO_UDP_TUNNEL_CSUM)) {
2007 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2008 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2009 l4.udp->len = 0;
2010
2011
2012 l4_start = (u8)(l4.hdr - skb->data);
2013
2014
2015 paylen = skb->len - l4_start;
2016 csum_replace_by_diff(&l4.udp->check,
2017 (__force __wsum)htonl(paylen));
2018 }
2019
2020
2021
2022
2023 ip.hdr = skb_inner_network_header(skb);
2024 l4.hdr = skb_inner_transport_header(skb);
2025
2026
2027 if (ip.v4->version == 4) {
2028 ip.v4->tot_len = 0;
2029 ip.v4->check = 0;
2030 } else {
2031 ip.v6->payload_len = 0;
2032 }
2033 }
2034
2035
2036 l4_start = (u8)(l4.hdr - skb->data);
2037
2038
2039 paylen = skb->len - l4_start;
2040
2041 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2042 csum_replace_by_diff(&l4.udp->check,
2043 (__force __wsum)htonl(paylen));
2044
2045 off->header_len = (u8)sizeof(l4.udp) + l4_start;
2046 } else {
2047 csum_replace_by_diff(&l4.tcp->check,
2048 (__force __wsum)htonl(paylen));
2049
2050 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2051 }
2052
2053
2054 first->gso_segs = skb_shinfo(skb)->gso_segs;
2055 first->bytecount += (first->gso_segs - 1) * off->header_len;
2056
2057 cd_tso_len = skb->len - off->header_len;
2058 cd_mss = skb_shinfo(skb)->gso_size;
2059
2060
2061 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2062 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2063 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2064 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2065 first->tx_flags |= ICE_TX_FLAGS_TSO;
2066 return 1;
2067}

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * than accurate as we have 4K - 1 of wiggle room that we can fit into the
 * last descriptor; if the estimate is ever low, it simply results in one
 * extra descriptor being used, which is perfectly safe.
 */
2097static unsigned int ice_txd_use_count(unsigned int size)
2098{
2099 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2100}

/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
2108static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2109{
2110 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2111 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2112 unsigned int count = 0, size = skb_headlen(skb);
2113
2114 for (;;) {
2115 count += ice_txd_use_count(size);
2116
2117 if (!nr_frags--)
2118 break;
2119
2120 size = skb_frag_size(frag++);
2121 }
2122
2123 return count;
2124}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
2139static bool __ice_chk_linearize(struct sk_buff *skb)
2140{
2141 const skb_frag_t *frag, *stale;
2142 int nr_frags, sum;
2143
2144
2145 nr_frags = skb_shinfo(skb)->nr_frags;
2146 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2147 return false;
	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We use
	 * this as the worst case scenario in which the frag ahead of us only
	 * provides one byte which is why we are limited to 6 descriptors for
	 * a single transmit as the header and previous fragments are factored
	 * in.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2174 int stale_size = skb_frag_size(stale);
2175
2176 sum += skb_frag_size(frag++);
2177
2178
2179
2180
2181
2182
2183
2184 if (stale_size > ICE_MAX_DATA_PER_TXD) {
2185 int align_pad = -(skb_frag_off(stale)) &
2186 (ICE_MAX_READ_REQ_SIZE - 1);
2187
2188 sum -= align_pad;
2189 stale_size -= align_pad;
2190
2191 do {
2192 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2193 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2194 } while (stale_size > ICE_MAX_DATA_PER_TXD);
2195 }
2196
2197
2198 if (sum < 0)
2199 return true;
2200
2201 if (!nr_frags--)
2202 break;
2203
2204 sum -= stale_size;
2205 }
2206
2207 return false;
2208}

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
2219static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2220{
2221
2222 if (likely(count < ICE_MAX_BUF_TXD))
2223 return false;
2224
2225 if (skb_is_gso(skb))
2226 return __ice_chk_linearize(skb);
2227
2228
2229 return count != ICE_MAX_BUF_TXD;
2230}

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
2239static void
2240ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
2241 struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2242{
2243 s8 idx;
2244
2245
2246 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2247 return;
2248
2249 if (!tx_ring->ptp_tx)
2250 return;
2251
2252
2253 if (first->tx_flags & ICE_TX_FLAGS_TSO)
2254 return;
2255
2256
2257 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
2258 if (idx < 0)
2259 return;
2260
2261 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2262 (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
2263 ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
2264 first->tx_flags |= ICE_TX_FLAGS_TSYN;
2265}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
2274static netdev_tx_t
2275ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
2276{
2277 struct ice_tx_offload_params offload = { 0 };
2278 struct ice_vsi *vsi = tx_ring->vsi;
2279 struct ice_tx_buf *first;
2280 struct ethhdr *eth;
2281 unsigned int count;
2282 int tso, csum;
2283
2284 ice_trace(xmit_frame_ring, tx_ring, skb);
2285
2286 count = ice_xmit_desc_count(skb);
2287 if (ice_chk_linearize(skb, count)) {
2288 if (__skb_linearize(skb))
2289 goto out_drop;
2290 count = ice_txd_use_count(skb->len);
2291 tx_ring->tx_stats.tx_linearize++;
2292 }
2293
2294
2295
2296
2297
2298
2299
2300 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2301 ICE_DESCS_FOR_CTX_DESC)) {
2302 tx_ring->tx_stats.tx_busy++;
2303 return NETDEV_TX_BUSY;
2304 }
2305
2306
2307 netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
2308
2309 offload.tx_ring = tx_ring;
2310
2311
2312 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2313 first->skb = skb;
2314 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2315 first->gso_segs = 1;
2316 first->tx_flags = 0;
2317
2318
2319 ice_tx_prepare_vlan_flags(tx_ring, first);
2320 if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
2321 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2322 (ICE_TX_CTX_DESC_IL2TAG2 <<
2323 ICE_TXD_CTX_QW1_CMD_S));
2324 offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
2325 ICE_TX_FLAGS_VLAN_S;
2326 }
2327
2328
2329 tso = ice_tso(first, &offload);
2330 if (tso < 0)
2331 goto out_drop;
2332
2333
2334 csum = ice_tx_csum(first, &offload);
2335 if (csum < 0)
2336 goto out_drop;
2337
2338
2339 eth = (struct ethhdr *)skb_mac_header(skb);
2340 if (unlikely((skb->priority == TC_PRIO_CONTROL ||
2341 eth->h_proto == htons(ETH_P_LLDP)) &&
2342 vsi->type == ICE_VSI_PF &&
2343 vsi->port_info->qos_cfg.is_sw_lldp))
2344 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2345 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2346 ICE_TXD_CTX_QW1_CMD_S);
2347
2348 ice_tstamp(tx_ring, skb, first, &offload);
2349 if (ice_is_switchdev_running(vsi->back))
2350 ice_eswitch_set_target_vsi(skb, &offload);
2351
2352 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2353 struct ice_tx_ctx_desc *cdesc;
2354 u16 i = tx_ring->next_to_use;
2355
2356
2357 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2358 i++;
2359 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2360
2361
2362 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2363 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2364 cdesc->rsvd = cpu_to_le16(0);
2365 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2366 }
2367
2368 ice_tx_map(tx_ring, first, &offload);
2369 return NETDEV_TX_OK;
2370
2371out_drop:
2372 ice_trace(xmit_frame_ring_drop, tx_ring, skb);
2373 dev_kfree_skb_any(skb);
2374 return NETDEV_TX_OK;
2375}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
2384netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2385{
2386 struct ice_netdev_priv *np = netdev_priv(netdev);
2387 struct ice_vsi *vsi = np->vsi;
2388 struct ice_tx_ring *tx_ring;
2389
2390 tx_ring = vsi->tx_rings[skb->queue_mapping];
2391
2392
2393
2394
2395 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2396 return NETDEV_TX_OK;
2397
2398 return ice_xmit_frame_ring(skb, tx_ring);
2399}

/**
 * ice_get_dscp_up - return the UP/TC value for a SKB
 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
 * @skb: SKB to query for info to determine UP/TC
 *
 * This function is only called when the PF is in DSCP PFC mode.
 */
2408static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
2409{
2410 u8 dscp = 0;
2411
2412 if (skb->protocol == htons(ETH_P_IP))
2413 dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
2414 else if (skb->protocol == htons(ETH_P_IPV6))
2415 dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
2416
2417 return dcbcfg->dscp_map[dscp];
2418}

/**
 * ice_select_queue - Select the right Tx queue for an skb
 * @netdev: network interface device structure
 * @skb: send buffer
 * @sb_dev: subordinate device, if any
 */
2420u16
2421ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
2422 struct net_device *sb_dev)
2423{
2424 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2425 struct ice_dcbx_cfg *dcbcfg;
2426
2427 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
2428 if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
2429 skb->priority = ice_get_dscp_up(dcbcfg, skb);
2430
2431 return netdev_pick_tx(netdev, skb, sb_dev);
2432}

/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: Tx ring to clean
 */
2438void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
2439{
2440 struct ice_vsi *vsi = tx_ring->vsi;
2441 s16 i = tx_ring->next_to_clean;
2442 int budget = ICE_DFLT_IRQ_WORK;
2443 struct ice_tx_desc *tx_desc;
2444 struct ice_tx_buf *tx_buf;
2445
2446 tx_buf = &tx_ring->tx_buf[i];
2447 tx_desc = ICE_TX_DESC(tx_ring, i);
2448 i -= tx_ring->count;
2449
2450 do {
2451 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2452
2453
2454 if (!eop_desc)
2455 break;
2456
2457
2458 smp_rmb();
2459
2460
2461 if (!(eop_desc->cmd_type_offset_bsz &
2462 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2463 break;
2464
2465
2466 tx_buf->next_to_watch = NULL;
2467 tx_desc->buf_addr = 0;
2468 tx_desc->cmd_type_offset_bsz = 0;
2469
2470
2471 tx_buf++;
2472 tx_desc++;
2473 i++;
2474 if (unlikely(!i)) {
2475 i -= tx_ring->count;
2476 tx_buf = tx_ring->tx_buf;
2477 tx_desc = ICE_TX_DESC(tx_ring, 0);
2478 }
2479
2480
2481 if (dma_unmap_len(tx_buf, len))
2482 dma_unmap_single(tx_ring->dev,
2483 dma_unmap_addr(tx_buf, dma),
2484 dma_unmap_len(tx_buf, len),
2485 DMA_TO_DEVICE);
2486 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2487 devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2488
2489
2490 tx_buf->raw_buf = NULL;
2491 tx_buf->tx_flags = 0;
2492 tx_buf->next_to_watch = NULL;
2493 dma_unmap_len_set(tx_buf, len, 0);
2494 tx_desc->buf_addr = 0;
2495 tx_desc->cmd_type_offset_bsz = 0;
2496
2497
2498 tx_buf++;
2499 tx_desc++;
2500 i++;
2501 if (unlikely(!i)) {
2502 i -= tx_ring->count;
2503 tx_buf = tx_ring->tx_buf;
2504 tx_desc = ICE_TX_DESC(tx_ring, 0);
2505 }
2506
2507 budget--;
2508 } while (likely(budget));
2509
2510 i += tx_ring->count;
2511 tx_ring->next_to_clean = i;
2512
2513
2514 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2515}
2516