// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID		0x40
#define ICE_FDIR_CLEAN_DELAY	10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring must be set up before a filter can be programmed */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

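	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */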
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

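	/* Note: the index is biased by -count above, so i stays in
	 * [-count, 0) and the wrap check inside the loop reduces to
	 * "did i reach zero"; the bias is removed again after the loop.
	 */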
	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
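		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */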
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_rx_frame_truesize - calculate the truesize of a received frame
 * @rx_ring: ptr to Rx ring
 * @size: size of the frame data
 *
 * Calculate the truesize, taking into account the PAGE_SIZE of the
 * underlying arch.
 */
static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	struct ice_ring *xdp_ring;
	int err, result;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
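		/* XDP Tx rings are set up per CPU, so indexing by the
		 * current CPU id picks this core's ring without locking.
		 */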
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		return result;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns the number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
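	/* Seed the page refcount up front so the hot path can hand out
	 * buffer halves by decrementing pagecnt_bias instead of touching
	 * the atomic page refcount for every received frame.
	 */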
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple useless calls to release_rx_desc.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes.
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre ice_clean_rx_irq() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed.
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif

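	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */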
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
	       int *rx_buf_pgcnt)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must to record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
static void
ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	       int rx_buf_pgcnt)
{
	u16 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int offset = rx_ring->rx_offset;
	unsigned int xdp_res, xdp_xmit = 0;
	struct sk_buff *skb = rx_ring->skb;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		unsigned char *hard_start;
		unsigned int size;
		u16 stat_err_bits;
		int rx_buf_pgcnt;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

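		/* status_error_len will always be zero for unused
		 * descriptors because it's cleared in cleanup, and overlaps
		 * with hdr_addr which is always zero because packet split
		 * isn't used; if the hardware wrote DD then it will be
		 * non-zero.
		 */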
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		ice_trace(clean_rx_irq, rx_ring, rx_desc);
		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
			struct ice_vsi *ctrl_vsi = rx_ring->vsi;

			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
			    ctrl_vsi->vf_id != ICE_INVAL_VFID)
				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
			ice_put_rx_buf(rx_ring, NULL, 0);
			cleaned_count++;
			continue;
		}

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);

		if (!size) {
			xdp.data = NULL;
			xdp.data_end = NULL;
			xdp.data_hard_start = NULL;
			xdp.data_meta = NULL;
			goto construct_skb;
		}

		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
			     offset;
		xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depend on len size */
		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif

		xdp_prog = READ_ONCE(rx_ring->xdp_prog);
		if (!xdp_prog)
			goto construct_skb;

		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
		if (!xdp_res)
			goto construct_skb;
		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			xdp_xmit |= xdp_res;
			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
		} else {
			rx_buf->pagecnt_bias++;
		}
		total_rx_bytes += size;
		total_rx_pkts++;

		cleaned_count++;
		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
		continue;
construct_skb:
		if (skb) {
			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
		} else if (likely(xdp.data)) {
			if (ice_ring_uses_build_skb(rx_ring))
				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
			else
				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
		}
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
		cleaned_count++;

		/* skip if it is non-EOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	if (xdp_prog)
		ice_finalize_xdp_rx(rx_ring, xdp_xmit);
	rx_ring->skb = skb;

	ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

/**
 * ice_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR value based on incoming packets, bytes, and interrupts.
 *
 * This function is a no-op if the ring is not configured to dynamic ITR.
 */
static void ice_net_dim(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;

	if (ITR_IS_DYNAMIC(tx)) {
		struct dim_sample dim_sample = {};
		u64 packets = 0, bytes = 0;
		struct ice_ring *ring;

		ice_for_each_ring(ring, q_vector->tx) {
			packets += ring->stats.pkts;
			bytes += ring->stats.bytes;
		}

		dim_update_sample(q_vector->total_events, packets, bytes,
				  &dim_sample);

		net_dim(&tx->dim, dim_sample);
	}

	if (ITR_IS_DYNAMIC(rx)) {
		struct dim_sample dim_sample = {};
		u64 packets = 0, bytes = 0;
		struct ice_ring *ring;

		ice_for_each_ring(ring, q_vector->rx) {
			packets += ring->stats.pkts;
			bytes += ring->stats.bytes;
		}

		dim_update_sample(q_vector->total_events, packets, bytes,
				  &dim_sample);

		net_dim(&rx->dim, dim_sample);
	}
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
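	/* The ITR value is reported in microseconds, and the register value
	 * is recorded in 2 microsecond units. For this reason we only need
	 * to shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply
	 * this granularity as a shift instead of division. The mask makes
	 * sure the ITR value is never odd so we don't accidentally write
	 * into the field prior to the ITR field.
	 */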
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

/**
 * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 *
 * Update the net_dim() algorithm and re-enable the interrupt associated with
 * this vector.
 *
 * If the VSI is down, the interrupt will not be re-enabled.
 */
static void ice_update_ena_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;
	bool wb_en = q_vector->wb_on_itr;
	u32 itr_val;

	if (test_bit(ICE_DOWN, vsi->state))
		return;

	/* When exiting WB_ON_ITR, let ITR resume its normal
	 * interrupts-enabled path.
	 */
	if (wb_en)
		q_vector->wb_on_itr = false;

	/* This will do nothing if dynamic updates are not enabled. */
	ice_net_dim(q_vector);

	/* net_dim() updates ITR out-of-band using a work item */
	itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
	/* trigger an immediate software interrupt when exiting
	 * busy poll, to make sure to catch any pending cleanups
	 * that might have been missed due to interrupt state
	 * transition.
	 */
	if (wb_en) {
		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
			   GLINT_DYN_CTL_SW_ITR_INDX_M |
			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
	}
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in wb_on_itr mode, no need to change it */
	if (q_vector->wb_on_itr)
		return;

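	/* Writing ICE_ITR_NONE as the ITR index tells hardware not to update
	 * the interval, and INTENA_MSK indicates the INTENA bit is being
	 * deliberately left alone while WB_ON_ITR is turned on.
	 */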
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
	      GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_WB_ON_ITR_M);

	q_vector->wb_on_itr = true;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
				container_of(napi, struct ice_q_vector, napi);
	bool clean_complete = true;
	struct ice_ring *ring;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx) {
		bool wd = ring->xsk_pool ?
			  ice_clean_tx_irq_zc(ring, budget) :
			  ice_clean_tx_irq(ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = ring->xsk_pool ?
			  ice_clean_rx_irq_zc(ring, budget_per_ring) :
			  ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		/* Set the writeback on ITR so partial completions of
		 * cache-lines will still continue even if we're polling.
		 */
		ice_set_wb_on_itr(q_vector);
		return budget;
	}

	/* Exit the polling mode, but don't re-enable interrupts if stack
	 * might poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		ice_update_ena_itr(q_vector);
	else
		ice_set_wb_on_itr(q_vector);

	return min_t(int, work_done, budget - 1);
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			  ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

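		/* align size to end of page: extend the first chunk by the
		 * distance from dma to the next ICE_MAX_READ_REQ_SIZE
		 * boundary so that subsequent chunks start aligned
		 */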
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
			ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	protocol = vlan_get_protocol(skb);

	if (protocol == htons(ETH_P_IP))
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		first->tx_flags |= ICE_TX_FLAGS_IPV6;

	if (skb->encapsulation) {
		bool gso_ena = false;
		u32 tunnel = 0;

		/* define outer network header type */
		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
				  ICE_TX_CTX_EIPT_IPV4 :
				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
			l4_proto = ip.v4->protocol;
		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
			int ret;

			tunnel |= ICE_TX_CTX_EIPT_IPV6;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
					       &l4_proto, &frag_off);
			if (ret < 0)
				return -1;
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (first->tx_flags & ICE_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  ICE_TXD_CTX_QW0_EIPLEN_S;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  ICE_TXD_CTX_QW0_NATLEN_S;

		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
		/* indicate if we need to offload outer UDP header */
		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

		/* record tunnel offload values */
		off->cd_tunnel_params |= tunnel;

		/* set DTYP=1 to indicate that it's a Tx context descriptor
		 * in IPsec tunnel mode with Tx offloads in Quad word 1
		 */
		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			first->tx_flags |= ICE_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 */
static void
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
	 * insertion for 802.1AD is not supported
	 */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_start = (u8)(l4.hdr - skb->data);

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_start;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header */
		off->header_len = (u8)sizeof(l4.udp) + l4_start;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * descriptor. This accounts for the remainder as well as anything in the
 * rest of the last descriptor.
 *
 * Returns the number of descriptors needed for the skb.
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
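
/* Example (a sanity check of the arithmetic above, not upstream text):
 * for a 32KB request, (32768 * 85) >> 20 = 2, plus one for the skb data
 * pointer gives 3 descriptors, matching ceil(32768 / 12K).
 */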

/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

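	/* Initialize size to the negative value of gso_size minus 1. We use
	 * this as the worst case scenario in which the frag ahead of us only
	 * provides one byte, which is why we are limited to 6 descriptors
	 * for a single transmit as the header and previous fragment are
	 * included in the same descriptor as the rest.
	 */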
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
static void
ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	s8 idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return;

	if (!tx_ring->ptp_tx)
		return;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (first->tx_flags & ICE_TX_FLAGS_TSO)
		return;

	/* Grab an open timestamp slot */
	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
	if (idx < 0)
		return;

	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
	first->tx_flags |= ICE_TX_FLAGS_TSYN;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	struct ethhdr *eth;
	unsigned int count;
	int tso, csum;

	ice_trace(xmit_frame_ring, tx_ring, skb);

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	eth = (struct ethhdr *)skb_mac_header(skb);
	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
		      eth->h_proto == htons(ETH_P_LLDP)) &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->qos_cfg.is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	ice_tstamp(tx_ring, skb, first, &offload);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* clear next_to_watch to prevent false hangs */
		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}