// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

4#include <linux/prefetch.h>
5
6#include "iavf.h"
7#include "iavf_trace.h"
8#include "iavf_prototype.h"
9
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}
19
20#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
21
22
23
24
25
26
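/**
 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/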
27static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
28 struct iavf_tx_buffer *tx_buffer)
29{
30 if (tx_buffer->skb) {
31 if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
32 kfree(tx_buffer->raw_buf);
33 else
34 dev_kfree_skb_any(tx_buffer->skb);
35 if (dma_unmap_len(tx_buffer, len))
36 dma_unmap_single(ring->dev,
37 dma_unmap_addr(tx_buffer, dma),
38 dma_unmap_len(tx_buffer, len),
39 DMA_TO_DEVICE);
40 } else if (dma_unmap_len(tx_buffer, len)) {
41 dma_unmap_page(ring->dev,
42 dma_unmap_addr(tx_buffer, dma),
43 dma_unmap_len(tx_buffer, len),
44 DMA_TO_DEVICE);
45 }
46
47 tx_buffer->next_to_watch = NULL;
48 tx_buffer->skb = NULL;
49 dma_unmap_len_set(tx_buffer, len, 0);
50
51}
52
53
54
55
56
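/**
 * iavf_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/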
57void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
58{
59 unsigned long bi_size;
60 u16 i;
61
62
63 if (!tx_ring->tx_bi)
64 return;
65
66
67 for (i = 0; i < tx_ring->count; i++)
68 iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
69
70 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
71 memset(tx_ring->tx_bi, 0, bi_size);
72
73
74 memset(tx_ring->desc, 0, tx_ring->size);
75
76 tx_ring->next_to_use = 0;
77 tx_ring->next_to_clean = 0;
78
79 if (!tx_ring->netdev)
80 return;
81
82
83 netdev_tx_reset_queue(txring_txq(tx_ring));
84}
85
86
87
88
89
90
91
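/**
 * iavf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/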
92void iavf_free_tx_resources(struct iavf_ring *tx_ring)
93{
94 iavf_clean_tx_ring(tx_ring);
95 kfree(tx_ring->tx_bi);
96 tx_ring->tx_bi = NULL;
97
98 if (tx_ring->desc) {
99 dma_free_coherent(tx_ring->dev, tx_ring->size,
100 tx_ring->desc, tx_ring->dma);
101 tx_ring->desc = NULL;
102 }
103}
104
105
106
107
108
109
110
111
112
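/**
 * iavf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: unused by this implementation; the cached head is always used
 *
 * Returns the number of descriptors that have been submitted but not yet
 * cleaned, based on the cached next_to_clean and the hardware tail.
 **/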
113u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
114{
115 u32 head, tail;
116
117 head = ring->next_to_clean;
118 tail = readl(ring->tail);
119
120 if (head != tail)
121 return (head < tail) ?
122 tail - head : (tail + ring->count - head);
123
124 return 0;
125}
126
127
128
129
130
131
132
133
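/**
 * iavf_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi: pointer to vsi struct with Tx queues
 *
 * VSI has netdev and netdev has Tx queues. This function checks each of
 * those Tx queues and, if it looks hung, triggers recovery by issuing a
 * software interrupt to force a descriptor writeback.
 **/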
134void iavf_detect_recover_hung(struct iavf_vsi *vsi)
135{
136 struct iavf_ring *tx_ring = NULL;
137 struct net_device *netdev;
138 unsigned int i;
139 int packets;
140
141 if (!vsi)
142 return;
143
144 if (test_bit(__IAVF_VSI_DOWN, vsi->state))
145 return;
146
147 netdev = vsi->netdev;
148 if (!netdev)
149 return;
150
151 if (!netif_carrier_ok(netdev))
152 return;
153
154 for (i = 0; i < vsi->back->num_active_queues; i++) {
155 tx_ring = &vsi->back->tx_rings[i];
156 if (tx_ring && tx_ring->desc) {
157
158
159
160
161
162
163
164 packets = tx_ring->stats.packets & INT_MAX;
165 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
166 iavf_force_wb(vsi, tx_ring->q_vector);
167 continue;
168 }
169
170
171
172
173 smp_rmb();
174 tx_ring->tx_stats.prev_pkt_ctr =
175 iavf_get_tx_pending(tx_ring, true) ? packets : -1;
176 }
177 }
178}
179
180#define WB_STRIDE 4
181
182
183
184
185
186
187
188
189
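/**
 * iavf_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/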
190static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
191 struct iavf_ring *tx_ring, int napi_budget)
192{
193 int i = tx_ring->next_to_clean;
194 struct iavf_tx_buffer *tx_buf;
195 struct iavf_tx_desc *tx_desc;
196 unsigned int total_bytes = 0, total_packets = 0;
197 unsigned int budget = vsi->work_limit;
198
199 tx_buf = &tx_ring->tx_bi[i];
200 tx_desc = IAVF_TX_DESC(tx_ring, i);
201 i -= tx_ring->count;
202
203 do {
204 struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
205
206
207 if (!eop_desc)
208 break;
209
210
211 smp_rmb();
212
213 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
214
215 if (!(eop_desc->cmd_type_offset_bsz &
216 cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
217 break;
218
219
220 tx_buf->next_to_watch = NULL;
221
222
223 total_bytes += tx_buf->bytecount;
224 total_packets += tx_buf->gso_segs;
225
226
227 napi_consume_skb(tx_buf->skb, napi_budget);
228
229
230 dma_unmap_single(tx_ring->dev,
231 dma_unmap_addr(tx_buf, dma),
232 dma_unmap_len(tx_buf, len),
233 DMA_TO_DEVICE);
234
235
236 tx_buf->skb = NULL;
237 dma_unmap_len_set(tx_buf, len, 0);
238
239
240 while (tx_desc != eop_desc) {
241 iavf_trace(clean_tx_irq_unmap,
242 tx_ring, tx_desc, tx_buf);
243
244 tx_buf++;
245 tx_desc++;
246 i++;
247 if (unlikely(!i)) {
248 i -= tx_ring->count;
249 tx_buf = tx_ring->tx_bi;
250 tx_desc = IAVF_TX_DESC(tx_ring, 0);
251 }
252
253
254 if (dma_unmap_len(tx_buf, len)) {
255 dma_unmap_page(tx_ring->dev,
256 dma_unmap_addr(tx_buf, dma),
257 dma_unmap_len(tx_buf, len),
258 DMA_TO_DEVICE);
259 dma_unmap_len_set(tx_buf, len, 0);
260 }
261 }
262
263
264 tx_buf++;
265 tx_desc++;
266 i++;
267 if (unlikely(!i)) {
268 i -= tx_ring->count;
269 tx_buf = tx_ring->tx_bi;
270 tx_desc = IAVF_TX_DESC(tx_ring, 0);
271 }
272
273 prefetch(tx_desc);
274
275
276 budget--;
277 } while (likely(budget));
278
279 i += tx_ring->count;
280 tx_ring->next_to_clean = i;
281 u64_stats_update_begin(&tx_ring->syncp);
282 tx_ring->stats.bytes += total_bytes;
283 tx_ring->stats.packets += total_packets;
284 u64_stats_update_end(&tx_ring->syncp);
285 tx_ring->q_vector->tx.total_bytes += total_bytes;
286 tx_ring->q_vector->tx.total_packets += total_packets;
287
288 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
289
290
291
292
293
294 unsigned int j = iavf_get_tx_pending(tx_ring, false);
295
296 if (budget &&
297 ((j / WB_STRIDE) == 0) && (j > 0) &&
298 !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
299 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
300 tx_ring->arm_wb = true;
301 }
302
303
304 netdev_tx_completed_queue(txring_txq(tx_ring),
305 total_packets, total_bytes);
306
307#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
308 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
309 (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
310
311
312
313 smp_mb();
314 if (__netif_subqueue_stopped(tx_ring->netdev,
315 tx_ring->queue_index) &&
316 !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
317 netif_wake_subqueue(tx_ring->netdev,
318 tx_ring->queue_index);
319 ++tx_ring->tx_stats.restart_queue;
320 }
321 }
322
323 return !!budget;
324}
325
326
327
328
329
330
331
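/**
 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 **/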
332static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
333 struct iavf_q_vector *q_vector)
334{
335 u16 flags = q_vector->tx.ring[0].flags;
336 u32 val;
337
338 if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
339 return;
340
341 if (q_vector->arm_wb_state)
342 return;
343
344 val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
345 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK;
346
347 wr32(&vsi->back->hw,
348 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
349 q_vector->arm_wb_state = true;
350}
351
352
353
354
355
356
357
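/**
 * iavf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/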
358void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
359{
	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
365
366 wr32(&vsi->back->hw,
367 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
368 val);
369}
370
371static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
372 struct iavf_ring_container *rc)
373{
374 return &q_vector->rx == rc;
375}
376
377static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
378{
379 unsigned int divisor;
380
381 switch (q_vector->adapter->link_speed) {
382 case VIRTCHNL_LINK_SPEED_40GB:
383 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
384 break;
385 case VIRTCHNL_LINK_SPEED_25GB:
386 case VIRTCHNL_LINK_SPEED_20GB:
387 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
388 break;
389 default:
390 case VIRTCHNL_LINK_SPEED_10GB:
391 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
392 break;
393 case VIRTCHNL_LINK_SPEED_1GB:
394 case VIRTCHNL_LINK_SPEED_100MB:
395 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
396 break;
397 }
398
399 return divisor;
400}
401
402
403
404
405
406
407
408
409
410
411
412
413
414
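/**
 * iavf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packet and byte counts during the last
 * interrupt.  Per-interrupt computation gives faster updates and a more
 * accurate ITR for the current traffic pattern; the thresholds below trade
 * response time against bulk throughput.
 **/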
415static void iavf_update_itr(struct iavf_q_vector *q_vector,
416 struct iavf_ring_container *rc)
417{
418 unsigned int avg_wire_size, packets, bytes, itr;
419 unsigned long next_update = jiffies;
420
421
422
423
424 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
425 return;
426
427
428
429
430 itr = iavf_container_is_rx(q_vector, rc) ?
431 IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
432 IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
433
434
435
436
437
438
439 if (time_after(next_update, rc->next_update))
440 goto clear_counts;
441
442
443
444
445
446
447
448 if (q_vector->itr_countdown) {
449 itr = rc->target_itr;
450 goto clear_counts;
451 }
452
453 packets = rc->total_packets;
454 bytes = rc->total_bytes;
455
456 if (iavf_container_is_rx(q_vector, rc)) {
457
458
459
460
461
462 if (packets && packets < 4 && bytes < 9000 &&
463 (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
464 itr = IAVF_ITR_ADAPTIVE_LATENCY;
465 goto adjust_by_size;
466 }
467 } else if (packets < 4) {
468
469
470
471
472
473 if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
474 (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
475 IAVF_ITR_ADAPTIVE_MAX_USECS)
476 goto clear_counts;
477 } else if (packets > 32) {
478
479
480
481 rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
482 }
483
484
485
486
487
488
489
490
491
492 if (packets < 56) {
493 itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
494 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
495 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
496 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
497 }
498 goto clear_counts;
499 }
500
501 if (packets <= 256) {
502 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
503 itr &= IAVF_ITR_MASK;
504
505
506
507
508
509 if (packets <= 112)
510 goto clear_counts;
511
512
513
514
515
516
517 itr /= 2;
518 itr &= IAVF_ITR_MASK;
519 if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
520 itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
521
522 goto clear_counts;
523 }
524
525
526
527
528
529
530
531 itr = IAVF_ITR_ADAPTIVE_BULK;
532
533adjust_by_size:
534
535
536
537
538
539 avg_wire_size = bytes / packets;
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556 if (avg_wire_size <= 60) {
557
558 avg_wire_size = 4096;
559 } else if (avg_wire_size <= 380) {
560
561 avg_wire_size *= 40;
562 avg_wire_size += 1696;
563 } else if (avg_wire_size <= 1084) {
564
565 avg_wire_size *= 15;
566 avg_wire_size += 11452;
567 } else if (avg_wire_size <= 1980) {
568
569 avg_wire_size *= 5;
570 avg_wire_size += 22420;
571 } else {
572
573 avg_wire_size = 32256;
574 }
575
576
577
578
579 if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
580 avg_wire_size /= 2;
581
582
583
584
585
586
587
588
589 itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
590 IAVF_ITR_ADAPTIVE_MIN_INC;
591
592 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
593 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
594 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
595 }
596
597clear_counts:
598
599 rc->target_itr = itr;
600
601
602 rc->next_update = next_update + 1;
603
604 rc->total_bytes = 0;
605 rc->total_packets = 0;
606}
607
608
609
610
611
612
613
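/**
 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 **/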
614int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
615{
616 struct device *dev = tx_ring->dev;
617 int bi_size;
618
619 if (!dev)
620 return -ENOMEM;
621
622
623 WARN_ON(tx_ring->tx_bi);
624 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
625 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
626 if (!tx_ring->tx_bi)
627 goto err;
628
629
630 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
631 tx_ring->size = ALIGN(tx_ring->size, 4096);
632 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
633 &tx_ring->dma, GFP_KERNEL);
634 if (!tx_ring->desc) {
635 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
636 tx_ring->size);
637 goto err;
638 }
639
640 tx_ring->next_to_use = 0;
641 tx_ring->next_to_clean = 0;
642 tx_ring->tx_stats.prev_pkt_ctr = -1;
643 return 0;
644
645err:
646 kfree(tx_ring->tx_bi);
647 tx_ring->tx_bi = NULL;
648 return -ENOMEM;
649}
650
651
652
653
654
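/**
 * iavf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/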
655void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
656{
657 unsigned long bi_size;
658 u16 i;
659
660
661 if (!rx_ring->rx_bi)
662 return;
663
664 if (rx_ring->skb) {
665 dev_kfree_skb(rx_ring->skb);
666 rx_ring->skb = NULL;
667 }
668
669
670 for (i = 0; i < rx_ring->count; i++) {
671 struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
672
673 if (!rx_bi->page)
674 continue;
675
676
677
678
679 dma_sync_single_range_for_cpu(rx_ring->dev,
680 rx_bi->dma,
681 rx_bi->page_offset,
682 rx_ring->rx_buf_len,
683 DMA_FROM_DEVICE);
684
685
686 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
687 iavf_rx_pg_size(rx_ring),
688 DMA_FROM_DEVICE,
689 IAVF_RX_DMA_ATTR);
690
691 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
692
693 rx_bi->page = NULL;
694 rx_bi->page_offset = 0;
695 }
696
697 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
698 memset(rx_ring->rx_bi, 0, bi_size);
699
700
701 memset(rx_ring->desc, 0, rx_ring->size);
702
703 rx_ring->next_to_alloc = 0;
704 rx_ring->next_to_clean = 0;
705 rx_ring->next_to_use = 0;
706}
707
708
709
710
711
712
713
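/**
 * iavf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/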
714void iavf_free_rx_resources(struct iavf_ring *rx_ring)
715{
716 iavf_clean_rx_ring(rx_ring);
717 kfree(rx_ring->rx_bi);
718 rx_ring->rx_bi = NULL;
719
720 if (rx_ring->desc) {
721 dma_free_coherent(rx_ring->dev, rx_ring->size,
722 rx_ring->desc, rx_ring->dma);
723 rx_ring->desc = NULL;
724 }
725}
726
727
728
729
730
731
732
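/**
 * iavf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/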
733int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
734{
735 struct device *dev = rx_ring->dev;
736 int bi_size;
737
738
739 WARN_ON(rx_ring->rx_bi);
740 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
741 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
742 if (!rx_ring->rx_bi)
743 goto err;
744
745 u64_stats_init(&rx_ring->syncp);
746
747
748 rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
749 rx_ring->size = ALIGN(rx_ring->size, 4096);
750 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
751 &rx_ring->dma, GFP_KERNEL);
752
753 if (!rx_ring->desc) {
754 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
755 rx_ring->size);
756 goto err;
757 }
758
759 rx_ring->next_to_alloc = 0;
760 rx_ring->next_to_clean = 0;
761 rx_ring->next_to_use = 0;
762
763 return 0;
764err:
765 kfree(rx_ring->rx_bi);
766 rx_ring->rx_bi = NULL;
767 return -ENOMEM;
768}
769
770
771
772
773
774
775static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
776{
777 rx_ring->next_to_use = val;
778
779
780 rx_ring->next_to_alloc = val;
781
782
783
784
785
786
787 wmb();
788 writel(val, rx_ring->tail);
789}
790
791
792
793
794
795
796
797static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
798{
799 return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
800}
801
802
803
804
805
806
807
808
809
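/**
 * iavf_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/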
810static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
811 struct iavf_rx_buffer *bi)
812{
813 struct page *page = bi->page;
814 dma_addr_t dma;
815
816
817 if (likely(page)) {
818 rx_ring->rx_stats.page_reuse_count++;
819 return true;
820 }
821
822
823 page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
824 if (unlikely(!page)) {
825 rx_ring->rx_stats.alloc_page_failed++;
826 return false;
827 }
828
829
830 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
831 iavf_rx_pg_size(rx_ring),
832 DMA_FROM_DEVICE,
833 IAVF_RX_DMA_ATTR);
834
835
836
837
838 if (dma_mapping_error(rx_ring->dev, dma)) {
839 __free_pages(page, iavf_rx_pg_order(rx_ring));
840 rx_ring->rx_stats.alloc_page_failed++;
841 return false;
842 }
843
844 bi->dma = dma;
845 bi->page = page;
846 bi->page_offset = iavf_rx_offset(rx_ring);
847
848
849 bi->pagecnt_bias = 1;
850
851 return true;
852}
853
854
855
856
857
858
859
860static void iavf_receive_skb(struct iavf_ring *rx_ring,
861 struct sk_buff *skb, u16 vlan_tag)
862{
863 struct iavf_q_vector *q_vector = rx_ring->q_vector;
864
865 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
866 (vlan_tag & VLAN_VID_MASK))
867 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
868
869 napi_gro_receive(&q_vector->napi, skb);
870}
871
872
873
874
875
876
877
878
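/**
 * iavf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/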
879bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
880{
881 u16 ntu = rx_ring->next_to_use;
882 union iavf_rx_desc *rx_desc;
883 struct iavf_rx_buffer *bi;
884
885
886 if (!rx_ring->netdev || !cleaned_count)
887 return false;
888
889 rx_desc = IAVF_RX_DESC(rx_ring, ntu);
890 bi = &rx_ring->rx_bi[ntu];
891
892 do {
893 if (!iavf_alloc_mapped_page(rx_ring, bi))
894 goto no_buffers;
895
896
897 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
898 bi->page_offset,
899 rx_ring->rx_buf_len,
900 DMA_FROM_DEVICE);
901
902
903
904
905 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
906
907 rx_desc++;
908 bi++;
909 ntu++;
910 if (unlikely(ntu == rx_ring->count)) {
911 rx_desc = IAVF_RX_DESC(rx_ring, 0);
912 bi = rx_ring->rx_bi;
913 ntu = 0;
914 }
915
916
917 rx_desc->wb.qword1.status_error_len = 0;
918
919 cleaned_count--;
920 } while (cleaned_count);
921
922 if (rx_ring->next_to_use != ntu)
923 iavf_release_rx_desc(rx_ring, ntu);
924
925 return false;
926
927no_buffers:
928 if (rx_ring->next_to_use != ntu)
929 iavf_release_rx_desc(rx_ring, ntu);
930
931
932
933
934 return true;
935}
936
937
938
939
940
941
942
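/**
 * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/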
943static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
944 struct sk_buff *skb,
945 union iavf_rx_desc *rx_desc)
946{
947 struct iavf_rx_ptype_decoded decoded;
948 u32 rx_error, rx_status;
949 bool ipv4, ipv6;
950 u8 ptype;
951 u64 qword;
952
953 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
954 ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
955 rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
956 IAVF_RXD_QW1_ERROR_SHIFT;
957 rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
958 IAVF_RXD_QW1_STATUS_SHIFT;
959 decoded = decode_rx_desc_ptype(ptype);
960
961 skb->ip_summed = CHECKSUM_NONE;
962
963 skb_checksum_none_assert(skb);
964
965
966 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
967 return;
968
969
970 if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
971 return;
972
973
974 if (!(decoded.known && decoded.outer_ip))
975 return;
976
977 ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
978 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
979 ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
980 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
981
982 if (ipv4 &&
983 (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
984 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
985 goto checksum_fail;
986
987
988 if (ipv6 &&
989 rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
990
991 return;
992
993
994 if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
995 goto checksum_fail;
996
997
998
999
1000
1001 if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
1002 return;
1003
1004
1005 switch (decoded.inner_prot) {
1006 case IAVF_RX_PTYPE_INNER_PROT_TCP:
1007 case IAVF_RX_PTYPE_INNER_PROT_UDP:
1008 case IAVF_RX_PTYPE_INNER_PROT_SCTP:
1009 skb->ip_summed = CHECKSUM_UNNECESSARY;
1010 fallthrough;
1011 default:
1012 break;
1013 }
1014
1015 return;
1016
1017checksum_fail:
1018 vsi->back->hw_csum_rx_error++;
1019}
1020
1021
1022
1023
1024
1025
1026
1027static inline int iavf_ptype_to_htype(u8 ptype)
1028{
1029 struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1030
1031 if (!decoded.known)
1032 return PKT_HASH_TYPE_NONE;
1033
1034 if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1035 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1036 return PKT_HASH_TYPE_L4;
1037 else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1038 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1039 return PKT_HASH_TYPE_L3;
1040 else
1041 return PKT_HASH_TYPE_L2;
1042}
1043
1044
1045
1046
1047
1048
1049
1050
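/**
 * iavf_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/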
1051static inline void iavf_rx_hash(struct iavf_ring *ring,
1052 union iavf_rx_desc *rx_desc,
1053 struct sk_buff *skb,
1054 u8 rx_ptype)
1055{
1056 u32 hash;
1057 const __le64 rss_mask =
1058 cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
1059 IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
1060
	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;
1063
1064 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1065 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1066 skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
1067 }
1068}
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081static inline
1082void iavf_process_skb_fields(struct iavf_ring *rx_ring,
1083 union iavf_rx_desc *rx_desc, struct sk_buff *skb,
1084 u8 rx_ptype)
1085{
1086 iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1087
1088 iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
1089
1090 skb_record_rx_queue(skb, rx_ring->queue_index);
1091
1092
1093 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1094}
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
1110{
1111
1112 if (eth_skb_pad(skb))
1113 return true;
1114
1115 return false;
1116}
1117
1118
1119
1120
1121
1122
1123
1124
1125static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
1126 struct iavf_rx_buffer *old_buff)
1127{
1128 struct iavf_rx_buffer *new_buff;
1129 u16 nta = rx_ring->next_to_alloc;
1130
1131 new_buff = &rx_ring->rx_bi[nta];
1132
1133
1134 nta++;
1135 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1136
1137
1138 new_buff->dma = old_buff->dma;
1139 new_buff->page = old_buff->page;
1140 new_buff->page_offset = old_buff->page_offset;
1141 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1142}
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
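/**
 * iavf_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 * @rx_buffer: buffer containing the page
 *
 * Checks that the page is still owned solely by the driver and, on
 * architectures with large pages, that unused space remains in the page.
 **/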
1171static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
1172{
1173 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1174 struct page *page = rx_buffer->page;
1175
1176
1177 if (!dev_page_is_reusable(page))
1178 return false;
1179
1180#if (PAGE_SIZE < 8192)
1181
1182 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1183 return false;
1184#else
1185#define IAVF_LAST_OFFSET \
1186 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
1187 if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
1188 return false;
1189#endif
1190
1191
1192
1193
1194
1195 if (unlikely(!pagecnt_bias)) {
1196 page_ref_add(page, USHRT_MAX);
1197 rx_buffer->pagecnt_bias = USHRT_MAX;
1198 }
1199
1200 return true;
1201}
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
1216 struct iavf_rx_buffer *rx_buffer,
1217 struct sk_buff *skb,
1218 unsigned int size)
1219{
1220#if (PAGE_SIZE < 8192)
1221 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1222#else
1223 unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
1224#endif
1225
1226 if (!size)
1227 return;
1228
1229 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1230 rx_buffer->page_offset, size, truesize);
1231
1232
1233#if (PAGE_SIZE < 8192)
1234 rx_buffer->page_offset ^= truesize;
1235#else
1236 rx_buffer->page_offset += truesize;
1237#endif
1238}
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
1249 const unsigned int size)
1250{
1251 struct iavf_rx_buffer *rx_buffer;
1252
1253 if (!size)
1254 return NULL;
1255
1256 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1257 prefetchw(rx_buffer->page);
1258
1259
1260 dma_sync_single_range_for_cpu(rx_ring->dev,
1261 rx_buffer->dma,
1262 rx_buffer->page_offset,
1263 size,
1264 DMA_FROM_DEVICE);
1265
1266
1267 rx_buffer->pagecnt_bias--;
1268
1269 return rx_buffer;
1270}
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
1283 struct iavf_rx_buffer *rx_buffer,
1284 unsigned int size)
1285{
1286 void *va;
1287#if (PAGE_SIZE < 8192)
1288 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1289#else
1290 unsigned int truesize = SKB_DATA_ALIGN(size);
1291#endif
1292 unsigned int headlen;
1293 struct sk_buff *skb;
1294
1295 if (!rx_buffer)
1296 return NULL;
1297
1298 va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1299 net_prefetch(va);
1300
1301
1302 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1303 IAVF_RX_HDR_SIZE,
1304 GFP_ATOMIC | __GFP_NOWARN);
1305 if (unlikely(!skb))
1306 return NULL;
1307
1308
1309 headlen = size;
1310 if (headlen > IAVF_RX_HDR_SIZE)
1311 headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
1312
1313
1314 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1315
1316
1317 size -= headlen;
1318 if (size) {
1319 skb_add_rx_frag(skb, 0, rx_buffer->page,
1320 rx_buffer->page_offset + headlen,
1321 size, truesize);
1322
1323
1324#if (PAGE_SIZE < 8192)
1325 rx_buffer->page_offset ^= truesize;
1326#else
1327 rx_buffer->page_offset += truesize;
1328#endif
1329 } else {
1330
1331 rx_buffer->pagecnt_bias++;
1332 }
1333
1334 return skb;
1335}
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
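/**
 * iavf_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 **/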
1346static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
1347 struct iavf_rx_buffer *rx_buffer,
1348 unsigned int size)
1349{
1350 void *va;
1351#if (PAGE_SIZE < 8192)
1352 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1353#else
1354 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1355 SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
1356#endif
1357 struct sk_buff *skb;
1358
1359 if (!rx_buffer)
1360 return NULL;
1361
1362 va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1363 net_prefetch(va);
1364
1365
1366 skb = build_skb(va - IAVF_SKB_PAD, truesize);
1367 if (unlikely(!skb))
1368 return NULL;
1369
1370
1371 skb_reserve(skb, IAVF_SKB_PAD);
1372 __skb_put(skb, size);
1373
1374
1375#if (PAGE_SIZE < 8192)
1376 rx_buffer->page_offset ^= truesize;
1377#else
1378 rx_buffer->page_offset += truesize;
1379#endif
1380
1381 return skb;
1382}
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
1393 struct iavf_rx_buffer *rx_buffer)
1394{
1395 if (!rx_buffer)
1396 return;
1397
1398 if (iavf_can_reuse_rx_page(rx_buffer)) {
1399
1400 iavf_reuse_rx_page(rx_ring, rx_buffer);
1401 rx_ring->rx_stats.page_reuse_count++;
1402 } else {
1403
1404 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1405 iavf_rx_pg_size(rx_ring),
1406 DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
1407 __page_frag_cache_drain(rx_buffer->page,
1408 rx_buffer->pagecnt_bias);
1409 }
1410
1411
1412 rx_buffer->page = NULL;
1413}
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
1427 union iavf_rx_desc *rx_desc,
1428 struct sk_buff *skb)
1429{
1430 u32 ntc = rx_ring->next_to_clean + 1;
1431
1432
1433 ntc = (ntc < rx_ring->count) ? ntc : 0;
1434 rx_ring->next_to_clean = ntc;
1435
1436 prefetch(IAVF_RX_DESC(rx_ring, ntc));
1437
1438
1439#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
1440 if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
1441 return false;
1442
1443 rx_ring->rx_stats.non_eop_descs++;
1444
1445 return true;
1446}
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
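/**
 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/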
1460static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
1461{
1462 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1463 struct sk_buff *skb = rx_ring->skb;
1464 u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
1465 bool failure = false;
1466
1467 while (likely(total_rx_packets < (unsigned int)budget)) {
1468 struct iavf_rx_buffer *rx_buffer;
1469 union iavf_rx_desc *rx_desc;
1470 unsigned int size;
1471 u16 vlan_tag;
1472 u8 rx_ptype;
1473 u64 qword;
1474
1475
1476 if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
1477 failure = failure ||
1478 iavf_alloc_rx_buffers(rx_ring, cleaned_count);
1479 cleaned_count = 0;
1480 }
1481
1482 rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1483
1484
1485
1486
1487
1488
1489 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1490
1491
1492
1493
1494
1495 dma_rmb();
1496#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
1497 if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
1498 break;
1499
1500 size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1501 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1502
1503 iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
1504 rx_buffer = iavf_get_rx_buffer(rx_ring, size);
1505
1506
1507 if (skb)
1508 iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1509 else if (ring_uses_build_skb(rx_ring))
1510 skb = iavf_build_skb(rx_ring, rx_buffer, size);
1511 else
1512 skb = iavf_construct_skb(rx_ring, rx_buffer, size);
1513
1514
1515 if (!skb) {
1516 rx_ring->rx_stats.alloc_buff_failed++;
1517 if (rx_buffer)
1518 rx_buffer->pagecnt_bias++;
1519 break;
1520 }
1521
1522 iavf_put_rx_buffer(rx_ring, rx_buffer);
1523 cleaned_count++;
1524
1525 if (iavf_is_non_eop(rx_ring, rx_desc, skb))
1526 continue;
1527
1528
1529
1530
1531
1532
1533 if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
1534 dev_kfree_skb_any(skb);
1535 skb = NULL;
1536 continue;
1537 }
1538
1539 if (iavf_cleanup_headers(rx_ring, skb)) {
1540 skb = NULL;
1541 continue;
1542 }
1543
1544
1545 total_rx_bytes += skb->len;
1546
1547 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1548 rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
1549 IAVF_RXD_QW1_PTYPE_SHIFT;
1550
1551
1552 iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1553
1554
1555 vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1556 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1557
1558 iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
1559 iavf_receive_skb(rx_ring, skb, vlan_tag);
1560 skb = NULL;
1561
1562
1563 total_rx_packets++;
1564 }
1565
1566 rx_ring->skb = skb;
1567
1568 u64_stats_update_begin(&rx_ring->syncp);
1569 rx_ring->stats.packets += total_rx_packets;
1570 rx_ring->stats.bytes += total_rx_bytes;
1571 u64_stats_update_end(&rx_ring->syncp);
1572 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1573 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1574
1575
1576 return failure ? budget : (int)total_rx_packets;
1577}
1578
1579static inline u32 iavf_buildreg_itr(const int type, u16 itr)
1580{
1581 u32 val;
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598 itr &= IAVF_ITR_MASK;
1599
1600 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1601 (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1602 (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
1603
1604 return val;
1605}
1606
1607
1608#define INTREG IAVF_VFINT_DYN_CTLN1
1609
1610
1611
1612
1613
1614
1615
1616
1617#define ITR_COUNTDOWN_START 3
1618
1619
1620
1621
1622
1623
1624
1625static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
1626 struct iavf_q_vector *q_vector)
1627{
1628 struct iavf_hw *hw = &vsi->back->hw;
1629 u32 intval;
1630
1631
1632 iavf_update_itr(q_vector, &q_vector->tx);
1633 iavf_update_itr(q_vector, &q_vector->rx);
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
1644
1645 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1646 q_vector->rx.target_itr);
1647 q_vector->rx.current_itr = q_vector->rx.target_itr;
1648 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1649 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
1650 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
1651 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
1652
1653
1654
1655 intval = iavf_buildreg_itr(IAVF_TX_ITR,
1656 q_vector->tx.target_itr);
1657 q_vector->tx.current_itr = q_vector->tx.target_itr;
1658 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1659 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
1660
1661 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1662 q_vector->rx.target_itr);
1663 q_vector->rx.current_itr = q_vector->rx.target_itr;
1664 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1665 } else {
1666
1667 intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
1668 if (q_vector->itr_countdown)
1669 q_vector->itr_countdown--;
1670 }
1671
1672 if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
1673 wr32(hw, INTREG(q_vector->reg_idx), intval);
1674}
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
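/**
 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/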
1685int iavf_napi_poll(struct napi_struct *napi, int budget)
1686{
1687 struct iavf_q_vector *q_vector =
1688 container_of(napi, struct iavf_q_vector, napi);
1689 struct iavf_vsi *vsi = q_vector->vsi;
1690 struct iavf_ring *ring;
1691 bool clean_complete = true;
1692 bool arm_wb = false;
1693 int budget_per_ring;
1694 int work_done = 0;
1695
1696 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
1697 napi_complete(napi);
1698 return 0;
1699 }
1700
1701
1702
1703
1704 iavf_for_each_ring(ring, q_vector->tx) {
1705 if (!iavf_clean_tx_irq(vsi, ring, budget)) {
1706 clean_complete = false;
1707 continue;
1708 }
1709 arm_wb |= ring->arm_wb;
1710 ring->arm_wb = false;
1711 }
1712
1713
1714 if (budget <= 0)
1715 goto tx_only;
1716
1717
1718
1719
1720 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1721
1722 iavf_for_each_ring(ring, q_vector->rx) {
1723 int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
1724
1725 work_done += cleaned;
1726
1727 if (cleaned >= budget_per_ring)
1728 clean_complete = false;
1729 }
1730
1731
1732 if (!clean_complete) {
1733 int cpu_id = smp_processor_id();
1734
1735
1736
1737
1738
1739
1740
1741
1742 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
1743
1744 napi_complete_done(napi, work_done);
1745
1746
1747 iavf_force_wb(vsi, q_vector);
1748
1749
1750 return budget - 1;
1751 }
1752tx_only:
1753 if (arm_wb) {
1754 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1755 iavf_enable_wb_on_itr(vsi, q_vector);
1756 }
1757 return budget;
1758 }
1759
	if (q_vector->tx.ring[0].flags & IAVF_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;
1762
1763
1764
1765
1766 if (likely(napi_complete_done(napi, work_done)))
1767 iavf_update_enable_itr(vsi, q_vector);
1768
1769 return min(work_done, budget - 1);
1770}
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1785 struct iavf_ring *tx_ring,
1786 u32 *flags)
1787{
1788 __be16 protocol = skb->protocol;
1789 u32 tx_flags = 0;
1790
1791 if (protocol == htons(ETH_P_8021Q) &&
1792 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1793
1794
1795
1796
1797
1798
1799
1800 skb->protocol = vlan_get_protocol(skb);
1801 goto out;
1802 }
1803
1804
1805 if (skb_vlan_tag_present(skb)) {
1806 tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1807 tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1808
1809 } else if (protocol == htons(ETH_P_8021Q)) {
1810 struct vlan_hdr *vhdr, _vhdr;
1811
1812 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1813 if (!vhdr)
1814 return -EINVAL;
1815
1816 protocol = vhdr->h_vlan_encapsulated_proto;
1817 tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
1818 tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
1819 }
1820
1821out:
1822 *flags = tx_flags;
1823 return 0;
1824}
1825
1826
1827
1828
1829
1830
1831
1832
1833
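/**
 * iavf_tso - set up the tso context descriptor
 * @first:    pointer to first Tx buffer for xmit
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/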
1834static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
1835 u64 *cd_type_cmd_tso_mss)
1836{
1837 struct sk_buff *skb = first->skb;
1838 u64 cd_cmd, cd_tso_len, cd_mss;
1839 union {
1840 struct iphdr *v4;
1841 struct ipv6hdr *v6;
1842 unsigned char *hdr;
1843 } ip;
1844 union {
1845 struct tcphdr *tcp;
1846 struct udphdr *udp;
1847 unsigned char *hdr;
1848 } l4;
1849 u32 paylen, l4_offset;
1850 u16 gso_segs, gso_size;
1851 int err;
1852
1853 if (skb->ip_summed != CHECKSUM_PARTIAL)
1854 return 0;
1855
1856 if (!skb_is_gso(skb))
1857 return 0;
1858
1859 err = skb_cow_head(skb, 0);
1860 if (err < 0)
1861 return err;
1862
1863 ip.hdr = skb_network_header(skb);
1864 l4.hdr = skb_transport_header(skb);
1865
1866
1867 if (ip.v4->version == 4) {
1868 ip.v4->tot_len = 0;
1869 ip.v4->check = 0;
1870 } else {
1871 ip.v6->payload_len = 0;
1872 }
1873
1874 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1875 SKB_GSO_GRE_CSUM |
1876 SKB_GSO_IPXIP4 |
1877 SKB_GSO_IPXIP6 |
1878 SKB_GSO_UDP_TUNNEL |
1879 SKB_GSO_UDP_TUNNEL_CSUM)) {
1880 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1881 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1882 l4.udp->len = 0;
1883
1884
1885 l4_offset = l4.hdr - skb->data;
1886
1887
1888 paylen = skb->len - l4_offset;
1889 csum_replace_by_diff(&l4.udp->check,
1890 (__force __wsum)htonl(paylen));
1891 }
1892
1893
1894 ip.hdr = skb_inner_network_header(skb);
1895 l4.hdr = skb_inner_transport_header(skb);
1896
1897
1898 if (ip.v4->version == 4) {
1899 ip.v4->tot_len = 0;
1900 ip.v4->check = 0;
1901 } else {
1902 ip.v6->payload_len = 0;
1903 }
1904 }
1905
1906
1907 l4_offset = l4.hdr - skb->data;
1908
1909 paylen = skb->len - l4_offset;
1910
1911 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1912 csum_replace_by_diff(&l4.udp->check,
1913 (__force __wsum)htonl(paylen));
1914
1915 *hdr_len = (u8)sizeof(l4.udp) + l4_offset;
1916 } else {
1917 csum_replace_by_diff(&l4.tcp->check,
1918 (__force __wsum)htonl(paylen));
1919
1920 *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset);
1921 }
1922
1923
1924 gso_size = skb_shinfo(skb)->gso_size;
1925 gso_segs = skb_shinfo(skb)->gso_segs;
1926
1927
1928 first->gso_segs = gso_segs;
1929 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1930
1931
1932 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1933 cd_tso_len = skb->len - *hdr_len;
1934 cd_mss = gso_size;
1935 *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1936 (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1937 (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1938 return 1;
1939}
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
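/**
 * iavf_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/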
1950static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1951 u32 *td_cmd, u32 *td_offset,
1952 struct iavf_ring *tx_ring,
1953 u32 *cd_tunneling)
1954{
1955 union {
1956 struct iphdr *v4;
1957 struct ipv6hdr *v6;
1958 unsigned char *hdr;
1959 } ip;
1960 union {
1961 struct tcphdr *tcp;
1962 struct udphdr *udp;
1963 unsigned char *hdr;
1964 } l4;
1965 unsigned char *exthdr;
1966 u32 offset, cmd = 0;
1967 __be16 frag_off;
1968 u8 l4_proto = 0;
1969
1970 if (skb->ip_summed != CHECKSUM_PARTIAL)
1971 return 0;
1972
1973 ip.hdr = skb_network_header(skb);
1974 l4.hdr = skb_transport_header(skb);
1975
1976
1977 offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1978
1979 if (skb->encapsulation) {
1980 u32 tunnel = 0;
1981
1982 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
1983 tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
1984 IAVF_TX_CTX_EXT_IP_IPV4 :
1985 IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1986
1987 l4_proto = ip.v4->protocol;
1988 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
1989 tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
1990
1991 exthdr = ip.hdr + sizeof(*ip.v6);
1992 l4_proto = ip.v6->nexthdr;
1993 if (l4.hdr != exthdr)
1994 ipv6_skip_exthdr(skb, exthdr - skb->data,
1995 &l4_proto, &frag_off);
1996 }
1997
1998
1999 switch (l4_proto) {
2000 case IPPROTO_UDP:
2001 tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
2002 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2003 break;
2004 case IPPROTO_GRE:
2005 tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
2006 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2007 break;
2008 case IPPROTO_IPIP:
2009 case IPPROTO_IPV6:
2010 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2011 l4.hdr = skb_inner_network_header(skb);
2012 break;
2013 default:
2014 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2015 return -1;
2016
2017 skb_checksum_help(skb);
2018 return 0;
2019 }
2020
2021
2022 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2023 IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2024
2025
2026 ip.hdr = skb_inner_network_header(skb);
2027
2028
2029 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2030 IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
2031
2032
2033 if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
2034 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2035 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2036 tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
2037
2038
2039 *cd_tunneling |= tunnel;
2040
2041
2042 l4.hdr = skb_inner_transport_header(skb);
2043 l4_proto = 0;
2044
2045
2046 *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
2047 if (ip.v4->version == 4)
2048 *tx_flags |= IAVF_TX_FLAGS_IPV4;
2049 if (ip.v6->version == 6)
2050 *tx_flags |= IAVF_TX_FLAGS_IPV6;
2051 }
2052
2053
2054 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
2055 l4_proto = ip.v4->protocol;
2056
2057
2058
2059 cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2060 IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
2061 IAVF_TX_DESC_CMD_IIPT_IPV4;
2062 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2063 cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2064
2065 exthdr = ip.hdr + sizeof(*ip.v6);
2066 l4_proto = ip.v6->nexthdr;
2067 if (l4.hdr != exthdr)
2068 ipv6_skip_exthdr(skb, exthdr - skb->data,
2069 &l4_proto, &frag_off);
2070 }
2071
2072
2073 offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2074
2075
2076 switch (l4_proto) {
2077 case IPPROTO_TCP:
2078
2079 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2080 offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2081 break;
2082 case IPPROTO_SCTP:
2083
2084 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2085 offset |= (sizeof(struct sctphdr) >> 2) <<
2086 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2087 break;
2088 case IPPROTO_UDP:
2089
2090 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2091 offset |= (sizeof(struct udphdr) >> 2) <<
2092 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2093 break;
2094 default:
2095 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2096 return -1;
2097 skb_checksum_help(skb);
2098 return 0;
2099 }
2100
2101 *td_cmd |= cmd;
2102 *td_offset |= offset;
2103
2104 return 1;
2105}
2106
2107
2108
2109
2110
2111
2112
2113
2114static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
2115 const u64 cd_type_cmd_tso_mss,
2116 const u32 cd_tunneling, const u32 cd_l2tag2)
2117{
2118 struct iavf_tx_context_desc *context_desc;
2119 int i = tx_ring->next_to_use;
2120
2121 if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
2122 !cd_tunneling && !cd_l2tag2)
2123 return;
2124
2125
2126 context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
2127
2128 i++;
2129 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2130
2131
2132 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2133 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2134 context_desc->rsvd = cpu_to_le16(0);
2135 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2136}
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
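/**
 * __iavf_chk_linearize - Check if there are more than IAVF_MAX_BUFFER_TXD
 * buffers per packet
 * @skb: send buffer
 *
 * The hardware can only DMA a limited number of buffers per packet on the
 * wire, so figure out the cases where the skb needs to be linearized.
 **/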
2151bool __iavf_chk_linearize(struct sk_buff *skb)
2152{
2153 const skb_frag_t *frag, *stale;
2154 int nr_frags, sum;
2155
2156
2157 nr_frags = skb_shinfo(skb)->nr_frags;
2158 if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
2159 return false;
2160
2161
2162
2163
2164 nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
2165 frag = &skb_shinfo(skb)->frags[0];
2166
2167
2168
2169
2170
2171
2172
2173 sum = 1 - skb_shinfo(skb)->gso_size;
2174
2175
2176 sum += skb_frag_size(frag++);
2177 sum += skb_frag_size(frag++);
2178 sum += skb_frag_size(frag++);
2179 sum += skb_frag_size(frag++);
2180 sum += skb_frag_size(frag++);
2181
2182
2183
2184
2185 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2186 int stale_size = skb_frag_size(stale);
2187
2188 sum += skb_frag_size(frag++);
2189
2190
2191
2192
2193
2194
2195
2196 if (stale_size > IAVF_MAX_DATA_PER_TXD) {
2197 int align_pad = -(skb_frag_off(stale)) &
2198 (IAVF_MAX_READ_REQ_SIZE - 1);
2199
2200 sum -= align_pad;
2201 stale_size -= align_pad;
2202
2203 do {
2204 sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2205 stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2206 } while (stale_size > IAVF_MAX_DATA_PER_TXD);
2207 }
2208
2209
2210 if (sum < 0)
2211 return true;
2212
2213 if (!nr_frags--)
2214 break;
2215
2216 sum -= stale_size;
2217 }
2218
2219 return false;
2220}
2221
2222
2223
2224
2225
2226
2227
2228
2229int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
2230{
2231 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2232
2233 smp_mb();
2234
2235
2236 if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
2237 return -EBUSY;
2238
2239
2240 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2241 ++tx_ring->tx_stats.restart_queue;
2242 return 0;
2243}
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
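/**
 * iavf_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/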
2255static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
2256 struct iavf_tx_buffer *first, u32 tx_flags,
2257 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2258{
2259 unsigned int data_len = skb->data_len;
2260 unsigned int size = skb_headlen(skb);
2261 skb_frag_t *frag;
2262 struct iavf_tx_buffer *tx_bi;
2263 struct iavf_tx_desc *tx_desc;
2264 u16 i = tx_ring->next_to_use;
2265 u32 td_tag = 0;
2266 dma_addr_t dma;
2267
2268 if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
2269 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2270 td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
2271 IAVF_TX_FLAGS_VLAN_SHIFT;
2272 }
2273
2274 first->tx_flags = tx_flags;
2275
2276 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2277
2278 tx_desc = IAVF_TX_DESC(tx_ring, i);
2279 tx_bi = first;
2280
2281 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2282 unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2283
2284 if (dma_mapping_error(tx_ring->dev, dma))
2285 goto dma_error;
2286
2287
2288 dma_unmap_len_set(tx_bi, len, size);
2289 dma_unmap_addr_set(tx_bi, dma, dma);
2290
2291
2292 max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
2293 tx_desc->buffer_addr = cpu_to_le64(dma);
2294
2295 while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
2296 tx_desc->cmd_type_offset_bsz =
2297 build_ctob(td_cmd, td_offset,
2298 max_data, td_tag);
2299
2300 tx_desc++;
2301 i++;
2302
2303 if (i == tx_ring->count) {
2304 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2305 i = 0;
2306 }
2307
2308 dma += max_data;
2309 size -= max_data;
2310
2311 max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2312 tx_desc->buffer_addr = cpu_to_le64(dma);
2313 }
2314
2315 if (likely(!data_len))
2316 break;
2317
2318 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2319 size, td_tag);
2320
2321 tx_desc++;
2322 i++;
2323
2324 if (i == tx_ring->count) {
2325 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2326 i = 0;
2327 }
2328
2329 size = skb_frag_size(frag);
2330 data_len -= size;
2331
2332 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2333 DMA_TO_DEVICE);
2334
2335 tx_bi = &tx_ring->tx_bi[i];
2336 }
2337
2338 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2339
2340 i++;
2341 if (i == tx_ring->count)
2342 i = 0;
2343
2344 tx_ring->next_to_use = i;
2345
2346 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2347
2348
2349 td_cmd |= IAVF_TXD_CMD;
2350 tx_desc->cmd_type_offset_bsz =
2351 build_ctob(td_cmd, td_offset, size, td_tag);
2352
2353 skb_tx_timestamp(skb);
2354
2355
2356
2357
2358
2359
2360
2361 wmb();
2362
2363
2364 first->next_to_watch = tx_desc;
2365
2366
2367 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
2368 writel(i, tx_ring->tail);
2369 }
2370
2371 return;
2372
2373dma_error:
2374 dev_info(tx_ring->dev, "TX DMA map failed\n");
2375
2376
2377 for (;;) {
2378 tx_bi = &tx_ring->tx_bi[i];
2379 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
2380 if (tx_bi == first)
2381 break;
2382 if (i == 0)
2383 i = tx_ring->count;
2384 i--;
2385 }
2386
2387 tx_ring->next_to_use = i;
2388}
2389
2390
2391
2392
2393
2394
2395
2396
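/**
 * iavf_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/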
2397static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
2398 struct iavf_ring *tx_ring)
2399{
2400 u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
2401 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2402 struct iavf_tx_buffer *first;
2403 u32 td_offset = 0;
2404 u32 tx_flags = 0;
2405 __be16 protocol;
2406 u32 td_cmd = 0;
2407 u8 hdr_len = 0;
2408 int tso, count;
2409
2410
2411 prefetch(skb->data);
2412
2413 iavf_trace(xmit_frame_ring, skb, tx_ring);
2414
2415 count = iavf_xmit_descriptor_count(skb);
2416 if (iavf_chk_linearize(skb, count)) {
2417 if (__skb_linearize(skb)) {
2418 dev_kfree_skb_any(skb);
2419 return NETDEV_TX_OK;
2420 }
2421 count = iavf_txd_use_count(skb->len);
2422 tx_ring->tx_stats.tx_linearize++;
2423 }
2424
2425
2426
2427
2428
2429
2430
2431 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2432 tx_ring->tx_stats.tx_busy++;
2433 return NETDEV_TX_BUSY;
2434 }
2435
2436
2437 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2438 first->skb = skb;
2439 first->bytecount = skb->len;
2440 first->gso_segs = 1;
2441
2442
2443 if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2444 goto out_drop;
2445
2446
2447 protocol = vlan_get_protocol(skb);
2448
2449
2450 if (protocol == htons(ETH_P_IP))
2451 tx_flags |= IAVF_TX_FLAGS_IPV4;
2452 else if (protocol == htons(ETH_P_IPV6))
2453 tx_flags |= IAVF_TX_FLAGS_IPV6;
2454
2455 tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
2456
2457 if (tso < 0)
2458 goto out_drop;
2459 else if (tso)
2460 tx_flags |= IAVF_TX_FLAGS_TSO;
2461
2462
2463 tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2464 tx_ring, &cd_tunneling);
2465 if (tso < 0)
2466 goto out_drop;
2467
2468
2469 td_cmd |= IAVF_TX_DESC_CMD_ICRC;
2470
2471 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2472 cd_tunneling, cd_l2tag2);
2473
2474 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2475 td_cmd, td_offset);
2476
2477 return NETDEV_TX_OK;
2478
2479out_drop:
2480 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
2481 dev_kfree_skb_any(first->skb);
2482 first->skb = NULL;
2483 return NETDEV_TX_OK;
2484}
2485
2486
2487
2488
2489
2490
2491
2492
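/**
 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/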
2493netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2494{
2495 struct iavf_adapter *adapter = netdev_priv(netdev);
2496 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
2497
2498
2499
2500
2501 if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
2502 if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
2503 return NETDEV_TX_OK;
2504 skb->len = IAVF_MIN_TX_LEN;
2505 skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
2506 }
2507
2508 return iavf_xmit_frame_ring(skb, tx_ring);
2509}
2510