// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>

#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
                                u32 td_tag)
{
        return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
                           ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
                           ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
                           ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
                           ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}

#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
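
/**
 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/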
static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
                                            struct iavf_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
                        kfree(tx_buffer->raw_buf);
                else
                        dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }

        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
}
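
/**
 * iavf_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/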
void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{
        unsigned long bi_size;
        u16 i;

        /* ring already cleared, nothing to do */
        if (!tx_ring->tx_bi)
                return;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++)
                iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

        bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_bi, 0, bi_size);

        /* Zero out the descriptor ring */
        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        if (!tx_ring->netdev)
                return;

        /* cleanup Tx queue statistics */
        netdev_tx_reset_queue(txring_txq(tx_ring));
}
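
/**
 * iavf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/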
void iavf_free_tx_resources(struct iavf_ring *tx_ring)
{
        iavf_clean_tx_ring(tx_ring);
        kfree(tx_ring->tx_bi);
        tx_ring->tx_bi = NULL;

        if (tx_ring->desc) {
                dma_free_coherent(tx_ring->dev, tx_ring->size,
                                  tx_ring->desc, tx_ring->dma);
                tx_ring->desc = NULL;
        }
}
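
/**
 * iavf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or in HW
 **/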
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
        u32 head, tail;

        head = ring->next_to_clean;
        tail = readl(ring->tail);

        if (head != tail)
                return (head < tail) ?
                        tail - head : (tail + ring->count - head);

        return 0;
}
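
/**
 * iavf_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function checks each of
 * those TX queues and, if it looks hung, triggers recovery by issuing a
 * SW interrupt.
 **/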
134void iavf_detect_recover_hung(struct iavf_vsi *vsi)
135{
136 struct iavf_ring *tx_ring = NULL;
137 struct net_device *netdev;
138 unsigned int i;
139 int packets;
140
141 if (!vsi)
142 return;
143
144 if (test_bit(__IAVF_VSI_DOWN, vsi->state))
145 return;
146
147 netdev = vsi->netdev;
148 if (!netdev)
149 return;
150
151 if (!netif_carrier_ok(netdev))
152 return;
153
154 for (i = 0; i < vsi->back->num_active_queues; i++) {
155 tx_ring = &vsi->back->tx_rings[i];
156 if (tx_ring && tx_ring->desc) {
157
158
159
160
161
162
163
164 packets = tx_ring->stats.packets & INT_MAX;
165 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
166 iavf_force_wb(vsi, tx_ring->q_vector);
167 continue;
168 }
169
170
171
172
173 smp_rmb();
174 tx_ring->tx_stats.prev_pkt_ctr =
175 iavf_get_tx_pending(tx_ring, true) ? packets : -1;
176 }
177 }
178}

#define WB_STRIDE 4
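
/**
 * iavf_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/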
190static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
191 struct iavf_ring *tx_ring, int napi_budget)
192{
193 int i = tx_ring->next_to_clean;
194 struct iavf_tx_buffer *tx_buf;
195 struct iavf_tx_desc *tx_desc;
196 unsigned int total_bytes = 0, total_packets = 0;
197 unsigned int budget = vsi->work_limit;
198
199 tx_buf = &tx_ring->tx_bi[i];
200 tx_desc = IAVF_TX_DESC(tx_ring, i);
201 i -= tx_ring->count;
202
203 do {
204 struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
205
206
207 if (!eop_desc)
208 break;
209
210
211 smp_rmb();
212
213 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
214
215 if (!(eop_desc->cmd_type_offset_bsz &
216 cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
217 break;
218
219
220 tx_buf->next_to_watch = NULL;
221
222
223 total_bytes += tx_buf->bytecount;
224 total_packets += tx_buf->gso_segs;
225
226
227 napi_consume_skb(tx_buf->skb, napi_budget);
228
229
230 dma_unmap_single(tx_ring->dev,
231 dma_unmap_addr(tx_buf, dma),
232 dma_unmap_len(tx_buf, len),
233 DMA_TO_DEVICE);
234
235
236 tx_buf->skb = NULL;
237 dma_unmap_len_set(tx_buf, len, 0);
238
239
240 while (tx_desc != eop_desc) {
241 iavf_trace(clean_tx_irq_unmap,
242 tx_ring, tx_desc, tx_buf);
243
244 tx_buf++;
245 tx_desc++;
246 i++;
247 if (unlikely(!i)) {
248 i -= tx_ring->count;
249 tx_buf = tx_ring->tx_bi;
250 tx_desc = IAVF_TX_DESC(tx_ring, 0);
251 }
252
253
254 if (dma_unmap_len(tx_buf, len)) {
255 dma_unmap_page(tx_ring->dev,
256 dma_unmap_addr(tx_buf, dma),
257 dma_unmap_len(tx_buf, len),
258 DMA_TO_DEVICE);
259 dma_unmap_len_set(tx_buf, len, 0);
260 }
261 }
262
263
264 tx_buf++;
265 tx_desc++;
266 i++;
267 if (unlikely(!i)) {
268 i -= tx_ring->count;
269 tx_buf = tx_ring->tx_bi;
270 tx_desc = IAVF_TX_DESC(tx_ring, 0);
271 }
272
273 prefetch(tx_desc);
274
275
276 budget--;
277 } while (likely(budget));
278
279 i += tx_ring->count;
280 tx_ring->next_to_clean = i;
281 u64_stats_update_begin(&tx_ring->syncp);
282 tx_ring->stats.bytes += total_bytes;
283 tx_ring->stats.packets += total_packets;
284 u64_stats_update_end(&tx_ring->syncp);
285 tx_ring->q_vector->tx.total_bytes += total_bytes;
286 tx_ring->q_vector->tx.total_packets += total_packets;
287
288 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
289
290
291
292
293
294 unsigned int j = iavf_get_tx_pending(tx_ring, false);
295
296 if (budget &&
297 ((j / WB_STRIDE) == 0) && (j > 0) &&
298 !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
299 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
300 tx_ring->arm_wb = true;
301 }
302
303
304 netdev_tx_completed_queue(txring_txq(tx_ring),
305 total_packets, total_bytes);
306
307#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
308 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
309 (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
310
311
312
313 smp_mb();
314 if (__netif_subqueue_stopped(tx_ring->netdev,
315 tx_ring->queue_index) &&
316 !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
317 netif_wake_subqueue(tx_ring->netdev,
318 tx_ring->queue_index);
319 ++tx_ring->tx_stats.restart_queue;
320 }
321 }
322
323 return !!budget;
324}
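
/**
 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 **/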
332static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
333 struct iavf_q_vector *q_vector)
334{
335 u16 flags = q_vector->tx.ring[0].flags;
336 u32 val;
337
338 if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
339 return;
340
341 if (q_vector->arm_wb_state)
342 return;
343
344 val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
345 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK;
346
347 wr32(&vsi->back->hw,
348 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
349 q_vector->arm_wb_state = true;
350}
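
/**
 * iavf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/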
358void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
359{
360 u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
361 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
362 IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
363 IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
364 ;
365
366 wr32(&vsi->back->hw,
367 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
368 val);
369}
370
371static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
372 struct iavf_ring_container *rc)
373{
374 return &q_vector->rx == rc;
375}
376
377static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
378{
379 unsigned int divisor;
380
381 switch (q_vector->adapter->link_speed) {
382 case VIRTCHNL_LINK_SPEED_40GB:
383 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
384 break;
385 case VIRTCHNL_LINK_SPEED_25GB:
386 case VIRTCHNL_LINK_SPEED_20GB:
387 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
388 break;
389 default:
390 case VIRTCHNL_LINK_SPEED_10GB:
391 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
392 break;
393 case VIRTCHNL_LINK_SPEED_1GB:
394 case VIRTCHNL_LINK_SPEED_100MB:
395 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
396 break;
397 }
398
399 return divisor;
400}
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415static void iavf_update_itr(struct iavf_q_vector *q_vector,
416 struct iavf_ring_container *rc)
417{
418 unsigned int avg_wire_size, packets, bytes, itr;
419 unsigned long next_update = jiffies;
420
421
422
423
424 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
425 return;
426
427
428
429
430 itr = iavf_container_is_rx(q_vector, rc) ?
431 IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
432 IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
433
434
435
436
437
438
439 if (time_after(next_update, rc->next_update))
440 goto clear_counts;
441
442
443
444
445
446
447
448 if (q_vector->itr_countdown) {
449 itr = rc->target_itr;
450 goto clear_counts;
451 }
452
453 packets = rc->total_packets;
454 bytes = rc->total_bytes;
455
456 if (iavf_container_is_rx(q_vector, rc)) {
457
458
459
460
461
462 if (packets && packets < 4 && bytes < 9000 &&
463 (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
464 itr = IAVF_ITR_ADAPTIVE_LATENCY;
465 goto adjust_by_size;
466 }
467 } else if (packets < 4) {
468
469
470
471
472
473 if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
474 (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
475 IAVF_ITR_ADAPTIVE_MAX_USECS)
476 goto clear_counts;
477 } else if (packets > 32) {
478
479
480
481 rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
482 }
483
484
485
486
487
488
489
490
491
492 if (packets < 56) {
493 itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
494 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
495 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
496 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
497 }
498 goto clear_counts;
499 }
500
501 if (packets <= 256) {
502 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
503 itr &= IAVF_ITR_MASK;
504
505
506
507
508
509 if (packets <= 112)
510 goto clear_counts;
511
512
513
514
515
516
517 itr /= 2;
518 itr &= IAVF_ITR_MASK;
519 if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
520 itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
521
522 goto clear_counts;
523 }
524
525
526
527
528
529
530
531 itr = IAVF_ITR_ADAPTIVE_BULK;
532
533adjust_by_size:
534
535
536
537
538
539 avg_wire_size = bytes / packets;
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556 if (avg_wire_size <= 60) {
557
558 avg_wire_size = 4096;
559 } else if (avg_wire_size <= 380) {
560
561 avg_wire_size *= 40;
562 avg_wire_size += 1696;
563 } else if (avg_wire_size <= 1084) {
564
565 avg_wire_size *= 15;
566 avg_wire_size += 11452;
567 } else if (avg_wire_size <= 1980) {
568
569 avg_wire_size *= 5;
570 avg_wire_size += 22420;
571 } else {
572
573 avg_wire_size = 32256;
574 }
575
576
577
578
579 if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
580 avg_wire_size /= 2;
581
582
583
584
585
586
587
588
589 itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
590 IAVF_ITR_ADAPTIVE_MIN_INC;
591
592 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
593 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
594 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
595 }
596
597clear_counts:
598
599 rc->target_itr = itr;
600
601
602 rc->next_update = next_update + 1;
603
604 rc->total_bytes = 0;
605 rc->total_packets = 0;
606}
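
/**
 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/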
614int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
615{
616 struct device *dev = tx_ring->dev;
617 int bi_size;
618
619 if (!dev)
620 return -ENOMEM;
621
622
623 WARN_ON(tx_ring->tx_bi);
624 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
625 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
626 if (!tx_ring->tx_bi)
627 goto err;
628
629
630 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
631 tx_ring->size = ALIGN(tx_ring->size, 4096);
632 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
633 &tx_ring->dma, GFP_KERNEL);
634 if (!tx_ring->desc) {
635 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
636 tx_ring->size);
637 goto err;
638 }
639
640 tx_ring->next_to_use = 0;
641 tx_ring->next_to_clean = 0;
642 tx_ring->tx_stats.prev_pkt_ctr = -1;
643 return 0;
644
645err:
646 kfree(tx_ring->tx_bi);
647 tx_ring->tx_bi = NULL;
648 return -ENOMEM;
649}
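
/**
 * iavf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/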
655void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
656{
657 unsigned long bi_size;
658 u16 i;
659
660
661 if (!rx_ring->rx_bi)
662 return;
663
664 if (rx_ring->skb) {
665 dev_kfree_skb(rx_ring->skb);
666 rx_ring->skb = NULL;
667 }
668
669
670 for (i = 0; i < rx_ring->count; i++) {
671 struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
672
673 if (!rx_bi->page)
674 continue;
675
676
677
678
679 dma_sync_single_range_for_cpu(rx_ring->dev,
680 rx_bi->dma,
681 rx_bi->page_offset,
682 rx_ring->rx_buf_len,
683 DMA_FROM_DEVICE);
684
685
686 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
687 iavf_rx_pg_size(rx_ring),
688 DMA_FROM_DEVICE,
689 IAVF_RX_DMA_ATTR);
690
691 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
692
693 rx_bi->page = NULL;
694 rx_bi->page_offset = 0;
695 }
696
697 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
698 memset(rx_ring->rx_bi, 0, bi_size);
699
700
701 memset(rx_ring->desc, 0, rx_ring->size);
702
703 rx_ring->next_to_alloc = 0;
704 rx_ring->next_to_clean = 0;
705 rx_ring->next_to_use = 0;
706}
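
/**
 * iavf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/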
714void iavf_free_rx_resources(struct iavf_ring *rx_ring)
715{
716 iavf_clean_rx_ring(rx_ring);
717 kfree(rx_ring->rx_bi);
718 rx_ring->rx_bi = NULL;
719
720 if (rx_ring->desc) {
721 dma_free_coherent(rx_ring->dev, rx_ring->size,
722 rx_ring->desc, rx_ring->dma);
723 rx_ring->desc = NULL;
724 }
725}
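
/**
 * iavf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/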
733int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
734{
735 struct device *dev = rx_ring->dev;
736 int bi_size;
737
738
739 WARN_ON(rx_ring->rx_bi);
740 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
741 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
742 if (!rx_ring->rx_bi)
743 goto err;
744
745 u64_stats_init(&rx_ring->syncp);
746
747
748 rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
749 rx_ring->size = ALIGN(rx_ring->size, 4096);
750 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
751 &rx_ring->dma, GFP_KERNEL);
752
753 if (!rx_ring->desc) {
754 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
755 rx_ring->size);
756 goto err;
757 }
758
759 rx_ring->next_to_alloc = 0;
760 rx_ring->next_to_clean = 0;
761 rx_ring->next_to_use = 0;
762
763 return 0;
764err:
765 kfree(rx_ring->rx_bi);
766 rx_ring->rx_bi = NULL;
767 return -ENOMEM;
768}
769
770
771
772
773
774
775static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
776{
777 rx_ring->next_to_use = val;
778
779
780 rx_ring->next_to_alloc = val;
781
782
783
784
785
786
787 wmb();
788 writel(val, rx_ring->tail);
789}
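
/**
 * iavf_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 **/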
static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{
        return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
}
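
/**
 * iavf_alloc_mapped_page - recycle or make a new mapped page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or reused.
 **/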
810static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
811 struct iavf_rx_buffer *bi)
812{
813 struct page *page = bi->page;
814 dma_addr_t dma;
815
816
817 if (likely(page)) {
818 rx_ring->rx_stats.page_reuse_count++;
819 return true;
820 }
821
822
823 page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
824 if (unlikely(!page)) {
825 rx_ring->rx_stats.alloc_page_failed++;
826 return false;
827 }
828
829
830 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
831 iavf_rx_pg_size(rx_ring),
832 DMA_FROM_DEVICE,
833 IAVF_RX_DMA_ATTR);
834
835
836
837
838 if (dma_mapping_error(rx_ring->dev, dma)) {
839 __free_pages(page, iavf_rx_pg_order(rx_ring));
840 rx_ring->rx_stats.alloc_page_failed++;
841 return false;
842 }
843
844 bi->dma = dma;
845 bi->page = page;
846 bi->page_offset = iavf_rx_offset(rx_ring);
847
848
849 bi->pagecnt_bias = 1;
850
851 return true;
852}
853
854
855
856
857
858
859
860static void iavf_receive_skb(struct iavf_ring *rx_ring,
861 struct sk_buff *skb, u16 vlan_tag)
862{
863 struct iavf_q_vector *q_vector = rx_ring->q_vector;
864
865 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
866 (vlan_tag & VLAN_VID_MASK))
867 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
868
869 napi_gro_receive(&q_vector->napi, skb);
870}
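
/**
 * iavf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/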
879bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
880{
881 u16 ntu = rx_ring->next_to_use;
882 union iavf_rx_desc *rx_desc;
883 struct iavf_rx_buffer *bi;
884
885
886 if (!rx_ring->netdev || !cleaned_count)
887 return false;
888
889 rx_desc = IAVF_RX_DESC(rx_ring, ntu);
890 bi = &rx_ring->rx_bi[ntu];
891
892 do {
893 if (!iavf_alloc_mapped_page(rx_ring, bi))
894 goto no_buffers;
895
896
897 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
898 bi->page_offset,
899 rx_ring->rx_buf_len,
900 DMA_FROM_DEVICE);
901
902
903
904
905 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
906
907 rx_desc++;
908 bi++;
909 ntu++;
910 if (unlikely(ntu == rx_ring->count)) {
911 rx_desc = IAVF_RX_DESC(rx_ring, 0);
912 bi = rx_ring->rx_bi;
913 ntu = 0;
914 }
915
916
917 rx_desc->wb.qword1.status_error_len = 0;
918
919 cleaned_count--;
920 } while (cleaned_count);
921
922 if (rx_ring->next_to_use != ntu)
923 iavf_release_rx_desc(rx_ring, ntu);
924
925 return false;
926
927no_buffers:
928 if (rx_ring->next_to_use != ntu)
929 iavf_release_rx_desc(rx_ring, ntu);
930
931
932
933
934 return true;
935}
936
937
938
939
940
941
942
943static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
944 struct sk_buff *skb,
945 union iavf_rx_desc *rx_desc)
946{
947 struct iavf_rx_ptype_decoded decoded;
948 u32 rx_error, rx_status;
949 bool ipv4, ipv6;
950 u8 ptype;
951 u64 qword;
952
953 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
954 ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
955 rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
956 IAVF_RXD_QW1_ERROR_SHIFT;
957 rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
958 IAVF_RXD_QW1_STATUS_SHIFT;
959 decoded = decode_rx_desc_ptype(ptype);
960
961 skb->ip_summed = CHECKSUM_NONE;
962
963 skb_checksum_none_assert(skb);
964
965
966 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
967 return;
968
969
970 if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
971 return;
972
973
974 if (!(decoded.known && decoded.outer_ip))
975 return;
976
977 ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
978 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
979 ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
980 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
981
982 if (ipv4 &&
983 (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
984 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
985 goto checksum_fail;
986
987
988 if (ipv6 &&
989 rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
990
991 return;
992
993
994 if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
995 goto checksum_fail;
996
997
998
999
1000
1001 if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
1002 return;
1003
1004
1005 switch (decoded.inner_prot) {
1006 case IAVF_RX_PTYPE_INNER_PROT_TCP:
1007 case IAVF_RX_PTYPE_INNER_PROT_UDP:
1008 case IAVF_RX_PTYPE_INNER_PROT_SCTP:
1009 skb->ip_summed = CHECKSUM_UNNECESSARY;
1010 fallthrough;
1011 default:
1012 break;
1013 }
1014
1015 return;
1016
1017checksum_fail:
1018 vsi->back->hw_csum_rx_error++;
1019}
1020
1021
1022
1023
1024
1025
1026
1027static inline int iavf_ptype_to_htype(u8 ptype)
1028{
1029 struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1030
1031 if (!decoded.known)
1032 return PKT_HASH_TYPE_NONE;
1033
1034 if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1035 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1036 return PKT_HASH_TYPE_L4;
1037 else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1038 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1039 return PKT_HASH_TYPE_L3;
1040 else
1041 return PKT_HASH_TYPE_L2;
1042}
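
/**
 * iavf_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/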
static inline void iavf_rx_hash(struct iavf_ring *ring,
                                union iavf_rx_desc *rx_desc,
                                struct sk_buff *skb,
                                u8 rx_ptype)
{
        u32 hash;
        const __le64 rss_mask =
                cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
                            IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);

        /* only set the hash if the stack asked for it */
        if (!(ring->netdev->features & NETIF_F_RXHASH))
                return;

        if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
                hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
                skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
        }
}
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081static inline
1082void iavf_process_skb_fields(struct iavf_ring *rx_ring,
1083 union iavf_rx_desc *rx_desc, struct sk_buff *skb,
1084 u8 rx_ptype)
1085{
1086 iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1087
1088 iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
1089
1090 skb_record_rx_queue(skb, rx_ring->queue_index);
1091
1092
1093 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1094}
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
{
        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;

        return false;
}
1117
1118
1119
1120
1121
1122
1123
1124
1125static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
1126 struct iavf_rx_buffer *old_buff)
1127{
1128 struct iavf_rx_buffer *new_buff;
1129 u16 nta = rx_ring->next_to_alloc;
1130
1131 new_buff = &rx_ring->rx_bi[nta];
1132
1133
1134 nta++;
1135 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1136
1137
1138 new_buff->dma = old_buff->dma;
1139 new_buff->page = old_buff->page;
1140 new_buff->page_offset = old_buff->page_offset;
1141 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1142}
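
/**
 * iavf_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 **/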
static inline bool iavf_page_is_reusable(struct page *page)
{
        return (page_to_nid(page) == numa_mem_id()) &&
                !page_is_pfmemalloc(page);
}
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
1185{
1186 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1187 struct page *page = rx_buffer->page;
1188
1189
1190 if (unlikely(!iavf_page_is_reusable(page)))
1191 return false;
1192
1193#if (PAGE_SIZE < 8192)
1194
1195 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1196 return false;
1197#else
1198#define IAVF_LAST_OFFSET \
1199 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
1200 if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
1201 return false;
1202#endif
1203
1204
1205
1206
1207
1208 if (unlikely(!pagecnt_bias)) {
1209 page_ref_add(page, USHRT_MAX);
1210 rx_buffer->pagecnt_bias = USHRT_MAX;
1211 }
1212
1213 return true;
1214}
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
1229 struct iavf_rx_buffer *rx_buffer,
1230 struct sk_buff *skb,
1231 unsigned int size)
1232{
1233#if (PAGE_SIZE < 8192)
1234 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1235#else
1236 unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
1237#endif
1238
1239 if (!size)
1240 return;
1241
1242 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1243 rx_buffer->page_offset, size, truesize);
1244
1245
1246#if (PAGE_SIZE < 8192)
1247 rx_buffer->page_offset ^= truesize;
1248#else
1249 rx_buffer->page_offset += truesize;
1250#endif
1251}
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
1262 const unsigned int size)
1263{
1264 struct iavf_rx_buffer *rx_buffer;
1265
1266 if (!size)
1267 return NULL;
1268
1269 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1270 prefetchw(rx_buffer->page);
1271
1272
1273 dma_sync_single_range_for_cpu(rx_ring->dev,
1274 rx_buffer->dma,
1275 rx_buffer->page_offset,
1276 size,
1277 DMA_FROM_DEVICE);
1278
1279
1280 rx_buffer->pagecnt_bias--;
1281
1282 return rx_buffer;
1283}
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
1296 struct iavf_rx_buffer *rx_buffer,
1297 unsigned int size)
1298{
1299 void *va;
1300#if (PAGE_SIZE < 8192)
1301 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1302#else
1303 unsigned int truesize = SKB_DATA_ALIGN(size);
1304#endif
1305 unsigned int headlen;
1306 struct sk_buff *skb;
1307
1308 if (!rx_buffer)
1309 return NULL;
1310
1311 va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1312 net_prefetch(va);
1313
1314
1315 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1316 IAVF_RX_HDR_SIZE,
1317 GFP_ATOMIC | __GFP_NOWARN);
1318 if (unlikely(!skb))
1319 return NULL;
1320
1321
1322 headlen = size;
1323 if (headlen > IAVF_RX_HDR_SIZE)
1324 headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
1325
1326
1327 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1328
1329
1330 size -= headlen;
1331 if (size) {
1332 skb_add_rx_frag(skb, 0, rx_buffer->page,
1333 rx_buffer->page_offset + headlen,
1334 size, truesize);
1335
1336
1337#if (PAGE_SIZE < 8192)
1338 rx_buffer->page_offset ^= truesize;
1339#else
1340 rx_buffer->page_offset += truesize;
1341#endif
1342 } else {
1343
1344 rx_buffer->pagecnt_bias++;
1345 }
1346
1347 return skb;
1348}
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
1360 struct iavf_rx_buffer *rx_buffer,
1361 unsigned int size)
1362{
1363 void *va;
1364#if (PAGE_SIZE < 8192)
1365 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1366#else
1367 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1368 SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
1369#endif
1370 struct sk_buff *skb;
1371
1372 if (!rx_buffer)
1373 return NULL;
1374
1375 va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1376 net_prefetch(va);
1377
1378
1379 skb = build_skb(va - IAVF_SKB_PAD, truesize);
1380 if (unlikely(!skb))
1381 return NULL;
1382
1383
1384 skb_reserve(skb, IAVF_SKB_PAD);
1385 __skb_put(skb, size);
1386
1387
1388#if (PAGE_SIZE < 8192)
1389 rx_buffer->page_offset ^= truesize;
1390#else
1391 rx_buffer->page_offset += truesize;
1392#endif
1393
1394 return skb;
1395}
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
1406 struct iavf_rx_buffer *rx_buffer)
1407{
1408 if (!rx_buffer)
1409 return;
1410
1411 if (iavf_can_reuse_rx_page(rx_buffer)) {
1412
1413 iavf_reuse_rx_page(rx_ring, rx_buffer);
1414 rx_ring->rx_stats.page_reuse_count++;
1415 } else {
1416
1417 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1418 iavf_rx_pg_size(rx_ring),
1419 DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
1420 __page_frag_cache_drain(rx_buffer->page,
1421 rx_buffer->pagecnt_bias);
1422 }
1423
1424
1425 rx_buffer->page = NULL;
1426}
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
1440 union iavf_rx_desc *rx_desc,
1441 struct sk_buff *skb)
1442{
1443 u32 ntc = rx_ring->next_to_clean + 1;
1444
1445
1446 ntc = (ntc < rx_ring->count) ? ntc : 0;
1447 rx_ring->next_to_clean = ntc;
1448
1449 prefetch(IAVF_RX_DESC(rx_ring, ntc));
1450
1451
1452#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
1453 if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
1454 return false;
1455
1456 rx_ring->rx_stats.non_eop_descs++;
1457
1458 return true;
1459}
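
/**
 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * Returns amount of work completed
 **/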
1473static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
1474{
1475 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1476 struct sk_buff *skb = rx_ring->skb;
1477 u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
1478 bool failure = false;
1479
1480 while (likely(total_rx_packets < (unsigned int)budget)) {
1481 struct iavf_rx_buffer *rx_buffer;
1482 union iavf_rx_desc *rx_desc;
1483 unsigned int size;
1484 u16 vlan_tag;
1485 u8 rx_ptype;
1486 u64 qword;
1487
1488
1489 if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
1490 failure = failure ||
1491 iavf_alloc_rx_buffers(rx_ring, cleaned_count);
1492 cleaned_count = 0;
1493 }
1494
1495 rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1496
1497
1498
1499
1500
1501
1502 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1503
1504
1505
1506
1507
1508 dma_rmb();
1509#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
1510 if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
1511 break;
1512
1513 size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1514 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1515
1516 iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
1517 rx_buffer = iavf_get_rx_buffer(rx_ring, size);
1518
1519
1520 if (skb)
1521 iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1522 else if (ring_uses_build_skb(rx_ring))
1523 skb = iavf_build_skb(rx_ring, rx_buffer, size);
1524 else
1525 skb = iavf_construct_skb(rx_ring, rx_buffer, size);
1526
1527
1528 if (!skb) {
1529 rx_ring->rx_stats.alloc_buff_failed++;
1530 if (rx_buffer)
1531 rx_buffer->pagecnt_bias++;
1532 break;
1533 }
1534
1535 iavf_put_rx_buffer(rx_ring, rx_buffer);
1536 cleaned_count++;
1537
1538 if (iavf_is_non_eop(rx_ring, rx_desc, skb))
1539 continue;
1540
1541
1542
1543
1544
1545
1546 if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
1547 dev_kfree_skb_any(skb);
1548 skb = NULL;
1549 continue;
1550 }
1551
1552 if (iavf_cleanup_headers(rx_ring, skb)) {
1553 skb = NULL;
1554 continue;
1555 }
1556
1557
1558 total_rx_bytes += skb->len;
1559
1560 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1561 rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
1562 IAVF_RXD_QW1_PTYPE_SHIFT;
1563
1564
1565 iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1566
1567
1568 vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1569 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1570
1571 iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
1572 iavf_receive_skb(rx_ring, skb, vlan_tag);
1573 skb = NULL;
1574
1575
1576 total_rx_packets++;
1577 }
1578
1579 rx_ring->skb = skb;
1580
1581 u64_stats_update_begin(&rx_ring->syncp);
1582 rx_ring->stats.packets += total_rx_packets;
1583 rx_ring->stats.bytes += total_rx_bytes;
1584 u64_stats_update_end(&rx_ring->syncp);
1585 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1586 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1587
1588
1589 return failure ? budget : (int)total_rx_packets;
1590}
1591
1592static inline u32 iavf_buildreg_itr(const int type, u16 itr)
1593{
1594 u32 val;
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611 itr &= IAVF_ITR_MASK;
1612
1613 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1614 (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1615 (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
1616
1617 return val;
1618}
1619
1620
1621#define INTREG IAVF_VFINT_DYN_CTLN1
1622
1623
1624
1625
1626
1627
1628
1629
1630#define ITR_COUNTDOWN_START 3
1631
1632
1633
1634
1635
1636
1637
1638static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
1639 struct iavf_q_vector *q_vector)
1640{
1641 struct iavf_hw *hw = &vsi->back->hw;
1642 u32 intval;
1643
1644
1645 iavf_update_itr(q_vector, &q_vector->tx);
1646 iavf_update_itr(q_vector, &q_vector->rx);
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
1657
1658 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1659 q_vector->rx.target_itr);
1660 q_vector->rx.current_itr = q_vector->rx.target_itr;
1661 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1662 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
1663 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
1664 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
1665
1666
1667
1668 intval = iavf_buildreg_itr(IAVF_TX_ITR,
1669 q_vector->tx.target_itr);
1670 q_vector->tx.current_itr = q_vector->tx.target_itr;
1671 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1672 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
1673
1674 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1675 q_vector->rx.target_itr);
1676 q_vector->rx.current_itr = q_vector->rx.target_itr;
1677 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1678 } else {
1679
1680 intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
1681 if (q_vector->itr_countdown)
1682 q_vector->itr_countdown--;
1683 }
1684
1685 if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
1686 wr32(hw, INTREG(q_vector->reg_idx), intval);
1687}
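
/**
 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/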
1698int iavf_napi_poll(struct napi_struct *napi, int budget)
1699{
1700 struct iavf_q_vector *q_vector =
1701 container_of(napi, struct iavf_q_vector, napi);
1702 struct iavf_vsi *vsi = q_vector->vsi;
1703 struct iavf_ring *ring;
1704 bool clean_complete = true;
1705 bool arm_wb = false;
1706 int budget_per_ring;
1707 int work_done = 0;
1708
1709 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
1710 napi_complete(napi);
1711 return 0;
1712 }
1713
1714
1715
1716
1717 iavf_for_each_ring(ring, q_vector->tx) {
1718 if (!iavf_clean_tx_irq(vsi, ring, budget)) {
1719 clean_complete = false;
1720 continue;
1721 }
1722 arm_wb |= ring->arm_wb;
1723 ring->arm_wb = false;
1724 }
1725
1726
1727 if (budget <= 0)
1728 goto tx_only;
1729
1730
1731
1732
1733 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1734
1735 iavf_for_each_ring(ring, q_vector->rx) {
1736 int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
1737
1738 work_done += cleaned;
1739
1740 if (cleaned >= budget_per_ring)
1741 clean_complete = false;
1742 }
1743
1744
1745 if (!clean_complete) {
1746 int cpu_id = smp_processor_id();
1747
1748
1749
1750
1751
1752
1753
1754
1755 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
1756
1757 napi_complete_done(napi, work_done);
1758
1759
1760 iavf_force_wb(vsi, q_vector);
1761
1762
1763 return budget - 1;
1764 }
1765tx_only:
1766 if (arm_wb) {
1767 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1768 iavf_enable_wb_on_itr(vsi, q_vector);
1769 }
1770 return budget;
1771 }
1772
1773 if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
1774 q_vector->arm_wb_state = false;
1775
1776
1777
1778
1779 if (likely(napi_complete_done(napi, work_done)))
1780 iavf_update_enable_itr(vsi, q_vector);
1781
1782 return min(work_done, budget - 1);
1783}
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1798 struct iavf_ring *tx_ring,
1799 u32 *flags)
1800{
1801 __be16 protocol = skb->protocol;
1802 u32 tx_flags = 0;
1803
1804 if (protocol == htons(ETH_P_8021Q) &&
1805 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1806
1807
1808
1809
1810
1811
1812
1813 skb->protocol = vlan_get_protocol(skb);
1814 goto out;
1815 }
1816
1817
1818 if (skb_vlan_tag_present(skb)) {
1819 tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1820 tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1821
1822 } else if (protocol == htons(ETH_P_8021Q)) {
1823 struct vlan_hdr *vhdr, _vhdr;
1824
1825 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1826 if (!vhdr)
1827 return -EINVAL;
1828
1829 protocol = vhdr->h_vlan_encapsulated_proto;
1830 tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
1831 tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
1832 }
1833
1834out:
1835 *flags = tx_flags;
1836 return 0;
1837}
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
1848 u64 *cd_type_cmd_tso_mss)
1849{
1850 struct sk_buff *skb = first->skb;
1851 u64 cd_cmd, cd_tso_len, cd_mss;
1852 union {
1853 struct iphdr *v4;
1854 struct ipv6hdr *v6;
1855 unsigned char *hdr;
1856 } ip;
1857 union {
1858 struct tcphdr *tcp;
1859 struct udphdr *udp;
1860 unsigned char *hdr;
1861 } l4;
1862 u32 paylen, l4_offset;
1863 u16 gso_segs, gso_size;
1864 int err;
1865
1866 if (skb->ip_summed != CHECKSUM_PARTIAL)
1867 return 0;
1868
1869 if (!skb_is_gso(skb))
1870 return 0;
1871
1872 err = skb_cow_head(skb, 0);
1873 if (err < 0)
1874 return err;
1875
1876 ip.hdr = skb_network_header(skb);
1877 l4.hdr = skb_transport_header(skb);
1878
1879
1880 if (ip.v4->version == 4) {
1881 ip.v4->tot_len = 0;
1882 ip.v4->check = 0;
1883 } else {
1884 ip.v6->payload_len = 0;
1885 }
1886
1887 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1888 SKB_GSO_GRE_CSUM |
1889 SKB_GSO_IPXIP4 |
1890 SKB_GSO_IPXIP6 |
1891 SKB_GSO_UDP_TUNNEL |
1892 SKB_GSO_UDP_TUNNEL_CSUM)) {
1893 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1894 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1895 l4.udp->len = 0;
1896
1897
1898 l4_offset = l4.hdr - skb->data;
1899
1900
1901 paylen = skb->len - l4_offset;
1902 csum_replace_by_diff(&l4.udp->check,
1903 (__force __wsum)htonl(paylen));
1904 }
1905
1906
1907 ip.hdr = skb_inner_network_header(skb);
1908 l4.hdr = skb_inner_transport_header(skb);
1909
1910
1911 if (ip.v4->version == 4) {
1912 ip.v4->tot_len = 0;
1913 ip.v4->check = 0;
1914 } else {
1915 ip.v6->payload_len = 0;
1916 }
1917 }
1918
1919
1920 l4_offset = l4.hdr - skb->data;
1921
1922
1923 paylen = skb->len - l4_offset;
1924 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1925
1926
1927 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
1928
1929
1930 gso_size = skb_shinfo(skb)->gso_size;
1931 gso_segs = skb_shinfo(skb)->gso_segs;
1932
1933
1934 first->gso_segs = gso_segs;
1935 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1936
1937
1938 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1939 cd_tso_len = skb->len - *hdr_len;
1940 cd_mss = gso_size;
1941 *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1942 (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1943 (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1944 return 1;
1945}
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1957 u32 *td_cmd, u32 *td_offset,
1958 struct iavf_ring *tx_ring,
1959 u32 *cd_tunneling)
1960{
1961 union {
1962 struct iphdr *v4;
1963 struct ipv6hdr *v6;
1964 unsigned char *hdr;
1965 } ip;
1966 union {
1967 struct tcphdr *tcp;
1968 struct udphdr *udp;
1969 unsigned char *hdr;
1970 } l4;
1971 unsigned char *exthdr;
1972 u32 offset, cmd = 0;
1973 __be16 frag_off;
1974 u8 l4_proto = 0;
1975
1976 if (skb->ip_summed != CHECKSUM_PARTIAL)
1977 return 0;
1978
1979 ip.hdr = skb_network_header(skb);
1980 l4.hdr = skb_transport_header(skb);
1981
1982
1983 offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1984
1985 if (skb->encapsulation) {
1986 u32 tunnel = 0;
1987
1988 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
1989 tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
1990 IAVF_TX_CTX_EXT_IP_IPV4 :
1991 IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1992
1993 l4_proto = ip.v4->protocol;
1994 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
1995 tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
1996
1997 exthdr = ip.hdr + sizeof(*ip.v6);
1998 l4_proto = ip.v6->nexthdr;
1999 if (l4.hdr != exthdr)
2000 ipv6_skip_exthdr(skb, exthdr - skb->data,
2001 &l4_proto, &frag_off);
2002 }
2003
2004
2005 switch (l4_proto) {
2006 case IPPROTO_UDP:
2007 tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
2008 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2009 break;
2010 case IPPROTO_GRE:
2011 tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
2012 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2013 break;
2014 case IPPROTO_IPIP:
2015 case IPPROTO_IPV6:
2016 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2017 l4.hdr = skb_inner_network_header(skb);
2018 break;
2019 default:
2020 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2021 return -1;
2022
2023 skb_checksum_help(skb);
2024 return 0;
2025 }
2026
2027
2028 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2029 IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2030
2031
2032 ip.hdr = skb_inner_network_header(skb);
2033
2034
2035 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2036 IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
2037
2038
2039 if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
2040 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2041 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2042 tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
2043
2044
2045 *cd_tunneling |= tunnel;
2046
2047
2048 l4.hdr = skb_inner_transport_header(skb);
2049 l4_proto = 0;
2050
2051
2052 *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
2053 if (ip.v4->version == 4)
2054 *tx_flags |= IAVF_TX_FLAGS_IPV4;
2055 if (ip.v6->version == 6)
2056 *tx_flags |= IAVF_TX_FLAGS_IPV6;
2057 }
2058
2059
2060 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
2061 l4_proto = ip.v4->protocol;
2062
2063
2064
2065 cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2066 IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
2067 IAVF_TX_DESC_CMD_IIPT_IPV4;
2068 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2069 cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2070
2071 exthdr = ip.hdr + sizeof(*ip.v6);
2072 l4_proto = ip.v6->nexthdr;
2073 if (l4.hdr != exthdr)
2074 ipv6_skip_exthdr(skb, exthdr - skb->data,
2075 &l4_proto, &frag_off);
2076 }
2077
2078
2079 offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2080
2081
2082 switch (l4_proto) {
2083 case IPPROTO_TCP:
2084
2085 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2086 offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2087 break;
2088 case IPPROTO_SCTP:
2089
2090 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2091 offset |= (sizeof(struct sctphdr) >> 2) <<
2092 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2093 break;
2094 case IPPROTO_UDP:
2095
2096 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2097 offset |= (sizeof(struct udphdr) >> 2) <<
2098 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2099 break;
2100 default:
2101 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2102 return -1;
2103 skb_checksum_help(skb);
2104 return 0;
2105 }
2106
2107 *td_cmd |= cmd;
2108 *td_offset |= offset;
2109
2110 return 1;
2111}
2112
2113
2114
2115
2116
2117
2118
2119
2120static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
2121 const u64 cd_type_cmd_tso_mss,
2122 const u32 cd_tunneling, const u32 cd_l2tag2)
2123{
2124 struct iavf_tx_context_desc *context_desc;
2125 int i = tx_ring->next_to_use;
2126
2127 if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
2128 !cd_tunneling && !cd_l2tag2)
2129 return;
2130
2131
2132 context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
2133
2134 i++;
2135 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2136
2137
2138 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2139 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2140 context_desc->rsvd = cpu_to_le16(0);
2141 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2142}
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157bool __iavf_chk_linearize(struct sk_buff *skb)
2158{
2159 const skb_frag_t *frag, *stale;
2160 int nr_frags, sum;
2161
2162
2163 nr_frags = skb_shinfo(skb)->nr_frags;
2164 if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
2165 return false;
2166
2167
2168
2169
2170 nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
2171 frag = &skb_shinfo(skb)->frags[0];
2172
2173
2174
2175
2176
2177
2178
2179 sum = 1 - skb_shinfo(skb)->gso_size;
2180
2181
2182 sum += skb_frag_size(frag++);
2183 sum += skb_frag_size(frag++);
2184 sum += skb_frag_size(frag++);
2185 sum += skb_frag_size(frag++);
2186 sum += skb_frag_size(frag++);
2187
2188
2189
2190
2191 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2192 int stale_size = skb_frag_size(stale);
2193
2194 sum += skb_frag_size(frag++);
2195
2196
2197
2198
2199
2200
2201
2202 if (stale_size > IAVF_MAX_DATA_PER_TXD) {
2203 int align_pad = -(skb_frag_off(stale)) &
2204 (IAVF_MAX_READ_REQ_SIZE - 1);
2205
2206 sum -= align_pad;
2207 stale_size -= align_pad;
2208
2209 do {
2210 sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2211 stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2212 } while (stale_size > IAVF_MAX_DATA_PER_TXD);
2213 }
2214
2215
2216 if (sum < 0)
2217 return true;
2218
2219 if (!nr_frags--)
2220 break;
2221
2222 sum -= stale_size;
2223 }
2224
2225 return false;
2226}
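
/**
 * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/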
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

        /* Memory barrier before checking head and tail */
        smp_mb();

        /* Check again in a case another CPU has just made room available. */
        if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! - use start_queue because it doesn't call schedule */
        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++tx_ring->tx_stats.restart_queue;
        return 0;
}
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
2262 struct iavf_tx_buffer *first, u32 tx_flags,
2263 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2264{
2265 unsigned int data_len = skb->data_len;
2266 unsigned int size = skb_headlen(skb);
2267 skb_frag_t *frag;
2268 struct iavf_tx_buffer *tx_bi;
2269 struct iavf_tx_desc *tx_desc;
2270 u16 i = tx_ring->next_to_use;
2271 u32 td_tag = 0;
2272 dma_addr_t dma;
2273
2274 if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
2275 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2276 td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
2277 IAVF_TX_FLAGS_VLAN_SHIFT;
2278 }
2279
2280 first->tx_flags = tx_flags;
2281
2282 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2283
2284 tx_desc = IAVF_TX_DESC(tx_ring, i);
2285 tx_bi = first;
2286
2287 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2288 unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2289
2290 if (dma_mapping_error(tx_ring->dev, dma))
2291 goto dma_error;
2292
2293
2294 dma_unmap_len_set(tx_bi, len, size);
2295 dma_unmap_addr_set(tx_bi, dma, dma);
2296
2297
2298 max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
2299 tx_desc->buffer_addr = cpu_to_le64(dma);
2300
2301 while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
2302 tx_desc->cmd_type_offset_bsz =
2303 build_ctob(td_cmd, td_offset,
2304 max_data, td_tag);
2305
2306 tx_desc++;
2307 i++;
2308
2309 if (i == tx_ring->count) {
2310 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2311 i = 0;
2312 }
2313
2314 dma += max_data;
2315 size -= max_data;
2316
2317 max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2318 tx_desc->buffer_addr = cpu_to_le64(dma);
2319 }
2320
2321 if (likely(!data_len))
2322 break;
2323
2324 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2325 size, td_tag);
2326
2327 tx_desc++;
2328 i++;
2329
2330 if (i == tx_ring->count) {
2331 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2332 i = 0;
2333 }
2334
2335 size = skb_frag_size(frag);
2336 data_len -= size;
2337
2338 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2339 DMA_TO_DEVICE);
2340
2341 tx_bi = &tx_ring->tx_bi[i];
2342 }
2343
2344 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2345
2346 i++;
2347 if (i == tx_ring->count)
2348 i = 0;
2349
2350 tx_ring->next_to_use = i;
2351
2352 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2353
2354
2355 td_cmd |= IAVF_TXD_CMD;
2356 tx_desc->cmd_type_offset_bsz =
2357 build_ctob(td_cmd, td_offset, size, td_tag);
2358
2359 skb_tx_timestamp(skb);
2360
2361
2362
2363
2364
2365
2366
2367 wmb();
2368
2369
2370 first->next_to_watch = tx_desc;
2371
2372
2373 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
2374 writel(i, tx_ring->tail);
2375 }
2376
2377 return;
2378
2379dma_error:
2380 dev_info(tx_ring->dev, "TX DMA map failed\n");
2381
2382
2383 for (;;) {
2384 tx_bi = &tx_ring->tx_bi[i];
2385 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
2386 if (tx_bi == first)
2387 break;
2388 if (i == 0)
2389 i = tx_ring->count;
2390 i--;
2391 }
2392
2393 tx_ring->next_to_use = i;
2394}
2395
2396
2397
2398
2399
2400
2401
2402
2403static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
2404 struct iavf_ring *tx_ring)
2405{
2406 u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
2407 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2408 struct iavf_tx_buffer *first;
2409 u32 td_offset = 0;
2410 u32 tx_flags = 0;
2411 __be16 protocol;
2412 u32 td_cmd = 0;
2413 u8 hdr_len = 0;
2414 int tso, count;
2415
2416
2417 prefetch(skb->data);
2418
2419 iavf_trace(xmit_frame_ring, skb, tx_ring);
2420
2421 count = iavf_xmit_descriptor_count(skb);
2422 if (iavf_chk_linearize(skb, count)) {
2423 if (__skb_linearize(skb)) {
2424 dev_kfree_skb_any(skb);
2425 return NETDEV_TX_OK;
2426 }
2427 count = iavf_txd_use_count(skb->len);
2428 tx_ring->tx_stats.tx_linearize++;
2429 }
2430
2431
2432
2433
2434
2435
2436
2437 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2438 tx_ring->tx_stats.tx_busy++;
2439 return NETDEV_TX_BUSY;
2440 }
2441
2442
2443 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2444 first->skb = skb;
2445 first->bytecount = skb->len;
2446 first->gso_segs = 1;
2447
2448
2449 if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2450 goto out_drop;
2451
2452
2453 protocol = vlan_get_protocol(skb);
2454
2455
2456 if (protocol == htons(ETH_P_IP))
2457 tx_flags |= IAVF_TX_FLAGS_IPV4;
2458 else if (protocol == htons(ETH_P_IPV6))
2459 tx_flags |= IAVF_TX_FLAGS_IPV6;
2460
2461 tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
2462
2463 if (tso < 0)
2464 goto out_drop;
2465 else if (tso)
2466 tx_flags |= IAVF_TX_FLAGS_TSO;
2467
2468
2469 tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2470 tx_ring, &cd_tunneling);
2471 if (tso < 0)
2472 goto out_drop;
2473
2474
2475 td_cmd |= IAVF_TX_DESC_CMD_ICRC;
2476
2477 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2478 cd_tunneling, cd_l2tag2);
2479
2480 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2481 td_cmd, td_offset);
2482
2483 return NETDEV_TX_OK;
2484
2485out_drop:
2486 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
2487 dev_kfree_skb_any(first->skb);
2488 first->skb = NULL;
2489 return NETDEV_TX_OK;
2490}
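
/**
 * iavf_xmit_frame - Used by the stack to xmit skb to the hardware
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/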
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];

        /* hardware can't handle really short frames, hardware padding works
         * beyond this point
         */
        if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
                if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
                        return NETDEV_TX_OK;
                skb->len = IAVF_MIN_TX_LEN;
                skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
        }

        return iavf_xmit_frame_ring(skb, tx_ring);
}