// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/prefetch.h>

#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}

#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
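/**
 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/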
27static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
28 struct iavf_tx_buffer *tx_buffer)
29{
30 if (tx_buffer->skb) {
31 if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
32 kfree(tx_buffer->raw_buf);
33 else
34 dev_kfree_skb_any(tx_buffer->skb);
35 if (dma_unmap_len(tx_buffer, len))
36 dma_unmap_single(ring->dev,
37 dma_unmap_addr(tx_buffer, dma),
38 dma_unmap_len(tx_buffer, len),
39 DMA_TO_DEVICE);
40 } else if (dma_unmap_len(tx_buffer, len)) {
41 dma_unmap_page(ring->dev,
42 dma_unmap_addr(tx_buffer, dma),
43 dma_unmap_len(tx_buffer, len),
44 DMA_TO_DEVICE);
45 }
46
47 tx_buffer->next_to_watch = NULL;
48 tx_buffer->skb = NULL;
49 dma_unmap_len_set(tx_buffer, len, 0);
50
51}
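/**
 * iavf_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/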
57void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
58{
59 unsigned long bi_size;
60 u16 i;
61
62
63 if (!tx_ring->tx_bi)
64 return;
65
66
67 for (i = 0; i < tx_ring->count; i++)
68 iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
69
70 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
71 memset(tx_ring->tx_bi, 0, bi_size);
72
73
74 memset(tx_ring->desc, 0, tx_ring->size);
75
76 tx_ring->next_to_use = 0;
77 tx_ring->next_to_clean = 0;
78
79 if (!tx_ring->netdev)
80 return;
81
82
83 netdev_tx_reset_queue(txring_txq(tx_ring));
84}
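/**
 * iavf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/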
92void iavf_free_tx_resources(struct iavf_ring *tx_ring)
93{
94 iavf_clean_tx_ring(tx_ring);
95 kfree(tx_ring->tx_bi);
96 tx_ring->tx_bi = NULL;
97
98 if (tx_ring->desc) {
99 dma_free_coherent(tx_ring->dev, tx_ring->size,
100 tx_ring->desc, tx_ring->dma);
101 tx_ring->desc = NULL;
102 }
103}
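/**
 * iavf_get_tx_pending - how many Tx descriptors not processed
 * @ring:  the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Returns the number of descriptors submitted to hardware (up to the tail
 * register) that software has not yet cleaned (next_to_clean).
 **/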
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
	u32 head, tail;

	head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
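/**
 * iavf_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi: pointer to vsi struct with Tx queues
 *
 * VSI has netdev and netdev has Tx queues. This function checks each of
 * those Tx queues; if a queue appears hung, it triggers recovery by
 * issuing a SW interrupt (forced writeback).
 **/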
134void iavf_detect_recover_hung(struct iavf_vsi *vsi)
135{
136 struct iavf_ring *tx_ring = NULL;
137 struct net_device *netdev;
138 unsigned int i;
139 int packets;
140
141 if (!vsi)
142 return;
143
144 if (test_bit(__IAVF_VSI_DOWN, vsi->state))
145 return;
146
147 netdev = vsi->netdev;
148 if (!netdev)
149 return;
150
151 if (!netif_carrier_ok(netdev))
152 return;
153
154 for (i = 0; i < vsi->back->num_active_queues; i++) {
155 tx_ring = &vsi->back->tx_rings[i];
156 if (tx_ring && tx_ring->desc) {
157
158
159
160
161
162
163
164 packets = tx_ring->stats.packets & INT_MAX;
165 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
166 iavf_force_wb(vsi, tx_ring->q_vector);
167 continue;
168 }
169
170
171
172
173 smp_rmb();
174 tx_ring->tx_stats.prev_pkt_ctr =
175 iavf_get_tx_pending(tx_ring, true) ? packets : -1;
176 }
177 }
178}
179
180#define WB_STRIDE 4
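/**
 * iavf_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/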
190static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
191 struct iavf_ring *tx_ring, int napi_budget)
192{
193 int i = tx_ring->next_to_clean;
194 struct iavf_tx_buffer *tx_buf;
195 struct iavf_tx_desc *tx_desc;
196 unsigned int total_bytes = 0, total_packets = 0;
197 unsigned int budget = vsi->work_limit;
198
199 tx_buf = &tx_ring->tx_bi[i];
200 tx_desc = IAVF_TX_DESC(tx_ring, i);
201 i -= tx_ring->count;
202
203 do {
204 struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
205
206
207 if (!eop_desc)
208 break;
209
210
211 smp_rmb();
212
213 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
214
215 if (!(eop_desc->cmd_type_offset_bsz &
216 cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
217 break;
218
219
220 tx_buf->next_to_watch = NULL;
221
222
223 total_bytes += tx_buf->bytecount;
224 total_packets += tx_buf->gso_segs;
225
226
227 napi_consume_skb(tx_buf->skb, napi_budget);
228
229
230 dma_unmap_single(tx_ring->dev,
231 dma_unmap_addr(tx_buf, dma),
232 dma_unmap_len(tx_buf, len),
233 DMA_TO_DEVICE);
234
235
236 tx_buf->skb = NULL;
237 dma_unmap_len_set(tx_buf, len, 0);
238
239
240 while (tx_desc != eop_desc) {
241 iavf_trace(clean_tx_irq_unmap,
242 tx_ring, tx_desc, tx_buf);
243
244 tx_buf++;
245 tx_desc++;
246 i++;
247 if (unlikely(!i)) {
248 i -= tx_ring->count;
249 tx_buf = tx_ring->tx_bi;
250 tx_desc = IAVF_TX_DESC(tx_ring, 0);
251 }
252
253
254 if (dma_unmap_len(tx_buf, len)) {
255 dma_unmap_page(tx_ring->dev,
256 dma_unmap_addr(tx_buf, dma),
257 dma_unmap_len(tx_buf, len),
258 DMA_TO_DEVICE);
259 dma_unmap_len_set(tx_buf, len, 0);
260 }
261 }
262
263
264 tx_buf++;
265 tx_desc++;
266 i++;
267 if (unlikely(!i)) {
268 i -= tx_ring->count;
269 tx_buf = tx_ring->tx_bi;
270 tx_desc = IAVF_TX_DESC(tx_ring, 0);
271 }
272
273 prefetch(tx_desc);
274
275
276 budget--;
277 } while (likely(budget));
278
279 i += tx_ring->count;
280 tx_ring->next_to_clean = i;
281 u64_stats_update_begin(&tx_ring->syncp);
282 tx_ring->stats.bytes += total_bytes;
283 tx_ring->stats.packets += total_packets;
284 u64_stats_update_end(&tx_ring->syncp);
285 tx_ring->q_vector->tx.total_bytes += total_bytes;
286 tx_ring->q_vector->tx.total_packets += total_packets;
287
288 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
289
290
291
292
293
294 unsigned int j = iavf_get_tx_pending(tx_ring, false);
295
296 if (budget &&
297 ((j / WB_STRIDE) == 0) && (j > 0) &&
298 !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
299 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
300 tx_ring->arm_wb = true;
301 }
302
303
304 netdev_tx_completed_queue(txring_txq(tx_ring),
305 total_packets, total_bytes);
306
307#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
308 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
309 (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
310
311
312
313 smp_mb();
314 if (__netif_subqueue_stopped(tx_ring->netdev,
315 tx_ring->queue_index) &&
316 !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
317 netif_wake_subqueue(tx_ring->netdev,
318 tx_ring->queue_index);
319 ++tx_ring->tx_stats.restart_queue;
320 }
321 }
322
323 return !!budget;
324}
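/**
 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 **/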
332static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
333 struct iavf_q_vector *q_vector)
334{
335 u16 flags = q_vector->tx.ring[0].flags;
336 u32 val;
337
338 if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
339 return;
340
341 if (q_vector->arm_wb_state)
342 return;
343
344 val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
345 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK;
346
347 wr32(&vsi->back->hw,
348 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
349 q_vector->arm_wb_state = true;
350}
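/**
 * iavf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/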
358void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
359{
	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
365
366 wr32(&vsi->back->hw,
367 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
368 val);
369}
370
371static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
372 struct iavf_ring_container *rc)
373{
374 return &q_vector->rx == rc;
375}
376
377static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
378{
379 unsigned int divisor;
380
381 switch (q_vector->adapter->link_speed) {
382 case VIRTCHNL_LINK_SPEED_40GB:
383 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
384 break;
385 case VIRTCHNL_LINK_SPEED_25GB:
386 case VIRTCHNL_LINK_SPEED_20GB:
387 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
388 break;
389 default:
390 case VIRTCHNL_LINK_SPEED_10GB:
391 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
392 break;
393 case VIRTCHNL_LINK_SPEED_1GB:
394 case VIRTCHNL_LINK_SPEED_100MB:
395 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
396 break;
397 }
398
399 return divisor;
400}
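/**
 * iavf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per-interrupt computation is faster updates
 * and a more accurate ITR for the current traffic pattern.
 **/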
415static void iavf_update_itr(struct iavf_q_vector *q_vector,
416 struct iavf_ring_container *rc)
417{
418 unsigned int avg_wire_size, packets, bytes, itr;
419 unsigned long next_update = jiffies;
420
421
422
423
424 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
425 return;
426
427
428
429
430 itr = iavf_container_is_rx(q_vector, rc) ?
431 IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
432 IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
433
434
435
436
437
438
439 if (time_after(next_update, rc->next_update))
440 goto clear_counts;
441
442
443
444
445
446
447
448 if (q_vector->itr_countdown) {
449 itr = rc->target_itr;
450 goto clear_counts;
451 }
452
453 packets = rc->total_packets;
454 bytes = rc->total_bytes;
455
456 if (iavf_container_is_rx(q_vector, rc)) {
457
458
459
460
461
462 if (packets && packets < 4 && bytes < 9000 &&
463 (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
464 itr = IAVF_ITR_ADAPTIVE_LATENCY;
465 goto adjust_by_size;
466 }
467 } else if (packets < 4) {
468
469
470
471
472
473 if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
474 (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
475 IAVF_ITR_ADAPTIVE_MAX_USECS)
476 goto clear_counts;
477 } else if (packets > 32) {
478
479
480
481 rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
482 }
483
484
485
486
487
488
489
490
491
492 if (packets < 56) {
493 itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
494 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
495 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
496 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
497 }
498 goto clear_counts;
499 }
500
501 if (packets <= 256) {
502 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
503 itr &= IAVF_ITR_MASK;
504
505
506
507
508
509 if (packets <= 112)
510 goto clear_counts;
511
512
513
514
515
516
517 itr /= 2;
518 itr &= IAVF_ITR_MASK;
519 if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
520 itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
521
522 goto clear_counts;
523 }
524
525
526
527
528
529
530
531 itr = IAVF_ITR_ADAPTIVE_BULK;
532
533adjust_by_size:
534
535
536
537
538
539 avg_wire_size = bytes / packets;
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556 if (avg_wire_size <= 60) {
557
558 avg_wire_size = 4096;
559 } else if (avg_wire_size <= 380) {
560
561 avg_wire_size *= 40;
562 avg_wire_size += 1696;
563 } else if (avg_wire_size <= 1084) {
564
565 avg_wire_size *= 15;
566 avg_wire_size += 11452;
567 } else if (avg_wire_size <= 1980) {
568
569 avg_wire_size *= 5;
570 avg_wire_size += 22420;
571 } else {
572
573 avg_wire_size = 32256;
574 }
575
576
577
578
579 if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
580 avg_wire_size /= 2;
581
582
583
584
585
586
587
588
589 itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
590 IAVF_ITR_ADAPTIVE_MIN_INC;
591
592 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
593 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
594 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
595 }
596
597clear_counts:
598
599 rc->target_itr = itr;
600
601
602 rc->next_update = next_update + 1;
603
604 rc->total_bytes = 0;
605 rc->total_packets = 0;
606}
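/**
 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 **/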
614int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
615{
616 struct device *dev = tx_ring->dev;
617 int bi_size;
618
619 if (!dev)
620 return -ENOMEM;
621
622
623 WARN_ON(tx_ring->tx_bi);
624 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
625 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
626 if (!tx_ring->tx_bi)
627 goto err;
628
629
630 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
631 tx_ring->size = ALIGN(tx_ring->size, 4096);
632 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
633 &tx_ring->dma, GFP_KERNEL);
634 if (!tx_ring->desc) {
635 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
636 tx_ring->size);
637 goto err;
638 }
639
640 tx_ring->next_to_use = 0;
641 tx_ring->next_to_clean = 0;
642 tx_ring->tx_stats.prev_pkt_ctr = -1;
643 return 0;
644
645err:
646 kfree(tx_ring->tx_bi);
647 tx_ring->tx_bi = NULL;
648 return -ENOMEM;
649}
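/**
 * iavf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/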
655void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
656{
657 unsigned long bi_size;
658 u16 i;
659
660
661 if (!rx_ring->rx_bi)
662 return;
663
664 if (rx_ring->skb) {
665 dev_kfree_skb(rx_ring->skb);
666 rx_ring->skb = NULL;
667 }
668
669
670 for (i = 0; i < rx_ring->count; i++) {
671 struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
672
673 if (!rx_bi->page)
674 continue;
675
676
677
678
679 dma_sync_single_range_for_cpu(rx_ring->dev,
680 rx_bi->dma,
681 rx_bi->page_offset,
682 rx_ring->rx_buf_len,
683 DMA_FROM_DEVICE);
684
685
686 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
687 iavf_rx_pg_size(rx_ring),
688 DMA_FROM_DEVICE,
689 IAVF_RX_DMA_ATTR);
690
691 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
692
693 rx_bi->page = NULL;
694 rx_bi->page_offset = 0;
695 }
696
697 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
698 memset(rx_ring->rx_bi, 0, bi_size);
699
700
701 memset(rx_ring->desc, 0, rx_ring->size);
702
703 rx_ring->next_to_alloc = 0;
704 rx_ring->next_to_clean = 0;
705 rx_ring->next_to_use = 0;
706}
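/**
 * iavf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/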
714void iavf_free_rx_resources(struct iavf_ring *rx_ring)
715{
716 iavf_clean_rx_ring(rx_ring);
717 kfree(rx_ring->rx_bi);
718 rx_ring->rx_bi = NULL;
719
720 if (rx_ring->desc) {
721 dma_free_coherent(rx_ring->dev, rx_ring->size,
722 rx_ring->desc, rx_ring->dma);
723 rx_ring->desc = NULL;
724 }
725}
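/**
 * iavf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to set up
 *
 * Returns 0 on success, negative on failure
 **/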
733int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
734{
735 struct device *dev = rx_ring->dev;
736 int bi_size;
737
738
739 WARN_ON(rx_ring->rx_bi);
740 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
741 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
742 if (!rx_ring->rx_bi)
743 goto err;
744
745 u64_stats_init(&rx_ring->syncp);
746
747
748 rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
749 rx_ring->size = ALIGN(rx_ring->size, 4096);
750 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
751 &rx_ring->dma, GFP_KERNEL);
752
753 if (!rx_ring->desc) {
754 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
755 rx_ring->size);
756 goto err;
757 }
758
759 rx_ring->next_to_alloc = 0;
760 rx_ring->next_to_clean = 0;
761 rx_ring->next_to_use = 0;
762
763 return 0;
764err:
765 kfree(rx_ring->rx_bi);
766 rx_ring->rx_bi = NULL;
767 return -ENOMEM;
768}
769
770
771
772
773
774
775static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
776{
777 rx_ring->next_to_use = val;
778
779
780 rx_ring->next_to_alloc = val;
781
782
783
784
785
786
787 wmb();
788 writel(val, rx_ring->tail);
789}
790
791
792
793
794
795
796
797static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
798{
799 return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
800}
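/**
 * iavf_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or reused.
 **/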
810static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
811 struct iavf_rx_buffer *bi)
812{
813 struct page *page = bi->page;
814 dma_addr_t dma;
815
816
817 if (likely(page)) {
818 rx_ring->rx_stats.page_reuse_count++;
819 return true;
820 }
821
822
823 page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
824 if (unlikely(!page)) {
825 rx_ring->rx_stats.alloc_page_failed++;
826 return false;
827 }
828
829
830 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
831 iavf_rx_pg_size(rx_ring),
832 DMA_FROM_DEVICE,
833 IAVF_RX_DMA_ATTR);
834
835
836
837
838 if (dma_mapping_error(rx_ring->dev, dma)) {
839 __free_pages(page, iavf_rx_pg_order(rx_ring));
840 rx_ring->rx_stats.alloc_page_failed++;
841 return false;
842 }
843
844 bi->dma = dma;
845 bi->page = page;
846 bi->page_offset = iavf_rx_offset(rx_ring);
847
848
849 bi->pagecnt_bias = 1;
850
851 return true;
852}
853
854
855
856
857
858
859
860static void iavf_receive_skb(struct iavf_ring *rx_ring,
861 struct sk_buff *skb, u16 vlan_tag)
862{
863 struct iavf_q_vector *q_vector = rx_ring->q_vector;
864
865 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
866 (vlan_tag & VLAN_VID_MASK))
867 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
868
869 napi_gro_receive(&q_vector->napi, skb);
870}
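/**
 * iavf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/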
879bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
880{
881 u16 ntu = rx_ring->next_to_use;
882 union iavf_rx_desc *rx_desc;
883 struct iavf_rx_buffer *bi;
884
885
886 if (!rx_ring->netdev || !cleaned_count)
887 return false;
888
889 rx_desc = IAVF_RX_DESC(rx_ring, ntu);
890 bi = &rx_ring->rx_bi[ntu];
891
892 do {
893 if (!iavf_alloc_mapped_page(rx_ring, bi))
894 goto no_buffers;
895
896
897 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
898 bi->page_offset,
899 rx_ring->rx_buf_len,
900 DMA_FROM_DEVICE);
901
902
903
904
905 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
906
907 rx_desc++;
908 bi++;
909 ntu++;
910 if (unlikely(ntu == rx_ring->count)) {
911 rx_desc = IAVF_RX_DESC(rx_ring, 0);
912 bi = rx_ring->rx_bi;
913 ntu = 0;
914 }
915
916
917 rx_desc->wb.qword1.status_error_len = 0;
918
919 cleaned_count--;
920 } while (cleaned_count);
921
922 if (rx_ring->next_to_use != ntu)
923 iavf_release_rx_desc(rx_ring, ntu);
924
925 return false;
926
927no_buffers:
928 if (rx_ring->next_to_use != ntu)
929 iavf_release_rx_desc(rx_ring, ntu);
930
931
932
933
934 return true;
935}
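/**
 * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/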
943static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
944 struct sk_buff *skb,
945 union iavf_rx_desc *rx_desc)
946{
947 struct iavf_rx_ptype_decoded decoded;
948 u32 rx_error, rx_status;
949 bool ipv4, ipv6;
950 u8 ptype;
951 u64 qword;
952
953 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
954 ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
955 rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
956 IAVF_RXD_QW1_ERROR_SHIFT;
957 rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
958 IAVF_RXD_QW1_STATUS_SHIFT;
959 decoded = decode_rx_desc_ptype(ptype);
960
961 skb->ip_summed = CHECKSUM_NONE;
962
963 skb_checksum_none_assert(skb);
964
965
966 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
967 return;
968
969
970 if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
971 return;
972
973
974 if (!(decoded.known && decoded.outer_ip))
975 return;
976
977 ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
978 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
979 ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
980 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
981
982 if (ipv4 &&
983 (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
984 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
985 goto checksum_fail;
986
987
988 if (ipv6 &&
989 rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
990
991 return;
992
993
994 if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
995 goto checksum_fail;
996
997
998
999
1000
1001 if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
1002 return;
1003
1004
1005 switch (decoded.inner_prot) {
1006 case IAVF_RX_PTYPE_INNER_PROT_TCP:
1007 case IAVF_RX_PTYPE_INNER_PROT_UDP:
1008 case IAVF_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		fallthrough;
	default:
		break;
1013 }
1014
1015 return;
1016
1017checksum_fail:
1018 vsi->back->hw_csum_rx_error++;
1019}
1020
1021
1022
1023
1024
1025
1026
1027static inline int iavf_ptype_to_htype(u8 ptype)
1028{
1029 struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1030
1031 if (!decoded.known)
1032 return PKT_HASH_TYPE_NONE;
1033
1034 if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1035 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1036 return PKT_HASH_TYPE_L4;
1037 else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1038 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1039 return PKT_HASH_TYPE_L3;
1040 else
1041 return PKT_HASH_TYPE_L2;
1042}
1043
1044
1045
1046
1047
1048
1049
1050
1051static inline void iavf_rx_hash(struct iavf_ring *ring,
1052 union iavf_rx_desc *rx_desc,
1053 struct sk_buff *skb,
1054 u8 rx_ptype)
1055{
1056 u32 hash;
1057 const __le64 rss_mask =
1058 cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
1059 IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
1060
	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;
1063
1064 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1065 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1066 skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
1067 }
1068}
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081static inline
1082void iavf_process_skb_fields(struct iavf_ring *rx_ring,
1083 union iavf_rx_desc *rx_desc, struct sk_buff *skb,
1084 u8 rx_ptype)
1085{
1086 iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1087
1088 iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
1089
1090 skb_record_rx_queue(skb, rx_ring->queue_index);
1091
1092
1093 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1094}
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
1110{
1111
1112 if (eth_skb_pad(skb))
1113 return true;
1114
1115 return false;
1116}
1117
1118
1119
1120
1121
1122
1123
1124
1125static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
1126 struct iavf_rx_buffer *old_buff)
1127{
1128 struct iavf_rx_buffer *new_buff;
1129 u16 nta = rx_ring->next_to_alloc;
1130
1131 new_buff = &rx_ring->rx_bi[nta];
1132
1133
1134 nta++;
1135 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1136
1137
1138 new_buff->dma = old_buff->dma;
1139 new_buff->page = old_buff->page;
1140 new_buff->page_offset = old_buff->page_offset;
1141 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1142}
1143
1144
1145
1146
1147
1148
1149
1150
1151static inline bool iavf_page_is_reusable(struct page *page)
1152{
1153 return (page_to_nid(page) == numa_mem_id()) &&
1154 !page_is_pfmemalloc(page);
1155}
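/**
 * iavf_can_reuse_rx_page - Determine if this page can be reused for another Rx
 * @rx_buffer: buffer containing the page
 *
 * A page is reusable only if it was allocated on the local NUMA node, is not
 * a pfmemalloc emergency page, and the driver still holds the only reference
 * to it (tracked via pagecnt_bias and the page reference count).
 **/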
1184static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
1185{
1186 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1187 struct page *page = rx_buffer->page;
1188
1189
1190 if (unlikely(!iavf_page_is_reusable(page)))
1191 return false;
1192
1193#if (PAGE_SIZE < 8192)
1194
1195 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1196 return false;
1197#else
1198#define IAVF_LAST_OFFSET \
1199 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
1200 if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
1201 return false;
1202#endif
1203
1204
1205
1206
1207
1208 if (unlikely(!pagecnt_bias)) {
1209 page_ref_add(page, USHRT_MAX);
1210 rx_buffer->pagecnt_bias = USHRT_MAX;
1211 }
1212
1213 return true;
1214}
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
1229 struct iavf_rx_buffer *rx_buffer,
1230 struct sk_buff *skb,
1231 unsigned int size)
1232{
1233#if (PAGE_SIZE < 8192)
1234 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1235#else
1236 unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
1237#endif
1238
1239 if (!size)
1240 return;
1241
1242 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1243 rx_buffer->page_offset, size, truesize);
1244
1245
1246#if (PAGE_SIZE < 8192)
1247 rx_buffer->page_offset ^= truesize;
1248#else
1249 rx_buffer->page_offset += truesize;
1250#endif
1251}
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
1262 const unsigned int size)
1263{
1264 struct iavf_rx_buffer *rx_buffer;
1265
1266 if (!size)
1267 return NULL;
1268
1269 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1270 prefetchw(rx_buffer->page);
1271
1272
1273 dma_sync_single_range_for_cpu(rx_ring->dev,
1274 rx_buffer->dma,
1275 rx_buffer->page_offset,
1276 size,
1277 DMA_FROM_DEVICE);
1278
1279
1280 rx_buffer->pagecnt_bias--;
1281
1282 return rx_buffer;
1283}
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
1296 struct iavf_rx_buffer *rx_buffer,
1297 unsigned int size)
1298{
1299 void *va;
1300#if (PAGE_SIZE < 8192)
1301 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1302#else
1303 unsigned int truesize = SKB_DATA_ALIGN(size);
1304#endif
1305 unsigned int headlen;
1306 struct sk_buff *skb;
1307
1308 if (!rx_buffer)
1309 return NULL;
1310
1311 va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1312 prefetch(va);
1313#if L1_CACHE_BYTES < 128
1314 prefetch(va + L1_CACHE_BYTES);
1315#endif
1316
1317
1318 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1319 IAVF_RX_HDR_SIZE,
1320 GFP_ATOMIC | __GFP_NOWARN);
1321 if (unlikely(!skb))
1322 return NULL;
1323
1324
1325 headlen = size;
1326 if (headlen > IAVF_RX_HDR_SIZE)
1327 headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
1328
1329
1330 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1331
1332
1333 size -= headlen;
1334 if (size) {
1335 skb_add_rx_frag(skb, 0, rx_buffer->page,
1336 rx_buffer->page_offset + headlen,
1337 size, truesize);
1338
1339
1340#if (PAGE_SIZE < 8192)
1341 rx_buffer->page_offset ^= truesize;
1342#else
1343 rx_buffer->page_offset += truesize;
1344#endif
1345 } else {
1346
1347 rx_buffer->pagecnt_bias++;
1348 }
1349
1350 return skb;
1351}
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
1363 struct iavf_rx_buffer *rx_buffer,
1364 unsigned int size)
1365{
1366 void *va;
1367#if (PAGE_SIZE < 8192)
1368 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1369#else
1370 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1371 SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
1372#endif
1373 struct sk_buff *skb;
1374
1375 if (!rx_buffer)
1376 return NULL;
1377
1378 va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1379 prefetch(va);
1380#if L1_CACHE_BYTES < 128
1381 prefetch(va + L1_CACHE_BYTES);
1382#endif
1383
1384 skb = build_skb(va - IAVF_SKB_PAD, truesize);
1385 if (unlikely(!skb))
1386 return NULL;
1387
1388
1389 skb_reserve(skb, IAVF_SKB_PAD);
1390 __skb_put(skb, size);
1391
1392
1393#if (PAGE_SIZE < 8192)
1394 rx_buffer->page_offset ^= truesize;
1395#else
1396 rx_buffer->page_offset += truesize;
1397#endif
1398
1399 return skb;
1400}
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
1411 struct iavf_rx_buffer *rx_buffer)
1412{
1413 if (!rx_buffer)
1414 return;
1415
1416 if (iavf_can_reuse_rx_page(rx_buffer)) {
1417
1418 iavf_reuse_rx_page(rx_ring, rx_buffer);
1419 rx_ring->rx_stats.page_reuse_count++;
1420 } else {
1421
1422 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1423 iavf_rx_pg_size(rx_ring),
1424 DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
1425 __page_frag_cache_drain(rx_buffer->page,
1426 rx_buffer->pagecnt_bias);
1427 }
1428
1429
1430 rx_buffer->page = NULL;
1431}
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
1445 union iavf_rx_desc *rx_desc,
1446 struct sk_buff *skb)
1447{
1448 u32 ntc = rx_ring->next_to_clean + 1;
1449
1450
1451 ntc = (ntc < rx_ring->count) ? ntc : 0;
1452 rx_ring->next_to_clean = ntc;
1453
1454 prefetch(IAVF_RX_DESC(rx_ring, ntc));
1455
1456
1457#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
1458 if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
1459 return false;
1460
1461 rx_ring->rx_stats.non_eop_descs++;
1462
1463 return true;
1464}
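/**
 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing: pages stay mapped and are reused where possible, avoiding
 * repeated DMA map/unmap overhead (notably with an IOMMU).
 *
 * Returns amount of work completed
 **/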
1478static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
1479{
1480 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1481 struct sk_buff *skb = rx_ring->skb;
1482 u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
1483 bool failure = false;
1484
1485 while (likely(total_rx_packets < (unsigned int)budget)) {
1486 struct iavf_rx_buffer *rx_buffer;
1487 union iavf_rx_desc *rx_desc;
1488 unsigned int size;
1489 u16 vlan_tag;
1490 u8 rx_ptype;
1491 u64 qword;
1492
1493
1494 if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
1495 failure = failure ||
1496 iavf_alloc_rx_buffers(rx_ring, cleaned_count);
1497 cleaned_count = 0;
1498 }
1499
1500 rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1501
1502
1503
1504
1505
1506
1507 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1508
1509
1510
1511
1512
1513 dma_rmb();
1514#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
1515 if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
1516 break;
1517
1518 size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1519 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1520
1521 iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
1522 rx_buffer = iavf_get_rx_buffer(rx_ring, size);
1523
1524
1525 if (skb)
1526 iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1527 else if (ring_uses_build_skb(rx_ring))
1528 skb = iavf_build_skb(rx_ring, rx_buffer, size);
1529 else
1530 skb = iavf_construct_skb(rx_ring, rx_buffer, size);
1531
1532
1533 if (!skb) {
1534 rx_ring->rx_stats.alloc_buff_failed++;
1535 if (rx_buffer)
1536 rx_buffer->pagecnt_bias++;
1537 break;
1538 }
1539
1540 iavf_put_rx_buffer(rx_ring, rx_buffer);
1541 cleaned_count++;
1542
1543 if (iavf_is_non_eop(rx_ring, rx_desc, skb))
1544 continue;
1545
1546
1547
1548
1549
1550
1551 if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
1552 dev_kfree_skb_any(skb);
1553 skb = NULL;
1554 continue;
1555 }
1556
1557 if (iavf_cleanup_headers(rx_ring, skb)) {
1558 skb = NULL;
1559 continue;
1560 }
1561
1562
1563 total_rx_bytes += skb->len;
1564
1565 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1566 rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
1567 IAVF_RXD_QW1_PTYPE_SHIFT;
1568
1569
1570 iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1571
1572
1573 vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1574 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1575
1576 iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
1577 iavf_receive_skb(rx_ring, skb, vlan_tag);
1578 skb = NULL;
1579
1580
1581 total_rx_packets++;
1582 }
1583
1584 rx_ring->skb = skb;
1585
1586 u64_stats_update_begin(&rx_ring->syncp);
1587 rx_ring->stats.packets += total_rx_packets;
1588 rx_ring->stats.bytes += total_rx_bytes;
1589 u64_stats_update_end(&rx_ring->syncp);
1590 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1591 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1592
1593
1594 return failure ? budget : (int)total_rx_packets;
1595}
1596
1597static inline u32 iavf_buildreg_itr(const int type, u16 itr)
1598{
1599 u32 val;
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616 itr &= IAVF_ITR_MASK;
1617
1618 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1619 (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1620 (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
1621
1622 return val;
1623}
1624
1625
1626#define INTREG IAVF_VFINT_DYN_CTLN1
1627
1628
1629
1630
1631
1632
1633
1634
1635#define ITR_COUNTDOWN_START 3
1636
1637
1638
1639
1640
1641
1642
1643static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
1644 struct iavf_q_vector *q_vector)
1645{
1646 struct iavf_hw *hw = &vsi->back->hw;
1647 u32 intval;
1648
1649
1650 iavf_update_itr(q_vector, &q_vector->tx);
1651 iavf_update_itr(q_vector, &q_vector->rx);
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
1662
1663 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1664 q_vector->rx.target_itr);
1665 q_vector->rx.current_itr = q_vector->rx.target_itr;
1666 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1667 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
1668 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
1669 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
1670
1671
1672
1673 intval = iavf_buildreg_itr(IAVF_TX_ITR,
1674 q_vector->tx.target_itr);
1675 q_vector->tx.current_itr = q_vector->tx.target_itr;
1676 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1677 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
1678
1679 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1680 q_vector->rx.target_itr);
1681 q_vector->rx.current_itr = q_vector->rx.target_itr;
1682 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1683 } else {
1684
1685 intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
1686 if (q_vector->itr_countdown)
1687 q_vector->itr_countdown--;
1688 }
1689
1690 if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
1691 wr32(hw, INTREG(q_vector->reg_idx), intval);
1692}
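/**
 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/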
1703int iavf_napi_poll(struct napi_struct *napi, int budget)
1704{
1705 struct iavf_q_vector *q_vector =
1706 container_of(napi, struct iavf_q_vector, napi);
1707 struct iavf_vsi *vsi = q_vector->vsi;
1708 struct iavf_ring *ring;
1709 bool clean_complete = true;
1710 bool arm_wb = false;
1711 int budget_per_ring;
1712 int work_done = 0;
1713
1714 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
1715 napi_complete(napi);
1716 return 0;
1717 }
1718
1719
1720
1721
1722 iavf_for_each_ring(ring, q_vector->tx) {
1723 if (!iavf_clean_tx_irq(vsi, ring, budget)) {
1724 clean_complete = false;
1725 continue;
1726 }
1727 arm_wb |= ring->arm_wb;
1728 ring->arm_wb = false;
1729 }
1730
1731
1732 if (budget <= 0)
1733 goto tx_only;
1734
1735
1736
1737
1738 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1739
1740 iavf_for_each_ring(ring, q_vector->rx) {
1741 int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
1742
1743 work_done += cleaned;
1744
1745 if (cleaned >= budget_per_ring)
1746 clean_complete = false;
1747 }
1748
1749
1750 if (!clean_complete) {
1751 int cpu_id = smp_processor_id();
1752
1753
1754
1755
1756
1757
1758
1759
1760 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
1761
1762 napi_complete_done(napi, work_done);
1763
1764
1765 iavf_force_wb(vsi, q_vector);
1766
1767
1768 return budget - 1;
1769 }
1770tx_only:
1771 if (arm_wb) {
1772 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1773 iavf_enable_wb_on_itr(vsi, q_vector);
1774 }
1775 return budget;
1776 }
1777
	if (q_vector->tx.ring[0].flags & IAVF_TXR_FLAGS_WB_ON_ITR)
1779 q_vector->arm_wb_state = false;
1780
1781
1782
1783
1784 if (likely(napi_complete_done(napi, work_done)))
1785 iavf_update_enable_itr(vsi, q_vector);
1786
1787 return min(work_done, budget - 1);
1788}
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1803 struct iavf_ring *tx_ring,
1804 u32 *flags)
1805{
1806 __be16 protocol = skb->protocol;
1807 u32 tx_flags = 0;
1808
1809 if (protocol == htons(ETH_P_8021Q) &&
1810 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1811
1812
1813
1814
1815
1816
1817
1818 skb->protocol = vlan_get_protocol(skb);
1819 goto out;
1820 }
1821
1822
1823 if (skb_vlan_tag_present(skb)) {
1824 tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1825 tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1826
1827 } else if (protocol == htons(ETH_P_8021Q)) {
1828 struct vlan_hdr *vhdr, _vhdr;
1829
1830 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1831 if (!vhdr)
1832 return -EINVAL;
1833
1834 protocol = vhdr->h_vlan_encapsulated_proto;
1835 tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
1836 tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
1837 }
1838
1839out:
1840 *flags = tx_flags;
1841 return 0;
1842}
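/**
 * iavf_tso - set up the tso context descriptor
 * @first:    pointer to the first descriptor
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/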
1852static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
1853 u64 *cd_type_cmd_tso_mss)
1854{
1855 struct sk_buff *skb = first->skb;
1856 u64 cd_cmd, cd_tso_len, cd_mss;
1857 union {
1858 struct iphdr *v4;
1859 struct ipv6hdr *v6;
1860 unsigned char *hdr;
1861 } ip;
1862 union {
1863 struct tcphdr *tcp;
1864 struct udphdr *udp;
1865 unsigned char *hdr;
1866 } l4;
1867 u32 paylen, l4_offset;
1868 u16 gso_segs, gso_size;
1869 int err;
1870
1871 if (skb->ip_summed != CHECKSUM_PARTIAL)
1872 return 0;
1873
1874 if (!skb_is_gso(skb))
1875 return 0;
1876
1877 err = skb_cow_head(skb, 0);
1878 if (err < 0)
1879 return err;
1880
1881 ip.hdr = skb_network_header(skb);
1882 l4.hdr = skb_transport_header(skb);
1883
1884
1885 if (ip.v4->version == 4) {
1886 ip.v4->tot_len = 0;
1887 ip.v4->check = 0;
1888 } else {
1889 ip.v6->payload_len = 0;
1890 }
1891
1892 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1893 SKB_GSO_GRE_CSUM |
1894 SKB_GSO_IPXIP4 |
1895 SKB_GSO_IPXIP6 |
1896 SKB_GSO_UDP_TUNNEL |
1897 SKB_GSO_UDP_TUNNEL_CSUM)) {
1898 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1899 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1900 l4.udp->len = 0;
1901
1902
1903 l4_offset = l4.hdr - skb->data;
1904
1905
1906 paylen = skb->len - l4_offset;
1907 csum_replace_by_diff(&l4.udp->check,
1908 (__force __wsum)htonl(paylen));
1909 }
1910
1911
1912 ip.hdr = skb_inner_network_header(skb);
1913 l4.hdr = skb_inner_transport_header(skb);
1914
1915
1916 if (ip.v4->version == 4) {
1917 ip.v4->tot_len = 0;
1918 ip.v4->check = 0;
1919 } else {
1920 ip.v6->payload_len = 0;
1921 }
1922 }
1923
1924
1925 l4_offset = l4.hdr - skb->data;
1926
1927
1928 paylen = skb->len - l4_offset;
1929 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1930
1931
1932 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
1933
1934
1935 gso_size = skb_shinfo(skb)->gso_size;
1936 gso_segs = skb_shinfo(skb)->gso_segs;
1937
1938
1939 first->gso_segs = gso_segs;
1940 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1941
1942
1943 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1944 cd_tso_len = skb->len - *hdr_len;
1945 cd_mss = gso_size;
1946 *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1947 (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1948 (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1949 return 1;
1950}
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1962 u32 *td_cmd, u32 *td_offset,
1963 struct iavf_ring *tx_ring,
1964 u32 *cd_tunneling)
1965{
1966 union {
1967 struct iphdr *v4;
1968 struct ipv6hdr *v6;
1969 unsigned char *hdr;
1970 } ip;
1971 union {
1972 struct tcphdr *tcp;
1973 struct udphdr *udp;
1974 unsigned char *hdr;
1975 } l4;
1976 unsigned char *exthdr;
1977 u32 offset, cmd = 0;
1978 __be16 frag_off;
1979 u8 l4_proto = 0;
1980
1981 if (skb->ip_summed != CHECKSUM_PARTIAL)
1982 return 0;
1983
1984 ip.hdr = skb_network_header(skb);
1985 l4.hdr = skb_transport_header(skb);
1986
1987
1988 offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1989
1990 if (skb->encapsulation) {
1991 u32 tunnel = 0;
1992
1993 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
1994 tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
1995 IAVF_TX_CTX_EXT_IP_IPV4 :
1996 IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1997
1998 l4_proto = ip.v4->protocol;
1999 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2000 tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
2001
2002 exthdr = ip.hdr + sizeof(*ip.v6);
2003 l4_proto = ip.v6->nexthdr;
2004 if (l4.hdr != exthdr)
2005 ipv6_skip_exthdr(skb, exthdr - skb->data,
2006 &l4_proto, &frag_off);
2007 }
2008
2009
2010 switch (l4_proto) {
2011 case IPPROTO_UDP:
2012 tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
2013 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2014 break;
2015 case IPPROTO_GRE:
2016 tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
2017 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2018 break;
2019 case IPPROTO_IPIP:
2020 case IPPROTO_IPV6:
2021 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2022 l4.hdr = skb_inner_network_header(skb);
2023 break;
2024 default:
2025 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2026 return -1;
2027
2028 skb_checksum_help(skb);
2029 return 0;
2030 }
2031
2032
2033 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2034 IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2035
2036
2037 ip.hdr = skb_inner_network_header(skb);
2038
2039
2040 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2041 IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
2042
2043
2044 if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
2045 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2046 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2047 tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
2048
2049
2050 *cd_tunneling |= tunnel;
2051
2052
2053 l4.hdr = skb_inner_transport_header(skb);
2054 l4_proto = 0;
2055
2056
2057 *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
2058 if (ip.v4->version == 4)
2059 *tx_flags |= IAVF_TX_FLAGS_IPV4;
2060 if (ip.v6->version == 6)
2061 *tx_flags |= IAVF_TX_FLAGS_IPV6;
2062 }
2063
2064
2065 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
2066 l4_proto = ip.v4->protocol;
2067
2068
2069
2070 cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2071 IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
2072 IAVF_TX_DESC_CMD_IIPT_IPV4;
2073 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2074 cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2075
2076 exthdr = ip.hdr + sizeof(*ip.v6);
2077 l4_proto = ip.v6->nexthdr;
2078 if (l4.hdr != exthdr)
2079 ipv6_skip_exthdr(skb, exthdr - skb->data,
2080 &l4_proto, &frag_off);
2081 }
2082
2083
2084 offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2085
2086
2087 switch (l4_proto) {
2088 case IPPROTO_TCP:
2089
2090 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2091 offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2092 break;
2093 case IPPROTO_SCTP:
2094
2095 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2096 offset |= (sizeof(struct sctphdr) >> 2) <<
2097 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2098 break;
2099 case IPPROTO_UDP:
2100
2101 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2102 offset |= (sizeof(struct udphdr) >> 2) <<
2103 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2104 break;
2105 default:
2106 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2107 return -1;
2108 skb_checksum_help(skb);
2109 return 0;
2110 }
2111
2112 *td_cmd |= cmd;
2113 *td_offset |= offset;
2114
2115 return 1;
2116}
2117
2118
2119
2120
2121
2122
2123
2124
2125static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
2126 const u64 cd_type_cmd_tso_mss,
2127 const u32 cd_tunneling, const u32 cd_l2tag2)
2128{
2129 struct iavf_tx_context_desc *context_desc;
2130 int i = tx_ring->next_to_use;
2131
2132 if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
2133 !cd_tunneling && !cd_l2tag2)
2134 return;
2135
2136
2137 context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
2138
2139 i++;
2140 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2141
2142
2143 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2144 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2145 context_desc->rsvd = cpu_to_le16(0);
2146 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2147}
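/**
 * __iavf_chk_linearize - Check if there are more than IAVF_MAX_BUFFER_TXD
 * buffers per packet
 * @skb: send buffer
 *
 * The hardware can only chain a limited number of data buffers per packet on
 * the wire, so work out the cases where the skb must be linearized first.
 **/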
2162bool __iavf_chk_linearize(struct sk_buff *skb)
2163{
2164 const skb_frag_t *frag, *stale;
2165 int nr_frags, sum;
2166
2167
2168 nr_frags = skb_shinfo(skb)->nr_frags;
2169 if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
2170 return false;
2171
2172
2173
2174
2175 nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
2176 frag = &skb_shinfo(skb)->frags[0];
2177
2178
2179
2180
2181
2182
2183
2184 sum = 1 - skb_shinfo(skb)->gso_size;
2185
2186
2187 sum += skb_frag_size(frag++);
2188 sum += skb_frag_size(frag++);
2189 sum += skb_frag_size(frag++);
2190 sum += skb_frag_size(frag++);
2191 sum += skb_frag_size(frag++);
2192
2193
2194
2195
2196 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2197 int stale_size = skb_frag_size(stale);
2198
2199 sum += skb_frag_size(frag++);
2200
2201
2202
2203
2204
2205
2206
2207 if (stale_size > IAVF_MAX_DATA_PER_TXD) {
2208 int align_pad = -(skb_frag_off(stale)) &
2209 (IAVF_MAX_READ_REQ_SIZE - 1);
2210
2211 sum -= align_pad;
2212 stale_size -= align_pad;
2213
2214 do {
2215 sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2216 stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2217 } while (stale_size > IAVF_MAX_DATA_PER_TXD);
2218 }
2219
2220
2221 if (sum < 0)
2222 return true;
2223
2224 if (!nr_frags--)
2225 break;
2226
2227 sum -= stale_size;
2228 }
2229
2230 return false;
2231}
2232
2233
2234
2235
2236
2237
2238
2239
2240int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
2241{
2242 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2243
2244 smp_mb();
2245
2246
2247 if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
2248 return -EBUSY;
2249
2250
2251 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2252 ++tx_ring->tx_stats.restart_queue;
2253 return 0;
2254}
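/**
 * iavf_tx_map - Build the Tx descriptor
 * @tx_ring:   ring to send buffer on
 * @skb:       send buffer
 * @first:     first buffer info buffer to use
 * @tx_flags:  collected send information
 * @hdr_len:   size of the packet header
 * @td_cmd:    the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/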
2266static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
2267 struct iavf_tx_buffer *first, u32 tx_flags,
2268 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2269{
2270 unsigned int data_len = skb->data_len;
2271 unsigned int size = skb_headlen(skb);
2272 skb_frag_t *frag;
2273 struct iavf_tx_buffer *tx_bi;
2274 struct iavf_tx_desc *tx_desc;
2275 u16 i = tx_ring->next_to_use;
2276 u32 td_tag = 0;
2277 dma_addr_t dma;
2278
2279 if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
2280 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2281 td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
2282 IAVF_TX_FLAGS_VLAN_SHIFT;
2283 }
2284
2285 first->tx_flags = tx_flags;
2286
2287 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2288
2289 tx_desc = IAVF_TX_DESC(tx_ring, i);
2290 tx_bi = first;
2291
2292 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2293 unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2294
2295 if (dma_mapping_error(tx_ring->dev, dma))
2296 goto dma_error;
2297
2298
2299 dma_unmap_len_set(tx_bi, len, size);
2300 dma_unmap_addr_set(tx_bi, dma, dma);
2301
2302
2303 max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
2304 tx_desc->buffer_addr = cpu_to_le64(dma);
2305
2306 while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
2307 tx_desc->cmd_type_offset_bsz =
2308 build_ctob(td_cmd, td_offset,
2309 max_data, td_tag);
2310
2311 tx_desc++;
2312 i++;
2313
2314 if (i == tx_ring->count) {
2315 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2316 i = 0;
2317 }
2318
2319 dma += max_data;
2320 size -= max_data;
2321
2322 max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2323 tx_desc->buffer_addr = cpu_to_le64(dma);
2324 }
2325
2326 if (likely(!data_len))
2327 break;
2328
2329 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2330 size, td_tag);
2331
2332 tx_desc++;
2333 i++;
2334
2335 if (i == tx_ring->count) {
2336 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2337 i = 0;
2338 }
2339
2340 size = skb_frag_size(frag);
2341 data_len -= size;
2342
2343 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2344 DMA_TO_DEVICE);
2345
2346 tx_bi = &tx_ring->tx_bi[i];
2347 }
2348
2349 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2350
2351 i++;
2352 if (i == tx_ring->count)
2353 i = 0;
2354
2355 tx_ring->next_to_use = i;
2356
2357 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2358
2359
2360 td_cmd |= IAVF_TXD_CMD;
2361 tx_desc->cmd_type_offset_bsz =
2362 build_ctob(td_cmd, td_offset, size, td_tag);
2363
2364 skb_tx_timestamp(skb);
2365
2366
2367
2368
2369
2370
2371
2372 wmb();
2373
2374
2375 first->next_to_watch = tx_desc;
2376
2377
2378 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
2379 writel(i, tx_ring->tail);
2380 }
2381
2382 return;
2383
2384dma_error:
2385 dev_info(tx_ring->dev, "TX DMA map failed\n");
2386
2387
2388 for (;;) {
2389 tx_bi = &tx_ring->tx_bi[i];
2390 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
2391 if (tx_bi == first)
2392 break;
2393 if (i == 0)
2394 i = tx_ring->count;
2395 i--;
2396 }
2397
2398 tx_ring->next_to_use = i;
2399}
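/**
 * iavf_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/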
2408static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
2409 struct iavf_ring *tx_ring)
2410{
2411 u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
2412 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2413 struct iavf_tx_buffer *first;
2414 u32 td_offset = 0;
2415 u32 tx_flags = 0;
2416 __be16 protocol;
2417 u32 td_cmd = 0;
2418 u8 hdr_len = 0;
2419 int tso, count;
2420
2421
2422 prefetch(skb->data);
2423
2424 iavf_trace(xmit_frame_ring, skb, tx_ring);
2425
2426 count = iavf_xmit_descriptor_count(skb);
2427 if (iavf_chk_linearize(skb, count)) {
2428 if (__skb_linearize(skb)) {
2429 dev_kfree_skb_any(skb);
2430 return NETDEV_TX_OK;
2431 }
2432 count = iavf_txd_use_count(skb->len);
2433 tx_ring->tx_stats.tx_linearize++;
2434 }
2435
2436
2437
2438
2439
2440
2441
2442 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2443 tx_ring->tx_stats.tx_busy++;
2444 return NETDEV_TX_BUSY;
2445 }
2446
2447
2448 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2449 first->skb = skb;
2450 first->bytecount = skb->len;
2451 first->gso_segs = 1;
2452
2453
2454 if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2455 goto out_drop;
2456
2457
2458 protocol = vlan_get_protocol(skb);
2459
2460
2461 if (protocol == htons(ETH_P_IP))
2462 tx_flags |= IAVF_TX_FLAGS_IPV4;
2463 else if (protocol == htons(ETH_P_IPV6))
2464 tx_flags |= IAVF_TX_FLAGS_IPV6;
2465
2466 tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
2467
2468 if (tso < 0)
2469 goto out_drop;
2470 else if (tso)
2471 tx_flags |= IAVF_TX_FLAGS_TSO;
2472
2473
2474 tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2475 tx_ring, &cd_tunneling);
2476 if (tso < 0)
2477 goto out_drop;
2478
2479
2480 td_cmd |= IAVF_TX_DESC_CMD_ICRC;
2481
2482 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2483 cd_tunneling, cd_l2tag2);
2484
2485 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2486 td_cmd, td_offset);
2487
2488 return NETDEV_TX_OK;
2489
2490out_drop:
2491 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
2492 dev_kfree_skb_any(first->skb);
2493 first->skb = NULL;
2494 return NETDEV_TX_OK;
2495}
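/**
 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/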
2504netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2505{
2506 struct iavf_adapter *adapter = netdev_priv(netdev);
2507 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
2508
2509
2510
2511
2512 if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
2513 if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
2514 return NETDEV_TX_OK;
2515 skb->len = IAVF_MIN_TX_LEN;
2516 skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
2517 }
2518
2519 return iavf_xmit_frame_ring(skb, tx_ring);
2520}
2521