#include <linux/prefetch.h>

#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"
9
10static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
11 u32 td_tag)
12{
13 return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
14 ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
15 ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
16 ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
17 ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
18}
19
20#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
21
22
23
24
25
26
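/**
 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/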
27static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
28 struct iavf_tx_buffer *tx_buffer)
29{
30 if (tx_buffer->skb) {
31 if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
32 kfree(tx_buffer->raw_buf);
33 else
34 dev_kfree_skb_any(tx_buffer->skb);
35 if (dma_unmap_len(tx_buffer, len))
36 dma_unmap_single(ring->dev,
37 dma_unmap_addr(tx_buffer, dma),
38 dma_unmap_len(tx_buffer, len),
39 DMA_TO_DEVICE);
40 } else if (dma_unmap_len(tx_buffer, len)) {
41 dma_unmap_page(ring->dev,
42 dma_unmap_addr(tx_buffer, dma),
43 dma_unmap_len(tx_buffer, len),
44 DMA_TO_DEVICE);
45 }
46
47 tx_buffer->next_to_watch = NULL;
48 tx_buffer->skb = NULL;
49 dma_unmap_len_set(tx_buffer, len, 0);
50
51}
52
53
54
55
56
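/**
 * iavf_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/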
57void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
58{
59 unsigned long bi_size;
60 u16 i;
61
62
63 if (!tx_ring->tx_bi)
64 return;
65
66
67 for (i = 0; i < tx_ring->count; i++)
68 iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
69
70 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
71 memset(tx_ring->tx_bi, 0, bi_size);
72
73
74 memset(tx_ring->desc, 0, tx_ring->size);
75
76 tx_ring->next_to_use = 0;
77 tx_ring->next_to_clean = 0;
78
79 if (!tx_ring->netdev)
80 return;
81
82
83 netdev_tx_reset_queue(txring_txq(tx_ring));
84}
85
86
87
88
89
90
91
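/**
 * iavf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/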
92void iavf_free_tx_resources(struct iavf_ring *tx_ring)
93{
94 iavf_clean_tx_ring(tx_ring);
95 kfree(tx_ring->tx_bi);
96 tx_ring->tx_bi = NULL;
97
98 if (tx_ring->desc) {
99 dma_free_coherent(tx_ring->dev, tx_ring->size,
100 tx_ring->desc, tx_ring->dma);
101 tx_ring->desc = NULL;
102 }
103}
104
105
106
107
108
109
110
111
112
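/**
 * iavf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: unused here; the count is always derived from the cached
 *	   next_to_clean value and the tail register
 *
 * Returns the number of descriptors the hardware has not yet processed.
 **/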
113u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
114{
115 u32 head, tail;
116
117 head = ring->next_to_clean;
118 tail = readl(ring->tail);
119
120 if (head != tail)
121 return (head < tail) ?
122 tail - head : (tail + ring->count - head);
123
124 return 0;
125}
126
127
128
129
130
131
132
133
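/**
 * iavf_detect_recover_hung - Function to detect and recover hung queues
 * @vsi: pointer to vsi struct with Tx queues
 *
 * Checks each active Tx queue; if no packets have completed since the
 * last check while descriptors are still pending, a software interrupt
 * is triggered to force a descriptor writeback.
 **/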
134void iavf_detect_recover_hung(struct iavf_vsi *vsi)
135{
136 struct iavf_ring *tx_ring = NULL;
137 struct net_device *netdev;
138 unsigned int i;
139 int packets;
140
141 if (!vsi)
142 return;
143
144 if (test_bit(__IAVF_VSI_DOWN, vsi->state))
145 return;
146
147 netdev = vsi->netdev;
148 if (!netdev)
149 return;
150
151 if (!netif_carrier_ok(netdev))
152 return;
153
154 for (i = 0; i < vsi->back->num_active_queues; i++) {
155 tx_ring = &vsi->back->tx_rings[i];
156 if (tx_ring && tx_ring->desc) {
157
158
159
160
161
162
163
164 packets = tx_ring->stats.packets & INT_MAX;
165 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
166 iavf_force_wb(vsi, tx_ring->q_vector);
167 continue;
168 }
169
170
171
172
173 smp_rmb();
174 tx_ring->tx_stats.prev_pkt_ctr =
175 iavf_get_tx_pending(tx_ring, true) ? packets : -1;
176 }
177 }
178}
179
180#define WB_STRIDE 4
181
182
183
184
185
186
187
188
189
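/**
 * iavf_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/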
190static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
191 struct iavf_ring *tx_ring, int napi_budget)
192{
193 u16 i = tx_ring->next_to_clean;
194 struct iavf_tx_buffer *tx_buf;
195 struct iavf_tx_desc *tx_desc;
196 unsigned int total_bytes = 0, total_packets = 0;
197 unsigned int budget = vsi->work_limit;
198
199 tx_buf = &tx_ring->tx_bi[i];
200 tx_desc = IAVF_TX_DESC(tx_ring, i);
201 i -= tx_ring->count;
202
203 do {
204 struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
205
206
207 if (!eop_desc)
208 break;
209
210
211 smp_rmb();
212
213 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
214
215 if (!(eop_desc->cmd_type_offset_bsz &
216 cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
217 break;
218
219
220 tx_buf->next_to_watch = NULL;
221
222
223 total_bytes += tx_buf->bytecount;
224 total_packets += tx_buf->gso_segs;
225
226
227 napi_consume_skb(tx_buf->skb, napi_budget);
228
229
230 dma_unmap_single(tx_ring->dev,
231 dma_unmap_addr(tx_buf, dma),
232 dma_unmap_len(tx_buf, len),
233 DMA_TO_DEVICE);
234
235
236 tx_buf->skb = NULL;
237 dma_unmap_len_set(tx_buf, len, 0);
238
239
240 while (tx_desc != eop_desc) {
241 iavf_trace(clean_tx_irq_unmap,
242 tx_ring, tx_desc, tx_buf);
243
244 tx_buf++;
245 tx_desc++;
246 i++;
247 if (unlikely(!i)) {
248 i -= tx_ring->count;
249 tx_buf = tx_ring->tx_bi;
250 tx_desc = IAVF_TX_DESC(tx_ring, 0);
251 }
252
253
254 if (dma_unmap_len(tx_buf, len)) {
255 dma_unmap_page(tx_ring->dev,
256 dma_unmap_addr(tx_buf, dma),
257 dma_unmap_len(tx_buf, len),
258 DMA_TO_DEVICE);
259 dma_unmap_len_set(tx_buf, len, 0);
260 }
261 }
262
263
264 tx_buf++;
265 tx_desc++;
266 i++;
267 if (unlikely(!i)) {
268 i -= tx_ring->count;
269 tx_buf = tx_ring->tx_bi;
270 tx_desc = IAVF_TX_DESC(tx_ring, 0);
271 }
272
273 prefetch(tx_desc);
274
275
276 budget--;
277 } while (likely(budget));
278
279 i += tx_ring->count;
280 tx_ring->next_to_clean = i;
281 u64_stats_update_begin(&tx_ring->syncp);
282 tx_ring->stats.bytes += total_bytes;
283 tx_ring->stats.packets += total_packets;
284 u64_stats_update_end(&tx_ring->syncp);
285 tx_ring->q_vector->tx.total_bytes += total_bytes;
286 tx_ring->q_vector->tx.total_packets += total_packets;
287
288 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
289
290
291
292
293
294 unsigned int j = iavf_get_tx_pending(tx_ring, false);
295
296 if (budget &&
297 ((j / WB_STRIDE) == 0) && (j > 0) &&
298 !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
299 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
300 tx_ring->arm_wb = true;
301 }
302
303
304 netdev_tx_completed_queue(txring_txq(tx_ring),
305 total_packets, total_bytes);
306
307#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
308 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
309 (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
310
311
312
313 smp_mb();
314 if (__netif_subqueue_stopped(tx_ring->netdev,
315 tx_ring->queue_index) &&
316 !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
317 netif_wake_subqueue(tx_ring->netdev,
318 tx_ring->queue_index);
319 ++tx_ring->tx_stats.restart_queue;
320 }
321 }
322
323 return !!budget;
324}
325
326
327
328
329
330
331
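/**
 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 **/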
332static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
333 struct iavf_q_vector *q_vector)
334{
335 u16 flags = q_vector->tx.ring[0].flags;
336 u32 val;
337
338 if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
339 return;
340
341 if (q_vector->arm_wb_state)
342 return;
343
344 val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
345 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK;
346
347 wr32(&vsi->back->hw,
348 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
349 q_vector->arm_wb_state = true;
350}
351
352
353
354
355
356
357
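/**
 * iavf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/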
358void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
359{
	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
365
366 wr32(&vsi->back->hw,
367 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
368 val);
369}
370
371static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
372 struct iavf_ring_container *rc)
373{
374 return &q_vector->rx == rc;
375}
376
377static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
378{
379 unsigned int divisor;
380
381 switch (q_vector->adapter->link_speed) {
382 case I40E_LINK_SPEED_40GB:
383 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
384 break;
385 case I40E_LINK_SPEED_25GB:
386 case I40E_LINK_SPEED_20GB:
387 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
388 break;
389 default:
390 case I40E_LINK_SPEED_10GB:
391 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
392 break;
393 case I40E_LINK_SPEED_1GB:
394 case I40E_LINK_SPEED_100MB:
395 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
396 break;
397 }
398
399 return divisor;
400}
401
402
403
404
405
406
407
408
409
410
411
412
413
414
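/**
 * iavf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value in rc->target_itr based on the packet and byte
 * counts seen since the last update, scaling toward lower latency for
 * small or slow flows and toward bulk throughput for larger ones.
 **/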
415static void iavf_update_itr(struct iavf_q_vector *q_vector,
416 struct iavf_ring_container *rc)
417{
418 unsigned int avg_wire_size, packets, bytes, itr;
419 unsigned long next_update = jiffies;
420
421
422
423
424 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
425 return;
426
427
428
429
430 itr = iavf_container_is_rx(q_vector, rc) ?
431 IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
432 IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
433
434
435
436
437
438
439 if (time_after(next_update, rc->next_update))
440 goto clear_counts;
441
442
443
444
445
446
447
448 if (q_vector->itr_countdown) {
449 itr = rc->target_itr;
450 goto clear_counts;
451 }
452
453 packets = rc->total_packets;
454 bytes = rc->total_bytes;
455
456 if (iavf_container_is_rx(q_vector, rc)) {
457
458
459
460
461
462 if (packets && packets < 4 && bytes < 9000 &&
463 (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
464 itr = IAVF_ITR_ADAPTIVE_LATENCY;
465 goto adjust_by_size;
466 }
467 } else if (packets < 4) {
468
469
470
471
472
473 if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
474 (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
475 IAVF_ITR_ADAPTIVE_MAX_USECS)
476 goto clear_counts;
477 } else if (packets > 32) {
478
479
480
481 rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
482 }
483
484
485
486
487
488
489
490
491
492 if (packets < 56) {
493 itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
494 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
495 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
496 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
497 }
498 goto clear_counts;
499 }
500
501 if (packets <= 256) {
502 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
503 itr &= IAVF_ITR_MASK;
504
505
506
507
508
509 if (packets <= 112)
510 goto clear_counts;
511
512
513
514
515
516
517 itr /= 2;
518 itr &= IAVF_ITR_MASK;
519 if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
520 itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
521
522 goto clear_counts;
523 }
524
525
526
527
528
529
530
531 itr = IAVF_ITR_ADAPTIVE_BULK;
532
533adjust_by_size:
534
535
536
537
538
539 avg_wire_size = bytes / packets;
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556 if (avg_wire_size <= 60) {
557
558 avg_wire_size = 4096;
559 } else if (avg_wire_size <= 380) {
560
561 avg_wire_size *= 40;
562 avg_wire_size += 1696;
563 } else if (avg_wire_size <= 1084) {
564
565 avg_wire_size *= 15;
566 avg_wire_size += 11452;
567 } else if (avg_wire_size <= 1980) {
568
569 avg_wire_size *= 5;
570 avg_wire_size += 22420;
571 } else {
572
573 avg_wire_size = 32256;
574 }
575
576
577
578
579 if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
580 avg_wire_size /= 2;
581
582
583
584
585
586
587
588
589 itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
590 IAVF_ITR_ADAPTIVE_MIN_INC;
591
592 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
593 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
594 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
595 }
596
597clear_counts:
598
599 rc->target_itr = itr;
600
601
602 rc->next_update = next_update + 1;
603
604 rc->total_bytes = 0;
605 rc->total_packets = 0;
606}
607
608
609
610
611
612
613
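/**
 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 **/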
614int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
615{
616 struct device *dev = tx_ring->dev;
617 int bi_size;
618
619 if (!dev)
620 return -ENOMEM;
621
622
623 WARN_ON(tx_ring->tx_bi);
624 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
625 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
626 if (!tx_ring->tx_bi)
627 goto err;
628
629
630 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
631 tx_ring->size = ALIGN(tx_ring->size, 4096);
632 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
633 &tx_ring->dma, GFP_KERNEL);
634 if (!tx_ring->desc) {
635 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
636 tx_ring->size);
637 goto err;
638 }
639
640 tx_ring->next_to_use = 0;
641 tx_ring->next_to_clean = 0;
642 tx_ring->tx_stats.prev_pkt_ctr = -1;
643 return 0;
644
645err:
646 kfree(tx_ring->tx_bi);
647 tx_ring->tx_bi = NULL;
648 return -ENOMEM;
649}
650
651
652
653
654
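/**
 * iavf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/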
655void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
656{
657 unsigned long bi_size;
658 u16 i;
659 DEFINE_DMA_ATTRS(attrs);
660
661
662 if (!rx_ring->rx_bi)
663 return;
664
665 if (rx_ring->skb) {
666 dev_kfree_skb(rx_ring->skb);
667 rx_ring->skb = NULL;
668 }
669
670
671 for (i = 0; i < rx_ring->count; i++) {
672 struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
673
674 if (!rx_bi->page)
675 continue;
676
677
678
679
680 dma_sync_single_range_for_cpu(rx_ring->dev,
681 rx_bi->dma,
682 rx_bi->page_offset,
683 rx_ring->rx_buf_len,
684 DMA_FROM_DEVICE);
685
686
687 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
688 dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
689 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
690 iavf_rx_pg_size(rx_ring),
691 DMA_FROM_DEVICE,
692 &attrs);
693
694 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
695
696 rx_bi->page = NULL;
697 rx_bi->page_offset = 0;
698 }
699
700 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
701 memset(rx_ring->rx_bi, 0, bi_size);
702
703
704 memset(rx_ring->desc, 0, rx_ring->size);
705
706 rx_ring->next_to_alloc = 0;
707 rx_ring->next_to_clean = 0;
708 rx_ring->next_to_use = 0;
709}
710
711
712
713
714
715
716
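/**
 * iavf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/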
717void iavf_free_rx_resources(struct iavf_ring *rx_ring)
718{
719 iavf_clean_rx_ring(rx_ring);
720 kfree(rx_ring->rx_bi);
721 rx_ring->rx_bi = NULL;
722
723 if (rx_ring->desc) {
724 dma_free_coherent(rx_ring->dev, rx_ring->size,
725 rx_ring->desc, rx_ring->dma);
726 rx_ring->desc = NULL;
727 }
728}
729
730
731
732
733
734
735
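/**
 * iavf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/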
736int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
737{
738 struct device *dev = rx_ring->dev;
739 int bi_size;
740
741
742 WARN_ON(rx_ring->rx_bi);
743 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
744 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
745 if (!rx_ring->rx_bi)
746 goto err;
747
748 u64_stats_init(&rx_ring->syncp);
749
750
751 rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
752 rx_ring->size = ALIGN(rx_ring->size, 4096);
753 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
754 &rx_ring->dma, GFP_KERNEL);
755
756 if (!rx_ring->desc) {
757 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
758 rx_ring->size);
759 goto err;
760 }
761
762 rx_ring->next_to_alloc = 0;
763 rx_ring->next_to_clean = 0;
764 rx_ring->next_to_use = 0;
765
766 return 0;
767err:
768 kfree(rx_ring->rx_bi);
769 rx_ring->rx_bi = NULL;
770 return -ENOMEM;
771}
772
773
774
775
776
777
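/**
 * iavf_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/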
778static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
779{
780 rx_ring->next_to_use = val;
781
782
783 rx_ring->next_to_alloc = val;
784
785
786
787
788
789
790 wmb();
791 writel(val, rx_ring->tail);
792}
793
794
795
796
797
798
799
800static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
801{
802 return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
803}
804
805
806
807
808
809
810
811
812
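/**
 * iavf_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true unless allocation or DMA mapping of a new page fails.
 **/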
813static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
814 struct iavf_rx_buffer *bi)
815{
816 struct page *page = bi->page;
817 dma_addr_t dma;
818 DEFINE_DMA_ATTRS(attrs);
819
820
821 if (likely(page)) {
822 rx_ring->rx_stats.page_reuse_count++;
823 return true;
824 }
825
826
827 page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
828 if (unlikely(!page)) {
829 rx_ring->rx_stats.alloc_page_failed++;
830 return false;
831 }
832
833
834 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
835 dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
836 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
837 iavf_rx_pg_size(rx_ring),
838 DMA_FROM_DEVICE,
839 &attrs);
840
841
842
843
844 if (dma_mapping_error(rx_ring->dev, dma)) {
845 __free_pages(page, iavf_rx_pg_order(rx_ring));
846 rx_ring->rx_stats.alloc_page_failed++;
847 return false;
848 }
849
850 bi->dma = dma;
851 bi->page = page;
852 bi->page_offset = iavf_rx_offset(rx_ring);
853
854
855 bi->pagecnt_bias = 1;
856
857 return true;
858}
859
860
861
862
863
864
865
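/**
 * iavf_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/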
866static void iavf_receive_skb(struct iavf_ring *rx_ring,
867 struct sk_buff *skb, u16 vlan_tag)
868{
869 struct iavf_q_vector *q_vector = rx_ring->q_vector;
870
871 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
872 (vlan_tag & VLAN_VID_MASK))
873 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
874
875 napi_gro_receive(&q_vector->napi, skb);
876}
877
878
879
880
881
882
883
884
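/**
 * iavf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any failed
 **/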
885bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
886{
887 u16 ntu = rx_ring->next_to_use;
888 union iavf_rx_desc *rx_desc;
889 struct iavf_rx_buffer *bi;
890
891
892 if (!rx_ring->netdev || !cleaned_count)
893 return false;
894
895 rx_desc = IAVF_RX_DESC(rx_ring, ntu);
896 bi = &rx_ring->rx_bi[ntu];
897
898 do {
899 if (!iavf_alloc_mapped_page(rx_ring, bi))
900 goto no_buffers;
901
902
903 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
904 bi->page_offset,
905 rx_ring->rx_buf_len,
906 DMA_FROM_DEVICE);
907
908
909
910
911 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
912
913 rx_desc++;
914 bi++;
915 ntu++;
916 if (unlikely(ntu == rx_ring->count)) {
917 rx_desc = IAVF_RX_DESC(rx_ring, 0);
918 bi = rx_ring->rx_bi;
919 ntu = 0;
920 }
921
922
923 rx_desc->wb.qword1.status_error_len = 0;
924
925 cleaned_count--;
926 } while (cleaned_count);
927
928 if (rx_ring->next_to_use != ntu)
929 iavf_release_rx_desc(rx_ring, ntu);
930
931 return false;
932
933no_buffers:
934 if (rx_ring->next_to_use != ntu)
935 iavf_release_rx_desc(rx_ring, ntu);
936
937
938
939
940 return true;
941}
942
943
944
945
946
947
948
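/**
 * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/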
949static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
950 struct sk_buff *skb,
951 union iavf_rx_desc *rx_desc)
952{
953 struct iavf_rx_ptype_decoded decoded;
954 u32 rx_error, rx_status;
955 bool ipv4, ipv6;
956 u8 ptype;
957 u64 qword;
958
959 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
960 ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
961 rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
962 IAVF_RXD_QW1_ERROR_SHIFT;
963 rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
964 IAVF_RXD_QW1_STATUS_SHIFT;
965 decoded = decode_rx_desc_ptype(ptype);
966
967 skb->ip_summed = CHECKSUM_NONE;
968
969 skb_checksum_none_assert(skb);
970
971
972 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
973 return;
974
975
976 if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
977 return;
978
979
980 if (!(decoded.known && decoded.outer_ip))
981 return;
982
983 ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
984 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
985 ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
986 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
987
988 if (ipv4 &&
989 (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
990 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
991 goto checksum_fail;
992
993
994 if (ipv6 &&
995 rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
996
997 return;
998
999
1000 if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
1001 goto checksum_fail;
1002
1003
1004
1005
1006
1007 if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
1008 return;
1009
1010
1011 switch (decoded.inner_prot) {
1012 case IAVF_RX_PTYPE_INNER_PROT_TCP:
1013 case IAVF_RX_PTYPE_INNER_PROT_UDP:
1014 case IAVF_RX_PTYPE_INNER_PROT_SCTP:
1015 skb->ip_summed = CHECKSUM_UNNECESSARY;
1016
1017 default:
1018 break;
1019 }
1020
1021 return;
1022
1023checksum_fail:
1024 vsi->back->hw_csum_rx_error++;
1025}
1026
1027
1028
1029
1030
1031
1032
1033static inline int iavf_ptype_to_htype(u8 ptype)
1034{
1035 struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1036
1037 if (!decoded.known)
1038 return PKT_HASH_TYPE_NONE;
1039
1040 if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1041 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1042 return PKT_HASH_TYPE_L4;
1043 else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1044 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1045 return PKT_HASH_TYPE_L3;
1046 else
1047 return PKT_HASH_TYPE_L2;
1048}
1049
1050
1051
1052
1053
1054
1055
1056
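/**
 * iavf_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/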
1057static inline void iavf_rx_hash(struct iavf_ring *ring,
1058 union iavf_rx_desc *rx_desc,
1059 struct sk_buff *skb,
1060 u8 rx_ptype)
1061{
1062 u32 hash;
1063 const __le64 rss_mask =
1064 cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
1065 IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
1066
	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;
1069
1070 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1071 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1072 skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
1073 }
1074}
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
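/**
 * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, protocol, and other fields
 * within the skb.
 **/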
1087static inline
1088void iavf_process_skb_fields(struct iavf_ring *rx_ring,
1089 union iavf_rx_desc *rx_desc, struct sk_buff *skb,
1090 u8 rx_ptype)
1091{
1092 iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1093
1094 iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
1095
1096 skb_record_rx_queue(skb, rx_ring->queue_index);
1097
1098
1099 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1100}
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
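/**
 * iavf_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 *
 * Returns true if an error was encountered and skb was freed.
 **/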
1115static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
1116{
1117
1118 if (eth_skb_pad(skb))
1119 return true;
1120
1121 return false;
1122}
1123
1124
1125
1126
1127
1128
1129
1130
1131static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
1132 struct iavf_rx_buffer *old_buff)
1133{
1134 struct iavf_rx_buffer *new_buff;
1135 u16 nta = rx_ring->next_to_alloc;
1136
1137 new_buff = &rx_ring->rx_bi[nta];
1138
1139
1140 nta++;
1141 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1142
1143
1144 new_buff->dma = old_buff->dma;
1145 new_buff->page = old_buff->page;
1146 new_buff->page_offset = old_buff->page_offset;
1147 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1148}
1149
1150
1151
1152
1153
1154
1155
1156
1157static inline bool iavf_page_is_reusable(struct page *page)
1158{
1159 return (page_to_nid(page) == numa_mem_id()) &&
1160 !page_is_pfmemalloc(page);
1161}
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
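/**
 * iavf_can_reuse_rx_page - Determine if this page can be reused for
 * another receive
 * @rx_buffer: buffer containing the page
 *
 * Returns true if the page is still exclusively owned by the driver and
 * local to this NUMA node; the page reference count is topped up here
 * when it is close to running out.
 **/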
1190static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
1191{
1192 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1193 struct page *page = rx_buffer->page;
1194
1195
1196 if (unlikely(!iavf_page_is_reusable(page)))
1197 return false;
1198
1199#if (PAGE_SIZE < 8192)
1200
1201 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1202 return false;
1203#else
1204#define IAVF_LAST_OFFSET \
1205 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
1206 if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
1207 return false;
1208#endif
1209
1210
1211
1212
1213
1214 if (unlikely(!pagecnt_bias)) {
1215 page_ref_add(page, USHRT_MAX);
1216 rx_buffer->pagecnt_bias = USHRT_MAX;
1217 }
1218
1219 return true;
1220}
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
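/**
 * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb
 * and advance the page offset for possible reuse.
 **/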
1234static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
1235 struct iavf_rx_buffer *rx_buffer,
1236 struct sk_buff *skb,
1237 unsigned int size)
1238{
1239#if (PAGE_SIZE < 8192)
1240 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1241#else
1242 unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
1243#endif
1244
1245 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1246 rx_buffer->page_offset, size, truesize);
1247
1248
1249#if (PAGE_SIZE < 8192)
1250 rx_buffer->page_offset ^= truesize;
1251#else
1252 rx_buffer->page_offset += truesize;
1253#endif
1254}
1255
1256
1257
1258
1259
1260
1261
1262
1263
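/**
 * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 **/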
1264static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
1265 const unsigned int size)
1266{
1267 struct iavf_rx_buffer *rx_buffer;
1268
1269 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1270 prefetchw(rx_buffer->page);
1271
1272
1273 dma_sync_single_range_for_cpu(rx_ring->dev,
1274 rx_buffer->dma,
1275 rx_buffer->page_offset,
1276 size,
1277 DMA_FROM_DEVICE);
1278
1279
1280 rx_buffer->pagecnt_bias--;
1281
1282 return rx_buffer;
1283}
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
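/**
 * iavf_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function allocates an skb, copies the packet headers into it and
 * attaches the remaining data as a page fragment.
 **/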
1295static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
1296 struct iavf_rx_buffer *rx_buffer,
1297 unsigned int size)
1298{
1299 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1300#if (PAGE_SIZE < 8192)
1301 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1302#else
1303 unsigned int truesize = SKB_DATA_ALIGN(size);
1304#endif
1305 unsigned int headlen;
1306 struct sk_buff *skb;
1307
1308
1309 prefetch(va);
1310#if L1_CACHE_BYTES < 128
1311 prefetch(va + L1_CACHE_BYTES);
1312#endif
1313
1314
1315 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1316 IAVF_RX_HDR_SIZE,
1317 GFP_ATOMIC | __GFP_NOWARN);
1318 if (unlikely(!skb))
1319 return NULL;
1320
1321
1322 headlen = size;
1323 if (headlen > IAVF_RX_HDR_SIZE)
1324 headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE);
1325
1326
1327 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1328
1329
1330 size -= headlen;
1331 if (size) {
1332 skb_add_rx_frag(skb, 0, rx_buffer->page,
1333 rx_buffer->page_offset + headlen,
1334 size, truesize);
1335
1336
1337#if (PAGE_SIZE < 8192)
1338 rx_buffer->page_offset ^= truesize;
1339#else
1340 rx_buffer->page_offset += truesize;
1341#endif
1342 } else {
1343
1344 rx_buffer->pagecnt_bias++;
1345 }
1346
1347 return skb;
1348}
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
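/**
 * iavf_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 **/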
1359static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
1360 struct iavf_rx_buffer *rx_buffer,
1361 unsigned int size)
1362{
1363 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1364#if (PAGE_SIZE < 8192)
1365 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1366#else
1367 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1368 SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
1369#endif
1370 struct sk_buff *skb;
1371
1372
1373 prefetch(va);
1374#if L1_CACHE_BYTES < 128
1375 prefetch(va + L1_CACHE_BYTES);
1376#endif
1377
1378 skb = build_skb(va - IAVF_SKB_PAD, truesize);
1379 if (unlikely(!skb))
1380 return NULL;
1381
1382
1383 skb_reserve(skb, IAVF_SKB_PAD);
1384 __skb_put(skb, size);
1385
1386
1387#if (PAGE_SIZE < 8192)
1388 rx_buffer->page_offset ^= truesize;
1389#else
1390 rx_buffer->page_offset += truesize;
1391#endif
1392
1393 return skb;
1394}
1395
1396
1397
1398
1399
1400
1401
1402
1403
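/**
 * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buffer.  It will
 * either recycle the buffer or unmap it and free the associated resources.
 **/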
1404static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
1405 struct iavf_rx_buffer *rx_buffer)
1406{
1407 DEFINE_DMA_ATTRS(attrs);
1408
1409 if (iavf_can_reuse_rx_page(rx_buffer)) {
1410
1411 iavf_reuse_rx_page(rx_ring, rx_buffer);
1412 rx_ring->rx_stats.page_reuse_count++;
1413 } else {
1414
1415 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
1416 dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
1417 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1418 iavf_rx_pg_size(rx_ring),
1419 DMA_FROM_DEVICE, &attrs);
1420 __page_frag_cache_drain(rx_buffer->page,
1421 rx_buffer->pagecnt_bias);
1422 }
1423
1424
1425 rx_buffer->page = NULL;
1426}
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
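/**
 * iavf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next_to_clean.  If the buffer is an EOP buffer
 * it returns false, otherwise it returns true indicating that the frame
 * continues in the next descriptor.
 **/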
1439static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
1440 union iavf_rx_desc *rx_desc,
1441 struct sk_buff *skb)
1442{
1443 u32 ntc = rx_ring->next_to_clean + 1;
1444
1445
1446 ntc = (ntc < rx_ring->count) ? ntc : 0;
1447 rx_ring->next_to_clean = ntc;
1448
1449 prefetch(IAVF_RX_DESC(rx_ring, ntc));
1450
1451
1452#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
1453 if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
1454 return false;
1455
1456 rx_ring->rx_stats.non_eop_descs++;
1457
1458 return true;
1459}
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
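/**
 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * Returns amount of work completed
 **/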
1473static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
1474{
1475 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1476 struct sk_buff *skb = rx_ring->skb;
1477 u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
1478 bool failure = false;
1479
1480 while (likely(total_rx_packets < (unsigned int)budget)) {
1481 struct iavf_rx_buffer *rx_buffer;
1482 union iavf_rx_desc *rx_desc;
1483 unsigned int size;
1484 u16 vlan_tag;
1485 u8 rx_ptype;
1486 u64 qword;
1487
1488
1489 if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
1490 failure = failure ||
1491 iavf_alloc_rx_buffers(rx_ring, cleaned_count);
1492 cleaned_count = 0;
1493 }
1494
1495 rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1496
1497
1498
1499
1500
1501
1502 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1503
1504
1505
1506
1507
1508 dma_rmb();
1509
1510 size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1511 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1512 if (!size)
1513 break;
1514
1515 iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
1516 rx_buffer = iavf_get_rx_buffer(rx_ring, size);
1517
1518
1519 if (skb)
1520 iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1521 else if (ring_uses_build_skb(rx_ring))
1522 skb = iavf_build_skb(rx_ring, rx_buffer, size);
1523 else
1524 skb = iavf_construct_skb(rx_ring, rx_buffer, size);
1525
1526
1527 if (!skb) {
1528 rx_ring->rx_stats.alloc_buff_failed++;
1529 rx_buffer->pagecnt_bias++;
1530 break;
1531 }
1532
1533 iavf_put_rx_buffer(rx_ring, rx_buffer);
1534 cleaned_count++;
1535
1536 if (iavf_is_non_eop(rx_ring, rx_desc, skb))
1537 continue;
1538
1539
1540
1541
1542
1543
1544 if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
1545 dev_kfree_skb_any(skb);
1546 skb = NULL;
1547 continue;
1548 }
1549
1550 if (iavf_cleanup_headers(rx_ring, skb)) {
1551 skb = NULL;
1552 continue;
1553 }
1554
1555
1556 total_rx_bytes += skb->len;
1557
1558 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1559 rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
1560 IAVF_RXD_QW1_PTYPE_SHIFT;
1561
1562
1563 iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1564
1565
1566 vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1567 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1568
1569 iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
1570 iavf_receive_skb(rx_ring, skb, vlan_tag);
1571 skb = NULL;
1572
1573
1574 total_rx_packets++;
1575 }
1576
1577 rx_ring->skb = skb;
1578
1579 u64_stats_update_begin(&rx_ring->syncp);
1580 rx_ring->stats.packets += total_rx_packets;
1581 rx_ring->stats.bytes += total_rx_bytes;
1582 u64_stats_update_end(&rx_ring->syncp);
1583 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1584 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1585
1586
1587 return failure ? budget : (int)total_rx_packets;
1588}
1589
1590static inline u32 iavf_buildreg_itr(const int type, u16 itr)
1591{
1592 u32 val;
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609 itr &= IAVF_ITR_MASK;
1610
1611 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1612 (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1613 (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
1614
1615 return val;
1616}
1617
1618
1619#define INTREG IAVF_VFINT_DYN_CTLN1
1620
1621
1622
1623
1624
1625
1626
1627
1628#define ITR_COUNTDOWN_START 3
1629
1630
1631
1632
1633
1634
1635
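/**
 * iavf_update_enable_itr - Update ITR and re-enable MSI-X interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 **/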
1636static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
1637 struct iavf_q_vector *q_vector)
1638{
1639 struct iavf_hw *hw = &vsi->back->hw;
1640 u32 intval;
1641
1642
1643 iavf_update_itr(q_vector, &q_vector->tx);
1644 iavf_update_itr(q_vector, &q_vector->rx);
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
1655
1656 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1657 q_vector->rx.target_itr);
1658 q_vector->rx.current_itr = q_vector->rx.target_itr;
1659 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1660 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
1661 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
1662 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
1663
1664
1665
1666 intval = iavf_buildreg_itr(IAVF_TX_ITR,
1667 q_vector->tx.target_itr);
1668 q_vector->tx.current_itr = q_vector->tx.target_itr;
1669 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1670 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
1671
1672 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1673 q_vector->rx.target_itr);
1674 q_vector->rx.current_itr = q_vector->rx.target_itr;
1675 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1676 } else {
1677
1678 intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
1679 if (q_vector->itr_countdown)
1680 q_vector->itr_countdown--;
1681 }
1682
1683 if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
1684 wr32(hw, INTREG(q_vector->reg_idx), intval);
1685}
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
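/**
 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/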
1696int iavf_napi_poll(struct napi_struct *napi, int budget)
1697{
1698 struct iavf_q_vector *q_vector =
1699 container_of(napi, struct iavf_q_vector, napi);
1700 struct iavf_vsi *vsi = q_vector->vsi;
1701 struct iavf_ring *ring;
1702 bool clean_complete = true;
1703 bool arm_wb = false;
1704 int budget_per_ring;
1705 int work_done = 0;
1706
1707 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
1708 napi_complete(napi);
1709 return 0;
1710 }
1711
1712
1713
1714
1715 iavf_for_each_ring(ring, q_vector->tx) {
1716 if (!iavf_clean_tx_irq(vsi, ring, budget)) {
1717 clean_complete = false;
1718 continue;
1719 }
1720 arm_wb |= ring->arm_wb;
1721 ring->arm_wb = false;
1722 }
1723
1724
1725 if (budget <= 0)
1726 goto tx_only;
1727
1728
1729
1730
1731 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1732
1733 iavf_for_each_ring(ring, q_vector->rx) {
1734 int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
1735
1736 work_done += cleaned;
1737
1738 if (cleaned >= budget_per_ring)
1739 clean_complete = false;
1740 }
1741
1742
1743 if (!clean_complete) {
1744 int cpu_id = smp_processor_id();
1745
1746
1747
1748
1749
1750
1751
1752
1753 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
1754
1755 napi_complete_done(napi, work_done);
1756
1757
1758 iavf_force_wb(vsi, q_vector);
1759
1760
1761 return budget - 1;
1762 }
1763tx_only:
1764 if (arm_wb) {
1765 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1766 iavf_enable_wb_on_itr(vsi, q_vector);
1767 }
1768 return budget;
1769 }
1770
1771 if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
1772 q_vector->arm_wb_state = false;
1773
1774
1775
1776
1777 if (likely(napi_complete_done(napi, work_done)))
1778 iavf_update_enable_itr(vsi, q_vector);
1779
1780 return min(work_done, budget - 1);
1781}
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
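/**
 * iavf_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the Tx flags to be set
 *
 * Returns a negative error code if the frame should be dropped,
 * otherwise 0 once the flags have been set.
 **/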
1795static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1796 struct iavf_ring *tx_ring,
1797 u32 *flags)
1798{
1799 __be16 protocol = skb->protocol;
1800 u32 tx_flags = 0;
1801
1802 if (protocol == htons(ETH_P_8021Q) &&
1803 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1804
1805
1806
1807
1808
1809
1810
1811 skb->protocol = vlan_get_protocol(skb);
1812 goto out;
1813 }
1814
1815
1816 if (skb_vlan_tag_present(skb)) {
1817 tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1818 tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1819
1820 } else if (protocol == htons(ETH_P_8021Q)) {
1821 struct vlan_hdr *vhdr, _vhdr;
1822
1823 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1824 if (!vhdr)
1825 return -EINVAL;
1826
1827 protocol = vhdr->h_vlan_encapsulated_proto;
1828 tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
1829 tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
1830 }
1831
1832out:
1833 *flags = tx_flags;
1834 return 0;
1835}
1836
1837
1838
1839
1840
1841
1842
1843
1844
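/**
 * iavf_tso - set up the tso context descriptor
 * @first:    pointer to first Tx buffer for xmit
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if TSO is going, or a negative error
 **/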
1845static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
1846 u64 *cd_type_cmd_tso_mss)
1847{
1848 struct sk_buff *skb = first->skb;
1849 u64 cd_cmd, cd_tso_len, cd_mss;
1850 union {
1851 struct iphdr *v4;
1852 struct ipv6hdr *v6;
1853 unsigned char *hdr;
1854 } ip;
1855 union {
1856 struct tcphdr *tcp;
1857 struct udphdr *udp;
1858 unsigned char *hdr;
1859 } l4;
1860 u32 paylen, l4_offset;
1861 u16 gso_segs, gso_size;
1862 int err;
1863
1864 if (skb->ip_summed != CHECKSUM_PARTIAL)
1865 return 0;
1866
1867 if (!skb_is_gso(skb))
1868 return 0;
1869
1870 err = skb_cow_head(skb, 0);
1871 if (err < 0)
1872 return err;
1873
1874 ip.hdr = skb_network_header(skb);
1875 l4.hdr = skb_transport_header(skb);
1876
1877
1878 if (ip.v4->version == 4) {
1879 ip.v4->tot_len = 0;
1880 ip.v4->check = 0;
1881 } else {
1882 ip.v6->payload_len = 0;
1883 }
1884
1885 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1886 SKB_GSO_GRE_CSUM |
1887 SKB_GSO_IPIP |
1888 SKB_GSO_SIT |
1889 SKB_GSO_UDP_TUNNEL |
1890 SKB_GSO_UDP_TUNNEL_CSUM)) {
1891 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1892 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1893 l4.udp->len = 0;
1894
1895
1896 l4_offset = l4.hdr - skb->data;
1897
1898
1899 paylen = skb->len - l4_offset;
1900 csum_replace_by_diff(&l4.udp->check,
1901 (__force __wsum)htonl(paylen));
1902 }
1903
1904
1905 ip.hdr = skb_inner_network_header(skb);
1906 l4.hdr = skb_inner_transport_header(skb);
1907
1908
1909 if (ip.v4->version == 4) {
1910 ip.v4->tot_len = 0;
1911 ip.v4->check = 0;
1912 } else {
1913 ip.v6->payload_len = 0;
1914 }
1915 }
1916
1917
1918 l4_offset = l4.hdr - skb->data;
1919
1920
1921 paylen = skb->len - l4_offset;
1922 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1923
1924
1925 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
1926
1927
1928 gso_size = skb_shinfo(skb)->gso_size;
1929 gso_segs = skb_shinfo(skb)->gso_segs;
1930
1931
1932 first->gso_segs = gso_segs;
1933 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1934
1935
1936 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1937 cd_tso_len = skb->len - *hdr_len;
1938 cd_mss = gso_size;
1939 *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1940 (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1941 (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1942 return 1;
1943}
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
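/**
 * iavf_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/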
1954static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1955 u32 *td_cmd, u32 *td_offset,
1956 struct iavf_ring *tx_ring,
1957 u32 *cd_tunneling)
1958{
1959 union {
1960 struct iphdr *v4;
1961 struct ipv6hdr *v6;
1962 unsigned char *hdr;
1963 } ip;
1964 union {
1965 struct tcphdr *tcp;
1966 struct udphdr *udp;
1967 unsigned char *hdr;
1968 } l4;
1969 unsigned char *exthdr;
1970 u32 offset, cmd = 0;
1971 __be16 frag_off;
1972 u8 l4_proto = 0;
1973
1974 if (skb->ip_summed != CHECKSUM_PARTIAL)
1975 return 0;
1976
1977 ip.hdr = skb_network_header(skb);
1978 l4.hdr = skb_transport_header(skb);
1979
1980
1981 offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1982
1983 if (skb->encapsulation) {
1984 u32 tunnel = 0;
1985
1986 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
1987 tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
1988 IAVF_TX_CTX_EXT_IP_IPV4 :
1989 IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1990
1991 l4_proto = ip.v4->protocol;
1992 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
1993 tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
1994
1995 exthdr = ip.hdr + sizeof(*ip.v6);
1996 l4_proto = ip.v6->nexthdr;
1997 if (l4.hdr != exthdr)
1998 ipv6_skip_exthdr(skb, exthdr - skb->data,
1999 &l4_proto, &frag_off);
2000 }
2001
2002
2003 switch (l4_proto) {
2004 case IPPROTO_UDP:
2005 tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
2006 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2007 break;
2008 case IPPROTO_GRE:
2009 tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
2010 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2011 break;
2012 case IPPROTO_IPIP:
2013 case IPPROTO_IPV6:
2014 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2015 l4.hdr = skb_inner_network_header(skb);
2016 break;
2017 default:
2018 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2019 return -1;
2020
2021 skb_checksum_help(skb);
2022 return 0;
2023 }
2024
2025
2026 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2027 IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2028
2029
2030 ip.hdr = skb_inner_network_header(skb);
2031
2032
2033 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2034 IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
2035
2036
2037 if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
2038 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2039 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2040 tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
2041
2042
2043 *cd_tunneling |= tunnel;
2044
2045
2046 l4.hdr = skb_inner_transport_header(skb);
2047 l4_proto = 0;
2048
2049
2050 *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
2051 if (ip.v4->version == 4)
2052 *tx_flags |= IAVF_TX_FLAGS_IPV4;
2053 if (ip.v6->version == 6)
2054 *tx_flags |= IAVF_TX_FLAGS_IPV6;
2055 }
2056
2057
2058 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
2059 l4_proto = ip.v4->protocol;
2060
2061
2062
2063 cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2064 IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
2065 IAVF_TX_DESC_CMD_IIPT_IPV4;
2066 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2067 cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2068
2069 exthdr = ip.hdr + sizeof(*ip.v6);
2070 l4_proto = ip.v6->nexthdr;
2071 if (l4.hdr != exthdr)
2072 ipv6_skip_exthdr(skb, exthdr - skb->data,
2073 &l4_proto, &frag_off);
2074 }
2075
2076
2077 offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2078
2079
2080 switch (l4_proto) {
2081 case IPPROTO_TCP:
2082
2083 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2084 offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2085 break;
2086 case IPPROTO_SCTP:
2087
2088 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2089 offset |= (sizeof(struct sctphdr) >> 2) <<
2090 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2091 break;
2092 case IPPROTO_UDP:
2093
2094 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2095 offset |= (sizeof(struct udphdr) >> 2) <<
2096 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2097 break;
2098 default:
2099 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2100 return -1;
2101 skb_checksum_help(skb);
2102 return 0;
2103 }
2104
2105 *td_cmd |= cmd;
2106 *td_offset |= offset;
2107
2108 return 1;
2109}
2110
2111
2112
2113
2114
2115
2116
2117
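/**
 * iavf_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/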
2118static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
2119 const u64 cd_type_cmd_tso_mss,
2120 const u32 cd_tunneling, const u32 cd_l2tag2)
2121{
2122 struct iavf_tx_context_desc *context_desc;
2123 int i = tx_ring->next_to_use;
2124
2125 if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
2126 !cd_tunneling && !cd_l2tag2)
2127 return;
2128
2129
2130 context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
2131
2132 i++;
2133 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2134
2135
2136 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2137 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2138 context_desc->rsvd = cpu_to_le16(0);
2139 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2140}
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
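/**
 * __iavf_chk_linearize - Check if the skb exceeds the per-packet buffer limit
 * @skb: send buffer
 *
 * The hardware can only chain a limited number (IAVF_MAX_BUFFER_TXD) of
 * data buffers per packet on the wire, so walk the fragments and return
 * true if the skb needs to be linearized before transmit.
 **/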
2155bool __iavf_chk_linearize(struct sk_buff *skb)
2156{
2157 const struct skb_frag_struct *frag, *stale;
2158 int nr_frags, sum;
2159
2160
2161 nr_frags = skb_shinfo(skb)->nr_frags;
2162 if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
2163 return false;
2164
2165
2166
2167
2168 nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
2169 frag = &skb_shinfo(skb)->frags[0];
2170
2171
2172
2173
2174
2175
2176
2177 sum = 1 - skb_shinfo(skb)->gso_size;
2178
2179
2180 sum += skb_frag_size(frag++);
2181 sum += skb_frag_size(frag++);
2182 sum += skb_frag_size(frag++);
2183 sum += skb_frag_size(frag++);
2184 sum += skb_frag_size(frag++);
2185
2186
2187
2188
2189 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2190 int stale_size = skb_frag_size(stale);
2191
2192 sum += skb_frag_size(frag++);
2193
2194
2195
2196
2197
2198
2199
2200 if (stale_size > IAVF_MAX_DATA_PER_TXD) {
2201 int align_pad = -(stale->page_offset) &
2202 (IAVF_MAX_READ_REQ_SIZE - 1);
2203
2204 sum -= align_pad;
2205 stale_size -= align_pad;
2206
2207 do {
2208 sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2209 stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2210 } while (stale_size > IAVF_MAX_DATA_PER_TXD);
2211 }
2212
2213
2214 if (sum < 0)
2215 return true;
2216
2217 if (!nr_frags--)
2218 break;
2219
2220 sum -= stale_size;
2221 }
2222
2223 return false;
2224}
2225
2226
2227
2228
2229
2230
2231
2232
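/**
 * __iavf_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/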
2233int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
2234{
2235 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2236
2237 smp_mb();
2238
2239
2240 if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
2241 return -EBUSY;
2242
2243
2244 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2245 ++tx_ring->tx_stats.restart_queue;
2246 return 0;
2247}
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
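/**
 * iavf_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/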
2259static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
2260 struct iavf_tx_buffer *first, u32 tx_flags,
2261 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2262{
2263 unsigned int data_len = skb->data_len;
2264 unsigned int size = skb_headlen(skb);
2265 struct skb_frag_struct *frag;
2266 struct iavf_tx_buffer *tx_bi;
2267 struct iavf_tx_desc *tx_desc;
2268 u16 i = tx_ring->next_to_use;
2269 u32 td_tag = 0;
2270 dma_addr_t dma;
2271
2272 if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
2273 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2274 td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
2275 IAVF_TX_FLAGS_VLAN_SHIFT;
2276 }
2277
2278 first->tx_flags = tx_flags;
2279
2280 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2281
2282 tx_desc = IAVF_TX_DESC(tx_ring, i);
2283 tx_bi = first;
2284
2285 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2286 unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2287
2288 if (dma_mapping_error(tx_ring->dev, dma))
2289 goto dma_error;
2290
2291
2292 dma_unmap_len_set(tx_bi, len, size);
2293 dma_unmap_addr_set(tx_bi, dma, dma);
2294
2295
2296 max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
2297 tx_desc->buffer_addr = cpu_to_le64(dma);
2298
2299 while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
2300 tx_desc->cmd_type_offset_bsz =
2301 build_ctob(td_cmd, td_offset,
2302 max_data, td_tag);
2303
2304 tx_desc++;
2305 i++;
2306
2307 if (i == tx_ring->count) {
2308 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2309 i = 0;
2310 }
2311
2312 dma += max_data;
2313 size -= max_data;
2314
2315 max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2316 tx_desc->buffer_addr = cpu_to_le64(dma);
2317 }
2318
2319 if (likely(!data_len))
2320 break;
2321
2322 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2323 size, td_tag);
2324
2325 tx_desc++;
2326 i++;
2327
2328 if (i == tx_ring->count) {
2329 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2330 i = 0;
2331 }
2332
2333 size = skb_frag_size(frag);
2334 data_len -= size;
2335
2336 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2337 DMA_TO_DEVICE);
2338
2339 tx_bi = &tx_ring->tx_bi[i];
2340 }
2341
2342 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2343
2344 i++;
2345 if (i == tx_ring->count)
2346 i = 0;
2347
2348 tx_ring->next_to_use = i;
2349
2350 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2351
2352
2353 td_cmd |= IAVF_TXD_CMD;
2354 tx_desc->cmd_type_offset_bsz =
2355 build_ctob(td_cmd, td_offset, size, td_tag);
2356
2357 skb_tx_timestamp(skb);
2358
2359
2360
2361
2362
2363
2364
2365 wmb();
2366
2367
2368 first->next_to_watch = tx_desc;
2369
2370
2371 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
2372 writel(i, tx_ring->tail);
2373
2374
2375
2376
2377 mmiowb();
2378 }
2379
2380 return;
2381
2382dma_error:
2383 dev_info(tx_ring->dev, "TX DMA map failed\n");
2384
2385
2386 for (;;) {
2387 tx_bi = &tx_ring->tx_bi[i];
2388 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
2389 if (tx_bi == first)
2390 break;
2391 if (i == 0)
2392 i = tx_ring->count;
2393 i--;
2394 }
2395
2396 tx_ring->next_to_use = i;
2397}
2398
2399
2400
2401
2402
2403
2404
2405
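/**
 * iavf_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/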
2406static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
2407 struct iavf_ring *tx_ring)
2408{
2409 u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
2410 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2411 struct iavf_tx_buffer *first;
2412 u32 td_offset = 0;
2413 u32 tx_flags = 0;
2414 __be16 protocol;
2415 u32 td_cmd = 0;
2416 u8 hdr_len = 0;
2417 int tso, count;
2418
2419
2420 prefetch(skb->data);
2421
2422 iavf_trace(xmit_frame_ring, skb, tx_ring);
2423
2424 count = iavf_xmit_descriptor_count(skb);
2425 if (iavf_chk_linearize(skb, count)) {
2426 if (__skb_linearize(skb)) {
2427 dev_kfree_skb_any(skb);
2428 return NETDEV_TX_OK;
2429 }
2430 count = iavf_txd_use_count(skb->len);
2431 tx_ring->tx_stats.tx_linearize++;
2432 }
2433
2434
2435
2436
2437
2438
2439
2440 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2441 tx_ring->tx_stats.tx_busy++;
2442 return NETDEV_TX_BUSY;
2443 }
2444
2445
2446 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2447 first->skb = skb;
2448 first->bytecount = skb->len;
2449 first->gso_segs = 1;
2450
2451
2452 if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2453 goto out_drop;
2454
2455
2456 protocol = vlan_get_protocol(skb);
2457
2458
2459 if (protocol == htons(ETH_P_IP))
2460 tx_flags |= IAVF_TX_FLAGS_IPV4;
2461 else if (protocol == htons(ETH_P_IPV6))
2462 tx_flags |= IAVF_TX_FLAGS_IPV6;
2463
2464 tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
2465
2466 if (tso < 0)
2467 goto out_drop;
2468 else if (tso)
2469 tx_flags |= IAVF_TX_FLAGS_TSO;
2470
2471
2472 tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2473 tx_ring, &cd_tunneling);
2474 if (tso < 0)
2475 goto out_drop;
2476
2477
2478 td_cmd |= IAVF_TX_DESC_CMD_ICRC;
2479
2480 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2481 cd_tunneling, cd_l2tag2);
2482
2483 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2484 td_cmd, td_offset);
2485
2486 return NETDEV_TX_OK;
2487
2488out_drop:
2489 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
2490 dev_kfree_skb_any(first->skb);
2491 first->skb = NULL;
2492 return NETDEV_TX_OK;
2493}
2494
2495
2496
2497
2498
2499
2500
2501
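/**
 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/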
2502netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2503{
2504 struct iavf_adapter *adapter = netdev_priv(netdev);
2505 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
2506
2507
2508
2509
2510 if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
2511 if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
2512 return NETDEV_TX_OK;
2513 skb->len = IAVF_MIN_TX_LEN;
2514 skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
2515 }
2516
2517 return iavf_xmit_frame_ring(skb, tx_ring);
2518}
2519