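/* i40evf: Tx/Rx hot path for the Intel(R) Ethernet XL710 family virtual
 * function driver: descriptor ring setup/teardown, transmit mapping and
 * completion, receive cleaning, and interrupt moderation.
 */
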
#include <linux/prefetch.h>
#include <net/busy_poll.h>

#include "i40evf.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)

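/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/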
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
		kfree(tx_buffer->raw_buf);

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
}

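/**
 * i40evf_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/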
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}

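/**
 * i40evf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/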
void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40evf_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

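/**
 * i40evf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: use the SW-tracked head instead of the HW head
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/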
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
				tail - head : (tail + ring->count - head);

	return 0;
}

#define WB_STRIDE 0x3

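/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/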
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		unsigned int j = 0;

		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		j = i40evf_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

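/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 **/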
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
	      I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */

	wr32(&vsi->back->hw,
	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
				  vsi->base_vector - 1), val);
	q_vector->arm_wb_state = true;
}

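/**
 * i40evf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/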
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
		  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
		  I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
		  /* allow 00 to be written to the index */;

	wr32(&vsi->back->hw,
	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
	     val);
}

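/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/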
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * to are in 2 usec increments
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}

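/**
 * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 **/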
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes effect */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

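/**
 * i40evf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/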
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,
					  bufsz,
					  rx_bi->hdr_buf,
					  rx_bi->dma);
			for (i = 0; i < rx_ring->count; i++) {
				rx_bi = &rx_ring->rx_bi[i];
				rx_bi->dma = 0;
				rx_bi->hdr_buf = NULL;
			}
		}
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

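/**
 * i40evf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/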
void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40evf_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

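/**
 * i40evf_alloc_rx_headers - allocate Rx header buffers
 * @rx_ring: ring to set up
 *
 * Allocates one 256-byte-aligned header buffer per descriptor, carved
 * out of a single coherent allocation that is shared by the whole ring.
 **/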
void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	int i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}

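/**
 * i40evf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/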
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

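/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/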
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

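/**
 * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns true if any errors on allocation
 **/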
bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	const int current_node = numa_node_id();

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (bi->skb) /* desc is in use */
			goto no_buffers;

		/* If we've been moved to a different NUMA node, release the
		 * page so we can get a new one on the current node.
		 */
		if (bi->page && page_to_nid(bi->page) != current_node) {
			dma_unmap_page(rx_ring->dev,
				       bi->page_dma,
				       PAGE_SIZE,
				       DMA_FROM_DEVICE);
			__free_page(bi->page);
			bi->page = NULL;
			bi->page_dma = 0;
			rx_ring->rx_stats.realloc_count++;
		} else if (bi->page) {
			rx_ring->rx_stats.page_reuse_count++;
		}

		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				rx_ring->rx_stats.alloc_page_failed++;
				goto no_buffers;
			}
			bi->page_dma = dma_map_page(rx_ring->dev,
						    bi->page,
						    0,
						    PAGE_SIZE,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
				rx_ring->rx_stats.alloc_page_failed++;
				__free_page(bi->page);
				bi->page = NULL;
				bi->page_dma = 0;
				bi->page_offset = 0;
				goto no_buffers;
			}
			bi->page_offset = 0;
		}

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr =
				cpu_to_le64(bi->page_dma + bi->page_offset);
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	return false;

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

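/**
 * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns true if any errors on allocation
 **/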
bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
							  rx_ring->rx_buf_len,
							  GFP_ATOMIC |
							  __GFP_NOWARN);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				dev_kfree_skb(bi->skb);
				bi->skb = NULL;
				goto no_buffers;
			}
		}

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		rx_desc->read.hdr_addr = 0;
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	return false;

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

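/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/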
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

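/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 * @rx_ptype: ptype value of last descriptor in packet
 **/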
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    u32 rx_status,
				    u32 rx_error,
				    u16 rx_ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
	bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled on the netdev, nothing to do */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* The hardware does not validate checksums on the outer headers
	 * of tunneled packets, so note in csum_level that the validated
	 * checksum is the inner one when this is a GRE/NAT tunnel ptype.
	 */
	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

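/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/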
static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

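/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received
 * @rx_ptype: Rx packet type
 **/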
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	/* nothing to do if the netdev has hashing disabled */
	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}

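/**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring: Rx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/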
static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	bool failure = false;
	u8 rx_ptype;
	u64 qword;
	u32 copysize;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40evf_alloc_rx_buffers_ps(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();
		/* sync header buffer for reading */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_ring->rx_bi[0].dma,
					      i * rx_ring->rx_hdr_len,
					      rx_ring->rx_hdr_len,
					      DMA_FROM_DEVICE);
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		if (likely(!skb)) {
			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
							  rx_ring->rx_hdr_len,
							  GFP_ATOMIC |
							  __GFP_NOWARN);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				failure = true;
				break;
			}

			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			/* we are reusing so sync this buffer for CPU use */
			dma_sync_single_range_for_cpu(rx_ring->dev,
						      rx_ring->rx_bi[0].dma,
						      i * rx_ring->rx_hdr_len,
						      rx_ring->rx_hdr_len,
						      DMA_FROM_DEVICE);
		}
		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;

		/* sync half-page for reading */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->page_dma,
					      rx_bi->page_offset,
					      PAGE_SIZE / 2,
					      DMA_FROM_DEVICE);
		prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
		rx_bi->skb = NULL;
		cleaned_count++;
		copysize = 0;
		if (rx_hbo || rx_sph) {
			int len;

			if (rx_hbo)
				len = I40E_RX_HDR_SIZE;
			else
				len = rx_header_len;
			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
		} else if (skb->len == 0) {
			int len;
			unsigned char *va = page_address(rx_bi->page) +
					    rx_bi->page_offset;

			len = min(rx_packet_len, rx_ring->rx_hdr_len);
			memcpy(__skb_put(skb, len), va, len);
			copysize = len;
			rx_packet_len -= len;
		}
		/* Get the rest of the data if this was a header split */
		if (rx_packet_len) {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					rx_bi->page,
					rx_bi->page_offset + copysize,
					rx_packet_len, I40E_RXBUFFER_2048);

			/* If the page count is more than 2, then both halves
			 * of the page are used and we need to free it. Do it
			 * here instead of in the alloc code. Otherwise one
			 * of the half-pages might be released between now and
			 * then, and we wouldn't know which one to use.
			 */
			if (page_count(rx_bi->page) > 2) {
				dma_unmap_page(rx_ring->dev,
					       rx_bi->page_dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
				rx_bi->page = NULL;
				rx_bi->page_dma = 0;
				rx_ring->rx_stats.realloc_count++;
			} else {
				get_page(rx_bi->page);
				/* switch to the other half-page here; the
				 * allocation code programs the right addrs
				 * into HW. If we haven't used this half-page,
				 * the address won't be changed, and HW can
				 * just use it next time through.
				 */
				rx_bi->page_offset ^= PAGE_SIZE / 2;
			}
		}
		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];
			next_buffer->skb = skb;
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;

	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : total_rx_packets;
}

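/**
 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
 * @rx_ring: Rx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/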
static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct i40e_vsi *vsi = rx_ring->vsi;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u16 rx_packet_len;
	bool failure = false;
	u8 rx_ptype;
	u64 qword;
	u16 i;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40evf_alloc_rx_buffers_1buf(rx_ring,
							       cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		prefetch(skb->data);

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		rx_bi->skb = NULL;
		cleaned_count++;

		/* the whole packet lands in this buffer, so fill in the
		 * length and hand the buffer back before unmapping it
		 */
		skb_put(skb, rx_packet_len);
		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
				 DMA_FROM_DEVICE);
		rx_bi->dma = 0;

		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : total_rx_packets;
}

static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

	val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	      /* Don't clear PBA because that can cause lost interrupts that
	       * came in while we were cleaning/polling
	       */
	      (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
	      (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);

	return val;
}

/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1

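/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 **/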
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	bool rx = false, tx = false;
	u32 rxval, txval;
	int vector;

	vector = (q_vector->v_idx + vsi->base_vector);

	/* avoid dynamic calculation if in countdown mode OR if
	 * all dynamic is disabled
	 */
	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);

	if (q_vector->itr_countdown > 0 ||
	    (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
	     !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
		goto enable_int;
	}

	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
	}

	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
	}

	if (rx || tx) {
		/* get the higher of the two ITR adjustments and
		 * use the same value for both ITR registers
		 * when in adaptive mode (Rx and/or Tx)
		 */
		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);

		q_vector->tx.itr = q_vector->rx.itr = itr;
		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
		tx = true;
		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
		rx = true;
	}

	/* only need to enable the interrupt once, but need
	 * to possibly update both ITR values
	 */
	if (rx) {
		/* set the INTENA_MSK_MASK so that this first write
		 * won't actually enable the interrupt, instead just
		 * updating the ITR (it's bit 31 PF and VF)
		 */
		rxval |= BIT(31);
		/* don't check _DOWN because interrupt isn't being enabled */
		wr32(hw, INTREG(vector - 1), rxval);
	}

enable_int:
	if (!test_bit(__I40E_DOWN, &vsi->state))
		wr32(hw, INTREG(vector - 1), txval);

	if (q_vector->itr_countdown)
		q_vector->itr_countdown--;
	else
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
}

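/**
 * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/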
int i40evf_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int work_done = 0;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		clean_complete = clean_complete &&
				 i40e_clean_tx_irq(ring, vsi->work_limit);
		arm_wb = arm_wb || ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		if (ring_is_ps_enabled(ring))
			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
		else
			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);

		work_done += cleaned;
		/* if we didn't clean as many as budgeted, we must be done */
		clean_complete = clean_complete && (budget_per_ring > cleaned);
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_enable_wb_on_itr(vsi, q_vector);
		}
		return budget;
	}

	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete_done(napi, work_done);
	i40e_update_enable_itr(vsi, q_vector);
	return 0;
}

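/**
 * i40evf_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the Tx flags to be set
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns error code indicate the frame should be dropped upon error and the
 * otherwise returns 0 to indicate the flags has been set properly.
 **/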
static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
					       struct i40e_ring *tx_ring,
					       u32 *flags)
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

out:
	*flags = tx_flags;
	return 0;
}

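/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring: ptr to the ring to send
 * @skb: ptr to the skb we're sending
 * @hdr_len: ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/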
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
	u64 cd_cmd, cd_tso_len, cd_mss;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
			/* determine offset of outer transport header */
			l4_offset = l4.hdr - skb->data;

			/* remove payload length from outer checksum */
			paylen = (__force u16)l4.udp->check;
			paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
			l4.udp->check = ~csum_fold((__force __wsum)paylen);
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = (__force u16)l4.tcp->check;
	paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
	l4.tcp->check = ~csum_fold((__force __wsum)paylen);

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}

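/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/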
static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
			       u32 *td_cmd, u32 *td_offset,
			       struct i40e_ring *tx_ring,
			       u32 *cd_tunneling)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	unsigned char *exthdr;
	u32 offset, cmd = 0, tunnel = 0;
	__be16 frag_off;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	if (skb->encapsulation) {
		/* define outer network header type */
		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
				  I40E_TX_CTX_EXT_IP_IPV4 :
				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;

			l4_proto = ip.v4->protocol;
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;

			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		default:
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;

		/* indicate if we need to offload outer UDP header */
		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;

		/* record tunnel offload values */
		*cd_tunneling |= tunnel;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			*tx_flags |= I40E_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			*tx_flags |= I40E_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
		       I40E_TX_DESC_CMD_IIPT_IPV4;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;

		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
	}

	/* compute inner L3 header size */
	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & I40E_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	*td_cmd |= cmd;
	*td_offset |= offset;

	return 1;
}

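/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/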
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}

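/**
 * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/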
bool __i40evf_chk_linearize(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.  However we don't need
	 * to perform such validation on the last 6 since the last 6 cannot
	 * inherit any data from a descriptor after them.
	 */
	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1.  We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as we need to factor in the
	 * header size for the remaining 7 descriptors
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	stale = &skb_shinfo(skb)->frags[0];
	for (;;) {
		sum += skb_frag_size(frag++);

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		/* use pre-decrement to avoid processing last fragment */
		if (!--nr_frags)
			break;

		sum -= skb_frag_size(stale++);
	}

	return false;
}

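/**
 * __i40evf_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/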
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

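/**
 * i40evf_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/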
static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
				 struct i40e_tx_buffer *first, u32 tx_flags,
				 const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;
	u16 desc_count = 0;
	bool tail_bump = true;
	bool do_rs = false;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;
	first->skb = skb;
	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
			     first->bytecount);
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* Algorithm to optimize tail and RS bit setting:
	 * if xmit_more is supported
	 *	if xmit_more is true
	 *		do not update tail and do not mark RS bit.
	 *	if xmit_more is false and last xmit_more was false
	 *		if every packet spanned less than 4 desc
	 *			then set RS bit on 4th packet and update tail
	 *			on every packet
	 *		else
	 *			update tail and set RS bit on every packet.
	 *	if xmit_more is false and last_xmit_more was true
	 *		update tail and set RS bit.
	 *
	 * Optimization: wmb to be issued only in case of tail update.
	 * Also optimize the Descriptor WB path for RS bit with the same
	 * algorithm.
	 *
	 * Note: If there are less than 4 packets
	 * pending and interrupts were disabled the service task will
	 * trigger a force WB.
	 */
	if (skb->xmit_more &&
	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						    tx_ring->queue_index))) {
		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		tail_bump = false;
	} else if (!skb->xmit_more &&
		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						       tx_ring->queue_index)) &&
		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
		   (tx_ring->packet_stride < WB_STRIDE) &&
		   (desc_count < WB_STRIDE)) {
		tx_ring->packet_stride++;
	} else {
		tx_ring->packet_stride = 0;
		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		do_rs = true;
	}
	if (do_rs)
		tx_ring->packet_stride = 0;

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
						  I40E_TX_DESC_CMD_EOP) <<
						  I40E_TXD_QW1_CMD_SHIFT);

	/* notify HW of packet */
	if (!tail_bump)
		prefetchw(tx_desc + 1);

	if (tail_bump) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

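/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/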
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = TXD_USE_COUNT(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* prepare the xmit flags */
	if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		      td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

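/**
 * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/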
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
			return NETDEV_TX_OK;
		skb->len = I40E_MIN_TX_LEN;
		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
	}

	return i40e_xmit_frame_ring(skb, tx_ring);
}