// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2012 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>

#include "igbvf.h"

char igbvf_driver_name[] = "igbvf";
static const char igbvf_driver_string[] =
	"Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
	.mac		= e1000_vfadapt,
	.flags		= 0,
	.pba		= 10,
	.init_ops	= e1000_init_function_pointers_vf,
};

static struct igbvf_info igbvf_i350_vf_info = {
	.mac		= e1000_vfadapt_i350,
	.flags		= 0,
	.pba		= 10,
	.init_ops	= e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
	[board_vf]	= &igbvf_vf_info,
	[board_i350_vf]	= &igbvf_i350_vf_info,
};

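/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 * @ring: address of the ring structure
 *
 * One descriptor slot is always left unused so that a full ring can be
 * distinguished from an empty one.
 **/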
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

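/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @skb: skb to indicate to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/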
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u32 status, u16 vlan)
{
	u16 vid;

	if (status & E1000_RXD_STAT_VP) {
		if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
		    (status & E1000_RXDEXT_STATERR_LB))
			vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		else
			vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
		if (test_bit(vid, adapter->active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	napi_gro_receive(&adapter->rx_ring->napi, skb);
}

static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
					 u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if ((status_err & E1000_RXD_STAT_IXSM) ||
	    (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	adapter->hw_csum_good++;
}

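/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/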
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
				   int cleaned_count)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	if (adapter->rx_ps_hdr_size)
		bufsz = adapter->rx_ps_hdr_size;
	else
		bufsz = adapter->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

		if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = alloc_page(GFP_ATOMIC);
				if (!buffer_info->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				dma_map_page(&pdev->dev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev,
					      buffer_info->page_dma)) {
				__free_page(buffer_info->page);
				buffer_info->page = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}

		if (!buffer_info->skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
			buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
							  bufsz,
							  DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				dev_kfree_skb(buffer_info->skb);
				buffer_info->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info.
		 */
		if (adapter->rx_ps_hdr_size) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

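/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @work_done: output parameter used to indicate completed work
 * @work_to_do: input parameter setting limit of work allowed
 *
 * The return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned.
 **/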
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igbvf_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 length, hlen, staterr;

	i = rx_ring->next_to_clean;
	rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		buffer_info = &rx_ring->buffer_info[i];

		/* HW will not DMA in data larger than the given buffer, even
		 * if it parses the (NFS, of course) header to be larger.  In
		 * that case, it fills the header buffer and spills the rest
		 * into the page.
		 */
		hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
			& E1000_RXDADV_HDRBUFLEN_MASK) >>
		       E1000_RXDADV_HDRBUFLEN_SHIFT;
		if (hlen > adapter->rx_ps_hdr_size)
			hlen = adapter->rx_ps_hdr_size;

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;
		if (!adapter->rx_ps_hdr_size) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, length);
			goto send_up;
		}

		if (!skb_shinfo(skb)->nr_frags) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_ps_hdr_size,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			skb_put(skb, hlen);
		}

		if (length) {
			dma_unmap_page(&pdev->dev, buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
			    (page_count(buffer_info->page) != 1))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE / 2;
		}
send_up:
		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		total_bytes += skb->len;
		total_packets++;

		igbvf_rx_checksum_adv(adapter, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);

		igbvf_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
			igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igbvf_desc_unused(rx_ring);

	if (cleaned_count)
		igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

	adapter->total_rx_packets += total_packets;
	adapter->total_rx_bytes += total_bytes;
	netdev->stats.rx_bytes += total_bytes;
	netdev->stats.rx_packets += total_packets;
	return cleaned;
}

static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
			    struct igbvf_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       buffer_info->dma,
				       buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

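/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: ring being initialized
 *
 * Return 0 on success, negative on failure
 **/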
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

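/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: ring being initialized
 *
 * Returns 0 on success, negative on failure
 **/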
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
			     struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

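/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/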
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	size = sizeof(struct igbvf_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

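/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/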
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
	struct pci_dev *pdev = tx_ring->adapter->pdev;

	igbvf_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

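/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring structure pointer to free buffers from
 **/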
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->rx_ps_hdr_size) {
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_hdr_size,
						 DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			}
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (buffer_info->page) {
			if (buffer_info->page_dma)
				dma_unmap_page(&pdev->dev,
					       buffer_info->page_dma,
					       PAGE_SIZE / 2,
					       DMA_FROM_DEVICE);
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_dma = 0;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igbvf_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

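/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/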
void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
	struct pci_dev *pdev = rx_ring->adapter->pdev;

	igbvf_clean_rx_ring(rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}

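/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte counts during the last
 * interrupt.  The advantage of per interrupt computation is faster updates
 * and more accurate ITR for the current traffic pattern.  The thresholds
 * below aim to minimize response time while keeping bulk throughput high.
 **/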
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
					   enum latency_range itr_setting,
					   int packets, int bytes)
{
	enum latency_range retval = itr_setting;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	default:
		break;
	}

update_itr_done:
	return retval;
}

static int igbvf_range_to_itr(enum latency_range current_range)
{
	int new_itr;

	switch (current_range) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGBVF_70K_ITR;
		break;
	case low_latency:
		new_itr = IGBVF_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IGBVF_4K_ITR;
		break;
	default:
		new_itr = IGBVF_START_ITR;
		break;
	}
	return new_itr;
}

static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
	u32 new_itr;

	adapter->tx_ring->itr_range =
			igbvf_update_itr(adapter,
					 adapter->tx_ring->itr_val,
					 adapter->total_tx_packets,
					 adapter->total_tx_bytes);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->requested_itr == 3 &&
	    adapter->tx_ring->itr_range == lowest_latency)
		adapter->tx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);

	if (new_itr != adapter->tx_ring->itr_val) {
		u32 current_itr = adapter->tx_ring->itr_val;

		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->tx_ring->itr_val = new_itr;

		adapter->tx_ring->set_itr = 1;
	}

	adapter->rx_ring->itr_range =
			igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
					 adapter->total_rx_packets,
					 adapter->total_rx_bytes);
	if (adapter->requested_itr == 3 &&
	    adapter->rx_ring->itr_range == lowest_latency)
		adapter->rx_ring->itr_range = low_latency;

	new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);

	if (new_itr != adapter->rx_ring->itr_val) {
		u32 current_itr = adapter->rx_ring->itr_val;

		new_itr = new_itr > current_itr ?
			  min(current_itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->rx_ring->itr_val = new_itr;

		adapter->rx_ring->set_itr = 1;
	}
}

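/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: ring structure to clean descriptors from
 *
 * returns true if ring is completely cleaned
 **/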
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct igbvf_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	buffer_info = &tx_ring->buffer_info[i];
	eop_desc = buffer_info->next_to_watch;

	do {
		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		buffer_info->next_to_watch = NULL;

		for (cleaned = false; !cleaned; count++) {
			tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			igbvf_put_txbuf(adapter, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
		}

		eop_desc = buffer_info->next_to_watch;
	} while (count < tx_ring->count);

	tx_ring->next_to_clean = i;

	if (unlikely(count && netif_carrier_ok(netdev) &&
	    igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	netdev->stats.tx_bytes += total_bytes;
	netdev->stats.tx_packets += total_packets;
	return count < tx_ring->count;
}

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->int_counter1++;

	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	ew32(EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;

	if (tx_ring->set_itr) {
		writel(tx_ring->itr_val,
		       adapter->hw.hw_addr + tx_ring->itr_register);
		adapter->tx_ring->set_itr = 0;
	}

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	/* auto mask will automatically re-enable the interrupt when we write
	 * EICS
	 */
	if (!igbvf_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(EICS, tx_ring->eims_value);
	else
		ew32(EIMS, tx_ring->eims_value);

	return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	adapter->int_counter0++;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (adapter->rx_ring->set_itr) {
		writel(adapter->rx_ring->itr_val,
		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
		adapter->rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->rx_ring->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->rx_ring->napi);
	}

	return IRQ_HANDLED;
}

#define IGBVF_NO_QUEUE -1

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
				int tx_queue, int msix_vector)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	/* The hardware uses a table-based method for assigning vectors.
	 * Each queue has a single entry in the table to which we write
	 * a vector number along with a "valid" bit.  Sadly, the layout
	 * of the table is somewhat counterintuitive.
	 */
	if (rx_queue > IGBVF_NO_QUEUE) {
		index = (rx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (rx_queue & 0x1) {
			/* vector goes into third byte of register */
			ivar = ivar & 0xFF00FFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			/* vector goes into low byte of register */
			ivar = ivar & 0xFFFFFF00;
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
		array_ew32(IVAR0, index, ivar);
	}
	if (tx_queue > IGBVF_NO_QUEUE) {
		index = (tx_queue >> 1);
		ivar = array_er32(IVAR0, index);
		if (tx_queue & 0x1) {
			/* vector goes into high byte of register */
			ivar = ivar & 0x00FFFFFF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
		} else {
			/* vector goes into second byte of register */
			ivar = ivar & 0xFFFF00FF;
			ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
		}
		adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
		array_ew32(IVAR0, index, ivar);
	}
}

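/**
 * igbvf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * igbvf_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/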
static void igbvf_configure_msix(struct igbvf_adapter *adapter)
{
	u32 tmp;
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	int vector = 0;

	adapter->eims_enable_mask = 0;

	igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
	adapter->eims_enable_mask |= tx_ring->eims_value;
	writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
	igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
	adapter->eims_enable_mask |= rx_ring->eims_value;
	writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);

	/* set vector for other causes, i.e. link changes */
	tmp = (vector++ | E1000_IVAR_VALID);

	ew32(IVAR_MISC, tmp);

	adapter->eims_enable_mask = GENMASK(vector - 1, 0);
	adapter->eims_other = BIT(vector - 1);
	e1e_flush();
}

static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	}
}

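/**
 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/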
static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
{
	int err = -ENOMEM;
	int i;

	/* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */
	adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (adapter->msix_entries) {
		for (i = 0; i < 3; i++)
			adapter->msix_entries[i].entry = i;

		err = pci_enable_msix_range(adapter->pdev,
					    adapter->msix_entries, 3, 3);
	}

	if (err < 0) {
		/* MSI-X failed */
		dev_err(&adapter->pdev->dev,
			"Failed to initialize MSI-X interrupts.\n");
		igbvf_reset_interrupt_capability(adapter);
	}
}

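/**
 * igbvf_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * igbvf_request_msix allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/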
static int igbvf_request_msix(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

	if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
	} else {
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->tx_ring->itr_register = E1000_EITR(vector);
	adapter->tx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		goto out;

	adapter->rx_ring->itr_register = E1000_EITR(vector);
	adapter->rx_ring->itr_val = adapter->current_itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igbvf_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igbvf_configure_msix(adapter);
	return 0;
out:
	return err;
}

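/**
 * igbvf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/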
static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);

	return 0;
}

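/**
 * igbvf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/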
static int igbvf_request_irq(struct igbvf_adapter *adapter)
{
	int err = -1;

	/* igbvf supports msi-x only */
	if (adapter->msix_entries)
		err = igbvf_request_msix(adapter);

	if (!err)
		return err;

	dev_err(&adapter->pdev->dev,
		"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void igbvf_free_irq(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int vector;

	if (adapter->msix_entries) {
		for (vector = 0; vector < 3; vector++)
			free_irq(adapter->msix_entries[vector].vector, netdev);
	}
}

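/**
 * igbvf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/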
static void igbvf_irq_disable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIMC, ~0);

	if (adapter->msix_entries)
		ew32(EIAC, 0);
}

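/**
 * igbvf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/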
static void igbvf_irq_enable(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(EIAC, adapter->eims_enable_mask);
	ew32(EIAM, adapter->eims_enable_mask);
	ew32(EIMS, adapter->eims_enable_mask);
}

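/**
 * igbvf_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: amount of packets driver is allowed to process this poll
 **/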
static int igbvf_poll(struct napi_struct *napi, int budget)
{
	struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
	struct igbvf_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int work_done = 0;

	igbvf_clean_rx_irq(adapter, &work_done, budget);

	if (work_done == budget)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done))) {
		if (adapter->requested_itr & 3)
			igbvf_set_itr(adapter);

		if (!test_bit(__IGBVF_DOWN, &adapter->state))
			ew32(EIMS, adapter->rx_ring->eims_value);
	}

	return work_done;
}

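/**
 * igbvf_set_rlpml - set receive large packet maximum length
 * @adapter: board private structure
 *
 * Configure the maximum size of packets that will be received
 **/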
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
	int max_frame_size;
	struct e1000_hw *hw = &adapter->hw;

	max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;

	spin_lock_bh(&hw->mbx_lock);

	e1000_rlpml_set_vf(hw, max_frame_size);

	spin_unlock_bh(&hw->mbx_lock);
}

static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	spin_lock_bh(&hw->mbx_lock);

	if (hw->mac.ops.set_vfta(hw, vid, true)) {
		dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
		spin_unlock_bh(&hw->mbx_lock);
		return -EINVAL;
	}

	spin_unlock_bh(&hw->mbx_lock);

	set_bit(vid, adapter->active_vlans);
	return 0;
}

static int igbvf_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	spin_lock_bh(&hw->mbx_lock);

	if (hw->mac.ops.set_vfta(hw, vid, false)) {
		dev_err(&adapter->pdev->dev,
			"Failed to remove vlan id %d\n", vid);
		spin_unlock_bh(&hw->mbx_lock);
		return -EINVAL;
	}

	spin_unlock_bh(&hw->mbx_lock);

	clear_bit(vid, adapter->active_vlans);
	return 0;
}

static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

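/**
 * igbvf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/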
static void igbvf_configure_tx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	u64 tdba;
	u32 txdctl, dca_txctrl;

	/* disable transmits */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
	tdba = tx_ring->dma;
	ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDH(0), 0);
	ew32(TDT(0), 0);
	tx_ring->head = E1000_TDH(0);
	tx_ring->tail = E1000_TDT(0);

	/* Turn off Relaxed Ordering on head write-backs.  The writebacks
	 * MUST be delivered in order or it will completely screw up
	 * our bookkeeping.
	 */
	dca_txctrl = er32(DCA_TXCTRL(0));
	dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
	ew32(DCA_TXCTRL(0), dca_txctrl);

	/* enable transmits */
	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	ew32(TXDCTL(0), txdctl);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;

	/* enable Report Status bit */
	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
}

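/**
 * igbvf_setup_srrctl - configure the receive control registers
 * @adapter: Board private structure
 **/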
static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 srrctl = 0;

	srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
		    E1000_SRRCTL_BSIZEHDR_MASK |
		    E1000_SRRCTL_BSIZEPKT_MASK);

	/* Enable queue drop to avoid head of line blocking */
	srrctl |= E1000_SRRCTL_DROP_EN;

	/* Setup buffer sizes */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
		  E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	} else {
		adapter->rx_ps_hdr_size = 128;
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	ew32(SRRCTL(0), srrctl);
}

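/**
 * igbvf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/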
static void igbvf_configure_rx(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct igbvf_ring *rx_ring = adapter->rx_ring;
	u64 rdba;
	u32 rxdctl;

	/* disable receives */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
	e1e_flush();
	msleep(10);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	rdba = rx_ring->dma;
	ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
	rx_ring->head = E1000_RDH(0);
	rx_ring->tail = E1000_RDT(0);
	ew32(RDH(0), 0);
	ew32(RDT(0), 0);

	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	rxdctl &= 0xFFF00000;
	rxdctl |= IGBVF_RX_PTHRESH;
	rxdctl |= IGBVF_RX_HTHRESH << 8;
	rxdctl |= IGBVF_RX_WTHRESH << 16;

	igbvf_set_rlpml(adapter);

	/* enable receives */
	ew32(RXDCTL(0), rxdctl);
}

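/**
 * igbvf_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and proper unicast/multicast behavior.
 **/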
static void igbvf_set_multi(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list = NULL;
	int i;

	if (!netdev_mc_empty(netdev)) {
		mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
					 GFP_ATOMIC);
		if (!mta_list)
			return;
	}

	/* prepare a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	spin_lock_bh(&hw->mbx_lock);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);

	spin_unlock_bh(&hw->mbx_lock);
	kfree(mta_list);
}

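/**
 * igbvf_set_uni - Configure unicast MAC filters
 * @netdev: network interface device structure
 *
 * This routine is responsible for configuring the hardware for proper
 * unicast filters.
 **/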
static int igbvf_set_uni(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (netdev_uc_count(netdev) > IGBVF_MAX_MAC_FILTERS) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	spin_lock_bh(&hw->mbx_lock);

	/* Clear all unicast MAC filters */
	hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL);

	spin_unlock_bh(&hw->mbx_lock);

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		/* Add MAC filters one by one */
		netdev_for_each_uc_addr(ha, netdev) {
			spin_lock_bh(&hw->mbx_lock);

			hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD,
						ha->addr);

			spin_unlock_bh(&hw->mbx_lock);
			udelay(200);
		}
	}

	return 0;
}

static void igbvf_set_rx_mode(struct net_device *netdev)
{
	igbvf_set_multi(netdev);
	igbvf_set_uni(netdev);
}

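/**
 * igbvf_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/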
static void igbvf_configure(struct igbvf_adapter *adapter)
{
	igbvf_set_rx_mode(adapter->netdev);

	igbvf_restore_vlan(adapter);

	igbvf_configure_tx(adapter);
	igbvf_setup_srrctl(adapter);
	igbvf_configure_rx(adapter);
	igbvf_alloc_rx_buffers(adapter->rx_ring,
			       igbvf_desc_unused(adapter->rx_ring));
}

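/* igbvf_reset - bring the hardware into a known good state
 * @adapter: private board structure
 *
 * This function boots the hardware and enables some settings that
 * require a configuration cycle of the hardware - those cannot be
 * set/changed during runtime. After reset the device needs to be
 * properly configured for Rx, Tx etc.
 */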
static void igbvf_reset(struct igbvf_adapter *adapter)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	spin_lock_bh(&hw->mbx_lock);

	/* Allow time for pending master requests to run */
	if (mac->ops.reset_hw(hw))
		dev_err(&adapter->pdev->dev, "PF still resetting\n");

	mac->ops.init_hw(hw);

	spin_unlock_bh(&hw->mbx_lock);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	adapter->last_reset = jiffies;
}

int igbvf_up(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	igbvf_configure(adapter);

	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);
	if (adapter->msix_entries)
		igbvf_configure_msix(adapter);

	/* Clear any pending interrupts. */
	er32(EICR);
	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;
}

void igbvf_down(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rxdctl, txdctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rxdctl = er32(RXDCTL(0));
	ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	txdctl = er32(TXDCTL(0));
	ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	msleep(10);

	napi_disable(&adapter->rx_ring->napi);

	igbvf_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	/* record the stats before reset */
	igbvf_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	igbvf_reset(adapter);
	igbvf_clean_tx_ring(adapter->tx_ring);
	igbvf_clean_rx_ring(adapter->rx_ring);
}

void igbvf_reinit_locked(struct igbvf_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	igbvf_down(adapter);
	igbvf_up(adapter);
	clear_bit(__IGBVF_RESETTING, &adapter->state);
}

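/**
 * igbvf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * igbvf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/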
static int igbvf_sw_init(struct igbvf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	s32 rc;

	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	adapter->rx_ps_hdr_size = 0;
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	adapter->tx_int_delay = 8;
	adapter->tx_abs_int_delay = 32;
	adapter->rx_int_delay = 0;
	adapter->rx_abs_int_delay = 8;
	adapter->requested_itr = 3;
	adapter->current_itr = IGBVF_START_ITR;

	/* Set various function pointers */
	adapter->ei->init_ops(&adapter->hw);

	rc = adapter->hw.mac.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
	if (rc)
		return rc;

	igbvf_set_interrupt_capability(adapter);

	if (igbvf_alloc_queues(adapter))
		return -ENOMEM;

	spin_lock_init(&adapter->tx_queue_lock);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igbvf_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->hw.mbx_lock);

	set_bit(__IGBVF_DOWN, &adapter->state);
	return 0;
}

static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	adapter->stats.last_gprc = er32(VFGPRC);
	adapter->stats.last_gorc = er32(VFGORC);
	adapter->stats.last_gptc = er32(VFGPTC);
	adapter->stats.last_gotc = er32(VFGOTC);
	adapter->stats.last_mprc = er32(VFMPRC);
	adapter->stats.last_gotlbc = er32(VFGOTLBC);
	adapter->stats.last_gptlbc = er32(VFGPTLBC);
	adapter->stats.last_gorlbc = er32(VFGORLBC);
	adapter->stats.last_gprlbc = er32(VFGPRLBC);

	adapter->stats.base_gprc = er32(VFGPRC);
	adapter->stats.base_gorc = er32(VFGORC);
	adapter->stats.base_gptc = er32(VFGPTC);
	adapter->stats.base_gotc = er32(VFGOTC);
	adapter->stats.base_mprc = er32(VFMPRC);
	adapter->stats.base_gotlbc = er32(VFGOTLBC);
	adapter->stats.base_gptlbc = er32(VFGPTLBC);
	adapter->stats.base_gorlbc = er32(VFGORLBC);
	adapter->stats.base_gprlbc = er32(VFGPRLBC);
}

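/**
 * igbvf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/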
static int igbvf_open(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IGBVF_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call request_irq, so be sure to setup our
	 * clean_rx handler before we do so.
	 */
	igbvf_configure(adapter);

	err = igbvf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igbvf_up() */
	clear_bit(__IGBVF_DOWN, &adapter->state);

	napi_enable(&adapter->rx_ring->napi);

	/* clear any pending interrupts */
	er32(EICR);

	igbvf_irq_enable(adapter);

	/* start the watchdog */
	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return 0;

err_req_irq:
	igbvf_free_rx_resources(adapter->rx_ring);
err_setup_rx:
	igbvf_free_tx_resources(adapter->tx_ring);
err_setup_tx:
	igbvf_reset(adapter);

	return err;
}

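/**
 * igbvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  All transmit and receive resources are freed.
 **/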
static int igbvf_close(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
	igbvf_down(adapter);

	igbvf_free_irq(adapter);

	igbvf_free_tx_resources(adapter->tx_ring);
	igbvf_free_rx_resources(adapter->rx_ring);

	return 0;
}

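/**
 * igbvf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/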
static int igbvf_set_mac(struct net_device *netdev, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&hw->mbx_lock);

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	spin_unlock_bh(&hw->mbx_lock);

	if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

#define UPDATE_VF_COUNTER(reg, name) \
{ \
	u32 current_counter = er32(reg); \
	if (current_counter < adapter->stats.last_##name) \
		adapter->stats.name += 0x100000000LL; \
	adapter->stats.last_##name = current_counter; \
	adapter->stats.name &= 0xFFFFFFFF00000000LL; \
	adapter->stats.name |= current_counter; \
}

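/**
 * igbvf_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/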
void igbvf_update_stats(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset, link is down
	 * or if the pci connection is down.
	 */
	if (adapter->link_speed == 0)
		return;

	if (test_bit(__IGBVF_RESETTING, &adapter->state))
		return;

	if (pci_channel_offline(pdev))
		return;

	UPDATE_VF_COUNTER(VFGPRC, gprc);
	UPDATE_VF_COUNTER(VFGORC, gorc);
	UPDATE_VF_COUNTER(VFGPTC, gptc);
	UPDATE_VF_COUNTER(VFGOTC, gotc);
	UPDATE_VF_COUNTER(VFMPRC, mprc);
	UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
	UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
	UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
	UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);

	/* Fill out the OS statistics structure */
	adapter->netdev->stats.multicast = adapter->stats.mprc;
}

static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
	dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
		 adapter->link_speed,
		 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}

static bool igbvf_has_link(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = E1000_SUCCESS;
	bool link_active;

	/* If interface is down, stay link down */
	if (test_bit(__IGBVF_DOWN, &adapter->state))
		return false;

	spin_lock_bh(&hw->mbx_lock);

	ret_val = hw->mac.ops.check_for_link(hw);

	spin_unlock_bh(&hw->mbx_lock);

	link_active = !hw->mac.get_link_status;

	/* if check for link returns error we will need to reset */
	if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
		schedule_work(&adapter->reset_task);

	return link_active;
}

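/**
 * igbvf_watchdog - Timer Call-back
 * @t: timer list pointer containing private struct
 **/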
static void igbvf_watchdog(struct timer_list *t)
{
	struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igbvf_watchdog_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter = container_of(work,
						     struct igbvf_adapter,
						     watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	struct igbvf_ring *tx_ring = adapter->tx_ring;
	struct e1000_hw *hw = &adapter->hw;
	u32 link;
	int tx_pending = 0;

	link = igbvf_has_link(adapter);

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			mac->ops.get_link_up_info(&adapter->hw,
						  &adapter->link_speed,
						  &adapter->link_duplex);
			igbvf_print_link_info(adapter);

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			dev_info(&adapter->pdev->dev, "Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	if (netif_carrier_ok(netdev)) {
		igbvf_update_stats(adapter);
	} else {
		tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
			      tx_ring->count);
		if (tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context)
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
		}
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	ew32(EICS, adapter->rx_ring->eims_value);

	/* Reset the timer */
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));
}

#define IGBVF_TX_FLAGS_CSUM		0x00000001
#define IGBVF_TX_FLAGS_VLAN		0x00000002
#define IGBVF_TX_FLAGS_TSO		0x00000004
#define IGBVF_TX_FLAGS_IPV4		0x00000008
#define IGBVF_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT	16

static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
			      u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct igbvf_buffer *buffer_info;
	u16 i = tx_ring->next_to_use;

	context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
	buffer_info = &tx_ring->buffer_info[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= 0;
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);

	buffer_info->time_stamp = jiffies;
	buffer_info->dma = 0;
}

static int igbvf_tso(struct igbvf_ring *tx_ring,
		     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* TCP is the only L4 type supported for TSO here */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;

	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}

static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
			  u32 tx_flags, __be16 protocol)
{
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(tx_flags & IGBVF_TX_FLAGS_VLAN))
			return false;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (((protocol == htons(ETH_P_IP)) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    ((protocol == htons(ETH_P_IPV6)) &&
		     igbvf_ipv6_csum_is_sctp(skb))) {
			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;

	igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
	return true;
}

static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* there is enough descriptors then we don't need to worry */
	if (igbvf_desc_unused(adapter->tx_ring) >= size)
		return 0;

	netif_stop_queue(netdev);

	/* Make sure the stop is visible before we re-check the
	 * free descriptor count.
	 */
	smp_mb();

	/* We need to check again just in case room has been made available */
	if (igbvf_desc_unused(adapter->tx_ring) < size)
		return -EBUSY;

	netif_wake_queue(netdev);

	++adapter->restart_queue;
	return 0;
}

#define IGBVF_MAX_TXD_PWR	16
#define IGBVF_MAX_DATA_PER_TXD	(1u << IGBVF_MAX_TXD_PWR)

static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
				   struct igbvf_ring *tx_ring,
				   struct sk_buff *skb)
{
	struct igbvf_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->mapped_as_page = false;
	buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		const skb_frag_t *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->mapped_as_page = false;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igbvf_put_txbuf(adapter, buffer_info);
	}

	return 0;
}

static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
				      struct igbvf_ring *tx_ring,
				      int tx_flags, int count,
				      unsigned int first, u32 paylen,
				      u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc = NULL;
	struct igbvf_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGBVF_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGBVF_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGBVF_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->buffer_info[first].next_to_watch = tx_desc;
	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}

static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
					     struct net_device *netdev,
					     struct igbvf_ring *tx_ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	unsigned int first, tx_flags = 0;
	u8 hdr_len = 0;
	int count = 0;
	int tso = 0;
	__be16 protocol = vlan_get_protocol(skb);

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* need: count + 4 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGBVF_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) <<
			     IGBVF_TX_FLAGS_VLAN_SHIFT);
	}

	if (protocol == htons(ETH_P_IP))
		tx_flags |= IGBVF_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;

	tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (unlikely(tso < 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IGBVF_TX_FLAGS_TSO;
	else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGBVF_TX_FLAGS_CSUM;

	/* count reflects descriptors mapped, if 0 then mapping error
	 * has occurred and we need to re-use the skb
	 */
	count = igbvf_tx_map_adv(adapter, tx_ring, skb);

	if (count) {
		igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
				   first, skb->len, hdr_len);
		/* Make sure there is space in the ring for the next send. */
		igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *tx_ring;

	if (test_bit(__IGBVF_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[0];

	return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
}

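/**
 * igbvf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue timing out
 **/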
static void igbvf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void igbvf_reset_task(struct work_struct *work)
{
	struct igbvf_adapter *adapter;

	adapter = container_of(work, struct igbvf_adapter, reset_task);

	igbvf_reinit_locked(adapter);
}

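/**
 * igbvf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/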
2401static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2402{
2403 struct igbvf_adapter *adapter = netdev_priv(netdev);
2404 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2405
2406 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
2407 usleep_range(1000, 2000);
2408
2409 adapter->max_frame_size = max_frame;
2410 if (netif_running(netdev))
2411 igbvf_down(adapter);
2412
2413
2414
2415
2416
2417
2418
2419
2420
	if (max_frame <= 1024)
		adapter->rx_buffer_len = 1024;
	else if (max_frame <= 2048)
		adapter->rx_buffer_len = 2048;
	else
#if (PAGE_SIZE / 2) > 16384
		adapter->rx_buffer_len = 16384;
#else
		adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
					 ETH_FCS_LEN;

	netdev_dbg(netdev, "changing MTU from %d to %d\n",
		   netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igbvf_up(adapter);
	else
		igbvf_reset(adapter);

	clear_bit(__IGBVF_RESETTING, &adapter->state);

	return 0;
}

static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	default:
		return -EOPNOTSUPP;
	}
}

static int igbvf_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
		igbvf_down(adapter);
		igbvf_free_irq(adapter);
	}

	return 0;
}

static int __maybe_unused igbvf_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		err = igbvf_request_irq(adapter);
		if (err)
			return err;
	}

	igbvf_reset(adapter);

	if (netif_running(netdev))
		igbvf_up(adapter);

	netif_device_attach(netdev);

	return 0;
}

static void igbvf_shutdown(struct pci_dev *pdev)
{
	igbvf_suspend(&pdev->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
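/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */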
static void igbvf_netpoll(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);

	igbvf_clean_tx_irq(adapter->tx_ring);

	enable_irq(adapter->pdev->irq);
}
#endif
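/**
 * igbvf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */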
static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igbvf_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
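/**
 * igbvf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igbvf_resume routine.
 */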
static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	igbvf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
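/**
 * igbvf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the igbvf_resume routine.
 */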
static void igbvf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igbvf_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (hw->mac.type == e1000_vfadapt_i350)
		dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
	else
		dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
	dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}

static int igbvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;

	return 0;
}

#define IGBVF_MAX_MAC_HDR_LEN		127
#define IGBVF_MAX_NETWORK_HDR_LEN	511

static netdev_features_t
igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
		     netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}

static const struct net_device_ops igbvf_netdev_ops = {
	.ndo_open		= igbvf_open,
	.ndo_stop		= igbvf_close,
	.ndo_start_xmit		= igbvf_xmit_frame,
	.ndo_set_rx_mode	= igbvf_set_rx_mode,
	.ndo_set_mac_address	= igbvf_set_mac,
	.ndo_change_mtu		= igbvf_change_mtu,
	.ndo_do_ioctl		= igbvf_ioctl,
	.ndo_tx_timeout		= igbvf_tx_timeout,
	.ndo_vlan_rx_add_vid	= igbvf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igbvf_netpoll,
#endif
	.ndo_set_features	= igbvf_set_features,
	.ndo_features_check	= igbvf_features_check,
};
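/**
 * igbvf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igbvf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igbvf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/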
static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igbvf_adapter *adapter;
	struct e1000_hw *hw;
	const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];

	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, igbvf_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->hw.back = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	err = -EIO;
	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
				      pci_resource_len(pdev, 0));

	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if (ei->get_variants) {
		err = ei->get_variants(adapter);
		if (err)
			goto err_get_variants;
	}

	/* setup adapter struct */
	err = igbvf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* construct the net_device struct */
	netdev->netdev_ops = &igbvf_netdev_ops;

	igbvf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found++;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM |
			      NETIF_F_HW_CSUM |
			      NETIF_F_SCTP_CRC;

#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				    NETIF_F_GSO_GRE_CSUM | \
				    NETIF_F_GSO_IPXIP4 | \
				    NETIF_F_GSO_IPXIP6 | \
				    NETIF_F_GSO_UDP_TUNNEL | \
				    NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
			       IGBVF_GSO_PARTIAL_FEATURES;

	netdev->features = netdev->hw_features;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->hw_enc_features |= netdev->vlan_features;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	spin_lock_bh(&hw->mbx_lock);

	/* reset the controller to put the device in a known good state */
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.read_mac_addr(hw);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address.\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}

	spin_unlock_bh(&hw->mbx_lock);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address.\n");
		eth_hw_addr_random(netdev);
		memcpy(adapter->hw.mac.addr, netdev->dev_addr,
		       netdev->addr_len);
	}

	timer_setup(&adapter->watchdog_timer, igbvf_watchdog, 0);

	INIT_WORK(&adapter->reset_task, igbvf_reset_task);
	INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);

	/* ring size defaults */
	adapter->rx_ring->count = 1024;
	adapter->tx_ring->count = 1024;

	/* reset the hardware with the new settings */
	igbvf_reset(adapter);

	/* set hardware-specific flags */
	if (adapter->hw.mac.type == e1000_vfadapt_i350)
		adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_hw_init;

	/* tell the stack to leave us alone until igbvf_open() is called */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	igbvf_print_device_info(adapter);

	igbvf_initialize_last_counter_stats(adapter);

	return 0;

err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	igbvf_reset_interrupt_capability(adapter);
err_get_variants:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
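/**
 * igbvf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igbvf_remove is called by the PCI subsystem to alert the driver
 * that the device should be removed.  The hardware is also reset as
 * part of a teardown process.
 **/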
static void igbvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* The watchdog timer may be rescheduled, so explicitly
	 * disable it from being rescheduled.
	 */
	set_bit(__IGBVF_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	unregister_netdev(netdev);

	igbvf_reset_interrupt_capability(adapter);

	/* it is important to delete the NAPI struct prior to freeing the
	 * rx ring so that you do not end up with null pointer refs
	 */
	netif_napi_del(&adapter->rx_ring->napi);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers igbvf_err_handler = {
	.error_detected = igbvf_io_error_detected,
	.slot_reset = igbvf_io_slot_reset,
	.resume = igbvf_io_resume,
};

static const struct pci_device_id igbvf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);

static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);

/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
	.name		= igbvf_driver_name,
	.id_table	= igbvf_pci_tbl,
	.probe		= igbvf_probe,
	.remove		= igbvf_remove,
	.driver.pm	= &igbvf_pm_ops,
	.shutdown	= igbvf_shutdown,
	.err_handler	= &igbvf_err_handler
};
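/**
 * igbvf_init_module - Driver Registration Routine
 *
 * igbvf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/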
static int __init igbvf_init_module(void)
{
	int ret;

	pr_info("%s\n", igbvf_driver_string);
	pr_info("%s\n", igbvf_copyright);

	ret = pci_register_driver(&igbvf_driver);

	return ret;
}
module_init(igbvf_init_module);
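/**
 * igbvf_exit_module - Driver Exit Cleanup Routine
 *
 * igbvf_exit_module is called just before the driver is removed
 * from memory.
 **/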
static void __exit igbvf_exit_module(void)
{
	pci_unregister_driver(&igbvf_driver);
}
module_exit(igbvf_exit_module);

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");