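/* Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver (ixgbevf)
 * Copyright (c) 2009 - 2012 Intel Corporation.
 */
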
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.6.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
        [board_82599_vf] = &ixgbevf_82599_vf_info,
        [board_X540_vf] = &ixgbevf_X540_vf_info,
};

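/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last.  The last entry must be
 * all zeros, as that is how the kernel knows where the table ends.
 */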
static struct pci_device_id ixgbevf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf},
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
                                           struct ixgbevf_ring *rx_ring,
                                           u32 val)
{
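        /*
         * Force memory writes to complete before letting h/w know there
         * are new descriptors to fetch (only matters on weakly-ordered
         * memory architectures).
         */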
        wmb();
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

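/**
 * ixgbevf_set_ivar - set IVAR register, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other (mailbox) causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the MSI-X vector to map to the corresponding queue
 */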
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                             u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;

        if (direction == -1) {
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
                ivar &= ~0xFF;
                ivar |= msix_vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
        } else {
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                index = ((16 * (queue & 1)) + (8 * direction));
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
                ivar &= ~(0xFF << index);
                ivar |= (msix_vector << index);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
        }
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
                                               struct ixgbevf_tx_buffer
                                               *tx_buffer_info)
{
        if (tx_buffer_info->dma) {
                if (tx_buffer_info->mapped_as_page)
                        dma_unmap_page(tx_ring->dev,
                                       tx_buffer_info->dma,
                                       tx_buffer_info->length,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(tx_ring->dev,
                                         tx_buffer_info->dma,
                                         tx_buffer_info->length,
                                         DMA_TO_DEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        tx_buffer_info->time_stamp = 0;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

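/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 */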
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *tx_ring)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return true;

        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

        while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
               (count < tx_ring->count)) {
                bool cleaned = false;

                rmb();

                if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
                        goto cont_loop;
                for ( ; !cleaned; count++) {
                        struct sk_buff *skb;

                        tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
                        skb = tx_buffer_info->skb;

                        if (cleaned && skb) {
                                unsigned int segs, bytecount;

                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        ixgbevf_unmap_and_free_tx_resource(tx_ring,
                                                           tx_buffer_info);

                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

cont_loop:
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
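                /*
                 * Make sure that anybody stopping the queue after
                 * this sees the new next_to_clean.
                 */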
                smp_mb();
                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
        }

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);

        return count < tx_ring->count;
}

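/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 */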
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
                                struct sk_buff *skb, u8 status,
                                struct ixgbevf_ring *ring,
                                union ixgbe_adv_rx_desc *rx_desc)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

        if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
                __vlan_hwaccel_put_tag(skb, tag);

        napi_gro_receive(&q_vector->napi, skb);
}

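/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @ring: structure containing ring-specific data
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 */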
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
                                       struct ixgbevf_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;

        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                adapter->hw_csum_rx_error++;
                return;
        }

        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}

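/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 */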
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                                     struct ixgbevf_ring *rx_ring,
                                     int cleaned_count)
{
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbevf_rx_buffer *bi;
        struct sk_buff *skb;
        unsigned int i = rx_ring->next_to_use;

        bi = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
                skb = bi->skb;
                if (!skb) {
                        skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                        rx_ring->rx_buf_len);
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
                        bi->skb = skb;
                }
                if (!bi->dma) {
                        bi->dma = dma_map_single(&pdev->dev, skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
                }
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

                i++;
                if (i == rx_ring->count)
                        i = 0;
                bi = &rx_ring->rx_buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
        }
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
                                             u32 qmask)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *rx_ring,
                                 int budget)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
        u32 len, staterr;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;

        i = rx_ring->next_to_clean;
        rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                if (!budget)
                        break;
                budget--;

                rmb();
                len = le16_to_cpu(rx_desc->wb.upper.length);
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (rx_buffer_info->dma) {
                        dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_buffer_info->dma = 0;
                        skb_put(skb, len);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;

                next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;

                next_buffer = &rx_ring->rx_buffer_info[i];

                if (!(staterr & IXGBE_RXD_STAT_EOP)) {
                        skb->next = next_buffer->skb;
                        skb->next->prev = skb;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

                total_rx_bytes += skb->len;
                total_rx_packets++;

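                /*
                 * Work around a hardware quirk: locally looped back
                 * (VM to VM) packets can arrive without the Ethernet
                 * header properly pulled, so restore it before handing
                 * the skb up the stack.
                 */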
                if (staterr & IXGBE_RXD_STAT_LB) {
                        u32 header_fixup_len = skb_headlen(skb);

                        if (header_fixup_len < 14)
                                skb_push(skb, header_fixup_len);
                }
                skb->protocol = eth_type_trans(skb, rx_ring->netdev);

                ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
                        ixgbevf_alloc_rx_buffers(adapter, rx_ring,
                                                 cleaned_count);
                        cleaned_count = 0;
                }

                rx_desc = next_rxd;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);

        return !!budget;
}

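/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function cleans all Tx and Rx rings associated with a q_vector.
 */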
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
        int per_ring_budget;
        bool clean_complete = true;

        ixgbevf_for_each_ring(ring, q_vector->tx)
                clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

        if (q_vector->rx.count > 1)
                per_ring_budget = max(budget/q_vector->rx.count, 1);
        else
                per_ring_budget = budget;

        ixgbevf_for_each_ring(ring, q_vector->rx)
                clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
                                                       per_ring_budget);

        if (!clean_complete)
                return budget;

        napi_complete(napi);
        if (adapter->rx_itr_setting & 1)
                ixgbevf_set_itr(q_vector);
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
                ixgbevf_irq_enable_queues(adapter,
                                          1 << q_vector->v_idx);

        return 0;
}

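/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * Sets IXGBE_EITR_CNT_WDIS so the write does not disturb the internal
 * interrupt-throttle countdown.
 */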
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        int v_idx = q_vector->v_idx;
        u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

        itr_reg |= IXGBE_EITR_CNT_WDIS;

        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

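/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 */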
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
        struct ixgbevf_q_vector *q_vector;
        int q_vectors, v_idx;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        adapter->eims_enable_mask = 0;

        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                struct ixgbevf_ring *ring;

                q_vector = adapter->q_vector[v_idx];

                ixgbevf_for_each_ring(ring, q_vector->rx)
                        ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

                ixgbevf_for_each_ring(ring, q_vector->tx)
                        ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

                if (q_vector->tx.ring && !q_vector->rx.ring) {
                        if (adapter->tx_itr_setting == 1)
                                q_vector->itr = IXGBE_10K_ITR;
                        else
                                q_vector->itr = adapter->tx_itr_setting;
                } else {
                        if (adapter->rx_itr_setting == 1)
                                q_vector->itr = IXGBE_20K_ITR;
                        else
                                q_vector->itr = adapter->rx_itr_setting;
                }

                adapter->eims_enable_mask |= 1 << v_idx;

                ixgbevf_write_eitr(q_vector);
        }

        ixgbevf_set_ivar(adapter, -1, 1, v_idx);

        adapter->eims_other = 1 << v_idx;
        adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

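/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packet and byte counts during the last
 * interrupt.  Traffic is classified into one of the latency ranges above
 * based on throughput (bytes per interrupt interval) during the measured
 * window.
 */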
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
                               struct ixgbevf_ring_container *ring_container)
{
        int bytes = ring_container->total_bytes;
        int packets = ring_container->total_packets;
        u32 timepassed_us;
        u64 bytes_perint;
        u8 itr_setting = ring_container->itr;

        if (packets == 0)
                return;

        timepassed_us = q_vector->itr >> 2;
        bytes_perint = bytes / timepassed_us;

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > 10)
                        itr_setting = low_latency;
                break;
        case low_latency:
                if (bytes_perint > 20)
                        itr_setting = bulk_latency;
                else if (bytes_perint <= 10)
                        itr_setting = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= 20)
                        itr_setting = low_latency;
                break;
        }

        ring_container->total_bytes = 0;
        ring_container->total_packets = 0;

        ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
        u32 new_itr = q_vector->itr;
        u8 current_itr;

        ixgbevf_update_itr(q_vector, &q_vector->tx);
        ixgbevf_update_itr(q_vector, &q_vector->rx);

        current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

        switch (current_itr) {
        case lowest_latency:
                new_itr = IXGBE_100K_ITR;
                break;
        case low_latency:
                new_itr = IXGBE_20K_ITR;
                break;
        case bulk_latency:
        default:
                new_itr = IXGBE_8K_ITR;
                break;
        }

        if (new_itr != q_vector->itr) {
                new_itr = (10 * new_itr * q_vector->itr) /
                          ((9 * new_itr) + q_vector->itr);

                q_vector->itr = new_itr;

                ixgbevf_write_eitr(q_vector);
        }
}

static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
        struct ixgbevf_adapter *adapter = data;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 msg;
        bool got_ack = false;

        if (!hw->mbx.ops.check_for_ack(hw))
                got_ack = true;

        if (!hw->mbx.ops.check_for_msg(hw)) {
                hw->mbx.ops.read(hw, &msg, 1);

                if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
                        mod_timer(&adapter->watchdog_timer,
                                  round_jiffies(jiffies + 1));

                if (msg & IXGBE_VT_MSGTYPE_NACK)
                        pr_warn("Last Request of type %2.2x to PF Nacked\n",
                                msg & 0xFF);

                hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
        }

        if (got_ack)
                hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

        return IRQ_HANDLED;
}

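/**
 * ixgbevf_msix_clean_rings - single unshared vector rx/tx clean
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 */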
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
        struct ixgbevf_q_vector *q_vector = data;

        if (q_vector->rx.ring || q_vector->tx.ring)
                napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
                                     int r_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        a->rx_ring[r_idx].next = q_vector->rx.ring;
        q_vector->rx.ring = &a->rx_ring[r_idx];
        q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
                                     int t_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        a->tx_ring[t_idx].next = q_vector->tx.ring;
        q_vector->tx.ring = &a->tx_ring[t_idx];
        q_vector->tx.count++;
}

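/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors we were
 * allotted through the MSI-X enabling code.  Ideally, we'd have one vector
 * per ring/queue, but on a constrained vector budget we group the rings as
 * "efficiently" as possible.
 */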
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
        int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);
                goto out;
        }

        for (i = v_start; i < q_vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < q_vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}

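/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs requests interrupts from the kernel for the
 * previously allocated MSI-X vectors.
 */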
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        int vector, err;
        int ri = 0, ti = 0;

        for (vector = 0; vector < q_vectors; vector++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
                struct msix_entry *entry = &adapter->msix_entries[vector];

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "TxRx", ri++);
                        ti++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "rx", ri++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "tx", ti++);
                } else {
                        continue;
                }
                err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
                                  q_vector->name, q_vector);
                if (err) {
                        hw_dbg(&adapter->hw,
                               "request_irq failed for MSIX interrupt Error: %d\n",
                               err);
                        goto free_queue_irqs;
                }
        }

        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbevf_msix_mbx, 0, netdev->name, adapter);
        if (err) {
                hw_dbg(&adapter->hw,
                       "request_irq for msix_mbx failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        while (vector) {
                vector--;
                free_irq(adapter->msix_entries[vector].vector,
                         adapter->q_vector[vector]);
        }
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
        return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];

                q_vector->rx.ring = NULL;
                q_vector->tx.ring = NULL;
                q_vector->rx.count = 0;
                q_vector->tx.count = 0;
        }
}

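/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */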
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
        int err = 0;

        err = ixgbevf_request_msix_irqs(adapter);

        if (err)
                hw_dbg(&adapter->hw,
                       "request_irq failed, Error %d\n", err);

        return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors;

        q_vectors = adapter->num_msix_vectors;
        i = q_vectors - 1;

        free_irq(adapter->msix_entries[i].vector, adapter);
        i--;

        for (; i >= 0; i--) {
                if (!adapter->q_vector[i]->rx.ring &&
                    !adapter->q_vector[i]->tx.ring)
                        continue;

                free_irq(adapter->msix_entries[i].vector,
                         adapter->q_vector[i]);
        }

        ixgbevf_reset_q_vectors(adapter);
}

static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

        IXGBE_WRITE_FLUSH(hw);

        for (i = 0; i < adapter->num_msix_vectors; i++)
                synchronize_irq(adapter->msix_entries[i].vector);
}

static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

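/**
 * ixgbevf_configure_tx - Configure VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */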
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
        u64 tdba;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 i, j, tdlen, txctrl;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct ixgbevf_ring *ring = &adapter->tx_ring[i];

                j = ring->reg_idx;
                tdba = ring->dma;
                tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
                                (tdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
                adapter->tx_ring[i].head = IXGBE_VFTDH(j);
                adapter->tx_ring[i].tail = IXGBE_VFTDT(j);

                txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
                txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
        }
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
        struct ixgbevf_ring *rx_ring;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 srrctl;

        rx_ring = &adapter->rx_ring[index];

        srrctl = IXGBE_SRRCTL_DROP_EN;

        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

        if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
                srrctl |= IXGBEVF_RXBUFFER_2048 >>
                          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        else
                srrctl |= rx_ring->rx_buf_len >>
                          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

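/**
 * ixgbevf_configure_rx - Configure VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */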
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
        u64 rdba;
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int i, j;
        u32 rdlen;
        int rx_buf_len;

        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
        if (netdev->mtu <= ETH_DATA_LEN)
                rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
        else
                rx_buf_len = ALIGN(max_frame, 1024);

        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);

        for (i = 0; i < adapter->num_rx_queues; i++) {
                rdba = adapter->rx_ring[i].dma;
                j = adapter->rx_ring[i].reg_idx;
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
                                (rdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
                adapter->rx_ring[i].head = IXGBE_VFRDH(j);
                adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
                adapter->rx_ring[i].rx_buf_len = rx_buf_len;

                ixgbevf_configure_srrctl(adapter, j);
        }
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        spin_lock(&adapter->mbx_lock);

        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, true);

        spin_unlock(&adapter->mbx_lock);

        set_bit(vid, adapter->active_vlans);

        return 0;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        spin_lock(&adapter->mbx_lock);

        if (hw->mac.ops.set_vfta)
                hw->mac.ops.set_vfta(hw, vid, 0, false);

        spin_unlock(&adapter->mbx_lock);

        clear_bit(vid, adapter->active_vlans);

        return 0;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
        u16 vid;

        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
                ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int count = 0;

        if ((netdev_uc_count(netdev)) > 10) {
                pr_err("Too many unicast filters - No Space\n");
                return -ENOSPC;
        }

        if (!netdev_uc_empty(netdev)) {
                struct netdev_hw_addr *ha;

                netdev_for_each_uc_addr(ha, netdev) {
                        hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
                        udelay(200);
                }
        } else {
                hw->mac.ops.set_uc_addr(hw, 0, NULL);
        }

        return count;
}

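/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 */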
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        spin_lock(&adapter->mbx_lock);

        if (hw->mac.ops.update_mc_addr_list)
                hw->mac.ops.update_mc_addr_list(hw, netdev);

        ixgbevf_write_uc_addr_list(netdev);

        spin_unlock(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
        int q_idx;
        struct ixgbevf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                napi_enable(&q_vector->napi);
        }
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
        int q_idx;
        struct ixgbevf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                napi_disable(&q_vector->napi);
        }
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        ixgbevf_set_rx_mode(netdev);

        ixgbevf_restore_vlan(adapter);

        ixgbevf_configure_tx(adapter);
        ixgbevf_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct ixgbevf_ring *ring = &adapter->rx_ring[i];

                ixgbevf_alloc_rx_buffers(adapter, ring,
                                         IXGBE_DESC_UNUSED(ring));
        }
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
                                                int rxr)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int j = adapter->rx_ring[rxr].reg_idx;
        int k;

        for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
                if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
                        break;
                else
                        msleep(1);
        }
        if (k >= IXGBE_MAX_RX_DESC_POLL) {
                hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n",
                       rxr);
        }

        ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
                                (adapter->rx_ring[rxr].count - 1));
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
        if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
                adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
                        adapter->stats.base_vfgprc;
                adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
                        adapter->stats.base_vfgptc;
                adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
                        adapter->stats.base_vfgorc;
                adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
                        adapter->stats.base_vfgotc;
                adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
                        adapter->stats.base_vfmprc;
        }
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
        adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
        adapter->stats.last_vfgorc |=
                (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
        adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
        adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
        adapter->stats.last_vfgotc |=
                (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
        adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

        adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
        adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
        adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
        adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
        adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        int i, j = 0;
        int num_rx_rings = adapter->num_rx_queues;
        u32 txdctl, rxdctl;
        u32 msg[2];

        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
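                /* enable WTHRESH=8 descriptors, to encourage burst writeback */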
                txdctl |= (8 << 16);
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
        }

        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
        }

        for (i = 0; i < num_rx_rings; i++) {
                j = adapter->rx_ring[i].reg_idx;
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
                rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
                if (hw->mac.type == ixgbe_mac_X540_vf) {
                        rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
                        rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
                                   IXGBE_RXDCTL_RLPML_EN);
                }
                IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
                ixgbevf_rx_desc_queue_enable(adapter, i);
        }

        ixgbevf_configure_msix(adapter);

        spin_lock(&adapter->mbx_lock);

        if (hw->mac.ops.set_rar) {
                if (is_valid_ether_addr(hw->mac.addr))
                        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
                else
                        hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
        }

        msg[0] = IXGBE_VF_SET_LPE;
        msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        hw->mbx.ops.write_posted(hw, msg, 2);

        spin_unlock(&adapter->mbx_lock);

        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);

        netif_tx_start_all_queues(netdev);

        ixgbevf_save_reset_stats(adapter);
        ixgbevf_init_last_counter_stats(adapter);

        mod_timer(&adapter->watchdog_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        ixgbevf_configure(adapter);

        ixgbevf_up_complete(adapter);

        IXGBE_READ_REG(hw, IXGBE_VTEICR);

        ixgbevf_irq_enable(adapter);
}

static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
                                  struct ixgbevf_ring *rx_ring)
{
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        if (!rx_ring->rx_buffer_info)
                return;

        for (i = 0; i < rx_ring->count; i++) {
                struct ixgbevf_rx_buffer *rx_buffer_info;

                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                if (rx_buffer_info->dma) {
                        dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_buffer_info->dma = 0;
                }
                if (rx_buffer_info->skb) {
                        struct sk_buff *skb = rx_buffer_info->skb;

                        rx_buffer_info->skb = NULL;
                        do {
                                struct sk_buff *this = skb;

                                skb = skb->prev;
                                dev_kfree_skb(this);
                        } while (skb);
                }
        }

        size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);

        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        if (rx_ring->head)
                writel(0, adapter->hw.hw_addr + rx_ring->head);
        if (rx_ring->tail)
                writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
                                  struct ixgbevf_ring *tx_ring)
{
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned long size;
        unsigned int i;

        if (!tx_ring->tx_buffer_info)
                return;

        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
                ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }

        size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_buffer_info, 0, size);

        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        if (tx_ring->head)
                writel(0, adapter->hw.hw_addr + tx_ring->head);
        if (tx_ring->tail)
                writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 txdctl;
        int i, j;

        set_bit(__IXGBEVF_DOWN, &adapter->state);

        netif_tx_disable(netdev);

        msleep(10);

        netif_tx_stop_all_queues(netdev);

        ixgbevf_irq_disable(adapter);

        ixgbevf_napi_disable_all(adapter);

        del_timer_sync(&adapter->watchdog_timer);

        while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
                msleep(1);

        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
        }

        netif_carrier_off(netdev);

        if (!pci_channel_offline(adapter->pdev))
                ixgbevf_reset(adapter);

        ixgbevf_clean_all_tx_rings(adapter);
        ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        WARN_ON(in_interrupt());

        while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
                msleep(1);

        if (!hw->mac.ops.reset_hw(hw)) {
                ixgbevf_down(adapter);
                ixgbevf_up(adapter);
        }

        clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        spin_lock(&adapter->mbx_lock);

        if (hw->mac.ops.reset_hw(hw))
                hw_dbg(hw, "PF still resetting\n");
        else
                hw->mac.ops.init_hw(hw);

        spin_unlock(&adapter->mbx_lock);

        if (is_valid_ether_addr(adapter->hw.mac.addr)) {
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
                       netdev->addr_len);
                memcpy(netdev->perm_addr, adapter->hw.mac.addr,
                       netdev->addr_len);
        }
}

static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
                                         int vectors)
{
        int err, vector_threshold;

        vector_threshold = MIN_MSIX_COUNT;

        while (vectors >= vector_threshold) {
                err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                      vectors);
                if (!err)
                        break;
                else if (err < 0)
                        vectors = 0;
                else
                        vectors = err;
        }

        if (vectors < vector_threshold) {
                hw_dbg(&adapter->hw,
                       "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
        } else {
                adapter->num_msix_vectors = vectors;
        }
}

static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
}

static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
        int i;

        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
                                   sizeof(struct ixgbevf_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                goto err_tx_ring_allocation;

        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
                                   sizeof(struct ixgbevf_ring), GFP_KERNEL);
        if (!adapter->rx_ring)
                goto err_rx_ring_allocation;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].count = adapter->tx_ring_count;
                adapter->tx_ring[i].queue_index = i;
                adapter->tx_ring[i].reg_idx = i;
                adapter->tx_ring[i].dev = &adapter->pdev->dev;
                adapter->tx_ring[i].netdev = adapter->netdev;
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].count = adapter->rx_ring_count;
                adapter->rx_ring[i].queue_index = i;
                adapter->rx_ring[i].reg_idx = i;
                adapter->rx_ring[i].dev = &adapter->pdev->dev;
                adapter->rx_ring[i].netdev = adapter->netdev;
        }

        return 0;

err_rx_ring_allocation:
        kfree(adapter->tx_ring);
err_tx_ring_allocation:
        return -ENOMEM;
}

static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
        int err = 0;
        int vector, v_budget;

        v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
        v_budget = min_t(int, v_budget, num_online_cpus());
        v_budget += NON_Q_VECTORS;

        adapter->msix_entries = kcalloc(v_budget,
                                        sizeof(struct msix_entry), GFP_KERNEL);
        if (!adapter->msix_entries) {
                err = -ENOMEM;
                goto out;
        }

        for (vector = 0; vector < v_budget; vector++)
                adapter->msix_entries[vector].entry = vector;

        ixgbevf_acquire_msix_vectors(adapter, v_budget);

out:
        return err;
}

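/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 */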
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
        int q_idx, num_q_vectors;
        struct ixgbevf_q_vector *q_vector;

        num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->v_idx = q_idx;
                netif_napi_add(adapter->netdev, &q_vector->napi,
                               ixgbevf_poll, 64);
                adapter->q_vector[q_idx] = q_vector;
        }

        return 0;

err_out:
        while (q_idx) {
                q_idx--;
                q_vector = adapter->q_vector[q_idx];
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
                adapter->q_vector[q_idx] = NULL;
        }
        return -ENOMEM;
}

static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
        int q_idx, num_q_vectors;
        int napi_vectors;

        num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        napi_vectors = adapter->num_rx_queues;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

                adapter->q_vector[q_idx] = NULL;
                if (q_idx < napi_vectors)
                        netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
}

static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
}

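/**
 * ixgbevf_init_interrupt_scheme - Determine if MSI-X is supported and init
 * @adapter: board private structure to initialize
 */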
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
        int err;

        ixgbevf_set_num_queues(adapter);

        err = ixgbevf_set_interrupt_capability(adapter);
        if (err) {
                hw_dbg(&adapter->hw,
                       "Unable to setup interrupt capabilities\n");
                goto err_set_interrupt;
        }

        err = ixgbevf_alloc_q_vectors(adapter);
        if (err) {
                hw_dbg(&adapter->hw,
                       "Unable to allocate memory for queue vectors\n");
                goto err_alloc_q_vectors;
        }

        err = ixgbevf_alloc_queues(adapter);
        if (err) {
                pr_err("Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
               (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
               adapter->num_rx_queues, adapter->num_tx_queues);

        set_bit(__IXGBEVF_DOWN, &adapter->state);

        return 0;
err_alloc_queues:
        ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
        ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
        return err;
}

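/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */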
static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
        int err;

        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        hw->revision_id = pdev->revision;
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;

        hw->mbx.ops.init_params(hw);
        hw->mac.max_tx_queues = MAX_TX_QUEUES;
        hw->mac.max_rx_queues = MAX_RX_QUEUES;
        err = hw->mac.ops.reset_hw(hw);
        if (err) {
                dev_info(&pdev->dev,
                         "PF still in reset state, assigning new address\n");
                eth_hw_addr_random(adapter->netdev);
                memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
                       adapter->netdev->addr_len);
        } else {
                err = hw->mac.ops.init_hw(hw);
                if (err) {
                        pr_err("init_shared_code failed: %d\n", err);
                        goto out;
                }
                memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
                       adapter->netdev->addr_len);
        }

        spin_lock_init(&adapter->mbx_lock);

        adapter->rx_itr_setting = 1;
        adapter->tx_itr_setting = 1;

        adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
        adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

        set_bit(__IXGBEVF_DOWN, &adapter->state);
        return 0;

out:
        return err;
}

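/*
 * The VF statistics registers are free-running hardware counters that are
 * never reset.  These macros fold the current 32-bit (or 36-bit, split
 * across LSB/MSB registers) hardware value into a monotonic 64-bit software
 * counter, adding the register's full range whenever a wraparound is
 * detected.
 */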
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)    \
        {                                                       \
                u32 current_counter = IXGBE_READ_REG(hw, reg);  \
                if (current_counter < last_counter)             \
                        counter += 0x100000000LL;               \
                last_counter = current_counter;                 \
                counter &= 0xFFFFFFFF00000000LL;                \
                counter |= current_counter;                     \
        }

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
        {                                                                \
                u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);   \
                u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);   \
                u64 current_counter = (current_counter_msb << 32) |      \
                        current_counter_lsb;                             \
                if (current_counter < last_counter)                      \
                        counter += 0x1000000000LL;                       \
                last_counter = current_counter;                          \
                counter &= 0xFFFFFFF000000000LL;                         \
                counter |= current_counter;                              \
        }

void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
                                adapter->stats.vfgprc);
        UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
                                adapter->stats.vfgptc);
        UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
                                adapter->stats.last_vfgorc,
                                adapter->stats.vfgorc);
        UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
                                adapter->stats.last_vfgotc,
                                adapter->stats.vfgotc);
        UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
                                adapter->stats.vfmprc);
}

static void ixgbevf_watchdog(unsigned long data)
{
        struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eics = 0;
        int i;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                goto watchdog_short_circuit;

        for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                struct ixgbevf_q_vector *qv = adapter->q_vector[i];

                if (qv->rx.ring || qv->tx.ring)
                        eics |= 1 << i;
        }

        IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
        schedule_work(&adapter->watchdog_task);
}

static void ixgbevf_tx_timeout(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        schedule_work(&adapter->reset_task);
}

static void ixgbevf_reset_task(struct work_struct *work)
{
        struct ixgbevf_adapter *adapter;

        adapter = container_of(work, struct ixgbevf_adapter, reset_task);

        if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
            test_bit(__IXGBEVF_RESETTING, &adapter->state))
                return;

        adapter->tx_timeout_count++;

        ixgbevf_reinit_locked(adapter);
}

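/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 */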
static void ixgbevf_watchdog_task(struct work_struct *work)
{
        struct ixgbevf_adapter *adapter = container_of(work,
                                                       struct ixgbevf_adapter,
                                                       watchdog_task);
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 link_speed = adapter->link_speed;
        bool link_up = adapter->link_up;

        adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

        if (hw->mac.ops.check_link) {
                s32 need_reset;

                spin_lock(&adapter->mbx_lock);

                need_reset = hw->mac.ops.check_link(hw, &link_speed,
                                                    &link_up, false);

                spin_unlock(&adapter->mbx_lock);

                if (need_reset) {
                        adapter->link_up = link_up;
                        adapter->link_speed = link_speed;
                        netif_carrier_off(netdev);
                        netif_tx_stop_all_queues(netdev);
                        schedule_work(&adapter->reset_task);
                        goto pf_has_reset;
                }
        } else {
                link_speed = IXGBE_LINK_SPEED_10GB_FULL;
                link_up = true;
        }
        adapter->link_up = link_up;
        adapter->link_speed = link_speed;

        if (link_up) {
                if (!netif_carrier_ok(netdev)) {
                        hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
                               10 : 1);
                        netif_carrier_on(netdev);
                        netif_tx_wake_all_queues(netdev);
                }
        } else {
                adapter->link_up = false;
                adapter->link_speed = 0;
                if (netif_carrier_ok(netdev)) {
                        hw_dbg(&adapter->hw, "NIC Link is Down\n");
                        netif_carrier_off(netdev);
                        netif_tx_stop_all_queues(netdev);
                }
        }

        ixgbevf_update_stats(adapter);

pf_has_reset:
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
                mod_timer(&adapter->watchdog_timer,
                          round_jiffies(jiffies + (2 * HZ)));

        adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
                               struct ixgbevf_ring *tx_ring)
{
        struct pci_dev *pdev = adapter->pdev;

        ixgbevf_clean_tx_ring(adapter, tx_ring);

        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;

        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
                          tx_ring->dma);

        tx_ring->desc = NULL;
}

static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                if (adapter->tx_ring[i].desc)
                        ixgbevf_free_tx_resources(adapter,
                                                  &adapter->tx_ring[i]);
}

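/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */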
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
                               struct ixgbevf_ring *tx_ring)
{
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
        tx_ring->tx_buffer_info = vzalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;

        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);

        tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc)
                goto err;

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        return 0;

err:
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
        hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
        return -ENOMEM;
}

static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
        int i, err = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
                if (!err)
                        continue;
                hw_dbg(&adapter->hw,
                       "Allocation for Tx Queue %u failed\n", i);
                break;
        }

        return err;
}

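/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */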
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
                               struct ixgbevf_ring *rx_ring)
{
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
        rx_ring->rx_buffer_info = vzalloc(size);
        if (!rx_ring->rx_buffer_info)
                goto alloc_failed;

        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);

        rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);

        if (!rx_ring->desc) {
                hw_dbg(&adapter->hw,
                       "Unable to allocate memory for the receive descriptor ring\n");
                vfree(rx_ring->rx_buffer_info);
                rx_ring->rx_buffer_info = NULL;
                goto alloc_failed;
        }

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        return 0;
alloc_failed:
        return -ENOMEM;
}

static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
        int i, err = 0;

        for (i = 0; i < adapter->num_rx_queues; i++) {
                err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
                if (!err)
                        continue;
                hw_dbg(&adapter->hw,
                       "Allocation for Rx Queue %u failed\n", i);
                break;
        }
        return err;
}

void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
                               struct ixgbevf_ring *rx_ring)
{
        struct pci_dev *pdev = adapter->pdev;

        ixgbevf_clean_rx_ring(adapter, rx_ring);

        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;

        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
                          rx_ring->dma);

        rx_ring->desc = NULL;
}

static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                if (adapter->rx_ring[i].desc)
                        ixgbevf_free_rx_resources(adapter,
                                                  &adapter->rx_ring[i]);
}

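/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */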
static int ixgbevf_open(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int err;

        if (test_bit(__IXGBEVF_TESTING, &adapter->state))
                return -EBUSY;

        if (hw->adapter_stopped) {
                ixgbevf_reset(adapter);

                if (hw->adapter_stopped) {
                        err = IXGBE_ERR_MBX;
                        pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
                        goto err_setup_reset;
                }
        }

        err = ixgbevf_setup_all_tx_resources(adapter);
        if (err)
                goto err_setup_tx;

        err = ixgbevf_setup_all_rx_resources(adapter);
        if (err)
                goto err_setup_rx;

        ixgbevf_configure(adapter);

        ixgbevf_map_rings_to_vectors(adapter);

        ixgbevf_up_complete(adapter);

        IXGBE_READ_REG(hw, IXGBE_VTEICR);
        err = ixgbevf_request_irq(adapter);
        if (err)
                goto err_req_irq;

        ixgbevf_irq_enable(adapter);

        return 0;

err_req_irq:
        ixgbevf_down(adapter);
        ixgbevf_free_irq(adapter);
err_setup_rx:
        ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
        ixgbevf_free_all_tx_resources(adapter);
        ixgbevf_reset(adapter);

err_setup_reset:
        return err;
}

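/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  All transmit and receive resources are freed.
 */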
static int ixgbevf_close(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        ixgbevf_down(adapter);
        ixgbevf_free_irq(adapter);

        ixgbevf_free_all_tx_resources(adapter);
        ixgbevf_free_all_rx_resources(adapter);

        return 0;
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
                                u32 vlan_macip_lens, u32 type_tucmd,
                                u32 mss_l4len_idx)
{
        struct ixgbe_adv_tx_context_desc *context_desc;
        u16 i = tx_ring->next_to_use;

        context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

        i++;
        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

        type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

        context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
        context_desc->seqnum_seed = 0;
        context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
        context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
                       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
        u32 vlan_macip_lens, type_tucmd;
        u32 mss_l4len_idx, l4len;

        if (!skb_is_gso(skb))
                return 0;

        if (skb_header_cloned(skb)) {
                int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);

                if (err)
                        return err;
        }

        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);

                iph->tot_len = 0;
                iph->check = 0;
                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP,
                                                         0);
                type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
        } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check =
                        ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                         &ipv6_hdr(skb)->daddr,
                                         0, IPPROTO_TCP, 0);
        }

2480 l4len = tcp_hdrlen(skb);
2481 *hdr_len += l4len;
2482 *hdr_len = skb_transport_offset(skb) + l4len;
2483
2484
2485 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2486 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2487 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2488
2489
2490 vlan_macip_lens = skb_network_header_len(skb);
2491 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2492 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2493
2494 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2495 type_tucmd, mss_l4len_idx);
2496
2497 return 1;
2498}
2499
static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case __constant_htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case __constant_htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 skb->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return (skb->ip_summed == CHECKSUM_PARTIAL);
}

static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  unsigned int first)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)skb_frag_size(frag), total);
		offset = 0;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma =
				skb_frag_dma_map(tx_ring->dev, frag,
						 offset, size, DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(tx_ring->dev,
					      tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;
	tx_ring->tx_buffer_info[first].time_stamp = jiffies;

	return count;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return count;
}

static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
	}

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_ADVTXD_CC;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	tx_ring->next_to_use = i;
}

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif

	tx_ring = &adapter->tx_ring[r_idx];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(tx_ring, tx_flags,
			 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}

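/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/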
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock(&adapter->mbx_lock);

	return 0;
}

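/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/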
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
	u32 msg[2];

	if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (!netif_running(netdev)) {
		msg[0] = IXGBE_VF_SET_LPE;
		msg[1] = max_frame;
		hw->mbx.ops.write_posted(hw, msg, 2);
	}

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
	}

	pci_save_state(pdev);

	pci_disable_device(pdev);
}

static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = &adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = &adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}

static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbe_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

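/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/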
static int __devinit ixgbevf_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* pick up the PCI bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

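/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/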
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

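/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/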
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

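/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/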
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

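/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/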
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

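/* PCI Error Recovery (ERS) */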
static struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = __devexit_p(ixgbevf_remove),
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};

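/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/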
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

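/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/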
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
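/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/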
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */