/* Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver
 * Copyright (c) 2009 - 2012 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf] = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
103
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
125static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
126 u8 queue, u8 msix_vector)
127{
128 u32 ivar, index;
129 struct ixgbe_hw *hw = &adapter->hw;
130 if (direction == -1) {
		/* other causes */
132 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
133 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
134 ivar &= ~0xFF;
135 ivar |= msix_vector;
136 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
137 } else {
		/* tx or rx causes */
139 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
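		/* each VTIVAR register holds the entries for two queues:
		 * Rx causes in bits 0-7 and 16-23, Tx causes in 8-15 and 24-31
		 */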
140 index = ((16 * (queue & 1)) + (8 * direction));
141 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
142 ivar &= ~(0xFF << index);
143 ivar |= (msix_vector << index);
144 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
145 }
146}
147
148static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
149 struct ixgbevf_tx_buffer
150 *tx_buffer_info)
151{
152 if (tx_buffer_info->dma) {
153 if (tx_buffer_info->mapped_as_page)
154 dma_unmap_page(tx_ring->dev,
155 tx_buffer_info->dma,
156 tx_buffer_info->length,
157 DMA_TO_DEVICE);
158 else
159 dma_unmap_single(tx_ring->dev,
160 tx_buffer_info->dma,
161 tx_buffer_info->length,
162 DMA_TO_DEVICE);
163 tx_buffer_info->dma = 0;
164 }
165 if (tx_buffer_info->skb) {
166 dev_kfree_skb_any(tx_buffer_info->skb);
167 tx_buffer_info->skb = NULL;
168 }
169 tx_buffer_info->time_stamp = 0;
170
171}
172
173#define IXGBE_MAX_TXD_PWR 14
174#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
175
176
177#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
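/* worst-case descriptors for one skb: every fragment plus slack for the
 * skb head and a context descriptor
 */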
178#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
179
180static void ixgbevf_tx_timeout(struct net_device *netdev);
181
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
187static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
188 struct ixgbevf_ring *tx_ring)
189{
190 struct ixgbevf_adapter *adapter = q_vector->adapter;
191 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
192 struct ixgbevf_tx_buffer *tx_buffer_info;
193 unsigned int i, eop, count = 0;
194 unsigned int total_bytes = 0, total_packets = 0;
195
196 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
197 return true;
198
199 i = tx_ring->next_to_clean;
200 eop = tx_ring->tx_buffer_info[i].next_to_watch;
201 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
202
203 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
204 (count < tx_ring->count)) {
205 bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc status */
		/* skip this batch if next_to_watch changed since eop was sampled */
208 if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
209 goto cont_loop;
210 for ( ; !cleaned; count++) {
211 struct sk_buff *skb;
212 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
213 tx_buffer_info = &tx_ring->tx_buffer_info[i];
214 cleaned = (i == eop);
215 skb = tx_buffer_info->skb;
216
217 if (cleaned && skb) {
218 unsigned int segs, bytecount;
219
220
221 segs = skb_shinfo(skb)->gso_segs ?: 1;
222
223 bytecount = ((segs - 1) * skb_headlen(skb)) +
224 skb->len;
225 total_packets += segs;
226 total_bytes += bytecount;
227 }
228
229 ixgbevf_unmap_and_free_tx_resource(tx_ring,
230 tx_buffer_info);
231
232 tx_desc->wb.status = 0;
233
234 i++;
235 if (i == tx_ring->count)
236 i = 0;
237 }
238
239cont_loop:
240 eop = tx_ring->tx_buffer_info[i].next_to_watch;
241 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
242 }
243
244 tx_ring->next_to_clean = i;
245
246#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
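	/* only restart the queue once enough descriptors for two more
	 * worst-case packets have been reclaimed
	 */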
247 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
248 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
252 smp_mb();
253 if (__netif_subqueue_stopped(tx_ring->netdev,
254 tx_ring->queue_index) &&
255 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
256 netif_wake_subqueue(tx_ring->netdev,
257 tx_ring->queue_index);
258 ++adapter->restart_queue;
259 }
260 }
261
262 u64_stats_update_begin(&tx_ring->syncp);
263 tx_ring->total_bytes += total_bytes;
264 tx_ring->total_packets += total_packets;
265 u64_stats_update_end(&tx_ring->syncp);
266 q_vector->tx.total_bytes += total_bytes;
267 q_vector->tx.total_packets += total_packets;
268
269 return count < tx_ring->count;
270}
271
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
279static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
280 struct sk_buff *skb, u8 status,
281 union ixgbe_adv_rx_desc *rx_desc)
282{
283 struct ixgbevf_adapter *adapter = q_vector->adapter;
284 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
285 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
286
287 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
288 __vlan_hwaccel_put_tag(skb, tag);
289
290 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
291 napi_gro_receive(&q_vector->napi, skb);
292 else
293 netif_rx(skb);
294}
295
296
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to the Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
302static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
303 u32 status_err, struct sk_buff *skb)
304{
305 skb_checksum_none_assert(skb);
306
307
308 if (!(ring->netdev->features & NETIF_F_RXCSUM))
309 return;
310
311
312 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
313 (status_err & IXGBE_RXDADV_ERR_IPE)) {
314 ring->hw_csum_rx_error++;
315 return;
316 }
317
318 if (!(status_err & IXGBE_RXD_STAT_L4CS))
319 return;
320
321 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
322 ring->hw_csum_rx_error++;
323 return;
324 }
325
326
327 skb->ip_summed = CHECKSUM_UNNECESSARY;
328 ring->hw_csum_rx_good++;
329}
330
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
335static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
336 struct ixgbevf_ring *rx_ring,
337 int cleaned_count)
338{
339 struct pci_dev *pdev = adapter->pdev;
340 union ixgbe_adv_rx_desc *rx_desc;
341 struct ixgbevf_rx_buffer *bi;
342 unsigned int i = rx_ring->next_to_use;
343
344 bi = &rx_ring->rx_buffer_info[i];
345
346 while (cleaned_count--) {
347 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
348
349 if (!bi->skb) {
350 struct sk_buff *skb;
351
352 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
353 rx_ring->rx_buf_len);
354 if (!skb) {
355 adapter->alloc_rx_buff_failed++;
356 goto no_buffers;
357 }
358 bi->skb = skb;
359
360 bi->dma = dma_map_single(&pdev->dev, skb->data,
361 rx_ring->rx_buf_len,
362 DMA_FROM_DEVICE);
363 if (dma_mapping_error(&pdev->dev, bi->dma)) {
364 dev_kfree_skb(skb);
365 bi->skb = NULL;
366 dev_err(&pdev->dev, "RX DMA map failed\n");
367 break;
368 }
369 }
370 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
371
372 i++;
373 if (i == rx_ring->count)
374 i = 0;
375 bi = &rx_ring->rx_buffer_info[i];
376 }
377
378no_buffers:
379 if (rx_ring->next_to_use != i) {
380 rx_ring->next_to_use = i;
381 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
382 }
383}
384
385static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
386 u32 qmask)
387{
388 struct ixgbe_hw *hw = &adapter->hw;
389
390 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
391}
392
393static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
394 struct ixgbevf_ring *rx_ring,
395 int budget)
396{
397 struct ixgbevf_adapter *adapter = q_vector->adapter;
398 struct pci_dev *pdev = adapter->pdev;
399 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
400 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
401 struct sk_buff *skb;
402 unsigned int i;
403 u32 len, staterr;
404 int cleaned_count = 0;
405 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
406
407 i = rx_ring->next_to_clean;
408 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
409 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
410 rx_buffer_info = &rx_ring->rx_buffer_info[i];
411
412 while (staterr & IXGBE_RXD_STAT_DD) {
413 if (!budget)
414 break;
415 budget--;
416
		rmb(); /* read descriptor and rx_buffer_info after status DD */
418 len = le16_to_cpu(rx_desc->wb.upper.length);
419 skb = rx_buffer_info->skb;
420 prefetch(skb->data - NET_IP_ALIGN);
421 rx_buffer_info->skb = NULL;
422
423 if (rx_buffer_info->dma) {
424 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
425 rx_ring->rx_buf_len,
426 DMA_FROM_DEVICE);
427 rx_buffer_info->dma = 0;
428 skb_put(skb, len);
429 }
430
431 i++;
432 if (i == rx_ring->count)
433 i = 0;
434
435 next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
436 prefetch(next_rxd);
437 cleaned_count++;
438
439 next_buffer = &rx_ring->rx_buffer_info[i];
440
441 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
442 skb->next = next_buffer->skb;
443 IXGBE_CB(skb->next)->prev = skb;
444 adapter->non_eop_descs++;
445 goto next_desc;
446 }
447
448
449 if (IXGBE_CB(skb)->prev) {
450 do {
451 struct sk_buff *this = skb;
452 skb = IXGBE_CB(skb)->prev;
453 dev_kfree_skb(this);
454 } while (skb);
455 goto next_desc;
456 }
457
458
459 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
460 dev_kfree_skb_irq(skb);
461 goto next_desc;
462 }
463
464 ixgbevf_rx_checksum(rx_ring, staterr, skb);
465
466
467 total_rx_bytes += skb->len;
468 total_rx_packets++;
469
		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
474 if (staterr & IXGBE_RXD_STAT_LB) {
475 u32 header_fixup_len = skb_headlen(skb);
476 if (header_fixup_len < 14)
477 skb_push(skb, header_fixup_len);
478 }
479 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
480
		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
484 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
485 !(compare_ether_addr(adapter->netdev->dev_addr,
486 eth_hdr(skb)->h_source))) {
487 dev_kfree_skb_irq(skb);
488 goto next_desc;
489 }
490
491 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
492
493next_desc:
494 rx_desc->wb.upper.status_error = 0;
495
496
497 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
498 ixgbevf_alloc_rx_buffers(adapter, rx_ring,
499 cleaned_count);
500 cleaned_count = 0;
501 }
502
503
504 rx_desc = next_rxd;
505 rx_buffer_info = &rx_ring->rx_buffer_info[i];
506
507 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
508 }
509
510 rx_ring->next_to_clean = i;
511 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
512
513 if (cleaned_count)
514 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
515
516 u64_stats_update_begin(&rx_ring->syncp);
517 rx_ring->total_packets += total_rx_packets;
518 rx_ring->total_bytes += total_rx_bytes;
519 u64_stats_update_end(&rx_ring->syncp);
520 q_vector->rx.total_packets += total_rx_packets;
521 q_vector->rx.total_bytes += total_rx_bytes;
522
523 return !!budget;
524}
525
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function cleans all the Tx and Rx rings associated with a q_vector.
 **/
534static int ixgbevf_poll(struct napi_struct *napi, int budget)
535{
536 struct ixgbevf_q_vector *q_vector =
537 container_of(napi, struct ixgbevf_q_vector, napi);
538 struct ixgbevf_adapter *adapter = q_vector->adapter;
539 struct ixgbevf_ring *ring;
540 int per_ring_budget;
541 bool clean_complete = true;
542
543 ixgbevf_for_each_ring(ring, q_vector->tx)
544 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
545
	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
548 if (q_vector->rx.count > 1)
549 per_ring_budget = max(budget/q_vector->rx.count, 1);
550 else
551 per_ring_budget = budget;
552
553 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
554 ixgbevf_for_each_ring(ring, q_vector->rx)
555 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
556 per_ring_budget);
557 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
558
559
560 if (!clean_complete)
561 return budget;
562
563 napi_complete(napi);
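	/* an itr setting of 1 means adaptive (dynamic) interrupt moderation */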
564 if (adapter->rx_itr_setting & 1)
565 ixgbevf_set_itr(q_vector);
566 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
567 ixgbevf_irq_enable_queues(adapter,
568 1 << q_vector->v_idx);
569
570 return 0;
571}
572
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
577static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
578{
579 struct ixgbevf_adapter *adapter = q_vector->adapter;
580 struct ixgbe_hw *hw = &adapter->hw;
581 int v_idx = q_vector->v_idx;
582 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
583
	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
588 itr_reg |= IXGBE_EITR_CNT_WDIS;
589
590 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
591}
592
593
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
600static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
601{
602 struct ixgbevf_q_vector *q_vector;
603 int q_vectors, v_idx;
604
605 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
606 adapter->eims_enable_mask = 0;
607
	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
612 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
613 struct ixgbevf_ring *ring;
614 q_vector = adapter->q_vector[v_idx];
615
616 ixgbevf_for_each_ring(ring, q_vector->rx)
617 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
618
619 ixgbevf_for_each_ring(ring, q_vector->tx)
620 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
621
622 if (q_vector->tx.ring && !q_vector->rx.ring) {
623
624 if (adapter->tx_itr_setting == 1)
625 q_vector->itr = IXGBE_10K_ITR;
626 else
627 q_vector->itr = adapter->tx_itr_setting;
628 } else {
629
630 if (adapter->rx_itr_setting == 1)
631 q_vector->itr = IXGBE_20K_ITR;
632 else
633 q_vector->itr = adapter->rx_itr_setting;
634 }
635
636
637 adapter->eims_enable_mask |= 1 << v_idx;
638
639 ixgbevf_write_eitr(q_vector);
640 }
641
642 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
643
644 adapter->eims_other = 1 << v_idx;
645 adapter->eims_enable_mask |= adapter->eims_other;
646}
647
648enum latency_range {
649 lowest_latency = 0,
650 low_latency = 1,
651 bulk_latency = 2,
652 latency_invalid = 255
653};
654
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
668static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
669 struct ixgbevf_ring_container *ring_container)
670{
671 int bytes = ring_container->total_bytes;
672 int packets = ring_container->total_packets;
673 u32 timepassed_us;
674 u64 bytes_perint;
675 u8 itr_setting = ring_container->itr;
676
677 if (packets == 0)
678 return;
679
680
	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
686 timepassed_us = q_vector->itr >> 2;
687 bytes_perint = bytes / timepassed_us;
688
689 switch (itr_setting) {
690 case lowest_latency:
691 if (bytes_perint > 10)
692 itr_setting = low_latency;
693 break;
694 case low_latency:
695 if (bytes_perint > 20)
696 itr_setting = bulk_latency;
697 else if (bytes_perint <= 10)
698 itr_setting = lowest_latency;
699 break;
700 case bulk_latency:
701 if (bytes_perint <= 20)
702 itr_setting = low_latency;
703 break;
704 }
705
706
707 ring_container->total_bytes = 0;
708 ring_container->total_packets = 0;
709
710
711 ring_container->itr = itr_setting;
712}
713
714static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
715{
716 u32 new_itr = q_vector->itr;
717 u8 current_itr;
718
719 ixgbevf_update_itr(q_vector, &q_vector->tx);
720 ixgbevf_update_itr(q_vector, &q_vector->rx);
721
722 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
723
724 switch (current_itr) {
725
726 case lowest_latency:
727 new_itr = IXGBE_100K_ITR;
728 break;
729 case low_latency:
730 new_itr = IXGBE_20K_ITR;
731 break;
732 case bulk_latency:
733 default:
734 new_itr = IXGBE_8K_ITR;
735 break;
736 }
737
738 if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
740 new_itr = (10 * new_itr * q_vector->itr) /
741 ((9 * new_itr) + q_vector->itr);
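		/* the 9:1 weighting keeps most of the previous interval, so the
		 * moderation rate drifts toward the new target rather than jumping
		 */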
742
743
744 q_vector->itr = new_itr;
745
746 ixgbevf_write_eitr(q_vector);
747 }
748}
749
750static irqreturn_t ixgbevf_msix_other(int irq, void *data)
751{
752 struct ixgbevf_adapter *adapter = data;
753 struct pci_dev *pdev = adapter->pdev;
754 struct ixgbe_hw *hw = &adapter->hw;
755 u32 msg;
756 bool got_ack = false;
757
758 hw->mac.get_link_status = 1;
759 if (!hw->mbx.ops.check_for_ack(hw))
760 got_ack = true;
761
762 if (!hw->mbx.ops.check_for_msg(hw)) {
763 hw->mbx.ops.read(hw, &msg, 1);
764
765 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
766 mod_timer(&adapter->watchdog_timer,
767 round_jiffies(jiffies + 1));
768 adapter->link_up = false;
769 }
770
771 if (msg & IXGBE_VT_MSGTYPE_NACK)
772 dev_info(&pdev->dev,
773 "Last Request of type %2.2x to PF Nacked\n",
774 msg & 0xFF);
775 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
776 }
777
	/* checking for the ack clears the PFACK bit.  Place
	 * it back in the v2p_mailbox cache so that anyone
	 * polling for an ack will not miss it
	 */
782 if (got_ack)
783 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
784
785 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
786
787 return IRQ_HANDLED;
788}
789
790
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
795static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
796{
797 struct ixgbevf_q_vector *q_vector = data;
798
799
800 if (q_vector->rx.ring || q_vector->tx.ring)
801 napi_schedule(&q_vector->napi);
802
803 return IRQ_HANDLED;
804}
805
806static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
807 int r_idx)
808{
809 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
810
811 a->rx_ring[r_idx].next = q_vector->rx.ring;
812 q_vector->rx.ring = &a->rx_ring[r_idx];
813 q_vector->rx.count++;
814}
815
816static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
817 int t_idx)
818{
819 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
820
821 a->tx_ring[t_idx].next = q_vector->tx.ring;
822 q_vector->tx.ring = &a->tx_ring[t_idx];
823 q_vector->tx.count++;
824}
825
826
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
836static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
837{
838 int q_vectors;
839 int v_start = 0;
840 int rxr_idx = 0, txr_idx = 0;
841 int rxr_remaining = adapter->num_rx_queues;
842 int txr_remaining = adapter->num_tx_queues;
843 int i, j;
844 int rqpv, tqpv;
845 int err = 0;
846
847 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
848
849
	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
853 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
854 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
855 map_vector_to_rxq(adapter, v_start, rxr_idx);
856
857 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
858 map_vector_to_txq(adapter, v_start, txr_idx);
859 goto out;
860 }
861
	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
868 for (i = v_start; i < q_vectors; i++) {
869 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
870 for (j = 0; j < rqpv; j++) {
871 map_vector_to_rxq(adapter, i, rxr_idx);
872 rxr_idx++;
873 rxr_remaining--;
874 }
875 }
876 for (i = v_start; i < q_vectors; i++) {
877 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
878 for (j = 0; j < tqpv; j++) {
879 map_vector_to_txq(adapter, i, txr_idx);
880 txr_idx++;
881 txr_remaining--;
882 }
883 }
884
885out:
886 return err;
887}
888
889
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
896static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
897{
898 struct net_device *netdev = adapter->netdev;
899 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
900 int vector, err;
901 int ri = 0, ti = 0;
902
903 for (vector = 0; vector < q_vectors; vector++) {
904 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
905 struct msix_entry *entry = &adapter->msix_entries[vector];
906
907 if (q_vector->tx.ring && q_vector->rx.ring) {
908 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
909 "%s-%s-%d", netdev->name, "TxRx", ri++);
910 ti++;
911 } else if (q_vector->rx.ring) {
912 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
913 "%s-%s-%d", netdev->name, "rx", ri++);
914 } else if (q_vector->tx.ring) {
915 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
916 "%s-%s-%d", netdev->name, "tx", ti++);
917 } else {
			/* skip this unused q_vector */
919 continue;
920 }
921 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
922 q_vector->name, q_vector);
923 if (err) {
924 hw_dbg(&adapter->hw,
925 "request_irq failed for MSIX interrupt "
926 "Error: %d\n", err);
927 goto free_queue_irqs;
928 }
929 }
930
931 err = request_irq(adapter->msix_entries[vector].vector,
932 &ixgbevf_msix_other, 0, netdev->name, adapter);
933 if (err) {
934 hw_dbg(&adapter->hw,
935 "request_irq for msix_other failed: %d\n", err);
936 goto free_queue_irqs;
937 }
938
939 return 0;
940
941free_queue_irqs:
942 while (vector) {
943 vector--;
944 free_irq(adapter->msix_entries[vector].vector,
945 adapter->q_vector[vector]);
946 }
947
	/* A failure here is not recoverable: without its MSI-X vectors the VF
	 * cannot operate.  Zero num_msix_vectors so that a later open attempt
	 * fails cleanly instead of reusing stale vector entries.
	 */
957 adapter->num_msix_vectors = 0;
958 return err;
959}
960
961static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
962{
963 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
964
965 for (i = 0; i < q_vectors; i++) {
966 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
967 q_vector->rx.ring = NULL;
968 q_vector->tx.ring = NULL;
969 q_vector->rx.count = 0;
970 q_vector->tx.count = 0;
971 }
972}
973
974
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
981static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
982{
983 int err = 0;
984
985 err = ixgbevf_request_msix_irqs(adapter);
986
987 if (err)
988 hw_dbg(&adapter->hw,
989 "request_irq failed, Error %d\n", err);
990
991 return err;
992}
993
994static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
995{
996 int i, q_vectors;
997
998 q_vectors = adapter->num_msix_vectors;
999 i = q_vectors - 1;
1000
1001 free_irq(adapter->msix_entries[i].vector, adapter);
1002 i--;
1003
1004 for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
1006 if (!adapter->q_vector[i]->rx.ring &&
1007 !adapter->q_vector[i]->tx.ring)
1008 continue;
1009
1010 free_irq(adapter->msix_entries[i].vector,
1011 adapter->q_vector[i]);
1012 }
1013
1014 ixgbevf_reset_q_vectors(adapter);
1015}
1016
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
1021static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1022{
1023 struct ixgbe_hw *hw = &adapter->hw;
1024 int i;
1025
1026 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1027 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1028 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1029
1030 IXGBE_WRITE_FLUSH(hw);
1031
1032 for (i = 0; i < adapter->num_msix_vectors; i++)
1033 synchronize_irq(adapter->msix_entries[i].vector);
1034}
1035
1036
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
1040static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1041{
1042 struct ixgbe_hw *hw = &adapter->hw;
1043
1044 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1045 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1046 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1047}
1048
1049
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
1055static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1056{
1057 u64 tdba;
1058 struct ixgbe_hw *hw = &adapter->hw;
1059 u32 i, j, tdlen, txctrl;
1060
1061
1062 for (i = 0; i < adapter->num_tx_queues; i++) {
1063 struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1064 j = ring->reg_idx;
1065 tdba = ring->dma;
1066 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1067 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1068 (tdba & DMA_BIT_MASK(32)));
1069 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1070 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1071 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1072 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1073 adapter->tx_ring[i].head = IXGBE_VFTDH(j);
1074 adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
1075
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
1078 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1079 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1080 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1081 }
1082}
1083
1084#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1085
1086static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1087{
1088 struct ixgbevf_ring *rx_ring;
1089 struct ixgbe_hw *hw = &adapter->hw;
1090 u32 srrctl;
1091
1092 rx_ring = &adapter->rx_ring[index];
1093
1094 srrctl = IXGBE_SRRCTL_DROP_EN;
1095
1096 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1097
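	/* the SRRCTL buffer size field is expressed in 1 KB resolution */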
1098 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1099 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1100
1101 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1102}
1103
1104static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1105{
1106 struct ixgbe_hw *hw = &adapter->hw;
1107 struct net_device *netdev = adapter->netdev;
1108 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1109 int i;
1110 u16 rx_buf_len;
1111
1112
1113 ixgbevf_rlpml_set_vf(hw, max_frame);
1114
1115
1116 max_frame += VLAN_HLEN;
1117
1118
	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
1122 if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1123 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1124 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1125 else if (max_frame <= IXGBEVF_RXBUFFER_2K)
1126 rx_buf_len = IXGBEVF_RXBUFFER_2K;
1127 else if (max_frame <= IXGBEVF_RXBUFFER_4K)
1128 rx_buf_len = IXGBEVF_RXBUFFER_4K;
1129 else if (max_frame <= IXGBEVF_RXBUFFER_8K)
1130 rx_buf_len = IXGBEVF_RXBUFFER_8K;
1131 else
1132 rx_buf_len = IXGBEVF_RXBUFFER_10K;
1133
1134 for (i = 0; i < adapter->num_rx_queues; i++)
1135 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1136}
1137
1138
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
1144static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1145{
1146 u64 rdba;
1147 struct ixgbe_hw *hw = &adapter->hw;
1148 int i, j;
1149 u32 rdlen;
1150
1151
1152 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1153
1154
1155 ixgbevf_set_rx_buffer_len(adapter);
1156
1157 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
1160 for (i = 0; i < adapter->num_rx_queues; i++) {
1161 rdba = adapter->rx_ring[i].dma;
1162 j = adapter->rx_ring[i].reg_idx;
1163 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1164 (rdba & DMA_BIT_MASK(32)));
1165 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1166 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1167 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1168 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1169 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1170 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1171
1172 ixgbevf_configure_srrctl(adapter, j);
1173 }
1174}
1175
1176static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1177{
1178 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1179 struct ixgbe_hw *hw = &adapter->hw;
1180 int err;
1181
1182 spin_lock_bh(&adapter->mbx_lock);
1183
1184
1185 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1186
1187 spin_unlock_bh(&adapter->mbx_lock);
1188
1189
1190 if (err == IXGBE_ERR_MBX)
1191 return -EIO;
1192
1193 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1194 return -EACCES;
1195
1196 set_bit(vid, adapter->active_vlans);
1197
1198 return err;
1199}
1200
1201static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1202{
1203 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1204 struct ixgbe_hw *hw = &adapter->hw;
1205 int err = -EOPNOTSUPP;
1206
1207 spin_lock_bh(&adapter->mbx_lock);
1208
1209
1210 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1211
1212 spin_unlock_bh(&adapter->mbx_lock);
1213
1214 clear_bit(vid, adapter->active_vlans);
1215
1216 return err;
1217}
1218
1219static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1220{
1221 u16 vid;
1222
1223 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1224 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
1225}
1226
1227static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1228{
1229 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1230 struct ixgbe_hw *hw = &adapter->hw;
1231 int count = 0;
1232
1233 if ((netdev_uc_count(netdev)) > 10) {
1234 pr_err("Too many unicast filters - No Space\n");
1235 return -ENOSPC;
1236 }
1237
1238 if (!netdev_uc_empty(netdev)) {
1239 struct netdev_hw_addr *ha;
1240 netdev_for_each_uc_addr(ha, netdev) {
1241 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1242 udelay(200);
1243 }
1244 } else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
1249 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1250 }
1251
1252 return count;
1253}
1254
1255
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
1264static void ixgbevf_set_rx_mode(struct net_device *netdev)
1265{
1266 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1267 struct ixgbe_hw *hw = &adapter->hw;
1268
1269 spin_lock_bh(&adapter->mbx_lock);
1270
1271
1272 hw->mac.ops.update_mc_addr_list(hw, netdev);
1273
1274 ixgbevf_write_uc_addr_list(netdev);
1275
1276 spin_unlock_bh(&adapter->mbx_lock);
1277}
1278
1279static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1280{
1281 int q_idx;
1282 struct ixgbevf_q_vector *q_vector;
1283 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1284
1285 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1286 q_vector = adapter->q_vector[q_idx];
1287 napi_enable(&q_vector->napi);
1288 }
1289}
1290
1291static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1292{
1293 int q_idx;
1294 struct ixgbevf_q_vector *q_vector;
1295 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1296
1297 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1298 q_vector = adapter->q_vector[q_idx];
1299 napi_disable(&q_vector->napi);
1300 }
1301}
1302
1303static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1304{
1305 struct net_device *netdev = adapter->netdev;
1306 int i;
1307
1308 ixgbevf_set_rx_mode(netdev);
1309
1310 ixgbevf_restore_vlan(adapter);
1311
1312 ixgbevf_configure_tx(adapter);
1313 ixgbevf_configure_rx(adapter);
1314 for (i = 0; i < adapter->num_rx_queues; i++) {
1315 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1316 ixgbevf_alloc_rx_buffers(adapter, ring,
1317 IXGBE_DESC_UNUSED(ring));
1318 }
1319}
1320
1321#define IXGBE_MAX_RX_DESC_POLL 10
1322static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1323 int rxr)
1324{
1325 struct ixgbe_hw *hw = &adapter->hw;
1326 int j = adapter->rx_ring[rxr].reg_idx;
1327 int k;
1328
1329 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
1330 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1331 break;
1332 else
1333 msleep(1);
1334 }
1335 if (k >= IXGBE_MAX_RX_DESC_POLL) {
1336 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
1337 "not set within the polling period\n", rxr);
1338 }
1339
1340 ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
1341 adapter->rx_ring[rxr].count - 1);
1342}
1343
1344static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1345{
1346
1347 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1348 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1349 adapter->stats.base_vfgprc;
1350 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1351 adapter->stats.base_vfgptc;
1352 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1353 adapter->stats.base_vfgorc;
1354 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1355 adapter->stats.base_vfgotc;
1356 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1357 adapter->stats.base_vfmprc;
1358 }
1359}
1360
1361static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1362{
1363 struct ixgbe_hw *hw = &adapter->hw;
1364
1365 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1366 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1367 adapter->stats.last_vfgorc |=
1368 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1369 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1370 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1371 adapter->stats.last_vfgotc |=
1372 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1373 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1374
1375 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1376 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1377 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1378 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1379 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1380}
1381
1382static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1383{
1384 struct ixgbe_hw *hw = &adapter->hw;
1385 int api[] = { ixgbe_mbox_api_11,
1386 ixgbe_mbox_api_10,
1387 ixgbe_mbox_api_unknown };
1388 int err = 0, idx = 0;
1389
1390 spin_lock_bh(&adapter->mbx_lock);
1391
1392 while (api[idx] != ixgbe_mbox_api_unknown) {
1393 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1394 if (!err)
1395 break;
1396 idx++;
1397 }
1398
1399 spin_unlock_bh(&adapter->mbx_lock);
1400}
1401
1402static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1403{
1404 struct net_device *netdev = adapter->netdev;
1405 struct ixgbe_hw *hw = &adapter->hw;
1406 int i, j = 0;
1407 int num_rx_rings = adapter->num_rx_queues;
1408 u32 txdctl, rxdctl;
1409
1410 for (i = 0; i < adapter->num_tx_queues; i++) {
1411 j = adapter->tx_ring[i].reg_idx;
1412 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
1414 txdctl |= (8 << 16);
1415 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1416 }
1417
1418 for (i = 0; i < adapter->num_tx_queues; i++) {
1419 j = adapter->tx_ring[i].reg_idx;
1420 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1421 txdctl |= IXGBE_TXDCTL_ENABLE;
1422 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1423 }
1424
1425 for (i = 0; i < num_rx_rings; i++) {
1426 j = adapter->rx_ring[i].reg_idx;
1427 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1428 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
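		/* X540 VFs can program a per-queue max receive frame size */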
1429 if (hw->mac.type == ixgbe_mac_X540_vf) {
1430 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1431 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1432 IXGBE_RXDCTL_RLPML_EN);
1433 }
1434 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1435 ixgbevf_rx_desc_queue_enable(adapter, i);
1436 }
1437
1438 ixgbevf_configure_msix(adapter);
1439
1440 spin_lock_bh(&adapter->mbx_lock);
1441
1442 if (is_valid_ether_addr(hw->mac.addr))
1443 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1444 else
1445 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1446
1447 spin_unlock_bh(&adapter->mbx_lock);
1448
1449 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1450 ixgbevf_napi_enable_all(adapter);
1451
1452
1453 netif_tx_start_all_queues(netdev);
1454
1455 ixgbevf_save_reset_stats(adapter);
1456 ixgbevf_init_last_counter_stats(adapter);
1457
1458 hw->mac.get_link_status = 1;
1459 mod_timer(&adapter->watchdog_timer, jiffies);
1460}
1461
1462static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
1463{
1464 struct ixgbe_hw *hw = &adapter->hw;
1465 struct ixgbevf_ring *rx_ring;
1466 unsigned int def_q = 0;
1467 unsigned int num_tcs = 0;
1468 unsigned int num_rx_queues = 1;
1469 int err, i;
1470
1471 spin_lock_bh(&adapter->mbx_lock);
1472
1473
1474 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1475
1476 spin_unlock_bh(&adapter->mbx_lock);
1477
1478 if (err)
1479 return err;
1480
1481 if (num_tcs > 1) {
1482
1483 adapter->tx_ring[0].reg_idx = def_q;
1484
1485
1486 num_rx_queues = num_tcs;
1487 }
1488
1489
1490 if (adapter->num_rx_queues == num_rx_queues)
1491 return 0;
1492
1493
1494 rx_ring = kcalloc(num_rx_queues,
1495 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1496 if (!rx_ring)
1497 return -ENOMEM;
1498
1499
1500 for (i = 0; i < num_rx_queues; i++) {
1501 rx_ring[i].count = adapter->rx_ring_count;
1502 rx_ring[i].queue_index = i;
1503 rx_ring[i].reg_idx = i;
1504 rx_ring[i].dev = &adapter->pdev->dev;
1505 rx_ring[i].netdev = adapter->netdev;
1506
1507
1508 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
1509 if (err) {
1510 while (i) {
1511 i--;
1512 ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
1513 }
1514 kfree(rx_ring);
1515 return err;
1516 }
1517 }
1518
1519
1520 ixgbevf_free_all_rx_resources(adapter);
1521 adapter->num_rx_queues = 0;
1522 kfree(adapter->rx_ring);
1523
1524
1525 adapter->rx_ring = rx_ring;
1526 adapter->num_rx_queues = num_rx_queues;
1527
1528
1529 ixgbevf_reset_q_vectors(adapter);
1530 ixgbevf_map_rings_to_vectors(adapter);
1531
1532 return 0;
1533}
1534
1535void ixgbevf_up(struct ixgbevf_adapter *adapter)
1536{
1537 struct ixgbe_hw *hw = &adapter->hw;
1538
1539 ixgbevf_negotiate_api(adapter);
1540
1541 ixgbevf_reset_queues(adapter);
1542
1543 ixgbevf_configure(adapter);
1544
1545 ixgbevf_up_complete(adapter);
1546
1547
1548 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1549
1550 ixgbevf_irq_enable(adapter);
1551}
1552
1553
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
1558static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1559 struct ixgbevf_ring *rx_ring)
1560{
1561 struct pci_dev *pdev = adapter->pdev;
1562 unsigned long size;
1563 unsigned int i;
1564
1565 if (!rx_ring->rx_buffer_info)
1566 return;
1567
1568
1569 for (i = 0; i < rx_ring->count; i++) {
1570 struct ixgbevf_rx_buffer *rx_buffer_info;
1571
1572 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1573 if (rx_buffer_info->dma) {
1574 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
1575 rx_ring->rx_buf_len,
1576 DMA_FROM_DEVICE);
1577 rx_buffer_info->dma = 0;
1578 }
1579 if (rx_buffer_info->skb) {
1580 struct sk_buff *skb = rx_buffer_info->skb;
1581 rx_buffer_info->skb = NULL;
1582 do {
1583 struct sk_buff *this = skb;
1584 skb = IXGBE_CB(skb)->prev;
1585 dev_kfree_skb(this);
1586 } while (skb);
1587 }
1588 }
1589
1590 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1591 memset(rx_ring->rx_buffer_info, 0, size);
1592
1593
1594 memset(rx_ring->desc, 0, rx_ring->size);
1595
1596 rx_ring->next_to_clean = 0;
1597 rx_ring->next_to_use = 0;
1598
1599 if (rx_ring->head)
1600 writel(0, adapter->hw.hw_addr + rx_ring->head);
1601 if (rx_ring->tail)
1602 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1603}
1604
1605
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
1610static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1611 struct ixgbevf_ring *tx_ring)
1612{
1613 struct ixgbevf_tx_buffer *tx_buffer_info;
1614 unsigned long size;
1615 unsigned int i;
1616
1617 if (!tx_ring->tx_buffer_info)
1618 return;
1619
1620
1621 for (i = 0; i < tx_ring->count; i++) {
1622 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1623 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1624 }
1625
1626 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1627 memset(tx_ring->tx_buffer_info, 0, size);
1628
1629 memset(tx_ring->desc, 0, tx_ring->size);
1630
1631 tx_ring->next_to_use = 0;
1632 tx_ring->next_to_clean = 0;
1633
1634 if (tx_ring->head)
1635 writel(0, adapter->hw.hw_addr + tx_ring->head);
1636 if (tx_ring->tail)
1637 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1638}
1639
1640
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
1644static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1645{
1646 int i;
1647
1648 for (i = 0; i < adapter->num_rx_queues; i++)
1649 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1650}
1651
1652
/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
1656static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1657{
1658 int i;
1659
1660 for (i = 0; i < adapter->num_tx_queues; i++)
1661 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1662}
1663
1664void ixgbevf_down(struct ixgbevf_adapter *adapter)
1665{
1666 struct net_device *netdev = adapter->netdev;
1667 struct ixgbe_hw *hw = &adapter->hw;
1668 u32 txdctl;
1669 int i, j;
1670
1671
1672 set_bit(__IXGBEVF_DOWN, &adapter->state);
1673
1674
1675 netif_tx_disable(netdev);
1676
1677 msleep(10);
1678
1679 netif_tx_stop_all_queues(netdev);
1680
1681 ixgbevf_irq_disable(adapter);
1682
1683 ixgbevf_napi_disable_all(adapter);
1684
1685 del_timer_sync(&adapter->watchdog_timer);
1686
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
1689 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1690 msleep(1);
1691
1692
1693 for (i = 0; i < adapter->num_tx_queues; i++) {
1694 j = adapter->tx_ring[i].reg_idx;
1695 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1696 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1697 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1698 }
1699
1700 netif_carrier_off(netdev);
1701
1702 if (!pci_channel_offline(adapter->pdev))
1703 ixgbevf_reset(adapter);
1704
1705 ixgbevf_clean_all_tx_rings(adapter);
1706 ixgbevf_clean_all_rx_rings(adapter);
1707}
1708
1709void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1710{
1711 WARN_ON(in_interrupt());
1712
1713 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1714 msleep(1);
1715
1716 ixgbevf_down(adapter);
1717 ixgbevf_up(adapter);
1718
1719 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1720}
1721
1722void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1723{
1724 struct ixgbe_hw *hw = &adapter->hw;
1725 struct net_device *netdev = adapter->netdev;
1726
1727 if (hw->mac.ops.reset_hw(hw))
1728 hw_dbg(hw, "PF still resetting\n");
1729 else
1730 hw->mac.ops.init_hw(hw);
1731
1732 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1733 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1734 netdev->addr_len);
1735 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1736 netdev->addr_len);
1737 }
1738}
1739
1740static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1741 int vectors)
1742{
1743 int err = 0;
1744 int vector_threshold;
1745
	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] cleanup
	 * 2) Other (Link Status Change, etc.)
	 */
1750 vector_threshold = MIN_MSIX_COUNT;
1751
	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
1757 while (vectors >= vector_threshold) {
1758 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1759 vectors);
1760 if (!err || err < 0)
1761 break;
1762 else
1763 vectors = err;
1764 }
1765
1766 if (vectors < vector_threshold)
1767 err = -ENOMEM;
1768
1769 if (err) {
1770 dev_err(&adapter->pdev->dev,
1771 "Unable to allocate MSI-X interrupts\n");
1772 kfree(adapter->msix_entries);
1773 adapter->msix_entries = NULL;
1774 } else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
1780 adapter->num_msix_vectors = vectors;
1781 }
1782
1783 return err;
1784}
1785
1786
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/
1797static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1798{
1799
1800 adapter->num_rx_queues = 1;
1801 adapter->num_tx_queues = 1;
1802}
1803
1804
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
1812static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1813{
1814 int i;
1815
1816 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1817 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1818 if (!adapter->tx_ring)
1819 goto err_tx_ring_allocation;
1820
1821 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1822 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1823 if (!adapter->rx_ring)
1824 goto err_rx_ring_allocation;
1825
1826 for (i = 0; i < adapter->num_tx_queues; i++) {
1827 adapter->tx_ring[i].count = adapter->tx_ring_count;
1828 adapter->tx_ring[i].queue_index = i;
1829
1830 adapter->tx_ring[i].reg_idx = i;
1831 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1832 adapter->tx_ring[i].netdev = adapter->netdev;
1833 }
1834
1835 for (i = 0; i < adapter->num_rx_queues; i++) {
1836 adapter->rx_ring[i].count = adapter->rx_ring_count;
1837 adapter->rx_ring[i].queue_index = i;
1838 adapter->rx_ring[i].reg_idx = i;
1839 adapter->rx_ring[i].dev = &adapter->pdev->dev;
1840 adapter->rx_ring[i].netdev = adapter->netdev;
1841 }
1842
1843 return 0;
1844
1845err_rx_ring_allocation:
1846 kfree(adapter->tx_ring);
1847err_tx_ring_allocation:
1848 return -ENOMEM;
1849}
1850
1851
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
1858static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1859{
1860 struct net_device *netdev = adapter->netdev;
1861 int err = 0;
1862 int vector, v_budget;
1863
1864
	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
1871 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1872 v_budget = min_t(int, v_budget, num_online_cpus());
1873 v_budget += NON_Q_VECTORS;
1874
	/* the VF only supports MSI-X interrupts, so allocate one msix_entry
	 * per vector we intend to request
	 */
1877 adapter->msix_entries = kcalloc(v_budget,
1878 sizeof(struct msix_entry), GFP_KERNEL);
1879 if (!adapter->msix_entries) {
1880 err = -ENOMEM;
1881 goto out;
1882 }
1883
1884 for (vector = 0; vector < v_budget; vector++)
1885 adapter->msix_entries[vector].entry = vector;
1886
1887 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1888 if (err)
1889 goto out;
1890
1891 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1892 if (err)
1893 goto out;
1894
1895 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1896
1897out:
1898 return err;
1899}
1900
1901
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
1908static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1909{
1910 int q_idx, num_q_vectors;
1911 struct ixgbevf_q_vector *q_vector;
1912
1913 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1914
1915 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1916 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1917 if (!q_vector)
1918 goto err_out;
1919 q_vector->adapter = adapter;
1920 q_vector->v_idx = q_idx;
1921 netif_napi_add(adapter->netdev, &q_vector->napi,
1922 ixgbevf_poll, 64);
1923 adapter->q_vector[q_idx] = q_vector;
1924 }
1925
1926 return 0;
1927
1928err_out:
1929 while (q_idx) {
1930 q_idx--;
1931 q_vector = adapter->q_vector[q_idx];
1932 netif_napi_del(&q_vector->napi);
1933 kfree(q_vector);
1934 adapter->q_vector[q_idx] = NULL;
1935 }
1936 return -ENOMEM;
1937}
1938
1939
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
1947static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1948{
1949 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1950
1951 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1952 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1953
1954 adapter->q_vector[q_idx] = NULL;
1955 netif_napi_del(&q_vector->napi);
1956 kfree(q_vector);
1957 }
1958}
1959
1960
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 **/
1965static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
1966{
1967 pci_disable_msix(adapter->pdev);
1968 kfree(adapter->msix_entries);
1969 adapter->msix_entries = NULL;
1970}
1971
1972
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 **/
1977static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
1978{
1979 int err;
1980
1981
1982 ixgbevf_set_num_queues(adapter);
1983
1984 err = ixgbevf_set_interrupt_capability(adapter);
1985 if (err) {
1986 hw_dbg(&adapter->hw,
1987 "Unable to setup interrupt capabilities\n");
1988 goto err_set_interrupt;
1989 }
1990
1991 err = ixgbevf_alloc_q_vectors(adapter);
1992 if (err) {
1993 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
1994 "vectors\n");
1995 goto err_alloc_q_vectors;
1996 }
1997
1998 err = ixgbevf_alloc_queues(adapter);
1999 if (err) {
2000 pr_err("Unable to allocate memory for queues\n");
2001 goto err_alloc_queues;
2002 }
2003
2004 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2005 "Tx Queue count = %u\n",
2006 (adapter->num_rx_queues > 1) ? "Enabled" :
2007 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2008
2009 set_bit(__IXGBEVF_DOWN, &adapter->state);
2010
2011 return 0;
2012err_alloc_queues:
2013 ixgbevf_free_q_vectors(adapter);
2014err_alloc_q_vectors:
2015 ixgbevf_reset_interrupt_capability(adapter);
2016err_set_interrupt:
2017 return err;
2018}
2019
2020
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
2027static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2028{
2029 adapter->num_tx_queues = 0;
2030 adapter->num_rx_queues = 0;
2031
2032 ixgbevf_free_q_vectors(adapter);
2033 ixgbevf_reset_interrupt_capability(adapter);
2034}
2035
2036
/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
2045static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2046{
2047 struct ixgbe_hw *hw = &adapter->hw;
2048 struct pci_dev *pdev = adapter->pdev;
2049 int err;
2050
2051
2052
2053 hw->vendor_id = pdev->vendor;
2054 hw->device_id = pdev->device;
2055 hw->revision_id = pdev->revision;
2056 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2057 hw->subsystem_device_id = pdev->subsystem_device;
2058
2059 hw->mbx.ops.init_params(hw);
2060
2061
2062 hw->mac.max_tx_queues = 2;
2063 hw->mac.max_rx_queues = 2;
2064
2065 err = hw->mac.ops.reset_hw(hw);
2066 if (err) {
2067 dev_info(&pdev->dev,
2068 "PF still in reset state, assigning new address\n");
2069 eth_hw_addr_random(adapter->netdev);
2070 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
2071 adapter->netdev->addr_len);
2072 } else {
2073 err = hw->mac.ops.init_hw(hw);
2074 if (err) {
2075 pr_err("init_shared_code failed: %d\n", err);
2076 goto out;
2077 }
2078 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
2079 adapter->netdev->addr_len);
2080 }
2081
2082
2083 spin_lock_init(&adapter->mbx_lock);
2084
2085
2086 adapter->rx_itr_setting = 1;
2087 adapter->tx_itr_setting = 1;
2088
2089
2090 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2091 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2092
2093 set_bit(__IXGBEVF_DOWN, &adapter->state);
2094 return 0;
2095
2096out:
2097 return err;
2098}
2099
2100#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2101 { \
2102 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2103 if (current_counter < last_counter) \
2104 counter += 0x100000000LL; \
2105 last_counter = current_counter; \
2106 counter &= 0xFFFFFFFF00000000LL; \
2107 counter |= current_counter; \
2108 }
2109
2110#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2111 { \
2112 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2113 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2114 u64 current_counter = (current_counter_msb << 32) | \
2115 current_counter_lsb; \
2116 if (current_counter < last_counter) \
2117 counter += 0x1000000000LL; \
2118 last_counter = current_counter; \
2119 counter &= 0xFFFFFFF000000000LL; \
2120 counter |= current_counter; \
2121 }
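
/* the VF statistics registers are only 32 (or 36) bits wide; the macros above
 * fold each new reading into a 64-bit software counter and account for wrap
 */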
2122
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
2126void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2127{
2128 struct ixgbe_hw *hw = &adapter->hw;
2129 int i;
2130
2131 if (!adapter->link_up)
2132 return;
2133
2134 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2135 adapter->stats.vfgprc);
2136 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2137 adapter->stats.vfgptc);
2138 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2139 adapter->stats.last_vfgorc,
2140 adapter->stats.vfgorc);
2141 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2142 adapter->stats.last_vfgotc,
2143 adapter->stats.vfgotc);
2144 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2145 adapter->stats.vfmprc);
2146
2147 for (i = 0; i < adapter->num_rx_queues; i++) {
2148 adapter->hw_csum_rx_error +=
2149 adapter->rx_ring[i].hw_csum_rx_error;
2150 adapter->hw_csum_rx_good +=
2151 adapter->rx_ring[i].hw_csum_rx_good;
2152 adapter->rx_ring[i].hw_csum_rx_error = 0;
2153 adapter->rx_ring[i].hw_csum_rx_good = 0;
2154 }
2155}
2156
2157
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
2161static void ixgbevf_watchdog(unsigned long data)
2162{
2163 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2164 struct ixgbe_hw *hw = &adapter->hw;
2165 u32 eics = 0;
2166 int i;
2167
	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */
2173 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2174 goto watchdog_short_circuit;
2175
2176
2177 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2178 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2179 if (qv->rx.ring || qv->tx.ring)
2180 eics |= 1 << i;
2181 }
2182
2183 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2184
2185watchdog_short_circuit:
2186 schedule_work(&adapter->watchdog_task);
2187}
2188
2189
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
2193static void ixgbevf_tx_timeout(struct net_device *netdev)
2194{
2195 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2196
2197
2198 schedule_work(&adapter->reset_task);
2199}
2200
2201static void ixgbevf_reset_task(struct work_struct *work)
2202{
2203 struct ixgbevf_adapter *adapter;
2204 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2205
2206
2207 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2208 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2209 return;
2210
2211 adapter->tx_timeout_count++;
2212
2213 ixgbevf_reinit_locked(adapter);
2214}
2215
2216
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
2220static void ixgbevf_watchdog_task(struct work_struct *work)
2221{
2222 struct ixgbevf_adapter *adapter = container_of(work,
2223 struct ixgbevf_adapter,
2224 watchdog_task);
2225 struct net_device *netdev = adapter->netdev;
2226 struct ixgbe_hw *hw = &adapter->hw;
2227 u32 link_speed = adapter->link_speed;
2228 bool link_up = adapter->link_up;
2229 s32 need_reset;
2230
2231 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2232
	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
2237 spin_lock_bh(&adapter->mbx_lock);
2238
2239 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2240
2241 spin_unlock_bh(&adapter->mbx_lock);
2242
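	/* a nonzero return from check_link most likely means the mailbox is
	 * down because the PF has reset; schedule our own reset to recover
	 */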
2243 if (need_reset) {
2244 adapter->link_up = link_up;
2245 adapter->link_speed = link_speed;
2246 netif_carrier_off(netdev);
2247 netif_tx_stop_all_queues(netdev);
2248 schedule_work(&adapter->reset_task);
2249 goto pf_has_reset;
2250 }
2251 adapter->link_up = link_up;
2252 adapter->link_speed = link_speed;
2253
2254 if (link_up) {
2255 if (!netif_carrier_ok(netdev)) {
2256 char *link_speed_string;
2257 switch (link_speed) {
2258 case IXGBE_LINK_SPEED_10GB_FULL:
2259 link_speed_string = "10 Gbps";
2260 break;
2261 case IXGBE_LINK_SPEED_1GB_FULL:
2262 link_speed_string = "1 Gbps";
2263 break;
2264 case IXGBE_LINK_SPEED_100_FULL:
2265 link_speed_string = "100 Mbps";
2266 break;
2267 default:
2268 link_speed_string = "unknown speed";
2269 break;
2270 }
2271 dev_info(&adapter->pdev->dev,
2272 "NIC Link is Up, %s\n", link_speed_string);
2273 netif_carrier_on(netdev);
2274 netif_tx_wake_all_queues(netdev);
2275 }
2276 } else {
2277 adapter->link_up = false;
2278 adapter->link_speed = 0;
2279 if (netif_carrier_ok(netdev)) {
2280 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2281 netif_carrier_off(netdev);
2282 netif_tx_stop_all_queues(netdev);
2283 }
2284 }
2285
2286 ixgbevf_update_stats(adapter);
2287
2288pf_has_reset:
2289
2290 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2291 mod_timer(&adapter->watchdog_timer,
2292 round_jiffies(jiffies + (2 * HZ)));
2293
2294 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2295}
2296
2297
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
2304void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2305 struct ixgbevf_ring *tx_ring)
2306{
2307 struct pci_dev *pdev = adapter->pdev;
2308
2309 ixgbevf_clean_tx_ring(adapter, tx_ring);
2310
2311 vfree(tx_ring->tx_buffer_info);
2312 tx_ring->tx_buffer_info = NULL;
2313
2314 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2315 tx_ring->dma);
2316
2317 tx_ring->desc = NULL;
2318}
2319
2320
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
2326static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2327{
2328 int i;
2329
2330 for (i = 0; i < adapter->num_tx_queues; i++)
2331 if (adapter->tx_ring[i].desc)
2332 ixgbevf_free_tx_resources(adapter,
2333 &adapter->tx_ring[i]);
2334
2335}
2336
2337
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
2344int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2345 struct ixgbevf_ring *tx_ring)
2346{
2347 struct pci_dev *pdev = adapter->pdev;
2348 int size;
2349
2350 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2351 tx_ring->tx_buffer_info = vzalloc(size);
2352 if (!tx_ring->tx_buffer_info)
2353 goto err;
2354
2355
2356 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2357 tx_ring->size = ALIGN(tx_ring->size, 4096);
2358
2359 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2360 &tx_ring->dma, GFP_KERNEL);
2361 if (!tx_ring->desc)
2362 goto err;
2363
2364 tx_ring->next_to_use = 0;
2365 tx_ring->next_to_clean = 0;
2366 return 0;
2367
2368err:
2369 vfree(tx_ring->tx_buffer_info);
2370 tx_ring->tx_buffer_info = NULL;
2371 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2372 "descriptor ring\n");
2373 return -ENOMEM;
2374}
2375
2376
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
2386static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2387{
2388 int i, err = 0;
2389
2390 for (i = 0; i < adapter->num_tx_queues; i++) {
2391 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2392 if (!err)
2393 continue;
2394 hw_dbg(&adapter->hw,
2395 "Allocation for Tx Queue %u failed\n", i);
2396 break;
2397 }
2398
2399 return err;
2400}
2401
2402
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
2409int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2410 struct ixgbevf_ring *rx_ring)
2411{
2412 struct pci_dev *pdev = adapter->pdev;
2413 int size;
2414
2415 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2416 rx_ring->rx_buffer_info = vzalloc(size);
2417 if (!rx_ring->rx_buffer_info)
2418 goto alloc_failed;
2419
2420
2421 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2422 rx_ring->size = ALIGN(rx_ring->size, 4096);
2423
2424 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2425 &rx_ring->dma, GFP_KERNEL);
2426
2427 if (!rx_ring->desc) {
2428 hw_dbg(&adapter->hw,
2429 "Unable to allocate memory for "
2430 "the receive descriptor ring\n");
2431 vfree(rx_ring->rx_buffer_info);
2432 rx_ring->rx_buffer_info = NULL;
2433 goto alloc_failed;
2434 }
2435
2436 rx_ring->next_to_clean = 0;
2437 rx_ring->next_to_use = 0;
2438
2439 return 0;
2440alloc_failed:
2441 return -ENOMEM;
2442}
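
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/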
2454static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2455{
2456 int i, err = 0;
2457
2458 for (i = 0; i < adapter->num_rx_queues; i++) {
2459 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2460 if (!err)
2461 continue;
2462 hw_dbg(&adapter->hw,
2463 "Allocation for Rx Queue %u failed\n", i);
2464 break;
2465 }
2466 return err;
2467}
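
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/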
2476void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2477 struct ixgbevf_ring *rx_ring)
2478{
2479 struct pci_dev *pdev = adapter->pdev;
2480
2481 ixgbevf_clean_rx_ring(adapter, rx_ring);
2482
2483 vfree(rx_ring->rx_buffer_info);
2484 rx_ring->rx_buffer_info = NULL;
2485
2486 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2487 rx_ring->dma);
2488
2489 rx_ring->desc = NULL;
2490}
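
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/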
2498static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2499{
2500 int i;
2501
2502 for (i = 0; i < adapter->num_rx_queues; i++)
2503 if (adapter->rx_ring[i].desc)
2504 ixgbevf_free_rx_resources(adapter,
2505 &adapter->rx_ring[i]);
2506}
2507
2508static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2509{
2510 struct ixgbe_hw *hw = &adapter->hw;
2511 struct ixgbevf_ring *rx_ring;
2512 unsigned int def_q = 0;
2513 unsigned int num_tcs = 0;
2514 unsigned int num_rx_queues = 1;
2515 int err, i;
2516
2517 spin_lock_bh(&adapter->mbx_lock);
2518
2519
2520 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2521
2522 spin_unlock_bh(&adapter->mbx_lock);
2523
2524 if (err)
2525 return err;
2526
	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}
2534
2535
2536 if (adapter->num_rx_queues == num_rx_queues)
2537 return 0;
2538
2539
2540 rx_ring = kcalloc(num_rx_queues,
2541 sizeof(struct ixgbevf_ring), GFP_KERNEL);
2542 if (!rx_ring)
2543 return -ENOMEM;
2544
2545
2546 for (i = 0; i < num_rx_queues; i++) {
2547 rx_ring[i].count = adapter->rx_ring_count;
2548 rx_ring[i].queue_index = i;
2549 rx_ring[i].reg_idx = i;
2550 rx_ring[i].dev = &adapter->pdev->dev;
2551 rx_ring[i].netdev = adapter->netdev;
2552 }
2553
2554
2555 adapter->num_rx_queues = 0;
2556 kfree(adapter->rx_ring);
2557
2558
2559 adapter->rx_ring = rx_ring;
2560 adapter->num_rx_queues = num_rx_queues;
2561
2562 return 0;
2563}
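
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/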
2577static int ixgbevf_open(struct net_device *netdev)
2578{
2579 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2580 struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the code
	 * to a state where the device is not open.  We need to clear
	 * this condition and we should be able to recover with a
	 * simple "do over" of trying to open the device again.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	/* disallow open during test */
2593 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2594 return -EBUSY;
2595
2596 if (hw->adapter_stopped) {
2597 ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start. */
2600 if (hw->adapter_stopped) {
2601 err = IXGBE_ERR_MBX;
2602 pr_err("Unable to start - perhaps the PF Driver isn't "
2603 "up yet\n");
2604 goto err_setup_reset;
2605 }
2606 }
2607
2608 ixgbevf_negotiate_api(adapter);
2609
2610
2611 err = ixgbevf_setup_queues(adapter);
2612 if (err)
2613 goto err_setup_queues;
2614
2615
2616 err = ixgbevf_setup_all_tx_resources(adapter);
2617 if (err)
2618 goto err_setup_tx;
2619
2620
2621 err = ixgbevf_setup_all_rx_resources(adapter);
2622 if (err)
2623 goto err_setup_rx;
2624
	ixgbevf_configure(adapter);

	/* Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
2632 ixgbevf_map_rings_to_vectors(adapter);
2633
	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
2637 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2638 err = ixgbevf_request_irq(adapter);
2639 if (err)
2640 goto err_req_irq;
2641
2642 ixgbevf_irq_enable(adapter);
2643
2644 return 0;
2645
2646err_req_irq:
2647 ixgbevf_down(adapter);
2648err_setup_rx:
2649 ixgbevf_free_all_rx_resources(adapter);
2650err_setup_tx:
2651 ixgbevf_free_all_tx_resources(adapter);
2652err_setup_queues:
2653 ixgbevf_reset(adapter);
2654
2655err_setup_reset:
2656
2657 return err;
2658}
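
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/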
2671static int ixgbevf_close(struct net_device *netdev)
2672{
2673 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2674
2675 ixgbevf_down(adapter);
2676 ixgbevf_free_irq(adapter);
2677
2678 ixgbevf_free_all_tx_resources(adapter);
2679 ixgbevf_free_all_rx_resources(adapter);
2680
2681 return 0;
2682}
2683
2684static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2685 u32 vlan_macip_lens, u32 type_tucmd,
2686 u32 mss_l4len_idx)
2687{
2688 struct ixgbe_adv_tx_context_desc *context_desc;
2689 u16 i = tx_ring->next_to_use;
2690
2691 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2692
2693 i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
2697 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2698
2699 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2700 context_desc->seqnum_seed = 0;
2701 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2702 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2703}
2704
2705static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2706 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2707{
2708 u32 vlan_macip_lens, type_tucmd;
2709 u32 mss_l4len_idx, l4len;
2710
2711 if (!skb_is_gso(skb))
2712 return 0;
2713
2714 if (skb_header_cloned(skb)) {
2715 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2716 if (err)
2717 return err;
2718 }
2719
2720
2721 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2722
2723 if (skb->protocol == htons(ETH_P_IP)) {
2724 struct iphdr *iph = ip_hdr(skb);
2725 iph->tot_len = 0;
2726 iph->check = 0;
2727 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2728 iph->daddr, 0,
2729 IPPROTO_TCP,
2730 0);
2731 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2732 } else if (skb_is_gso_v6(skb)) {
2733 ipv6_hdr(skb)->payload_len = 0;
2734 tcp_hdr(skb)->check =
2735 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2736 &ipv6_hdr(skb)->daddr,
2737 0, IPPROTO_TCP, 0);
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* mss_l4len_id: use 1 as index for TSO */
2746 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2747 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2748 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2749
2750
2751 vlan_macip_lens = skb_network_header_len(skb);
2752 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2753 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2754
2755 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2756 type_tucmd, mss_l4len_idx);
2757
2758 return 1;
2759}
2760
2761static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2762 struct sk_buff *skb, u32 tx_flags)
2763{
2764 u32 vlan_macip_lens = 0;
2765 u32 mss_l4len_idx = 0;
2766 u32 type_tucmd = 0;
2767
2768 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2769 u8 l4_hdr = 0;
2770 switch (skb->protocol) {
2771 case __constant_htons(ETH_P_IP):
2772 vlan_macip_lens |= skb_network_header_len(skb);
2773 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2774 l4_hdr = ip_hdr(skb)->protocol;
2775 break;
2776 case __constant_htons(ETH_P_IPV6):
2777 vlan_macip_lens |= skb_network_header_len(skb);
2778 l4_hdr = ipv6_hdr(skb)->nexthdr;
2779 break;
2780 default:
2781 if (unlikely(net_ratelimit())) {
2782 dev_warn(tx_ring->dev,
2783 "partial checksum but proto=%x!\n",
2784 skb->protocol);
2785 }
2786 break;
2787 }
2788
2789 switch (l4_hdr) {
2790 case IPPROTO_TCP:
2791 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2792 mss_l4len_idx = tcp_hdrlen(skb) <<
2793 IXGBE_ADVTXD_L4LEN_SHIFT;
2794 break;
2795 case IPPROTO_SCTP:
2796 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2797 mss_l4len_idx = sizeof(struct sctphdr) <<
2798 IXGBE_ADVTXD_L4LEN_SHIFT;
2799 break;
2800 case IPPROTO_UDP:
2801 mss_l4len_idx = sizeof(struct udphdr) <<
2802 IXGBE_ADVTXD_L4LEN_SHIFT;
2803 break;
2804 default:
2805 if (unlikely(net_ratelimit())) {
2806 dev_warn(tx_ring->dev,
2807 "partial checksum but l4 proto=%x!\n",
2808 l4_hdr);
2809 }
2810 break;
2811 }
2812 }
2813
2814
2815 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2816 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2817
2818 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2819 type_tucmd, mss_l4len_idx);
2820
2821 return (skb->ip_summed == CHECKSUM_PARTIAL);
2822}
2823
2824static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2825 struct sk_buff *skb, u32 tx_flags,
2826 unsigned int first)
2827{
2828 struct ixgbevf_tx_buffer *tx_buffer_info;
2829 unsigned int len;
2830 unsigned int total = skb->len;
2831 unsigned int offset = 0, size;
2832 int count = 0;
2833 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2834 unsigned int f;
2835 int i;
2836
2837 i = tx_ring->next_to_use;
2838
2839 len = min(skb_headlen(skb), total);
2840 while (len) {
2841 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2842 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2843
2844 tx_buffer_info->length = size;
2845 tx_buffer_info->mapped_as_page = false;
2846 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2847 skb->data + offset,
2848 size, DMA_TO_DEVICE);
2849 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2850 goto dma_error;
2851 tx_buffer_info->next_to_watch = i;
2852
2853 len -= size;
2854 total -= size;
2855 offset += size;
2856 count++;
2857 i++;
2858 if (i == tx_ring->count)
2859 i = 0;
2860 }
2861
2862 for (f = 0; f < nr_frags; f++) {
2863 const struct skb_frag_struct *frag;
2864
2865 frag = &skb_shinfo(skb)->frags[f];
2866 len = min((unsigned int)skb_frag_size(frag), total);
2867 offset = 0;
2868
2869 while (len) {
2870 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2871 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2872
2873 tx_buffer_info->length = size;
2874 tx_buffer_info->dma =
2875 skb_frag_dma_map(tx_ring->dev, frag,
2876 offset, size, DMA_TO_DEVICE);
2877 if (dma_mapping_error(tx_ring->dev,
2878 tx_buffer_info->dma))
2879 goto dma_error;
2880 tx_buffer_info->mapped_as_page = true;
2881 tx_buffer_info->next_to_watch = i;
2882
2883 len -= size;
2884 total -= size;
2885 offset += size;
2886 count++;
2887 i++;
2888 if (i == tx_ring->count)
2889 i = 0;
2890 }
2891 if (total == 0)
2892 break;
2893 }
2894
2895 if (i == 0)
2896 i = tx_ring->count - 1;
2897 else
2898 i = i - 1;
2899 tx_ring->tx_buffer_info[i].skb = skb;
2900 tx_ring->tx_buffer_info[first].next_to_watch = i;
2901 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2902
2903 return count;
2904
2905dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
2909 tx_buffer_info->dma = 0;
2910 tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
2914 while (count >= 0) {
2915 count--;
2916 i--;
2917 if (i < 0)
2918 i += tx_ring->count;
2919 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2920 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2921 }
2922
2923 return count;
2924}
2925
2926static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2927 int count, u32 paylen, u8 hdr_len)
2928{
2929 union ixgbe_adv_tx_desc *tx_desc = NULL;
2930 struct ixgbevf_tx_buffer *tx_buffer_info;
2931 u32 olinfo_status = 0, cmd_type_len = 0;
2932 unsigned int i;
2933
2934 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2935
2936 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2937
2938 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2939
2940 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2941 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2942
2943 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2944 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2945
2946 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		/* use index 1 context for TSO/FSO/FCOE */
2950 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2951 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2952 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
	}

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for the case where virtual functions are running
	 */
2959 olinfo_status |= IXGBE_ADVTXD_CC;
2960
2961 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2962
2963 i = tx_ring->next_to_use;
2964 while (count--) {
2965 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2966 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2967 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
2968 tx_desc->read.cmd_type_len =
2969 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
2970 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2971 i++;
2972 if (i == tx_ring->count)
2973 i = 0;
2974 }
2975
2976 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2977
2978 tx_ring->next_to_use = i;
2979}
2980
2981static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
2982{
2983 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
2984
2985 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
2997 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2998 ++adapter->restart_queue;
2999 return 0;
3000}
3001
3002static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3003{
3004 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3005 return 0;
3006 return __ixgbevf_maybe_stop_tx(tx_ring, size);
3007}
3008
3009static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3010{
3011 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3012 struct ixgbevf_ring *tx_ring;
3013 unsigned int first;
3014 unsigned int tx_flags = 0;
3015 u8 hdr_len = 0;
3016 int r_idx = 0, tso;
3017 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3018#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3019 unsigned short f;
3020#endif
3021 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3022 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3023 dev_kfree_skb(skb);
3024 return NETDEV_TX_OK;
3025 }
3026
	tx_ring = &adapter->tx_ring[r_idx];

	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
3036#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3037 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3038 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3039#else
3040 count += skb_shinfo(skb)->nr_frags;
3041#endif
3042 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3043 adapter->tx_busy++;
3044 return NETDEV_TX_BUSY;
3045 }
3046
3047 if (vlan_tx_tag_present(skb)) {
3048 tx_flags |= vlan_tx_tag_get(skb);
3049 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3050 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3051 }
3052
3053 first = tx_ring->next_to_use;
3054
3055 if (skb->protocol == htons(ETH_P_IP))
3056 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3057 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
3058 if (tso < 0) {
3059 dev_kfree_skb_any(skb);
3060 return NETDEV_TX_OK;
3061 }
3062
3063 if (tso)
3064 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3065 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
3066 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3067
3068 ixgbevf_tx_queue(tx_ring, tx_flags,
3069 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
3070 skb->len, hdr_len);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
3077 wmb();
3078
3079 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3080
3081 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3082
3083 return NETDEV_TX_OK;
3084}
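
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/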
3093static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3094{
3095 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3096 struct ixgbe_hw *hw = &adapter->hw;
3097 struct sockaddr *addr = p;
3098
3099 if (!is_valid_ether_addr(addr->sa_data))
3100 return -EADDRNOTAVAIL;
3101
3102 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3103 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3104
3105 spin_lock_bh(&adapter->mbx_lock);
3106
3107 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3108
3109 spin_unlock_bh(&adapter->mbx_lock);
3110
3111 return 0;
3112}
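
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/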
3121static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3122{
3123 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3124 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3125 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3126
3127 switch (adapter->hw.api_version) {
3128 case ixgbe_mbox_api_11:
3129 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3130 break;
3131 default:
3132 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3133 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3134 break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
3138 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3139 return -EINVAL;
3140
3141 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3142 netdev->mtu, new_mtu);
3143
3144 netdev->mtu = new_mtu;
3145
3146 if (netif_running(netdev))
3147 ixgbevf_reinit_locked(adapter);
3148
3149 return 0;
3150}
3151
3152static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3153{
3154 struct net_device *netdev = pci_get_drvdata(pdev);
3155 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3156#ifdef CONFIG_PM
3157 int retval = 0;
3158#endif
3159
3160 netif_device_detach(netdev);
3161
3162 if (netif_running(netdev)) {
3163 rtnl_lock();
3164 ixgbevf_down(adapter);
3165 ixgbevf_free_irq(adapter);
3166 ixgbevf_free_all_tx_resources(adapter);
3167 ixgbevf_free_all_rx_resources(adapter);
3168 rtnl_unlock();
3169 }
3170
3171 ixgbevf_clear_interrupt_scheme(adapter);
3172
3173#ifdef CONFIG_PM
3174 retval = pci_save_state(pdev);
3175 if (retval)
3176 return retval;
3177
3178#endif
3179 pci_disable_device(pdev);
3180
3181 return 0;
3182}
3183
3184#ifdef CONFIG_PM
3185static int ixgbevf_resume(struct pci_dev *pdev)
3186{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err;
3190
3191 pci_set_power_state(pdev, PCI_D0);
3192 pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
3197 pci_save_state(pdev);
3198
3199 err = pci_enable_device_mem(pdev);
3200 if (err) {
3201 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3202 return err;
3203 }
3204 pci_set_master(pdev);
3205
3206 rtnl_lock();
3207 err = ixgbevf_init_interrupt_scheme(adapter);
3208 rtnl_unlock();
3209 if (err) {
3210 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3211 return err;
3212 }
3213
3214 ixgbevf_reset(adapter);
3215
3216 if (netif_running(netdev)) {
3217 err = ixgbevf_open(netdev);
3218 if (err)
3219 return err;
3220 }
3221
3222 netif_device_attach(netdev);
3223
3224 return err;
3225}
3226
3227#endif
3228static void ixgbevf_shutdown(struct pci_dev *pdev)
3229{
3230 ixgbevf_suspend(pdev, PMSG_SUSPEND);
3231}
3232
3233static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3234 struct rtnl_link_stats64 *stats)
3235{
3236 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3237 unsigned int start;
3238 u64 bytes, packets;
3239 const struct ixgbevf_ring *ring;
3240 int i;
3241
3242 ixgbevf_update_stats(adapter);
3243
3244 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3245
3246 for (i = 0; i < adapter->num_rx_queues; i++) {
3247 ring = &adapter->rx_ring[i];
3248 do {
3249 start = u64_stats_fetch_begin_bh(&ring->syncp);
3250 bytes = ring->total_bytes;
3251 packets = ring->total_packets;
3252 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3253 stats->rx_bytes += bytes;
3254 stats->rx_packets += packets;
3255 }
3256
3257 for (i = 0; i < adapter->num_tx_queues; i++) {
3258 ring = &adapter->tx_ring[i];
3259 do {
3260 start = u64_stats_fetch_begin_bh(&ring->syncp);
3261 bytes = ring->total_bytes;
3262 packets = ring->total_packets;
3263 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3264 stats->tx_bytes += bytes;
3265 stats->tx_packets += packets;
3266 }
3267
3268 return stats;
3269}
3270
3271static const struct net_device_ops ixgbevf_netdev_ops = {
3272 .ndo_open = ixgbevf_open,
3273 .ndo_stop = ixgbevf_close,
3274 .ndo_start_xmit = ixgbevf_xmit_frame,
3275 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3276 .ndo_get_stats64 = ixgbevf_get_stats,
3277 .ndo_validate_addr = eth_validate_addr,
3278 .ndo_set_mac_address = ixgbevf_set_mac,
3279 .ndo_change_mtu = ixgbevf_change_mtu,
3280 .ndo_tx_timeout = ixgbevf_tx_timeout,
3281 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3282 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3283};
3284
3285static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3286{
3287 dev->netdev_ops = &ixgbevf_netdev_ops;
3288 ixgbevf_set_ethtool_ops(dev);
3289 dev->watchdog_timeo = 5 * HZ;
3290}
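
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/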
3303static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3304{
3305 struct net_device *netdev;
3306 struct ixgbevf_adapter *adapter = NULL;
3307 struct ixgbe_hw *hw = NULL;
3308 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3309 static int cards_found;
3310 int err, pci_using_dac;
3311
3312 err = pci_enable_device(pdev);
3313 if (err)
3314 return err;
3315
3316 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3317 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3318 pci_using_dac = 1;
3319 } else {
3320 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3321 if (err) {
3322 err = dma_set_coherent_mask(&pdev->dev,
3323 DMA_BIT_MASK(32));
3324 if (err) {
3325 dev_err(&pdev->dev, "No usable DMA "
3326 "configuration, aborting\n");
3327 goto err_dma;
3328 }
3329 }
3330 pci_using_dac = 0;
3331 }
3332
3333 err = pci_request_regions(pdev, ixgbevf_driver_name);
3334 if (err) {
3335 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3336 goto err_pci_reg;
3337 }
3338
3339 pci_set_master(pdev);
3340
3341 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3342 MAX_TX_QUEUES);
3343 if (!netdev) {
3344 err = -ENOMEM;
3345 goto err_alloc_etherdev;
3346 }
3347
3348 SET_NETDEV_DEV(netdev, &pdev->dev);
3349
3350 pci_set_drvdata(pdev, netdev);
3351 adapter = netdev_priv(netdev);
3352
3353 adapter->netdev = netdev;
3354 adapter->pdev = pdev;
3355 hw = &adapter->hw;
3356 hw->back = adapter;
3357 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3358
3359
3360
3361
3362
3363 pci_save_state(pdev);
3364
3365 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3366 pci_resource_len(pdev, 0));
3367 if (!hw->hw_addr) {
3368 err = -EIO;
3369 goto err_ioremap;
3370 }
3371
3372 ixgbevf_assign_netdev_ops(netdev);
3373
3374 adapter->bd_number = cards_found;
3375
3376
3377 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3378 hw->mac.type = ii->mac;
3379
3380 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3381 sizeof(struct ixgbe_mbx_operations));
3382
3383
3384 err = ixgbevf_sw_init(adapter);
3385 if (err)
3386 goto err_sw_init;
3387
3388
3389 if (!is_valid_ether_addr(netdev->dev_addr)) {
3390 pr_err("invalid MAC address\n");
3391 err = -EIO;
3392 goto err_sw_init;
3393 }
3394
3395 netdev->hw_features = NETIF_F_SG |
3396 NETIF_F_IP_CSUM |
3397 NETIF_F_IPV6_CSUM |
3398 NETIF_F_TSO |
3399 NETIF_F_TSO6 |
3400 NETIF_F_RXCSUM;
3401
3402 netdev->features = netdev->hw_features |
3403 NETIF_F_HW_VLAN_TX |
3404 NETIF_F_HW_VLAN_RX |
3405 NETIF_F_HW_VLAN_FILTER;
3406
3407 netdev->vlan_features |= NETIF_F_TSO;
3408 netdev->vlan_features |= NETIF_F_TSO6;
3409 netdev->vlan_features |= NETIF_F_IP_CSUM;
3410 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3411 netdev->vlan_features |= NETIF_F_SG;
3412
3413 if (pci_using_dac)
3414 netdev->features |= NETIF_F_HIGHDMA;
3415
3416 netdev->priv_flags |= IFF_UNICAST_FLT;
3417
3418 init_timer(&adapter->watchdog_timer);
3419 adapter->watchdog_timer.function = ixgbevf_watchdog;
3420 adapter->watchdog_timer.data = (unsigned long)adapter;
3421
3422 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3423 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3424
3425 err = ixgbevf_init_interrupt_scheme(adapter);
3426 if (err)
3427 goto err_sw_init;
3428
3429 strcpy(netdev->name, "eth%d");
3430
3431 err = register_netdev(netdev);
3432 if (err)
3433 goto err_register;
3434
3435 netif_carrier_off(netdev);
3436
3437 ixgbevf_init_last_counter_stats(adapter);
3438
3439
3440 hw_dbg(hw, "%pM\n", netdev->dev_addr);
3441
3442 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3443
3444 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3445 cards_found++;
3446 return 0;
3447
3448err_register:
3449 ixgbevf_clear_interrupt_scheme(adapter);
3450err_sw_init:
3451 ixgbevf_reset_interrupt_capability(adapter);
3452 iounmap(hw->hw_addr);
3453err_ioremap:
3454 free_netdev(netdev);
3455err_alloc_etherdev:
3456 pci_release_regions(pdev);
3457err_pci_reg:
3458err_dma:
3459 pci_disable_device(pdev);
3460 return err;
3461}
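
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/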
3472static void ixgbevf_remove(struct pci_dev *pdev)
3473{
3474 struct net_device *netdev = pci_get_drvdata(pdev);
3475 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3476
3477 set_bit(__IXGBEVF_DOWN, &adapter->state);
3478
3479 del_timer_sync(&adapter->watchdog_timer);
3480
3481 cancel_work_sync(&adapter->reset_task);
3482 cancel_work_sync(&adapter->watchdog_task);
3483
3484 if (netdev->reg_state == NETREG_REGISTERED)
3485 unregister_netdev(netdev);
3486
3487 ixgbevf_clear_interrupt_scheme(adapter);
3488 ixgbevf_reset_interrupt_capability(adapter);
3489
3490 iounmap(adapter->hw.hw_addr);
3491 pci_release_regions(pdev);
3492
3493 hw_dbg(&adapter->hw, "Remove complete\n");
3494
3495 kfree(adapter->tx_ring);
3496 kfree(adapter->rx_ring);
3497
3498 free_netdev(netdev);
3499
3500 pci_disable_device(pdev);
3501}
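
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/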
3511static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3512 pci_channel_state_t state)
3513{
3514 struct net_device *netdev = pci_get_drvdata(pdev);
3515 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3516
3517 netif_device_detach(netdev);
3518
3519 if (state == pci_channel_io_perm_failure)
3520 return PCI_ERS_RESULT_DISCONNECT;
3521
3522 if (netif_running(netdev))
3523 ixgbevf_down(adapter);
3524
3525 pci_disable_device(pdev);
3526
3527
3528 return PCI_ERS_RESULT_NEED_RESET;
3529}
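
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first half of the ixgbevf_resume routine.
 **/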
3538static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3539{
3540 struct net_device *netdev = pci_get_drvdata(pdev);
3541 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3542
3543 if (pci_enable_device_mem(pdev)) {
3544 dev_err(&pdev->dev,
3545 "Cannot re-enable PCI device after reset.\n");
3546 return PCI_ERS_RESULT_DISCONNECT;
3547 }
3548
3549 pci_set_master(pdev);
3550
3551 ixgbevf_reset(adapter);
3552
3553 return PCI_ERS_RESULT_RECOVERED;
3554}
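
/**
 * ixgbevf_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.  Implementation resembles the
 * second half of the ixgbevf_resume routine.
 **/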
3564static void ixgbevf_io_resume(struct pci_dev *pdev)
3565{
3566 struct net_device *netdev = pci_get_drvdata(pdev);
3567 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3568
3569 if (netif_running(netdev))
3570 ixgbevf_up(adapter);
3571
3572 netif_device_attach(netdev);
3573}
3574
/* PCI Error Recovery (ERS) */
3576static const struct pci_error_handlers ixgbevf_err_handler = {
3577 .error_detected = ixgbevf_io_error_detected,
3578 .slot_reset = ixgbevf_io_slot_reset,
3579 .resume = ixgbevf_io_resume,
3580};
3581
3582static struct pci_driver ixgbevf_driver = {
3583 .name = ixgbevf_driver_name,
3584 .id_table = ixgbevf_pci_tbl,
3585 .probe = ixgbevf_probe,
3586 .remove = ixgbevf_remove,
3587#ifdef CONFIG_PM
3588
3589 .suspend = ixgbevf_suspend,
3590 .resume = ixgbevf_resume,
3591#endif
3592 .shutdown = ixgbevf_shutdown,
3593 .err_handler = &ixgbevf_err_handler
3594};
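
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/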
3602static int __init ixgbevf_init_module(void)
3603{
3604 int ret;
3605 pr_info("%s - version %s\n", ixgbevf_driver_string,
3606 ixgbevf_driver_version);
3607
3608 pr_info("%s\n", ixgbevf_copyright);
3609
3610 ret = pci_register_driver(&ixgbevf_driver);
3611 return ret;
3612}
3613
3614module_init(ixgbevf_init_module);
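
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/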
3622static void __exit ixgbevf_exit_module(void)
3623{
3624 pci_unregister_driver(&ixgbevf_driver);
3625}
3626
3627#ifdef DEBUG
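/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/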
3632char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3633{
3634 struct ixgbevf_adapter *adapter = hw->back;
3635 return adapter->netdev->name;
3636}
3637
3638#endif
3639module_exit(ixgbevf_exit_module);
3640
3641
3642