// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>

#include "igc.h"
#include "igc_hw.h"

#define DRV_VERSION	"0.0.1-k"
#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
char igc_driver_version[] = DRV_VERSION;
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

static void igc_clean_tx_ring(struct igc_ring *tx_ring);
static int igc_sw_init(struct igc_adapter *);
static void igc_configure(struct igc_adapter *adapter);
static void igc_power_down_link(struct igc_adapter *adapter);
static void igc_set_default_mac_filter(struct igc_adapter *adapter);
static void igc_set_rx_mode(struct net_device *netdev);
static void igc_write_itr(struct igc_q_vector *q_vector);
static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector);
static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx);
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
					 bool msix);
static void igc_free_q_vectors(struct igc_adapter *adapter);
static void igc_irq_disable(struct igc_adapter *adapter);
static void igc_irq_enable(struct igc_adapter *adapter);
static void igc_configure_msix(struct igc_adapter *adapter);
static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

void igc_reset(struct igc_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame.  As such we must have enough
	 * space to allow for us to complete our work.
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	if (!netif_running(adapter->netdev))
		igc_power_down_link(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == igc_media_type_copper)
		igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_power_down_link - Power down the phy link
 * @adapter: address of board private structure
 */
static void igc_power_down_link(struct igc_adapter *adapter)
{
	if (adapter->hw.phy.media_type == igc_media_type_copper)
		igc_power_down_phy_copper_base(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources.
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources.
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	dev_err(dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	if (rx_ring->skb)
		dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources.
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources.
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size, desc_len;

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev,
		"Unable to allocate memory for the receive descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use and next-to-clean */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* set descriptor configuration */
	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	/* Fill out hash function seeds */
	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

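	/* (Re)build the RSS indirection table only when the queue count has
	 * changed: entry j maps to queue (j * num_rx_queues) / IGC_RETA_SIZE,
	 * which spreads the IGC_RETA_SIZE entries evenly across all queues.
	 */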
	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
				(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igc_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default.
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

/* Tx checksum offload is not wired up yet; this stub keeps the transmit
 * path complete.
 */
static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* memory barrier: make the stopped state visible before we re-check
	 * for free descriptors; pairs with the smp_mb() in igc_clean_tx_irq
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

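/* Fast path: only fall into the slow __igc_maybe_stop_tx() path when the
 * ring is actually short on free descriptors.
 */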
static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;
	return __igc_maybe_stop_tx(tx_ring, size);
}

static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	return cmd_type;
}

static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

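	/* Branchless flag translation: (tx_flags & FLAG) evaluates to either
	 * 0 or FLAG, so multiplying by (POPTS_BIT / FLAG) yields either 0 or
	 * POPTS_BIT without a conditional.
	 */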
	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	struct skb_frag_struct *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);

	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

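		/* A buffer larger than IGC_MAX_DATA_PER_TXD must be split
		 * across descriptors.  The length bits of cmd_type are zero
		 * here, so "cmd_type ^ IGC_MAX_DATA_PER_TXD" acts as an OR
		 * that fills in the length for a full-sized chunk.
		 */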
		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier in case the skb pointer is being
	 * shared between CPU and device.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return 0;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	u32 tx_flags = 0;
	unsigned short f;
	u8 hdr_len = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	igc_tx_csum(tx_ring, first);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;
}

static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

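	/* Fall back to a modulo mapping if the stack picked a queue index
	 * beyond the number of Tx queues this adapter actually has.
	 */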
	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and other fields
 * within the skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
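	/* Page reuse strategy depends on page size: with 4K pages each page
	 * is split in half and page_offset flips between the halves (XOR
	 * with truesize); with larger pages the offset simply advances by
	 * the (aligned) amount just consumed.
	 */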
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
	rx_buffer->page_offset ^= truesize;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
	rx_buffer->page_offset += truesize;
#endif
}

static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     union igc_adv_rx_desc *rx_desc,
				     unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IGC_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* build an skb around the page buffer */
	skb = build_skb(va - IGC_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IGC_SKB_PAD);
	__skb_put(skb, size);

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 union igc_adv_rx_desc *rx_desc,
					 unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter.
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

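	/* next_to_alloc tracks where the next recycled buffer goes; it
	 * always trails next_to_use, so a half-used page can be handed
	 * straight back to the allocation path without a new DMA mapping.
	 */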
	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

static inline bool igc_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote pages */
	if (unlikely(igc_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IGC_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * igc_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool igc_is_non_eop(struct igc_ring *rx_ring,
			   union igc_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGC_RX_DESC(rx_ring, ntc));

	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * If the skb is not at least 60 bytes it is padded so that it qualifies
 * as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
				union igc_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	if (unlikely((igc_test_staterr(rx_desc,
				       IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

static void igc_put_rx_buffer(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *rx_buffer)
{
	if (igc_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		igc_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring to allocate new receive buffers
 * @cleaned_count: number of buffers to allocate
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_use;
	struct igc_rx_buffer *bi;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
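	/* Bias the index negative (i runs from -count up to 0) so that the
	 * wrap check in the loop below is a cheap "!i" test instead of a
	 * compare against ring->count on every iteration.
	 */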
	i -= rx_ring->count;

	bufsz = igc_rx_bufsz(rx_ring);

	do {
		if (!igc_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGC_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
	unsigned int total_bytes = 0, total_packets = 0;
	struct igc_ring *rx_ring = q_vector->rx.ring;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = igc_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union igc_adv_rx_desc *rx_desc;
		struct igc_rx_buffer *rx_buffer;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
			igc_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igc_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
		else
			skb = igc_construct_skb(rx_ring, rx_buffer,
						rx_desc, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		igc_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igc_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		igc_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igc_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets;
}

static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
}

static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igc_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGC_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igc_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;

	return true;
}

/**
 * igc_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * @napi_budget: Used to determine if we are in netpoll
 *
 * returns true if ring is completely cleaned
 */
static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
{
	struct igc_adapter *adapter = q_vector->adapter;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	struct igc_ring *tx_ring = q_vector->tx.ring;
	unsigned int i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;

	if (test_bit(__IGC_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGC_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
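	/* As in igc_alloc_rx_buffers(), i runs from -count up to 0 so that
	 * ring wrap-around can be detected with a cheap "!i" check.
	 */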

	do {
		union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGC_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct igc_hw *hw = &adapter->hw;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (tx_buffer->next_to_watch &&
		    time_after(jiffies, tx_buffer->time_stamp +
		    (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(IGC_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				tx_buffer->next_to_watch,
				jiffies,
				tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGC_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

/**
 * igc_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 */
void igc_up(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int i = 0;

	/* hardware has been reset, we need to reload some things */
	igc_configure(adapter);

	clear_bit(__IGC_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&adapter->q_vector[i]->napi);

	if (adapter->msix_entries)
		igc_configure_msix(adapter);
	else
		igc_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(IGC_ICR);
	igc_irq_enable(adapter);

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);
}

/**
 * igc_update_stats - Update the board statistics counters
 * @adapter: board private structure
 */
void igc_update_stats(struct igc_adapter *adapter)
{
	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;
	u64 _bytes, _packets;
	u64 bytes, packets;
	unsigned int start;
	u32 mpc;
	int i;

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	packets = 0;
	bytes = 0;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igc_ring *ring = adapter->rx_ring[i];
		u32 rqdpc = rd32(IGC_RQDPC(i));

		if (hw->mac.type >= igc_i225)
			wr32(IGC_RQDPC(i), 0);

		if (rqdpc) {
			ring->rx_stats.drops += rqdpc;
			net_stats->rx_fifo_errors += rqdpc;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	packets = 0;
	bytes = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;
	rcu_read_unlock();

	/* read stats registers */
	adapter->stats.crcerrs += rd32(IGC_CRCERRS);
	adapter->stats.gprc += rd32(IGC_GPRC);
	adapter->stats.gorc += rd32(IGC_GORCL);
	rd32(IGC_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(IGC_BPRC);
	adapter->stats.mprc += rd32(IGC_MPRC);
	adapter->stats.roc += rd32(IGC_ROC);

	adapter->stats.prc64 += rd32(IGC_PRC64);
	adapter->stats.prc127 += rd32(IGC_PRC127);
	adapter->stats.prc255 += rd32(IGC_PRC255);
	adapter->stats.prc511 += rd32(IGC_PRC511);
	adapter->stats.prc1023 += rd32(IGC_PRC1023);
	adapter->stats.prc1522 += rd32(IGC_PRC1522);
	adapter->stats.symerrs += rd32(IGC_SYMERRS);
	adapter->stats.sec += rd32(IGC_SEC);

	mpc = rd32(IGC_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(IGC_SCC);
	adapter->stats.ecol += rd32(IGC_ECOL);
	adapter->stats.mcc += rd32(IGC_MCC);
	adapter->stats.latecol += rd32(IGC_LATECOL);
	adapter->stats.dc += rd32(IGC_DC);
	adapter->stats.rlec += rd32(IGC_RLEC);
	adapter->stats.xonrxc += rd32(IGC_XONRXC);
	adapter->stats.xontxc += rd32(IGC_XONTXC);
	adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
	adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
	adapter->stats.fcruc += rd32(IGC_FCRUC);
	adapter->stats.gptc += rd32(IGC_GPTC);
	adapter->stats.gotc += rd32(IGC_GOTCL);
	rd32(IGC_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(IGC_RNBC);
	adapter->stats.ruc += rd32(IGC_RUC);
	adapter->stats.rfc += rd32(IGC_RFC);
	adapter->stats.rjc += rd32(IGC_RJC);
	adapter->stats.tor += rd32(IGC_TORH);
	adapter->stats.tot += rd32(IGC_TOTH);
	adapter->stats.tpr += rd32(IGC_TPR);

	adapter->stats.ptc64 += rd32(IGC_PTC64);
	adapter->stats.ptc127 += rd32(IGC_PTC127);
	adapter->stats.ptc255 += rd32(IGC_PTC255);
	adapter->stats.ptc511 += rd32(IGC_PTC511);
	adapter->stats.ptc1023 += rd32(IGC_PTC1023);
	adapter->stats.ptc1522 += rd32(IGC_PTC1522);

	adapter->stats.mptc += rd32(IGC_MPTC);
	adapter->stats.bptc += rd32(IGC_BPTC);

	adapter->stats.tpt += rd32(IGC_TPT);
	adapter->stats.colc += rd32(IGC_COLC);

	adapter->stats.algnerrc += rd32(IGC_ALGNERRC);

	adapter->stats.tsctc += rd32(IGC_TSCTC);
	adapter->stats.tsctfc += rd32(IGC_TSCTFC);

	adapter->stats.iac += rd32(IGC_IAC);
	adapter->stats.icrxoc += rd32(IGC_ICRXOC);
	adapter->stats.icrxptc += rd32(IGC_ICRXPTC);
	adapter->stats.icrxatc += rd32(IGC_ICRXATC);
	adapter->stats.ictxptc += rd32(IGC_ICTXPTC);
	adapter->stats.ictxatc += rd32(IGC_ICTXATC);
	adapter->stats.ictxqec += rd32(IGC_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
		adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
		adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */
	adapter->stats.mgptc += rd32(IGC_MGTPTC);
	adapter->stats.mgprc += rd32(IGC_MGTPRC);
	adapter->stats.mgpdc += rd32(IGC_MGTPDC);
}

static void igc_nfc_filter_exit(struct igc_adapter *adapter)
{
	struct igc_nfc_filter *rule;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
		igc_erase_filter(adapter, rule);

	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
		igc_erase_filter(adapter, rule);

	spin_unlock(&adapter->nfc_lock);
}

static void igc_nfc_filter_restore(struct igc_adapter *adapter)
{
	struct igc_nfc_filter *rule;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
		igc_add_filter(adapter, rule);

	spin_unlock(&adapter->nfc_lock);
}

/**
 * igc_down - Close the interface
 * @adapter: board private structure
 */
void igc_down(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i = 0;

	set_bit(__IGC_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(IGC_RCTL);
	wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
	/* flush and sleep below */

	igc_nfc_filter_exit(adapter);

	/* set trans_start so we don't get spurious watchdogs during reset */
	netif_trans_update(netdev);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_EN;
	wr32(IGC_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	usleep_range(10000, 20000);

	igc_irq_disable(adapter);

	adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		if (adapter->q_vector[i]) {
			napi_synchronize(&adapter->q_vector[i]->napi);
			napi_disable(&adapter->q_vector[i]->napi);
		}
	}

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igc_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igc_reset(adapter);

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;

	igc_clean_all_tx_rings(adapter);
	igc_clean_all_rx_rings(adapter);
}

void igc_reinit_locked(struct igc_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	igc_down(adapter);
	igc_up(adapter);
	clear_bit(__IGC_RESETTING, &adapter->state);
}

static void igc_reset_task(struct work_struct *work)
{
	struct igc_adapter *adapter;

	adapter = container_of(work, struct igc_adapter, reset_task);

	netdev_err(adapter->netdev, "Reset adapter\n");
	igc_reinit_locked(adapter);
}

/**
 * igc_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int igc_change_mtu(struct net_device *netdev, int new_mtu)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* igc_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igc_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igc_up(adapter);
	else
		igc_reset(adapter);

	clear_bit(__IGC_RESETTING, &adapter->state);

	return 0;
}

/**
 * igc_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are updated here and also from the timer callback.
 */
static struct net_device_stats *igc_get_stats(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IGC_RESETTING, &adapter->state))
		igc_update_stats(adapter);

	/* only return the current stats */
	return &netdev->stats;
}

static netdev_features_t igc_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int igc_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* Only RXALL and NTUPLE changes need further handling below */
	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
		return 0;

	if (!(features & NETIF_F_NTUPLE)) {
		struct hlist_node *node2;
		struct igc_nfc_filter *rule;

		spin_lock(&adapter->nfc_lock);
		hlist_for_each_entry_safe(rule, node2,
					  &adapter->nfc_filter_list, nfc_node) {
			igc_erase_filter(adapter, rule);
			hlist_del(&rule->nfc_node);
			kfree(rule);
		}
		spin_unlock(&adapter->nfc_lock);
		adapter->nfc_filter_count = 0;
	}

	netdev->features = features;

	if (netif_running(netdev))
		igc_reinit_locked(adapter);
	else
		igc_reset(adapter);

	return 1;
}

static netdev_features_t
igc_features_check(struct sk_buff *skb, struct net_device *dev,
		   netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPv4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}

/**
 * igc_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 */
static void igc_configure(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i = 0;

	igc_get_hw_control(adapter);
	igc_set_rx_mode(netdev);

	igc_setup_tctl(adapter);
	igc_setup_mrqc(adapter);
	igc_setup_rctl(adapter);

	igc_nfc_filter_restore(adapter);
	igc_configure_tx(adapter);
	igc_configure_rx(adapter);

	igc_rx_fifo_flush_base(&adapter->hw);

	/* call igc_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igc_ring *ring = adapter->rx_ring[i];

		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
	}
}

/**
 * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
 * @adapter: address of board private structure
 * @index: Index of the RAR entry which need to be synced with MAC table
 */
static void igc_rar_set_index(struct igc_adapter *adapter, u32 index)
{
	u8 *addr = adapter->mac_table[index].addr;
	struct igc_hw *hw = &adapter->hw;
	u32 rar_low, rar_high;

	/* HW expects these to be in network order when they are plugged
	 * into the registers which are little endian.  In order to guarantee
	 * that ordering we need to do an leXX_to_cpup here in order to be
	 * ready for the byteswap that occurs with writel
	 */
	rar_low = le32_to_cpup((__le32 *)(addr));
	rar_high = le16_to_cpup((__le16 *)(addr + 4));

	/* Indicate to hardware the Address is Valid. */
	if (adapter->mac_table[index].state & IGC_MAC_STATE_IN_USE) {
		if (is_valid_ether_addr(addr))
			rar_high |= IGC_RAH_AV;

		rar_high |= IGC_RAH_POOL_1 <<
			    adapter->mac_table[index].queue;
	}

	wr32(IGC_RAL(index), rar_low);
	wrfl();
	wr32(IGC_RAH(index), rar_high);
	wrfl();
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct igc_mac_addr *mac_table = &adapter->mac_table[0];

	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
	mac_table->state = IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;

	igc_rar_set_index(adapter, 0);
}

/* If the filter to be added and an already existing filter express
 * the same address and address type, it should be possible to only
 * override the other configurations, for example the queue to steer
 * traffic.
 */
static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
				      const u8 *addr, const u8 flags)
{
	if (!(entry->state & IGC_MAC_STATE_IN_USE))
		return true;

	if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
	    (flags & IGC_MAC_STATE_SRC_ADDR))
		return false;

	if (!ether_addr_equal(addr, entry->addr))
		return false;

	return true;
}

/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
 * 'flags' is used to indicate what kind of match is made, match is by
 * default for the destination address, if matching by source address
 * is desired the flag IGC_MAC_STATE_SRC_ADDR can be used.
 */
static int igc_add_mac_filter_flags(struct igc_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct igc_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for the first empty entry in the MAC table.
	 * Do not touch entries at the end of the table reserved
	 * for the VF MAC addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
					       addr, flags))
			continue;

		ether_addr_copy(adapter->mac_table[i].addr, addr);
		adapter->mac_table[i].queue = queue;
		adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags;

		igc_rar_set_index(adapter, i);
		return i;
	}

	return -ENOSPC;
}

int igc_add_mac_steering_filter(struct igc_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	return igc_add_mac_filter_flags(adapter, addr, queue,
					IGC_MAC_STATE_QUEUE_STEERING | flags);
}

/* Remove a MAC filter for 'addr' directing matching traffic to
 * 'queue', 'flags' is used to indicate what kind of match need to be
 * removed, match is by default for the destination address, if
 * matching by source address is to be removed the flag
 * IGC_MAC_STATE_SRC_ADDR can be used.
 */
static int igc_del_mac_filter_flags(struct igc_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct igc_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for matching entry in the MAC table based on given address
	 * and queue. Do not touch entries at the end of the table reserved
	 * for the VF MAC addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
			continue;
		if ((adapter->mac_table[i].state & flags) != flags)
			continue;
		if (adapter->mac_table[i].queue != queue)
			continue;
		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
			continue;

		/* When a filter for the default address is "deleted",
		 * we return it to its initial configuration
		 */
		if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
			adapter->mac_table[i].state =
				IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
		} else {
			adapter->mac_table[i].state = 0;
			adapter->mac_table[i].queue = 0;
			memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
		}

		igc_rar_set_index(adapter, i);
		return 0;
	}

	return -ENOENT;
}

int igc_del_mac_steering_filter(struct igc_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	return igc_del_mac_filter_flags(adapter, addr, queue,
					IGC_MAC_STATE_QUEUE_STEERING | flags);
}

/**
 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine
 * is responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.  Currently a no-op placeholder.
 */
static void igc_set_rx_mode(struct net_device *netdev)
{
}

/**
 * igc_msix_other - msix other interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t igc_msix_other(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_hw *hw = &adapter->hw;
	u32 icr = rd32(IGC_ICR);

	/* reading ICR causes bit 31 of EICR to be cleared */
	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & IGC_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	wr32(IGC_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * igc_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset of in IVAR, should be multiple of 8
 *
 * The IVAR table consists of 2 columns,
 * each containing an cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 */
static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(IGC_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | IGC_IVAR_VALID) << offset;

	array_wr32(IGC_IVAR0, index, ivar);
}

static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	struct igc_hw *hw = &adapter->hw;
	int rx_queue = IGC_N0_QUEUE;
	int tx_queue = IGC_N0_QUEUE;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

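	/* On i225 each IVAR register holds the cause allocations for two
	 * queues: queue >> 1 selects the IVAR row, and bit 0 of the queue
	 * number (shifted left by 4, Tx entries a further 8 bits up)
	 * selects the byte within that row.
	 */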
	switch (hw->mac.type) {
	case igc_i225:
		if (rx_queue > IGC_N0_QUEUE)
			igc_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGC_N0_QUEUE)
			igc_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
2530
2531
2532
2533
2534
2535
2536
2537
2538static void igc_configure_msix(struct igc_adapter *adapter)
2539{
2540 struct igc_hw *hw = &adapter->hw;
2541 int i, vector = 0;
2542 u32 tmp;
2543
2544 adapter->eims_enable_mask = 0;
2545
2546
2547 switch (hw->mac.type) {
2548 case igc_i225:
2549
2550
2551
2552 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
2553 IGC_GPIE_PBA | IGC_GPIE_EIAME |
2554 IGC_GPIE_NSICR);
2555
		/* enable msix_other interrupt */
2557 adapter->eims_other = BIT(vector);
2558 tmp = (vector++ | IGC_IVAR_VALID) << 8;
2559
2560 wr32(IGC_IVAR_MISC, tmp);
2561 break;
2562 default:
		/* do nothing, since nothing else supports MSI-X */
2564 break;
2565 }
2566
2567 adapter->eims_enable_mask |= adapter->eims_other;
2568
2569 for (i = 0; i < adapter->num_q_vectors; i++)
2570 igc_assign_vector(adapter->q_vector[i], vector++);
2571
2572 wrfl();
2573}
2574
2575static irqreturn_t igc_msix_ring(int irq, void *data)
2576{
2577 struct igc_q_vector *q_vector = data;
2578
	/* Write the ITR value calculated from the previous interrupt. */
2580 igc_write_itr(q_vector);
2581
2582 napi_schedule(&q_vector->napi);
2583
2584 return IRQ_HANDLED;
2585}
2586
/**
 * igc_request_msix - Initialize MSI-X interrupts
 * @adapter: Pointer to adapter structure
 *
 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 */
2594static int igc_request_msix(struct igc_adapter *adapter)
2595{
2596 int i = 0, err = 0, vector = 0, free_vector = 0;
2597 struct net_device *netdev = adapter->netdev;
2598
2599 err = request_irq(adapter->msix_entries[vector].vector,
2600 &igc_msix_other, 0, netdev->name, adapter);
2601 if (err)
2602 goto err_out;
2603
2604 for (i = 0; i < adapter->num_q_vectors; i++) {
2605 struct igc_q_vector *q_vector = adapter->q_vector[i];
2606
2607 vector++;
2608
2609 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
2610
2611 if (q_vector->rx.ring && q_vector->tx.ring)
2612 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
2613 q_vector->rx.ring->queue_index);
2614 else if (q_vector->tx.ring)
2615 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
2616 q_vector->tx.ring->queue_index);
2617 else if (q_vector->rx.ring)
2618 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
2619 q_vector->rx.ring->queue_index);
2620 else
2621 sprintf(q_vector->name, "%s-unused", netdev->name);
2622
2623 err = request_irq(adapter->msix_entries[vector].vector,
2624 igc_msix_ring, 0, q_vector->name,
2625 q_vector);
2626 if (err)
2627 goto err_free;
2628 }
2629
2630 igc_configure_msix(adapter);
2631 return 0;
2632
2633err_free:
	/* free already assigned IRQs */
2635 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
2636
2637 vector--;
2638 for (i = 0; i < vector; i++) {
2639 free_irq(adapter->msix_entries[free_vector++].vector,
2640 adapter->q_vector[i]);
2641 }
2642err_out:
2643 return err;
2644}
2645
/**
 * igc_reset_q_vector - Reset config for interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be reset
 *
 * If NAPI is enabled it will delete any references to the
 * NAPI struct.  This is preparation for igc_free_q_vector.
 */
2654static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
2655{
2656 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
2657
	/* if we're coming from igc_set_interrupt_capability, the vectors are
	 * not yet allocated
	 */
2661 if (!q_vector)
2662 return;
2663
2664 if (q_vector->tx.ring)
2665 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
2666
2667 if (q_vector->rx.ring)
2668 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
2669
2670 netif_napi_del(&q_vector->napi);
2671}
2672
2673static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
2674{
2675 int v_idx = adapter->num_q_vectors;
2676
2677 if (adapter->msix_entries) {
2678 pci_disable_msix(adapter->pdev);
2679 kfree(adapter->msix_entries);
2680 adapter->msix_entries = NULL;
2681 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
2682 pci_disable_msi(adapter->pdev);
2683 }
2684
2685 while (v_idx--)
2686 igc_reset_q_vector(adapter, v_idx);
2687}
2688
/**
 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: Pointer to adapter structure
 *
 * This function resets the device so that it has 0 Rx queues, 0 Tx queues
 * and no MSI-X interrupts allocated.
 */
2696static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
2697{
2698 igc_free_q_vectors(adapter);
2699 igc_reset_interrupt_capability(adapter);
2700}
2701
/**
 * igc_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 */
2710static void igc_free_q_vectors(struct igc_adapter *adapter)
2711{
2712 int v_idx = adapter->num_q_vectors;
2713
2714 adapter->num_tx_queues = 0;
2715 adapter->num_rx_queues = 0;
2716 adapter->num_q_vectors = 0;
2717
2718 while (v_idx--) {
2719 igc_reset_q_vector(adapter, v_idx);
2720 igc_free_q_vector(adapter, v_idx);
2721 }
2722}
2723
/**
 * igc_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.
 */
2731static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
2732{
2733 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
2734
2735 adapter->q_vector[v_idx] = NULL;
2736
	/* igc_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
2740 if (q_vector)
2741 kfree_rcu(q_vector, rcu);
2742}
2743
/**
 * igc_update_phy_info - Update the phy information
 * @t: pointer to the timer_list embedded in the private adapter struct
 */
2747static void igc_update_phy_info(struct timer_list *t)
2748{
2749 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
2750
2751 igc_get_phy_info(&adapter->hw);
2752}
2753
/**
 * igc_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 */
2758bool igc_has_link(struct igc_adapter *adapter)
2759{
2760 struct igc_hw *hw = &adapter->hw;
2761 bool link_active = false;
2762
	/* get_link_status is set on LSC (link status) interrupt or
	 * Rx sequence error interrupt.  get_link_status will stay
	 * false until igc_check_for_link establishes link
	 * for copper adapters ONLY
	 */
2768 switch (hw->phy.media_type) {
2769 case igc_media_type_copper:
2770 if (!hw->mac.get_link_status)
2771 return true;
2772 hw->mac.ops.check_for_link(hw);
2773 link_active = !hw->mac.get_link_status;
2774 break;
2775 default:
2776 case igc_media_type_unknown:
2777 break;
2778 }
2779
2780 if (hw->mac.type == igc_i225 &&
2781 hw->phy.id == I225_I_PHY_ID) {
2782 if (!netif_carrier_ok(adapter->netdev)) {
2783 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
2784 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
2785 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
2786 adapter->link_check_timeout = jiffies;
2787 }
2788 }
2789
2790 return link_active;
2791}
2792
/**
 * igc_watchdog - Timer Call-back
 * @t: pointer to the timer_list embedded in the private adapter struct
 */
2797static void igc_watchdog(struct timer_list *t)
2798{
2799 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
2800
2801 schedule_work(&adapter->watchdog_task);
2802}
2803
2804static void igc_watchdog_task(struct work_struct *work)
2805{
2806 struct igc_adapter *adapter = container_of(work,
2807 struct igc_adapter,
2808 watchdog_task);
2809 struct net_device *netdev = adapter->netdev;
2810 struct igc_hw *hw = &adapter->hw;
2811 struct igc_phy_info *phy = &hw->phy;
2812 u16 phy_data, retry_count = 20;
2813 u32 connsw;
2814 u32 link;
2815 int i;
2816
2817 link = igc_has_link(adapter);
2818
2819 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
2820 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
2821 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
2822 else
2823 link = false;
2824 }
2825
	/* with Media Auto Sense, force link down if autosense is disabled */
2827 if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
2828 if (hw->phy.media_type == igc_media_type_copper) {
2829 connsw = rd32(IGC_CONNSW);
2830 if (!(connsw & IGC_CONNSW_AUTOSENSE_EN))
2831 link = 0;
2832 }
2833 }
2834 if (link) {
2835 if (!netif_carrier_ok(netdev)) {
2836 u32 ctrl;
2837
2838 hw->mac.ops.get_speed_and_duplex(hw,
2839 &adapter->link_speed,
2840 &adapter->link_duplex);
2841
2842 ctrl = rd32(IGC_CTRL);
			/* Link status message must follow this format */
2844 netdev_info(netdev,
2845 "igc: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
2846 netdev->name,
2847 adapter->link_speed,
2848 adapter->link_duplex == FULL_DUPLEX ?
2849 "Full" : "Half",
2850 (ctrl & IGC_CTRL_TFCE) &&
2851 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
2852 (ctrl & IGC_CTRL_RFCE) ? "RX" :
2853 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
2854
			/* check if SmartSpeed worked */
2856 igc_check_downshift(hw);
2857 if (phy->speed_downgraded)
2858 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
2859
			/* adjust timeout factor according to speed/duplex */
2861 adapter->tx_timeout_factor = 1;
2862 switch (adapter->link_speed) {
2863 case SPEED_10:
2864 adapter->tx_timeout_factor = 14;
2865 break;
2866 case SPEED_100:
				/* 100 Mb/s uses the default timeout factor */
2868 break;
2869 }
2870
2871 if (adapter->link_speed != SPEED_1000)
2872 goto no_wait;
2873
			/* wait for Remote receiver status OK */
2875retry_read_status:
2876 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
2877 &phy_data)) {
2878 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
2879 retry_count) {
2880 msleep(100);
2881 retry_count--;
2882 goto retry_read_status;
2883 } else if (!retry_count) {
					dev_err(&adapter->pdev->dev, "exceeded max 2 second wait for remote receiver status\n");
2885 }
2886 } else {
				dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status register\n");
2888 }
2889no_wait:
2890 netif_carrier_on(netdev);
2891
			/* link state has changed, schedule phy info update */
2893 if (!test_bit(__IGC_DOWN, &adapter->state))
2894 mod_timer(&adapter->phy_info_timer,
2895 round_jiffies(jiffies + 2 * HZ));
2896 }
2897 } else {
2898 if (netif_carrier_ok(netdev)) {
2899 adapter->link_speed = 0;
2900 adapter->link_duplex = 0;
2901
			/* Link status message must follow this format */
2903 netdev_info(netdev, "igc: %s NIC Link is Down\n",
2904 netdev->name);
2905 netif_carrier_off(netdev);
2906
			/* link state has changed, schedule phy info update */
2908 if (!test_bit(__IGC_DOWN, &adapter->state))
2909 mod_timer(&adapter->phy_info_timer,
2910 round_jiffies(jiffies + 2 * HZ));
2911
			/* link is down, time to check for alternate media */
2913 if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
2914 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
2915 schedule_work(&adapter->reset_task);
2916
2917 return;
2918 }
2919 }
2920
		/* also check for alternate media here */
2922 } else if (!netif_carrier_ok(netdev) &&
2923 (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
2924 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
2925 schedule_work(&adapter->reset_task);
2926
2927 return;
2928 }
2929 }
2930 }
2931
2932 spin_lock(&adapter->stats64_lock);
2933 igc_update_stats(adapter);
2934 spin_unlock(&adapter->stats64_lock);
2935
2936 for (i = 0; i < adapter->num_tx_queues; i++) {
2937 struct igc_ring *tx_ring = adapter->tx_ring[i];
2938
2939 if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
2945 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
2946 adapter->tx_timeout_count++;
2947 schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
2949 return;
2950 }
2951 }
2952
		/* Force detection of hung controller every watchdog period */
2954 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
2955 }
2956
	/* Cause software interrupt to ensure Rx ring is cleaned */
2958 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
2959 u32 eics = 0;
2960
2961 for (i = 0; i < adapter->num_q_vectors; i++)
2962 eics |= adapter->q_vector[i]->eims_value;
2963 wr32(IGC_EICS, eics);
2964 } else {
2965 wr32(IGC_ICS, IGC_ICS_RXDMT0);
2966 }
2967
	/* Reset the timer */
2969 if (!test_bit(__IGC_DOWN, &adapter->state)) {
2970 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
2971 mod_timer(&adapter->watchdog_timer,
2972 round_jiffies(jiffies + HZ));
2973 else
2974 mod_timer(&adapter->watchdog_timer,
2975 round_jiffies(jiffies + 2 * HZ));
2976 }
2977}
2978
/**
 * igc_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size.  This
 * algorithm is less sophisticated than that used in igc_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings.  The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 */
2993static void igc_update_ring_itr(struct igc_q_vector *q_vector)
2994{
2995 struct igc_adapter *adapter = q_vector->adapter;
2996 int new_val = q_vector->itr_val;
2997 int avg_wire_size = 0;
2998 unsigned int packets;
2999
	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
3003 switch (adapter->link_speed) {
3004 case SPEED_10:
3005 case SPEED_100:
3006 new_val = IGC_4K_ITR;
3007 goto set_itr_val;
3008 default:
3009 break;
3010 }
3011
3012 packets = q_vector->rx.total_packets;
3013 if (packets)
3014 avg_wire_size = q_vector->rx.total_bytes / packets;
3015
3016 packets = q_vector->tx.total_packets;
3017 if (packets)
3018 avg_wire_size = max_t(u32, avg_wire_size,
3019 q_vector->tx.total_bytes / packets);
3020
	/* if avg_wire_size isn't set no work was done */
3022 if (!avg_wire_size)
3023 goto clear_counts;
3024
	/* Add 24 bytes to size to account for CRC, preamble, and gap */
3026 avg_wire_size += 24;
3027
	/* Don't starve jumbo frames */
3029 avg_wire_size = min(avg_wire_size, 3000);
3030
	/* Give a little boost to mid-size frames */
3032 if (avg_wire_size > 300 && avg_wire_size < 1200)
3033 new_val = avg_wire_size / 3;
3034 else
3035 new_val = avg_wire_size / 2;
3036
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
3038 if (new_val < IGC_20K_ITR &&
3039 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3040 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3041 new_val = IGC_20K_ITR;
3042
3043set_itr_val:
3044 if (new_val != q_vector->itr_val) {
3045 q_vector->itr_val = new_val;
3046 q_vector->set_itr = 1;
3047 }
3048clear_counts:
3049 q_vector->rx.total_bytes = 0;
3050 q_vector->rx.total_packets = 0;
3051 q_vector->tx.total_bytes = 0;
3052 q_vector->tx.total_packets = 0;
3053}
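
/* Worked example for igc_update_ring_itr() (illustrative): a stream of
 * 64-byte frames averages 64 + 24 = 88 bytes on the wire, which falls
 * outside the 300..1200 boost window, so new_val = 88 / 2 = 44; with a
 * conservative ITR setting of 3 that is then raised to IGC_20K_ITR before
 * being latched for the next interrupt.
 */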
3054
/**
 * igc_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 */
3070static void igc_update_itr(struct igc_q_vector *q_vector,
3071 struct igc_ring_container *ring_container)
3072{
3073 unsigned int packets = ring_container->total_packets;
3074 unsigned int bytes = ring_container->total_bytes;
3075 u8 itrval = ring_container->itr;
3076
	/* no packets, exit with status unchanged */
3078 if (packets == 0)
3079 return;
3080
3081 switch (itrval) {
3082 case lowest_latency:
		/* handle TSO and jumbo frames */
3084 if (bytes / packets > 8000)
3085 itrval = bulk_latency;
3086 else if ((packets < 5) && (bytes > 512))
3087 itrval = low_latency;
3088 break;
3089 case low_latency:
3090 if (bytes > 10000) {
			/* this if handles the TSO accounting */
3092 if (bytes / packets > 8000)
3093 itrval = bulk_latency;
3094 else if ((packets < 10) || ((bytes / packets) > 1200))
3095 itrval = bulk_latency;
3096 else if ((packets > 35))
3097 itrval = lowest_latency;
3098 } else if (bytes / packets > 2000) {
3099 itrval = bulk_latency;
3100 } else if (packets <= 2 && bytes < 512) {
3101 itrval = lowest_latency;
3102 }
3103 break;
3104 case bulk_latency:
3105 if (bytes > 25000) {
3106 if (packets > 35)
3107 itrval = low_latency;
3108 } else if (bytes < 1500) {
3109 itrval = low_latency;
3110 }
3111 break;
3112 }
3113
	/* clear work counters since we have the values we need */
3115 ring_container->total_bytes = 0;
3116 ring_container->total_packets = 0;
3117
	/* write updated itr to ring container */
3119 ring_container->itr = itrval;
3120}
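
/* In short, igc_update_itr() is a three-state machine: a ring moves between
 * lowest_latency, low_latency and bulk_latency based on the byte and packet
 * counts of the last interrupt.  Large average frames and heavy byte counts
 * push toward bulk_latency (longer intervals); small, sparse traffic pushes
 * back toward lowest_latency.
 */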
3121
/**
 * igc_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
3127static irqreturn_t igc_intr_msi(int irq, void *data)
3128{
3129 struct igc_adapter *adapter = data;
3130 struct igc_q_vector *q_vector = adapter->q_vector[0];
3131 struct igc_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
3133 u32 icr = rd32(IGC_ICR);
3134
3135 igc_write_itr(q_vector);
3136
3137 if (icr & IGC_ICR_DRSTA)
3138 schedule_work(&adapter->reset_task);
3139
3140 if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
3142 adapter->stats.doosync++;
3143 }
3144
3145 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
3146 hw->mac.get_link_status = 1;
3147 if (!test_bit(__IGC_DOWN, &adapter->state))
3148 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3149 }
3150
3151 napi_schedule(&q_vector->napi);
3152
3153 return IRQ_HANDLED;
3154}
3155
/**
 * igc_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
3161static irqreturn_t igc_intr(int irq, void *data)
3162{
3163 struct igc_adapter *adapter = data;
3164 struct igc_q_vector *q_vector = adapter->q_vector[0];
3165 struct igc_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write
	 */
3169 u32 icr = rd32(IGC_ICR);
3170
	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
3174 if (!(icr & IGC_ICR_INT_ASSERTED))
3175 return IRQ_NONE;
3176
3177 igc_write_itr(q_vector);
3178
3179 if (icr & IGC_ICR_DRSTA)
3180 schedule_work(&adapter->reset_task);
3181
3182 if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
3184 adapter->stats.doosync++;
3185 }
3186
3187 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
3188 hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
3190 if (!test_bit(__IGC_DOWN, &adapter->state))
3191 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3192 }
3193
3194 napi_schedule(&q_vector->napi);
3195
3196 return IRQ_HANDLED;
3197}
3198
3199static void igc_set_itr(struct igc_q_vector *q_vector)
3200{
3201 struct igc_adapter *adapter = q_vector->adapter;
3202 u32 new_itr = q_vector->itr_val;
3203 u8 current_itr = 0;
3204
	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3206 switch (adapter->link_speed) {
3207 case SPEED_10:
3208 case SPEED_100:
3209 current_itr = 0;
3210 new_itr = IGC_4K_ITR;
3211 goto set_itr_now;
3212 default:
3213 break;
3214 }
3215
3216 igc_update_itr(q_vector, &q_vector->tx);
3217 igc_update_itr(q_vector, &q_vector->rx);
3218
3219 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
3220
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
3222 if (current_itr == lowest_latency &&
3223 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3224 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3225 current_itr = low_latency;
3226
3227 switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
3229 case lowest_latency:
3230 new_itr = IGC_70K_ITR;
3231 break;
3232 case low_latency:
3233 new_itr = IGC_20K_ITR;
3234 break;
3235 case bulk_latency:
3236 new_itr = IGC_4K_ITR;
3237 break;
3238 default:
3239 break;
3240 }
3241
3242set_itr_now:
3243 if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
3248 new_itr = new_itr > q_vector->itr_val ?
3249 max((new_itr * q_vector->itr_val) /
3250 (new_itr + (q_vector->itr_val >> 2)),
3251 new_itr) : new_itr;
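		/* Note: the damped value above is always smaller than
		 * new_itr whenever new_itr > itr_val, so the max() ends up
		 * selecting new_itr unchanged.
		 */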
3252
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
3258 q_vector->itr_val = new_itr;
3259 q_vector->set_itr = 1;
3260 }
3261}
3262
3263static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
3264{
3265 struct igc_adapter *adapter = q_vector->adapter;
3266 struct igc_hw *hw = &adapter->hw;
3267
3268 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
3269 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
3270 if (adapter->num_q_vectors == 1)
3271 igc_set_itr(q_vector);
3272 else
3273 igc_update_ring_itr(q_vector);
3274 }
3275
3276 if (!test_bit(__IGC_DOWN, &adapter->state)) {
3277 if (adapter->msix_entries)
3278 wr32(IGC_EIMS, q_vector->eims_value);
3279 else
3280 igc_irq_enable(adapter);
3281 }
3282}
3283
/**
 * igc_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 */
3289static int igc_poll(struct napi_struct *napi, int budget)
3290{
3291 struct igc_q_vector *q_vector = container_of(napi,
3292 struct igc_q_vector,
3293 napi);
3294 bool clean_complete = true;
3295 int work_done = 0;
3296
3297 if (q_vector->tx.ring)
3298 clean_complete = igc_clean_tx_irq(q_vector, budget);
3299
3300 if (q_vector->rx.ring) {
3301 int cleaned = igc_clean_rx_irq(q_vector, budget);
3302
3303 work_done += cleaned;
3304 if (cleaned >= budget)
3305 clean_complete = false;
3306 }
3307
	/* If all work not completed, return budget and keep polling */
3309 if (!clean_complete)
3310 return budget;
3311
	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * re-enter the polling mode
	 */
3315 if (likely(napi_complete_done(napi, work_done)))
3316 igc_ring_irq_enable(q_vector);
3317
3318 return min(work_done, budget - 1);
3319}
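
/* Returning min(work_done, budget - 1) signals the NAPI core that this
 * vector stayed under budget, which is the contract expected once
 * napi_complete_done() has been called.
 */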
3320
/**
 * igc_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: Pointer to adapter structure
 * @msix: boolean value for MSI-X capability
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */
3328static void igc_set_interrupt_capability(struct igc_adapter *adapter,
3329 bool msix)
3330{
3331 int numvecs, i;
3332 int err;
3333
3334 if (!msix)
3335 goto msi_only;
3336 adapter->flags |= IGC_FLAG_HAS_MSIX;
3337
	/* Number of supported queues. */
3339 adapter->num_rx_queues = adapter->rss_queues;
3340
3341 adapter->num_tx_queues = adapter->rss_queues;
3342
	/* start with one vector for every Rx queue */
3344 numvecs = adapter->num_rx_queues;
3345
	/* if Tx handler is separate add 1 for every Tx queue */
3347 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
3348 numvecs += adapter->num_tx_queues;
3349
	/* store the number of vectors reserved for queues */
3351 adapter->num_q_vectors = numvecs;
3352
	/* add 1 vector for link status interrupts */
3354 numvecs++;
3355
3356 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
3357 GFP_KERNEL);
3358
3359 if (!adapter->msix_entries)
3360 return;
3361
	/* populate entry values */
3363 for (i = 0; i < numvecs; i++)
3364 adapter->msix_entries[i].entry = i;
3365
3366 err = pci_enable_msix_range(adapter->pdev,
3367 adapter->msix_entries,
3368 numvecs,
3369 numvecs);
3370 if (err > 0)
3371 return;
3372
3373 kfree(adapter->msix_entries);
3374 adapter->msix_entries = NULL;
3375
3376 igc_reset_interrupt_capability(adapter);
3377
3378msi_only:
3379 adapter->flags &= ~IGC_FLAG_HAS_MSIX;
3380
3381 adapter->rss_queues = 1;
3382 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3383 adapter->num_rx_queues = 1;
3384 adapter->num_tx_queues = 1;
3385 adapter->num_q_vectors = 1;
3386 if (!pci_enable_msi(adapter->pdev))
3387 adapter->flags |= IGC_FLAG_HAS_MSI;
3388}
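
/* Example (illustrative): with 4 RSS queues and IGC_FLAG_QUEUE_PAIRS set,
 * igc_set_interrupt_capability() asks for 4 queue vectors plus 1 "other"
 * vector, i.e. 5 MSI-X vectors; with unpaired queues it would ask for
 * 4 Rx + 4 Tx + 1 = 9.
 */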
3389
3390static void igc_add_ring(struct igc_ring *ring,
3391 struct igc_ring_container *head)
3392{
3393 head->ring = ring;
3394 head->count++;
3395}
3396
/**
 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 */
3409static int igc_alloc_q_vector(struct igc_adapter *adapter,
3410 unsigned int v_count, unsigned int v_idx,
3411 unsigned int txr_count, unsigned int txr_idx,
3412 unsigned int rxr_count, unsigned int rxr_idx)
3413{
3414 struct igc_q_vector *q_vector;
3415 struct igc_ring *ring;
3416 int ring_count;
3417
	/* igc only supports 1 Tx and/or 1 Rx queue per vector */
3419 if (txr_count > 1 || rxr_count > 1)
3420 return -ENOMEM;
3421
3422 ring_count = txr_count + rxr_count;
3423
	/* allocate q_vector and rings */
3425 q_vector = adapter->q_vector[v_idx];
3426 if (!q_vector)
3427 q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
3428 GFP_KERNEL);
3429 else
3430 memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
3431 if (!q_vector)
3432 return -ENOMEM;
3433
	/* initialize NAPI */
3435 netif_napi_add(adapter->netdev, &q_vector->napi,
3436 igc_poll, 64);
3437
	/* tie q_vector and adapter together */
3439 adapter->q_vector[v_idx] = q_vector;
3440 q_vector->adapter = adapter;
3441
	/* initialize work limits */
3443 q_vector->tx.work_limit = adapter->tx_work_limit;
3444
	/* initialize ITR configuration */
3446 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
3447 q_vector->itr_val = IGC_START_ITR;
3448
	/* initialize pointer to rings */
3450 ring = q_vector->ring;
3451
	/* initialize ITR */
3453 if (rxr_count) {
		/* rx or rx/tx vector */
3455 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
3456 q_vector->itr_val = adapter->rx_itr_setting;
3457 } else {
		/* tx only vector */
3459 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
3460 q_vector->itr_val = adapter->tx_itr_setting;
3461 }
3462
3463 if (txr_count) {
		/* assign generic ring traits */
3465 ring->dev = &adapter->pdev->dev;
3466 ring->netdev = adapter->netdev;
3467
		/* configure backlink on ring */
3469 ring->q_vector = q_vector;
3470
		/* update q_vector Tx values */
3472 igc_add_ring(ring, &q_vector->tx);
3473
		/* apply Tx specific ring traits */
3475 ring->count = adapter->tx_ring_count;
3476 ring->queue_index = txr_idx;
3477
		/* assign ring to adapter */
3479 adapter->tx_ring[txr_idx] = ring;
3480
		/* push pointer to next ring */
3482 ring++;
3483 }
3484
3485 if (rxr_count) {
		/* assign generic ring traits */
3487 ring->dev = &adapter->pdev->dev;
3488 ring->netdev = adapter->netdev;
3489
		/* configure backlink on ring */
3491 ring->q_vector = q_vector;
3492
		/* update q_vector Rx values */
3494 igc_add_ring(ring, &q_vector->rx);
3495
		/* apply Rx specific ring traits */
3497 ring->count = adapter->rx_ring_count;
3498 ring->queue_index = rxr_idx;
3499
		/* assign ring to adapter */
3501 adapter->rx_ring[rxr_idx] = ring;
3502 }
3503
3504 return 0;
3505}
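
/* The q_vector above is allocated with struct_size(q_vector, ring,
 * ring_count), so the flexible ring[] array directly embeds one igc_ring
 * per queue; for a paired Tx/Rx vector, ring++ steps from the Tx ring to
 * the Rx ring within that same allocation.
 */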
3506
/**
 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 */
3514static int igc_alloc_q_vectors(struct igc_adapter *adapter)
3515{
3516 int rxr_remaining = adapter->num_rx_queues;
3517 int txr_remaining = adapter->num_tx_queues;
3518 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
3519 int q_vectors = adapter->num_q_vectors;
3520 int err;
3521
3522 if (q_vectors >= (rxr_remaining + txr_remaining)) {
3523 for (; rxr_remaining; v_idx++) {
3524 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3525 0, 0, 1, rxr_idx);
3526
3527 if (err)
3528 goto err_out;
3529
			/* update counts and index */
3531 rxr_remaining--;
3532 rxr_idx++;
3533 }
3534 }
3535
3536 for (; v_idx < q_vectors; v_idx++) {
3537 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
3538 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
3539
3540 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3541 tqpv, txr_idx, rqpv, rxr_idx);
3542
3543 if (err)
3544 goto err_out;
3545
		/* update counts and index */
3547 rxr_remaining -= rqpv;
3548 txr_remaining -= tqpv;
3549 rxr_idx++;
3550 txr_idx++;
3551 }
3552
3553 return 0;
3554
3555err_out:
3556 adapter->num_tx_queues = 0;
3557 adapter->num_rx_queues = 0;
3558 adapter->num_q_vectors = 0;
3559
3560 while (v_idx--)
3561 igc_free_q_vector(adapter, v_idx);
3562
3563 return -ENOMEM;
3564}
3565
3566
/**
 * igc_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 */
3573static void igc_cache_ring_register(struct igc_adapter *adapter)
3574{
3575 int i = 0, j = 0;
3576
3577 switch (adapter->hw.mac.type) {
3578 case igc_i225:
	/* Fall through */
3580 default:
3581 for (; i < adapter->num_rx_queues; i++)
3582 adapter->rx_ring[i]->reg_idx = i;
3583 for (; j < adapter->num_tx_queues; j++)
3584 adapter->tx_ring[j]->reg_idx = j;
3585 break;
3586 }
3587}
3588
3589
/**
 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: Pointer to adapter structure
 * @msix: boolean value for MSI-X capability
 *
 * This function initializes the interrupts and allocates all of the queue
 * vectors.
 */
3595static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
3596{
3597 struct pci_dev *pdev = adapter->pdev;
3598 int err = 0;
3599
3600 igc_set_interrupt_capability(adapter, msix);
3601
3602 err = igc_alloc_q_vectors(adapter);
3603 if (err) {
3604 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
3605 goto err_alloc_q_vectors;
3606 }
3607
3608 igc_cache_ring_register(adapter);
3609
3610 return 0;
3611
3612err_alloc_q_vectors:
3613 igc_reset_interrupt_capability(adapter);
3614 return err;
3615}
3616
3617static void igc_free_irq(struct igc_adapter *adapter)
3618{
3619 if (adapter->msix_entries) {
3620 int vector = 0, i;
3621
3622 free_irq(adapter->msix_entries[vector++].vector, adapter);
3623
3624 for (i = 0; i < adapter->num_q_vectors; i++)
3625 free_irq(adapter->msix_entries[vector++].vector,
3626 adapter->q_vector[i]);
3627 } else {
3628 free_irq(adapter->pdev->irq, adapter);
3629 }
3630}
3631
3632
/**
 * igc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
3636static void igc_irq_disable(struct igc_adapter *adapter)
3637{
3638 struct igc_hw *hw = &adapter->hw;
3639
3640 if (adapter->msix_entries) {
3641 u32 regval = rd32(IGC_EIAM);
3642
3643 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
3644 wr32(IGC_EIMC, adapter->eims_enable_mask);
3645 regval = rd32(IGC_EIAC);
3646 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
3647 }
3648
3649 wr32(IGC_IAM, 0);
3650 wr32(IGC_IMC, ~0);
3651 wrfl();
3652
3653 if (adapter->msix_entries) {
3654 int vector = 0, i;
3655
3656 synchronize_irq(adapter->msix_entries[vector++].vector);
3657
3658 for (i = 0; i < adapter->num_q_vectors; i++)
3659 synchronize_irq(adapter->msix_entries[vector++].vector);
3660 } else {
3661 synchronize_irq(adapter->pdev->irq);
3662 }
3663}
3664
3665
/**
 * igc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
3669static void igc_irq_enable(struct igc_adapter *adapter)
3670{
3671 struct igc_hw *hw = &adapter->hw;
3672
3673 if (adapter->msix_entries) {
3674 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
3675 u32 regval = rd32(IGC_EIAC);
3676
3677 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
3678 regval = rd32(IGC_EIAM);
3679 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
3680 wr32(IGC_EIMS, adapter->eims_enable_mask);
3681 wr32(IGC_IMS, ims);
3682 } else {
3683 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3684 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3685 }
3686}
3687
3688
/**
 * igc_request_irq - initialize interrupts
 * @adapter: Pointer to adapter structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */
3695static int igc_request_irq(struct igc_adapter *adapter)
3696{
3697 struct net_device *netdev = adapter->netdev;
3698 struct pci_dev *pdev = adapter->pdev;
3699 int err = 0;
3700
3701 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
3702 err = igc_request_msix(adapter);
3703 if (!err)
3704 goto request_done;
		/* fall back to MSI */
3706 igc_free_all_tx_resources(adapter);
3707 igc_free_all_rx_resources(adapter);
3708
3709 igc_clear_interrupt_scheme(adapter);
3710 err = igc_init_interrupt_scheme(adapter, false);
3711 if (err)
3712 goto request_done;
3713 igc_setup_all_tx_resources(adapter);
3714 igc_setup_all_rx_resources(adapter);
3715 igc_configure(adapter);
3716 }
3717
3718 igc_assign_vector(adapter->q_vector[0], 0);
3719
3720 if (adapter->flags & IGC_FLAG_HAS_MSI) {
3721 err = request_irq(pdev->irq, &igc_intr_msi, 0,
3722 netdev->name, adapter);
3723 if (!err)
3724 goto request_done;
3725
		/* fall back to legacy interrupts */
3727 igc_reset_interrupt_capability(adapter);
3728 adapter->flags &= ~IGC_FLAG_HAS_MSI;
3729 }
3730
3731 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
3732 netdev->name, adapter);
3733
3734 if (err)
3735 dev_err(&pdev->dev, "Error %d getting interrupt\n",
3736 err);
3737
3738request_done:
3739 return err;
3740}
3741
3742static void igc_write_itr(struct igc_q_vector *q_vector)
3743{
3744 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
3745
3746 if (!q_vector->set_itr)
3747 return;
3748
3749 if (!itr_val)
3750 itr_val = IGC_ITR_VAL_MASK;
3751
3752 itr_val |= IGC_EITR_CNT_IGNR;
3753
3754 writel(itr_val, q_vector->itr_register);
3755 q_vector->set_itr = 0;
3756}
3757
/**
 * __igc_open - Called when a network interface is made active
 * @netdev: network interface device structure
 * @resuming: boolean indicating whether the device is resuming
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
3770static int __igc_open(struct net_device *netdev, bool resuming)
3771{
3772 struct igc_adapter *adapter = netdev_priv(netdev);
3773 struct igc_hw *hw = &adapter->hw;
3774 int err = 0;
3775 int i = 0;
3776
	/* disallow open during test */
3779 if (test_bit(__IGC_TESTING, &adapter->state)) {
3780 WARN_ON(resuming);
3781 return -EBUSY;
3782 }
3783
3784 netif_carrier_off(netdev);
3785
	/* allocate transmit descriptors */
3787 err = igc_setup_all_tx_resources(adapter);
3788 if (err)
3789 goto err_setup_tx;
3790
	/* allocate receive descriptors */
3792 err = igc_setup_all_rx_resources(adapter);
3793 if (err)
3794 goto err_setup_rx;
3795
3796 igc_power_up_link(adapter);
3797
3798 igc_configure(adapter);
3799
3800 err = igc_request_irq(adapter);
3801 if (err)
3802 goto err_req_irq;
3803
	/* Notify the stack of the actual queue counts. */
3805 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
3806 if (err)
3807 goto err_set_queues;
3808
3809 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
3810 if (err)
3811 goto err_set_queues;
3812
3813 clear_bit(__IGC_DOWN, &adapter->state);
3814
3815 for (i = 0; i < adapter->num_q_vectors; i++)
3816 napi_enable(&adapter->q_vector[i]->napi);
3817
	/* Clear any pending interrupts. */
3819 rd32(IGC_ICR);
3820 igc_irq_enable(adapter);
3821
3822 netif_tx_start_all_queues(netdev);
3823
	/* start the watchdog */
3825 hw->mac.get_link_status = 1;
3826 schedule_work(&adapter->watchdog_task);
3827
3828 return IGC_SUCCESS;
3829
3830err_set_queues:
3831 igc_free_irq(adapter);
3832err_req_irq:
3833 igc_release_hw_control(adapter);
3834 igc_power_down_link(adapter);
3835 igc_free_all_rx_resources(adapter);
3836err_setup_rx:
3837 igc_free_all_tx_resources(adapter);
3838err_setup_tx:
3839 igc_reset(adapter);
3840
3841 return err;
3842}
3843
3844static int igc_open(struct net_device *netdev)
3845{
3846 return __igc_open(netdev, false);
3847}
3848
3849
/**
 * __igc_close - Disables a network interface
 * @netdev: network interface device structure
 * @suspending: boolean indicating the device is suspending
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
3860static int __igc_close(struct net_device *netdev, bool suspending)
3861{
3862 struct igc_adapter *adapter = netdev_priv(netdev);
3863
3864 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
3865
3866 igc_down(adapter);
3867
3868 igc_release_hw_control(adapter);
3869
3870 igc_free_irq(adapter);
3871
3872 igc_free_all_tx_resources(adapter);
3873 igc_free_all_rx_resources(adapter);
3874
3875 return 0;
3876}
3877
3878static int igc_close(struct net_device *netdev)
3879{
3880 if (netif_device_present(netdev) || netdev->dismantle)
3881 return __igc_close(netdev, false);
3882 return 0;
3883}
3884
3885static const struct net_device_ops igc_netdev_ops = {
3886 .ndo_open = igc_open,
3887 .ndo_stop = igc_close,
3888 .ndo_start_xmit = igc_xmit_frame,
3889 .ndo_set_mac_address = igc_set_mac,
3890 .ndo_change_mtu = igc_change_mtu,
3891 .ndo_get_stats = igc_get_stats,
3892 .ndo_fix_features = igc_fix_features,
3893 .ndo_set_features = igc_set_features,
3894 .ndo_features_check = igc_features_check,
3895};
3896
/* PCIe configuration access */
3898void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
3899{
3900 struct igc_adapter *adapter = hw->back;
3901
3902 pci_read_config_word(adapter->pdev, reg, value);
3903}
3904
3905void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
3906{
3907 struct igc_adapter *adapter = hw->back;
3908
3909 pci_write_config_word(adapter->pdev, reg, *value);
3910}
3911
3912s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
3913{
3914 struct igc_adapter *adapter = hw->back;
3915
3916 if (!pci_is_pcie(adapter->pdev))
3917 return -IGC_ERR_CONFIG;
3918
3919 pcie_capability_read_word(adapter->pdev, reg, value);
3920
3921 return IGC_SUCCESS;
3922}
3923
3924s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
3925{
3926 struct igc_adapter *adapter = hw->back;
3927
3928 if (!pci_is_pcie(adapter->pdev))
3929 return -IGC_ERR_CONFIG;
3930
3931 pcie_capability_write_word(adapter->pdev, reg, *value);
3932
3933 return IGC_SUCCESS;
3934}
3935
3936u32 igc_rd32(struct igc_hw *hw, u32 reg)
3937{
3938 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
3939 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
3940 u32 value = 0;
3941
3942 if (IGC_REMOVED(hw_addr))
3943 return ~value;
3944
3945 value = readl(&hw_addr[reg]);
3946
	/* reads should not return all F's */
3948 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
3949 struct net_device *netdev = igc->netdev;
3950
3951 hw->hw_addr = NULL;
3952 netif_device_detach(netdev);
3953 netdev_err(netdev, "PCIe link lost, device now detached\n");
3954 WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
3955 }
3956
3957 return value;
3958}
3959
3960int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
3961{
3962 struct pci_dev *pdev = adapter->pdev;
3963 struct igc_mac_info *mac = &adapter->hw.mac;
3964
3965 mac->autoneg = 0;
3966
	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch below to work
	 */
3970 if ((spd & 1) || (dplx & ~1))
3971 goto err_inval;
3972
3973 switch (spd + dplx) {
3974 case SPEED_10 + DUPLEX_HALF:
3975 mac->forced_speed_duplex = ADVERTISE_10_HALF;
3976 break;
3977 case SPEED_10 + DUPLEX_FULL:
3978 mac->forced_speed_duplex = ADVERTISE_10_FULL;
3979 break;
3980 case SPEED_100 + DUPLEX_HALF:
3981 mac->forced_speed_duplex = ADVERTISE_100_HALF;
3982 break;
3983 case SPEED_100 + DUPLEX_FULL:
3984 mac->forced_speed_duplex = ADVERTISE_100_FULL;
3985 break;
3986 case SPEED_1000 + DUPLEX_FULL:
3987 mac->autoneg = 1;
3988 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
3989 break;
3990 case SPEED_1000 + DUPLEX_HALF:
3991 goto err_inval;
3992 case SPEED_2500 + DUPLEX_FULL:
3993 mac->autoneg = 1;
3994 adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
3995 break;
3996 case SPEED_2500 + DUPLEX_HALF:
3997 default:
3998 goto err_inval;
3999 }
4000
	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4002 adapter->hw.phy.mdix = AUTO_ALL_MODES;
4003
4004 return 0;
4005
4006err_inval:
4007 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
4008 return -EINVAL;
4009}
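
/* Example (illustrative): a forced 100 Mb/s full duplex request reaches
 * igc_set_spd_dplx() as spd = SPEED_100 (100) and dplx = DUPLEX_FULL (1);
 * 100 has a clear lsb and 1 fits in a single bit, so the switch selects
 * ADVERTISE_100_FULL with autoneg left disabled.
 */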
4010
/**
 * igc_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igc_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igc_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring the adapter private structure,
 * and a hardware reset occur.
 */
4022static int igc_probe(struct pci_dev *pdev,
4023 const struct pci_device_id *ent)
4024{
4025 struct igc_adapter *adapter;
4026 struct net_device *netdev;
4027 struct igc_hw *hw;
4028 const struct igc_info *ei = igc_info_tbl[ent->driver_data];
4029 int err;
4030
4031 err = pci_enable_device_mem(pdev);
4032 if (err)
4033 return err;
4034
4035 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4036 if (!err) {
4037 err = dma_set_coherent_mask(&pdev->dev,
4038 DMA_BIT_MASK(64));
4039 } else {
4040 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4041 if (err) {
4042 err = dma_set_coherent_mask(&pdev->dev,
4043 DMA_BIT_MASK(32));
4044 if (err) {
4045 dev_err(&pdev->dev, "igc: Wrong DMA config\n");
4046 goto err_dma;
4047 }
4048 }
4049 }
4050
4051 err = pci_request_selected_regions(pdev,
4052 pci_select_bars(pdev,
4053 IORESOURCE_MEM),
4054 igc_driver_name);
4055 if (err)
4056 goto err_pci_reg;
4057
4058 pci_enable_pcie_error_reporting(pdev);
4059
4060 pci_set_master(pdev);
4061
4062 err = -ENOMEM;
4063 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
4064 IGC_MAX_TX_QUEUES);
4065
4066 if (!netdev)
4067 goto err_alloc_etherdev;
4068
4069 SET_NETDEV_DEV(netdev, &pdev->dev);
4070
4071 pci_set_drvdata(pdev, netdev);
4072 adapter = netdev_priv(netdev);
4073 adapter->netdev = netdev;
4074 adapter->pdev = pdev;
4075 hw = &adapter->hw;
4076 hw->back = adapter;
4077 adapter->port_num = hw->bus.func;
4078 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4079
4080 err = pci_save_state(pdev);
4081 if (err)
4082 goto err_ioremap;
4083
4084 err = -EIO;
4085 adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
4086 pci_resource_len(pdev, 0));
4087 if (!adapter->io_addr)
4088 goto err_ioremap;
4089
	/* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
4091 hw->hw_addr = adapter->io_addr;
4092
4093 netdev->netdev_ops = &igc_netdev_ops;
4094 igc_set_ethtool_ops(netdev);
4095 netdev->watchdog_timeo = 5 * HZ;
4096
4097 netdev->mem_start = pci_resource_start(pdev, 0);
4098 netdev->mem_end = pci_resource_end(pdev, 0);
4099
	/* PCI config space info */
4101 hw->vendor_id = pdev->vendor;
4102 hw->device_id = pdev->device;
4103 hw->revision_id = pdev->revision;
4104 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4105 hw->subsystem_device_id = pdev->subsystem_device;
4106
	/* Copy the default MAC and PHY function pointers */
4108 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
4109 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
4110
	/* Initialize skew-specific constants */
4112 err = ei->get_invariants(hw);
4113 if (err)
4114 goto err_sw_init;
4115
	/* setup the private structure */
4117 err = igc_sw_init(adapter);
4118 if (err)
4119 goto err_sw_init;
4120
	/* Add supported features to the features list */
4122 netdev->hw_features |= NETIF_F_NTUPLE;
4123
	/* MTU range: 68 - max standard jumbo frame size */
4125 netdev->min_mtu = ETH_MIN_MTU;
4126 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
4127
	/* before reading the NVM, reset the controller to put the device
	 * in a known good starting state
	 */
4131 hw->mac.ops.reset_hw(hw);
4132
4133 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
4135 if (hw->mac.ops.read_mac_addr(hw))
4136 dev_err(&pdev->dev, "NVM Read Error\n");
4137 }
4138
4139 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
4140
4141 if (!is_valid_ether_addr(netdev->dev_addr)) {
4142 dev_err(&pdev->dev, "Invalid MAC Address\n");
4143 err = -EIO;
4144 goto err_eeprom;
4145 }
4146
	/* configure RXPBSIZE and TXPBSIZE */
4148 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
4149 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
4150
4151 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
4152 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
4153
4154 INIT_WORK(&adapter->reset_task, igc_reset_task);
4155 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
4156
	/* Initialize link properties that are user changeable */
4158 adapter->fc_autoneg = true;
4159 hw->mac.autoneg = true;
4160 hw->phy.autoneg_advertised = 0xaf;
4161
4162 hw->fc.requested_mode = igc_fc_default;
4163 hw->fc.current_mode = igc_fc_default;
4164
	/* reset the hardware with the new settings */
4166 igc_reset(adapter);
4167
	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
4171 igc_get_hw_control(adapter);
4172
4173 strncpy(netdev->name, "eth%d", IFNAMSIZ);
4174 err = register_netdev(netdev);
4175 if (err)
4176 goto err_register;
4177
	/* carrier off reporting is important to ethtool even BEFORE open */
4179 netif_carrier_off(netdev);
4180
	/* keep a copy of the board-specific info */
4182 adapter->ei = *ei;
4183
	/* print the PCIe link status and MAC address */
4185 pcie_print_link_status(pdev);
4186 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
4187
4188 return 0;
4189
4190err_register:
4191 igc_release_hw_control(adapter);
4192err_eeprom:
4193 if (!igc_check_reset_block(hw))
4194 igc_reset_phy(hw);
4195err_sw_init:
4196 igc_clear_interrupt_scheme(adapter);
4197 iounmap(adapter->io_addr);
4198err_ioremap:
4199 free_netdev(netdev);
4200err_alloc_etherdev:
4201 pci_release_selected_regions(pdev,
4202 pci_select_bars(pdev, IORESOURCE_MEM));
4203err_pci_reg:
4204err_dma:
4205 pci_disable_device(pdev);
4206 return err;
4207}
4208
/**
 * igc_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igc_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
4218static void igc_remove(struct pci_dev *pdev)
4219{
4220 struct net_device *netdev = pci_get_drvdata(pdev);
4221 struct igc_adapter *adapter = netdev_priv(netdev);
4222
4223 set_bit(__IGC_DOWN, &adapter->state);
4224
4225 del_timer_sync(&adapter->watchdog_timer);
4226 del_timer_sync(&adapter->phy_info_timer);
4227
4228 cancel_work_sync(&adapter->reset_task);
4229 cancel_work_sync(&adapter->watchdog_task);
4230
	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
4234 igc_release_hw_control(adapter);
4235 unregister_netdev(netdev);
4236
4237 igc_clear_interrupt_scheme(adapter);
4238 pci_iounmap(pdev, adapter->io_addr);
4239 pci_release_mem_regions(pdev);
4240
4241 kfree(adapter->mac_table);
4242 kfree(adapter->shadow_vfta);
4243 free_netdev(netdev);
4244
4245 pci_disable_pcie_error_reporting(pdev);
4246
4247 pci_disable_device(pdev);
4248}
4249
4250static struct pci_driver igc_driver = {
4251 .name = igc_driver_name,
4252 .id_table = igc_pci_tbl,
4253 .probe = igc_probe,
4254 .remove = igc_remove,
4255};
4256
4257void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
4258 const u32 max_rss_queues)
4259{
	/* Determine if we need to pair queues. */
	/* If rss_queues > half of max_rss_queues, pair the queues in
	 * order to conserve interrupts due to limited supply.
	 */
4264 if (adapter->rss_queues > (max_rss_queues / 2))
4265 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
4266 else
4267 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
4268}
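
/* Example (illustrative): with max_rss_queues = 4, a request for 3 or 4
 * RSS queues enables queue pairing (one vector serves a Tx/Rx pair),
 * while 1 or 2 queues leave Tx and Rx on separate vectors.
 */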
4269
4270unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
4271{
4272 unsigned int max_rss_queues;
4273
	/* Determine the maximum number of RSS queues supported. */
4275 max_rss_queues = IGC_MAX_RX_QUEUES;
4276
4277 return max_rss_queues;
4278}
4279
4280static void igc_init_queue_configuration(struct igc_adapter *adapter)
4281{
4282 u32 max_rss_queues;
4283
4284 max_rss_queues = igc_get_max_rss_queues(adapter);
4285 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
4286
4287 igc_set_flag_queue_pairs(adapter, max_rss_queues);
4288}
4289
/**
 * igc_sw_init - Initialize general software structures (struct igc_adapter)
 * @adapter: board private structure to initialize
 *
 * igc_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
4298static int igc_sw_init(struct igc_adapter *adapter)
4299{
4300 struct net_device *netdev = adapter->netdev;
4301 struct pci_dev *pdev = adapter->pdev;
4302 struct igc_hw *hw = &adapter->hw;
4303
4304 int size = sizeof(struct igc_mac_addr) * hw->mac.rar_entry_count;
4305
4306 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
4307
	/* set default ring sizes */
4309 adapter->tx_ring_count = IGC_DEFAULT_TXD;
4310 adapter->rx_ring_count = IGC_DEFAULT_RXD;
4311
	/* set default ITR values */
4313 adapter->rx_itr_setting = IGC_DEFAULT_ITR;
4314 adapter->tx_itr_setting = IGC_DEFAULT_ITR;
4315
	/* set default work limits */
4317 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
4318
	/* adjust max frame to be at least the size of a standard frame */
4320 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
4321 VLAN_HLEN;
4322 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
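	/* e.g. the default 1500-byte MTU yields max_frame_size =
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522,
	 * and min_frame_size = 60 (ETH_ZLEN) + 4 (ETH_FCS_LEN) = 64 bytes.
	 */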
4323
4324 spin_lock_init(&adapter->nfc_lock);
4325 spin_lock_init(&adapter->stats64_lock);
4326
4327 adapter->flags |= IGC_FLAG_HAS_MSIX;
4328
4329 adapter->mac_table = kzalloc(size, GFP_ATOMIC);
4330 if (!adapter->mac_table)
4331 return -ENOMEM;
4332
4333 igc_init_queue_configuration(adapter);
4334
	/* This call may decrease the number of queues */
4336 if (igc_init_interrupt_scheme(adapter, true)) {
4337 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4338 return -ENOMEM;
4339 }
4340
	/* Explicitly disable IRQ since the NIC can be in any state. */
4342 igc_irq_disable(adapter);
4343
4344 set_bit(__IGC_DOWN, &adapter->state);
4345
4346 return 0;
4347}
4348
4349
/**
 * igc_reinit_queues - reinitialize the queues and interrupt scheme
 * @adapter: pointer to adapter structure
 */
4353int igc_reinit_queues(struct igc_adapter *adapter)
4354{
4355 struct net_device *netdev = adapter->netdev;
4356 struct pci_dev *pdev = adapter->pdev;
4357 int err = 0;
4358
4359 if (netif_running(netdev))
4360 igc_close(netdev);
4361
4362 igc_reset_interrupt_capability(adapter);
4363
4364 if (igc_init_interrupt_scheme(adapter, true)) {
4365 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4366 return -ENOMEM;
4367 }
4368
4369 if (netif_running(netdev))
4370 err = igc_open(netdev);
4371
4372 return err;
4373}
4374
/**
 * igc_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * Used by the hardware layer to print debugging information.
 */
4381struct net_device *igc_get_hw_dev(struct igc_hw *hw)
4382{
4383 struct igc_adapter *adapter = hw->back;
4384
4385 return adapter->netdev;
4386}
4387
/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 */
4394static int __init igc_init_module(void)
4395{
4396 int ret;
4397
4398 pr_info("%s - version %s\n",
4399 igc_driver_string, igc_driver_version);
4400
4401 pr_info("%s\n", igc_copyright);
4402
4403 ret = pci_register_driver(&igc_driver);
4404 return ret;
4405}
4406
4407module_init(igc_init_module);
4408
/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
4415static void __exit igc_exit_module(void)
4416{
4417 pci_unregister_driver(&igc_driver);
4418}
4419
4420module_exit(igc_exit_module);
4421
/* igc_main.c */