/* Intel PRO/1000 Linux driver
 * Copyright(c) 1999 - 2006 Intel Corporation.
 *
 * This driver is released under the terms of the GNU General Public
 * License (see MODULE_LICENSE("GPL") below).
 *
 * Contact Information: Linux NICS <linux.nics@intel.com>
 */

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
50 INTEL_E1000_ETHERNET_DEVICE(0x1000),
51 INTEL_E1000_ETHERNET_DEVICE(0x1001),
52 INTEL_E1000_ETHERNET_DEVICE(0x1004),
53 INTEL_E1000_ETHERNET_DEVICE(0x1008),
54 INTEL_E1000_ETHERNET_DEVICE(0x1009),
55 INTEL_E1000_ETHERNET_DEVICE(0x100C),
56 INTEL_E1000_ETHERNET_DEVICE(0x100D),
57 INTEL_E1000_ETHERNET_DEVICE(0x100E),
58 INTEL_E1000_ETHERNET_DEVICE(0x100F),
59 INTEL_E1000_ETHERNET_DEVICE(0x1010),
60 INTEL_E1000_ETHERNET_DEVICE(0x1011),
61 INTEL_E1000_ETHERNET_DEVICE(0x1012),
62 INTEL_E1000_ETHERNET_DEVICE(0x1013),
63 INTEL_E1000_ETHERNET_DEVICE(0x1014),
64 INTEL_E1000_ETHERNET_DEVICE(0x1015),
65 INTEL_E1000_ETHERNET_DEVICE(0x1016),
66 INTEL_E1000_ETHERNET_DEVICE(0x1017),
67 INTEL_E1000_ETHERNET_DEVICE(0x1018),
68 INTEL_E1000_ETHERNET_DEVICE(0x1019),
69 INTEL_E1000_ETHERNET_DEVICE(0x101A),
70 INTEL_E1000_ETHERNET_DEVICE(0x101D),
71 INTEL_E1000_ETHERNET_DEVICE(0x101E),
72 INTEL_E1000_ETHERNET_DEVICE(0x1026),
73 INTEL_E1000_ETHERNET_DEVICE(0x1027),
74 INTEL_E1000_ETHERNET_DEVICE(0x1028),
75 INTEL_E1000_ETHERNET_DEVICE(0x1075),
76 INTEL_E1000_ETHERNET_DEVICE(0x1076),
77 INTEL_E1000_ETHERNET_DEVICE(0x1077),
78 INTEL_E1000_ETHERNET_DEVICE(0x1078),
79 INTEL_E1000_ETHERNET_DEVICE(0x1079),
80 INTEL_E1000_ETHERNET_DEVICE(0x107A),
81 INTEL_E1000_ETHERNET_DEVICE(0x107B),
82 INTEL_E1000_ETHERNET_DEVICE(0x107C),
83 INTEL_E1000_ETHERNET_DEVICE(0x108A),
84 INTEL_E1000_ETHERNET_DEVICE(0x1099),
85 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86 INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
89};
90
91MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
93int e1000_up(struct e1000_adapter *adapter);
94void e1000_down(struct e1000_adapter *adapter);
95void e1000_reinit_locked(struct e1000_adapter *adapter);
96void e1000_reset(struct e1000_adapter *adapter);
97int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102 struct e1000_tx_ring *txdr);
103static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104 struct e1000_rx_ring *rxdr);
105static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106 struct e1000_tx_ring *tx_ring);
107static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108 struct e1000_rx_ring *rx_ring);
109void e1000_update_stats(struct e1000_adapter *adapter);
110
111static int e1000_init_module(void);
112static void e1000_exit_module(void);
113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114static void e1000_remove(struct pci_dev *pdev);
115static int e1000_alloc_queues(struct e1000_adapter *adapter);
116static int e1000_sw_init(struct e1000_adapter *adapter);
117static int e1000_open(struct net_device *netdev);
118static int e1000_close(struct net_device *netdev);
119static void e1000_configure_tx(struct e1000_adapter *adapter);
120static void e1000_configure_rx(struct e1000_adapter *adapter);
121static void e1000_setup_rctl(struct e1000_adapter *adapter);
122static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125 struct e1000_tx_ring *tx_ring);
126static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127 struct e1000_rx_ring *rx_ring);
128static void e1000_set_rx_mode(struct net_device *netdev);
129static void e1000_update_phy_info_task(struct work_struct *work);
130static void e1000_watchdog(struct work_struct *work);
131static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133 struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136static int e1000_set_mac(struct net_device *netdev, void *p);
137static irqreturn_t e1000_intr(int irq, void *data);
138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139 struct e1000_tx_ring *tx_ring);
140static int e1000_clean(struct napi_struct *napi, int budget);
141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142 struct e1000_rx_ring *rx_ring,
143 int *work_done, int work_to_do);
144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145 struct e1000_rx_ring *rx_ring,
146 int *work_done, int work_to_do);
147static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
148 struct e1000_rx_ring *rx_ring,
149 int cleaned_count);
150static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
151 struct e1000_rx_ring *rx_ring,
152 int cleaned_count);
153static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
154static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
155 int cmd);
156static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
157static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
158static void e1000_tx_timeout(struct net_device *dev);
159static void e1000_reset_task(struct work_struct *work);
160static void e1000_smartspeed(struct e1000_adapter *adapter);
161static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
162 struct sk_buff *skb);
163
164static bool e1000_vlan_used(struct e1000_adapter *adapter);
165static void e1000_vlan_mode(struct net_device *netdev,
166 netdev_features_t features);
167static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168 bool filter_on);
169static int e1000_vlan_rx_add_vid(struct net_device *netdev,
170 __be16 proto, u16 vid);
171static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
172 __be16 proto, u16 vid);
173static void e1000_restore_vlan(struct e1000_adapter *adapter);
174
175#ifdef CONFIG_PM
176static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
177static int e1000_resume(struct pci_dev *pdev);
178#endif
179static void e1000_shutdown(struct pci_dev *pdev);
180
181#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
184#endif
185
186#define COPYBREAK_DEFAULT 256
187static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
188module_param(copybreak, uint, 0644);
189MODULE_PARM_DESC(copybreak,
190 "Maximum size of packet that is copied to a new buffer on receive");
191
192static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
193 pci_channel_state_t state);
194static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
195static void e1000_io_resume(struct pci_dev *pdev);
196
197static const struct pci_error_handlers e1000_err_handler = {
198 .error_detected = e1000_io_error_detected,
199 .slot_reset = e1000_io_slot_reset,
200 .resume = e1000_io_resume,
201};
202
203static struct pci_driver e1000_driver = {
204 .name = e1000_driver_name,
205 .id_table = e1000_pci_tbl,
206 .probe = e1000_probe,
207 .remove = e1000_remove,
208#ifdef CONFIG_PM
209
210 .suspend = e1000_suspend,
211 .resume = e1000_resume,
212#endif
213 .shutdown = e1000_shutdown,
214 .err_handler = &e1000_err_handler
215};
216
217MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
218MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
219MODULE_LICENSE("GPL");
220MODULE_VERSION(DRV_VERSION);
221
222#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
223static int debug = -1;
224module_param(debug, int, 0);
225MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
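
/* 'debug' maps onto the standard netif message-level bitmap; the default of
 * -1 keeps the DEFAULT_MSG_ENABLE bits (driver, probe and link messages)
 * selected above, via netif_msg_init() in e1000_probe().
 */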
226
/**
 * e1000_get_hw_dev - return the net_device associated with a hw struct
 * @hw: pointer to the HW struct
 *
 * Used by the hardware layer to print debugging information.
 **/
232struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
233{
234 struct e1000_adapter *adapter = hw->back;
235 return adapter->netdev;
236}
237
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
244static int __init e1000_init_module(void)
245{
246 int ret;
247 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
248
249 pr_info("%s\n", e1000_copyright);
250
251 ret = pci_register_driver(&e1000_driver);
252 if (copybreak != COPYBREAK_DEFAULT) {
253 if (copybreak == 0)
254 pr_info("copybreak disabled\n");
255 else
256 pr_info("copybreak enabled for "
257 "packets <= %u bytes\n", copybreak);
258 }
259 return ret;
260}
261
262module_init(e1000_init_module);
263
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
270static void __exit e1000_exit_module(void)
271{
272 pci_unregister_driver(&e1000_driver);
273}
274
275module_exit(e1000_exit_module);
276
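/**
 * e1000_request_irq - register the interrupt handler
 * @adapter: board private structure
 *
 * The driver uses a single shared legacy interrupt (IRQF_SHARED) on the
 * PCI device's IRQ line.
 **/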
277static int e1000_request_irq(struct e1000_adapter *adapter)
278{
279 struct net_device *netdev = adapter->netdev;
280 irq_handler_t handler = e1000_intr;
281 int irq_flags = IRQF_SHARED;
282 int err;
283
284 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285 netdev);
286 if (err) {
287 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
288 }
289
290 return err;
291}
292
293static void e1000_free_irq(struct e1000_adapter *adapter)
294{
295 struct net_device *netdev = adapter->netdev;
296
297 free_irq(adapter->pdev->irq, netdev);
298}
299
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
304static void e1000_irq_disable(struct e1000_adapter *adapter)
305{
306 struct e1000_hw *hw = &adapter->hw;
307
308 ew32(IMC, ~0);
309 E1000_WRITE_FLUSH();
310 synchronize_irq(adapter->pdev->irq);
311}
312
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
317static void e1000_irq_enable(struct e1000_adapter *adapter)
318{
319 struct e1000_hw *hw = &adapter->hw;
320
321 ew32(IMS, IMS_ENABLE_MASK);
322 E1000_WRITE_FLUSH();
323}
324
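/* Keep the VLAN id carried in the manageability (DHCP) cookie registered in
 * the hardware VLAN filter while VLANs are in use, and drop the previous
 * manageability VLAN once it is no longer referenced.
 */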
325static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
326{
327 struct e1000_hw *hw = &adapter->hw;
328 struct net_device *netdev = adapter->netdev;
329 u16 vid = hw->mng_cookie.vlan_id;
330 u16 old_vid = adapter->mng_vlan_id;
331
332 if (!e1000_vlan_used(adapter))
333 return;
334
335 if (!test_bit(vid, adapter->active_vlans)) {
336 if (hw->mng_cookie.status &
337 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
338 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
339 adapter->mng_vlan_id = vid;
340 } else {
341 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
342 }
343 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
344 (vid != old_vid) &&
345 !test_bit(old_vid, adapter->active_vlans))
346 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
347 old_vid);
348 } else {
349 adapter->mng_vlan_id = vid;
350 }
351}
352
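/* While the driver owns the interface, disable hardware interception of ARP
 * frames by the management engine (clear MANC.ARP_EN).
 */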
353static void e1000_init_manageability(struct e1000_adapter *adapter)
354{
355 struct e1000_hw *hw = &adapter->hw;
356
357 if (adapter->en_mng_pt) {
358 u32 manc = er32(MANC);
359
360
361 manc &= ~(E1000_MANC_ARP_EN);
362
363 ew32(MANC, manc);
364 }
365}
366
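/* When the driver releases the hardware, hand ARP interception back to the
 * management engine (set MANC.ARP_EN again).
 */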
367static void e1000_release_manageability(struct e1000_adapter *adapter)
368{
369 struct e1000_hw *hw = &adapter->hw;
370
371 if (adapter->en_mng_pt) {
372 u32 manc = er32(MANC);
373
374
375 manc |= E1000_MANC_ARP_EN;
376
377 ew32(MANC, manc);
378 }
379}
380
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
385static void e1000_configure(struct e1000_adapter *adapter)
386{
387 struct net_device *netdev = adapter->netdev;
388 int i;
389
390 e1000_set_rx_mode(netdev);
391
392 e1000_restore_vlan(adapter);
393 e1000_init_manageability(adapter);
394
395 e1000_configure_tx(adapter);
396 e1000_setup_rctl(adapter);
397 e1000_configure_rx(adapter);
398
399
400
401
402 for (i = 0; i < adapter->num_rx_queues; i++) {
403 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
404 adapter->alloc_rx_buf(adapter, ring,
405 E1000_DESC_UNUSED(ring));
406 }
407}
408
409int e1000_up(struct e1000_adapter *adapter)
410{
411 struct e1000_hw *hw = &adapter->hw;
412
413
414 e1000_configure(adapter);
415
416 clear_bit(__E1000_DOWN, &adapter->flags);
417
418 napi_enable(&adapter->napi);
419
420 e1000_irq_enable(adapter);
421
422 netif_wake_queue(adapter->netdev);
423
424
425 ew32(ICS, E1000_ICS_LSC);
426 return 0;
427}
428
/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others).
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
437void e1000_power_up_phy(struct e1000_adapter *adapter)
438{
439 struct e1000_hw *hw = &adapter->hw;
440 u16 mii_reg = 0;
441
442
443 if (hw->media_type == e1000_media_type_copper) {
444
445
446
447 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
448 mii_reg &= ~MII_CR_POWER_DOWN;
449 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
450 }
451}
452
453static void e1000_power_down_phy(struct e1000_adapter *adapter)
454{
455 struct e1000_hw *hw = &adapter->hw;
456
457
458
459
460
461
462
463 if (!adapter->wol && hw->mac_type >= e1000_82540 &&
464 hw->media_type == e1000_media_type_copper) {
465 u16 mii_reg = 0;
466
467 switch (hw->mac_type) {
468 case e1000_82540:
469 case e1000_82545:
470 case e1000_82545_rev_3:
471 case e1000_82546:
472 case e1000_ce4100:
473 case e1000_82546_rev_3:
474 case e1000_82541:
475 case e1000_82541_rev_2:
476 case e1000_82547:
477 case e1000_82547_rev_2:
478 if (er32(MANC) & E1000_MANC_SMBUS_EN)
479 goto out;
480 break;
481 default:
482 goto out;
483 }
484 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
485 mii_reg |= MII_CR_POWER_DOWN;
486 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
487 msleep(1);
488 }
489out:
490 return;
491}
492
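/* Mark the adapter as going down and make sure none of the deferred work
 * items (watchdog, phy info, fifo stall and, unless a reset is in progress,
 * the reset task itself) are still running.
 */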
493static void e1000_down_and_stop(struct e1000_adapter *adapter)
494{
495 set_bit(__E1000_DOWN, &adapter->flags);
496
497 cancel_delayed_work_sync(&adapter->watchdog_task);
498
	/* Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into the situation when a work is
	 * still running after the adapter has been turned down.
	 */
505 cancel_delayed_work_sync(&adapter->phy_info_task);
506 cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
509 if (!test_bit(__E1000_RESETTING, &adapter->flags))
510 cancel_work_sync(&adapter->reset_task);
511}
512
513void e1000_down(struct e1000_adapter *adapter)
514{
515 struct e1000_hw *hw = &adapter->hw;
516 struct net_device *netdev = adapter->netdev;
517 u32 rctl, tctl;
518
519
520
521 rctl = er32(RCTL);
522 ew32(RCTL, rctl & ~E1000_RCTL_EN);
523
524
525 netif_tx_disable(netdev);
526
527
528 tctl = er32(TCTL);
529 tctl &= ~E1000_TCTL_EN;
530 ew32(TCTL, tctl);
531
532 E1000_WRITE_FLUSH();
533 msleep(10);
534
535 napi_disable(&adapter->napi);
536
537 e1000_irq_disable(adapter);
538
539
540
541
542
543 e1000_down_and_stop(adapter);
544
545 adapter->link_speed = 0;
546 adapter->link_duplex = 0;
547 netif_carrier_off(netdev);
548
549 e1000_reset(adapter);
550 e1000_clean_all_tx_rings(adapter);
551 e1000_clean_all_rx_rings(adapter);
552}
553
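/* Serialize against any other reset in progress via __E1000_RESETTING, then
 * perform a full down/up cycle so new settings take effect.
 */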
554void e1000_reinit_locked(struct e1000_adapter *adapter)
555{
556 WARN_ON(in_interrupt());
557 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
558 msleep(1);
559 e1000_down(adapter);
560 e1000_up(adapter);
561 clear_bit(__E1000_RESETTING, &adapter->flags);
562}
563
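/**
 * e1000_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer between the Tx and Rx FIFOs, recomputes the
 * flow-control watermarks, resets and re-initializes the MAC, and restores
 * the VLAN ethertype and manageability state.
 **/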
564void e1000_reset(struct e1000_adapter *adapter)
565{
566 struct e1000_hw *hw = &adapter->hw;
567 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
568 bool legacy_pba_adjust = false;
569 u16 hwm;
570
571
572
573
574
575 switch (hw->mac_type) {
576 case e1000_82542_rev2_0:
577 case e1000_82542_rev2_1:
578 case e1000_82543:
579 case e1000_82544:
580 case e1000_82540:
581 case e1000_82541:
582 case e1000_82541_rev_2:
583 legacy_pba_adjust = true;
584 pba = E1000_PBA_48K;
585 break;
586 case e1000_82545:
587 case e1000_82545_rev_3:
588 case e1000_82546:
589 case e1000_ce4100:
590 case e1000_82546_rev_3:
591 pba = E1000_PBA_48K;
592 break;
593 case e1000_82547:
594 case e1000_82547_rev_2:
595 legacy_pba_adjust = true;
596 pba = E1000_PBA_30K;
597 break;
598 case e1000_undefined:
599 case e1000_num_macs:
600 break;
601 }
602
603 if (legacy_pba_adjust) {
604 if (hw->max_frame_size > E1000_RXBUFFER_8192)
605 pba -= 8;
606
607 if (hw->mac_type == e1000_82547) {
608 adapter->tx_fifo_head = 0;
609 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
610 adapter->tx_fifo_size =
611 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
612 atomic_set(&adapter->tx_fifo_stall, 0);
613 }
614 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include Ethernet FCS because hardware appends it
		 */
633 min_tx_space = (hw->max_frame_size +
634 sizeof(struct e1000_tx_desc) -
635 ETH_FCS_LEN) * 2;
636 min_tx_space = ALIGN(min_tx_space, 1024);
637 min_tx_space >>= 10;
638
639 min_rx_space = hw->max_frame_size;
640 min_rx_space = ALIGN(min_rx_space, 1024);
641 min_rx_space >>= 10;
642
		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
647 if (tx_space < min_tx_space &&
648 ((min_tx_space - tx_space) < pba)) {
649 pba = pba - (min_tx_space - tx_space);
650
651
652 switch (hw->mac_type) {
653 case e1000_82545 ... e1000_82546_rev_3:
654 pba &= ~(E1000_PBA_8K - 1);
655 break;
656 default:
657 break;
658 }
659
660
661
662
663 if (pba < min_rx_space)
664 pba = min_rx_space;
665 }
666 }
667
668 ew32(PBA, pba);
669
	/* flow control settings:
	 * Set the high water mark to the lower of 90% of the Rx packet
	 * buffer size and the packet buffer size minus one full frame,
	 * then place the low water mark one (8-byte aligned) step below it.
	 * (PBA is in KB, hence the << 10.)
	 */
679 hwm = min(((pba << 10) * 9 / 10),
680 ((pba << 10) - hw->max_frame_size));
681
682 hw->fc_high_water = hwm & 0xFFF8;
683 hw->fc_low_water = hw->fc_high_water - 8;
684 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
685 hw->fc_send_xon = 1;
686 hw->fc = hw->original_fc;
687
688
689 e1000_reset_hw(hw);
690 if (hw->mac_type >= e1000_82544)
691 ew32(WUC, 0);
692
693 if (e1000_init_hw(hw))
694 e_dev_err("Hardware Error\n");
695 e1000_update_mng_vlan(adapter);
696
697
698 if (hw->mac_type >= e1000_82544 &&
699 hw->autoneg == 1 &&
700 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
701 u32 ctrl = er32(CTRL);
702
703
704
705
706 ctrl &= ~E1000_CTRL_SWDPIN3;
707 ew32(CTRL, ctrl);
708 }
709
710
711 ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
712
713 e1000_reset_adaptive(hw);
714 e1000_phy_get_info(hw, &adapter->phy_info);
715
716 e1000_release_manageability(adapter);
717}
718
719
720static void e1000_dump_eeprom(struct e1000_adapter *adapter)
721{
722 struct net_device *netdev = adapter->netdev;
723 struct ethtool_eeprom eeprom;
724 const struct ethtool_ops *ops = netdev->ethtool_ops;
725 u8 *data;
726 int i;
727 u16 csum_old, csum_new = 0;
728
729 eeprom.len = ops->get_eeprom_len(netdev);
730 eeprom.offset = 0;
731
732 data = kmalloc(eeprom.len, GFP_KERNEL);
733 if (!data)
734 return;
735
736 ops->get_eeprom(netdev, &eeprom, data);
737
738 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
739 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
740 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
741 csum_new += data[i] + (data[i + 1] << 8);
742 csum_new = EEPROM_SUM - csum_new;
743
744 pr_err("/*********************/\n");
745 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
746 pr_err("Calculated : 0x%04x\n", csum_new);
747
748 pr_err("Offset Values\n");
749 pr_err("======== ======\n");
750 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
751
752 pr_err("Include this output when contacting your support provider.\n");
753 pr_err("This is not a software error! Something bad happened to\n");
754 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
755 pr_err("result in further problems, possibly loss of data,\n");
756 pr_err("corruption or system hangs!\n");
757 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
758 pr_err("which is invalid and requires you to set the proper MAC\n");
759 pr_err("address manually before continuing to enable this network\n");
760 pr_err("device. Please inspect the EEPROM dump and report the\n");
761 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
762 pr_err("/*********************/\n");
763
764 kfree(data);
765}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
773static int e1000_is_need_ioport(struct pci_dev *pdev)
774{
775 switch (pdev->device) {
776 case E1000_DEV_ID_82540EM:
777 case E1000_DEV_ID_82540EM_LOM:
778 case E1000_DEV_ID_82540EP:
779 case E1000_DEV_ID_82540EP_LOM:
780 case E1000_DEV_ID_82540EP_LP:
781 case E1000_DEV_ID_82541EI:
782 case E1000_DEV_ID_82541EI_MOBILE:
783 case E1000_DEV_ID_82541ER:
784 case E1000_DEV_ID_82541ER_LOM:
785 case E1000_DEV_ID_82541GI:
786 case E1000_DEV_ID_82541GI_LF:
787 case E1000_DEV_ID_82541GI_MOBILE:
788 case E1000_DEV_ID_82544EI_COPPER:
789 case E1000_DEV_ID_82544EI_FIBER:
790 case E1000_DEV_ID_82544GC_COPPER:
791 case E1000_DEV_ID_82544GC_LOM:
792 case E1000_DEV_ID_82545EM_COPPER:
793 case E1000_DEV_ID_82545EM_FIBER:
794 case E1000_DEV_ID_82546EB_COPPER:
795 case E1000_DEV_ID_82546EB_FIBER:
796 case E1000_DEV_ID_82546EB_QUAD_COPPER:
797 return true;
798 default:
799 return false;
800 }
801}
802
803static netdev_features_t e1000_fix_features(struct net_device *netdev,
804 netdev_features_t features)
805{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable, make sure the Tx flag always mirrors the Rx flag.
	 */
809 if (features & NETIF_F_HW_VLAN_CTAG_RX)
810 features |= NETIF_F_HW_VLAN_CTAG_TX;
811 else
812 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
813
814 return features;
815}
816
817static int e1000_set_features(struct net_device *netdev,
818 netdev_features_t features)
819{
820 struct e1000_adapter *adapter = netdev_priv(netdev);
821 netdev_features_t changed = features ^ netdev->features;
822
823 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
824 e1000_vlan_mode(netdev, features);
825
826 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
827 return 0;
828
829 netdev->features = features;
830 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
831
832 if (netif_running(netdev))
833 e1000_reinit_locked(adapter);
834 else
835 e1000_reset(adapter);
836
837 return 0;
838}
839
840static const struct net_device_ops e1000_netdev_ops = {
841 .ndo_open = e1000_open,
842 .ndo_stop = e1000_close,
843 .ndo_start_xmit = e1000_xmit_frame,
844 .ndo_get_stats = e1000_get_stats,
845 .ndo_set_rx_mode = e1000_set_rx_mode,
846 .ndo_set_mac_address = e1000_set_mac,
847 .ndo_tx_timeout = e1000_tx_timeout,
848 .ndo_change_mtu = e1000_change_mtu,
849 .ndo_do_ioctl = e1000_ioctl,
850 .ndo_validate_addr = eth_validate_addr,
851 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
852 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
853#ifdef CONFIG_NET_POLL_CONTROLLER
854 .ndo_poll_controller = e1000_netpoll,
855#endif
856 .ndo_fix_features = e1000_fix_features,
857 .ndo_set_features = e1000_set_features,
858};
859

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function so
 * that both the driver and the ethtool code can set up adapter->hw with it.
 * Returns a negative error code if the MAC type cannot be identified.
 **/
871static int e1000_init_hw_struct(struct e1000_adapter *adapter,
872 struct e1000_hw *hw)
873{
874 struct pci_dev *pdev = adapter->pdev;
875
876
877 hw->vendor_id = pdev->vendor;
878 hw->device_id = pdev->device;
879 hw->subsystem_vendor_id = pdev->subsystem_vendor;
880 hw->subsystem_id = pdev->subsystem_device;
881 hw->revision_id = pdev->revision;
882
883 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
884
885 hw->max_frame_size = adapter->netdev->mtu +
886 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
887 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
888
889
890 if (e1000_set_mac_type(hw)) {
891 e_err(probe, "Unknown MAC Type\n");
892 return -EIO;
893 }
894
895 switch (hw->mac_type) {
896 default:
897 break;
898 case e1000_82541:
899 case e1000_82547:
900 case e1000_82541_rev_2:
901 case e1000_82547_rev_2:
902 hw->phy_init_script = 1;
903 break;
904 }
905
906 e1000_set_media_type(hw);
907 e1000_get_bus_info(hw);
908
909 hw->wait_autoneg_complete = false;
910 hw->tbi_compatibility_en = true;
911 hw->adaptive_ifs = true;
912
913
914
915 if (hw->media_type == e1000_media_type_copper) {
916 hw->mdix = AUTO_ALL_MODES;
917 hw->disable_polarity_correction = false;
918 hw->master_slave = E1000_MASTER_SLAVE;
919 }
920
921 return 0;
922}
923

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
935static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
936{
937 struct net_device *netdev;
938 struct e1000_adapter *adapter;
939 struct e1000_hw *hw;
940
	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
943 int i, err, pci_using_dac;
944 u16 eeprom_data = 0;
945 u16 tmp = 0;
946 u16 eeprom_apme_mask = E1000_EEPROM_APME;
947 int bars, need_ioport;
948
949
950 need_ioport = e1000_is_need_ioport(pdev);
951 if (need_ioport) {
952 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
953 err = pci_enable_device(pdev);
954 } else {
955 bars = pci_select_bars(pdev, IORESOURCE_MEM);
956 err = pci_enable_device_mem(pdev);
957 }
958 if (err)
959 return err;
960
961 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
962 if (err)
963 goto err_pci_reg;
964
965 pci_set_master(pdev);
966 err = pci_save_state(pdev);
967 if (err)
968 goto err_alloc_etherdev;
969
970 err = -ENOMEM;
971 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
972 if (!netdev)
973 goto err_alloc_etherdev;
974
975 SET_NETDEV_DEV(netdev, &pdev->dev);
976
977 pci_set_drvdata(pdev, netdev);
978 adapter = netdev_priv(netdev);
979 adapter->netdev = netdev;
980 adapter->pdev = pdev;
981 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
982 adapter->bars = bars;
983 adapter->need_ioport = need_ioport;
984
985 hw = &adapter->hw;
986 hw->back = adapter;
987
988 err = -EIO;
989 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
990 if (!hw->hw_addr)
991 goto err_ioremap;
992
993 if (adapter->need_ioport) {
994 for (i = BAR_1; i <= BAR_5; i++) {
995 if (pci_resource_len(pdev, i) == 0)
996 continue;
997 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
998 hw->io_base = pci_resource_start(pdev, i);
999 break;
1000 }
1001 }
1002 }
1003
1004
1005 err = e1000_init_hw_struct(adapter, hw);
1006 if (err)
1007 goto err_sw_init;
1008
1009
1010
1011
1012
1013 pci_using_dac = 0;
1014 if ((hw->bus_type == e1000_bus_type_pcix) &&
1015 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1016 pci_using_dac = 1;
1017 } else {
1018 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1019 if (err) {
1020 pr_err("No usable DMA config, aborting\n");
1021 goto err_dma;
1022 }
1023 }
1024
1025 netdev->netdev_ops = &e1000_netdev_ops;
1026 e1000_set_ethtool_ops(netdev);
1027 netdev->watchdog_timeo = 5 * HZ;
1028 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1029
1030 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1031
1032 adapter->bd_number = cards_found;
1033
1034
1035
1036 err = e1000_sw_init(adapter);
1037 if (err)
1038 goto err_sw_init;
1039
1040 err = -EIO;
1041 if (hw->mac_type == e1000_ce4100) {
1042 hw->ce4100_gbe_mdio_base_virt =
1043 ioremap(pci_resource_start(pdev, BAR_1),
1044 pci_resource_len(pdev, BAR_1));
1045
1046 if (!hw->ce4100_gbe_mdio_base_virt)
1047 goto err_mdio_ioremap;
1048 }
1049
1050 if (hw->mac_type >= e1000_82543) {
1051 netdev->hw_features = NETIF_F_SG |
1052 NETIF_F_HW_CSUM |
1053 NETIF_F_HW_VLAN_CTAG_RX;
1054 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1055 NETIF_F_HW_VLAN_CTAG_FILTER;
1056 }
1057
1058 if ((hw->mac_type >= e1000_82544) &&
1059 (hw->mac_type != e1000_82547))
1060 netdev->hw_features |= NETIF_F_TSO;
1061
1062 netdev->priv_flags |= IFF_SUPP_NOFCS;
1063
1064 netdev->features |= netdev->hw_features;
1065 netdev->hw_features |= (NETIF_F_RXCSUM |
1066 NETIF_F_RXALL |
1067 NETIF_F_RXFCS);
1068
1069 if (pci_using_dac) {
1070 netdev->features |= NETIF_F_HIGHDMA;
1071 netdev->vlan_features |= NETIF_F_HIGHDMA;
1072 }
1073
1074 netdev->vlan_features |= (NETIF_F_TSO |
1075 NETIF_F_HW_CSUM |
1076 NETIF_F_SG);
1077
1078 netdev->priv_flags |= IFF_UNICAST_FLT;
1079
1080 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1081
1082
1083 if (e1000_init_eeprom_params(hw)) {
1084 e_err(probe, "EEPROM initialization failed\n");
1085 goto err_eeprom;
1086 }
1087
	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */
1092 e1000_reset_hw(hw);
1093
1094
1095 if (e1000_validate_eeprom_checksum(hw) < 0) {
1096 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1097 e1000_dump_eeprom(adapter);
1098
1099
1100
1101
1102
1103
1104
1105 memset(hw->mac_addr, 0, netdev->addr_len);
1106 } else {
1107
1108 if (e1000_read_mac_addr(hw))
1109 e_err(probe, "EEPROM Read Error\n");
1110 }
1111
1112 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1113
1114 if (!is_valid_ether_addr(netdev->dev_addr))
1115 e_err(probe, "Invalid MAC Address\n");
1116
1117
1118 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1119 INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1120 e1000_82547_tx_fifo_stall_task);
1121 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1122 INIT_WORK(&adapter->reset_task, e1000_reset_task);
1123
1124 e1000_check_options(adapter);
1125
	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
1131 switch (hw->mac_type) {
1132 case e1000_82542_rev2_0:
1133 case e1000_82542_rev2_1:
1134 case e1000_82543:
1135 break;
1136 case e1000_82544:
1137 e1000_read_eeprom(hw,
1138 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1139 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1140 break;
1141 case e1000_82546:
1142 case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
1150 e1000_read_eeprom(hw,
1151 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1152 break;
1153 }
1154 if (eeprom_data & eeprom_apme_mask)
1155 adapter->eeprom_wol |= E1000_WUFC_MAG;
1156
1157
1158
1159
1160
1161 switch (pdev->device) {
1162 case E1000_DEV_ID_82546GB_PCIE:
1163 adapter->eeprom_wol = 0;
1164 break;
1165 case E1000_DEV_ID_82546EB_FIBER:
1166 case E1000_DEV_ID_82546GB_FIBER:
1167
1168
1169
1170 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1171 adapter->eeprom_wol = 0;
1172 break;
1173 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1174
1175 if (global_quad_port_a != 0)
1176 adapter->eeprom_wol = 0;
1177 else
1178 adapter->quad_port_a = true;
1179
1180 if (++global_quad_port_a == 4)
1181 global_quad_port_a = 0;
1182 break;
1183 }
1184
1185
1186 adapter->wol = adapter->eeprom_wol;
1187 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1188
1189
1190 if (hw->mac_type == e1000_ce4100) {
1191 for (i = 0; i < 32; i++) {
1192 hw->phy_addr = i;
1193 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1194 if (tmp == 0 || tmp == 0xFF) {
1195 if (i == 31)
1196 goto err_eeprom;
1197 continue;
1198 } else
1199 break;
1200 }
1201 }
1202
1203
1204 e1000_reset(adapter);
1205
1206 strcpy(netdev->name, "eth%d");
1207 err = register_netdev(netdev);
1208 if (err)
1209 goto err_register;
1210
1211 e1000_vlan_filter_on_off(adapter, false);
1212
1213
1214 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1215 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1216 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1217 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1218 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1219 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1220 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1221 netdev->dev_addr);
1222
1223
1224 netif_carrier_off(netdev);
1225
1226 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1227
1228 cards_found++;
1229 return 0;
1230
1231err_register:
1232err_eeprom:
1233 e1000_phy_hw_reset(hw);
1234
1235 if (hw->flash_address)
1236 iounmap(hw->flash_address);
1237 kfree(adapter->tx_ring);
1238 kfree(adapter->rx_ring);
1239err_dma:
1240err_sw_init:
1241err_mdio_ioremap:
1242 iounmap(hw->ce4100_gbe_mdio_base_virt);
1243 iounmap(hw->hw_addr);
1244err_ioremap:
1245 free_netdev(netdev);
1246err_alloc_etherdev:
1247 pci_release_selected_regions(pdev, bars);
1248err_pci_reg:
1249 pci_disable_device(pdev);
1250 return err;
1251}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
1262static void e1000_remove(struct pci_dev *pdev)
1263{
1264 struct net_device *netdev = pci_get_drvdata(pdev);
1265 struct e1000_adapter *adapter = netdev_priv(netdev);
1266 struct e1000_hw *hw = &adapter->hw;
1267
1268 e1000_down_and_stop(adapter);
1269 e1000_release_manageability(adapter);
1270
1271 unregister_netdev(netdev);
1272
1273 e1000_phy_hw_reset(hw);
1274
1275 kfree(adapter->tx_ring);
1276 kfree(adapter->rx_ring);
1277
1278 if (hw->mac_type == e1000_ce4100)
1279 iounmap(hw->ce4100_gbe_mdio_base_virt);
1280 iounmap(hw->hw_addr);
1281 if (hw->flash_address)
1282 iounmap(hw->flash_address);
1283 pci_release_selected_regions(pdev, adapter->bars);
1284
1285 free_netdev(netdev);
1286
1287 pci_disable_device(pdev);
1288}
1289
/**
 * e1000_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function.
 **/
1297static int e1000_sw_init(struct e1000_adapter *adapter)
1298{
1299 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1300
1301 adapter->num_tx_queues = 1;
1302 adapter->num_rx_queues = 1;
1303
1304 if (e1000_alloc_queues(adapter)) {
1305 e_err(probe, "Unable to allocate memory for queues\n");
1306 return -ENOMEM;
1307 }
1308
1309
1310 e1000_irq_disable(adapter);
1311
1312 spin_lock_init(&adapter->stats_lock);
1313
1314 set_bit(__E1000_DOWN, &adapter->flags);
1315
1316 return 0;
1317}
1318
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
1326static int e1000_alloc_queues(struct e1000_adapter *adapter)
1327{
1328 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1329 sizeof(struct e1000_tx_ring), GFP_KERNEL);
1330 if (!adapter->tx_ring)
1331 return -ENOMEM;
1332
1333 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1334 sizeof(struct e1000_rx_ring), GFP_KERNEL);
1335 if (!adapter->rx_ring) {
1336 kfree(adapter->tx_ring);
1337 return -ENOMEM;
1338 }
1339
1340 return E1000_SUCCESS;
1341}
1342
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
1355static int e1000_open(struct net_device *netdev)
1356{
1357 struct e1000_adapter *adapter = netdev_priv(netdev);
1358 struct e1000_hw *hw = &adapter->hw;
1359 int err;
1360
1361
1362 if (test_bit(__E1000_TESTING, &adapter->flags))
1363 return -EBUSY;
1364
1365 netif_carrier_off(netdev);
1366
1367
1368 err = e1000_setup_all_tx_resources(adapter);
1369 if (err)
1370 goto err_setup_tx;
1371
1372
1373 err = e1000_setup_all_rx_resources(adapter);
1374 if (err)
1375 goto err_setup_rx;
1376
1377 e1000_power_up_phy(adapter);
1378
1379 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1380 if ((hw->mng_cookie.status &
1381 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1382 e1000_update_mng_vlan(adapter);
1383 }
1384
	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
1390 e1000_configure(adapter);
1391
1392 err = e1000_request_irq(adapter);
1393 if (err)
1394 goto err_req_irq;
1395
1396
1397 clear_bit(__E1000_DOWN, &adapter->flags);
1398
1399 napi_enable(&adapter->napi);
1400
1401 e1000_irq_enable(adapter);
1402
1403 netif_start_queue(netdev);
1404
1405
1406 ew32(ICS, E1000_ICS_LSC);
1407
1408 return E1000_SUCCESS;
1409
1410err_req_irq:
1411 e1000_power_down_phy(adapter);
1412 e1000_free_all_rx_resources(adapter);
1413err_setup_rx:
1414 e1000_free_all_tx_resources(adapter);
1415err_setup_tx:
1416 e1000_reset(adapter);
1417
1418 return err;
1419}
1420
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
1432static int e1000_close(struct net_device *netdev)
1433{
1434 struct e1000_adapter *adapter = netdev_priv(netdev);
1435 struct e1000_hw *hw = &adapter->hw;
1436 int count = E1000_CHECK_RESET_COUNT;
1437
1438 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1439 usleep_range(10000, 20000);
1440
1441 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1442 e1000_down(adapter);
1443 e1000_power_down_phy(adapter);
1444 e1000_free_irq(adapter);
1445
1446 e1000_free_all_tx_resources(adapter);
1447 e1000_free_all_rx_resources(adapter);
1448
1449
1450
1451
1452 if ((hw->mng_cookie.status &
1453 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1454 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1455 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1456 adapter->mng_vlan_id);
1457 }
1458
1459 return 0;
1460}
1461
/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
1468static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1469 unsigned long len)
1470{
1471 struct e1000_hw *hw = &adapter->hw;
1472 unsigned long begin = (unsigned long)start;
1473 unsigned long end = begin + len;
1474
	/* 82545, ce4100 and 82546 parts must not have a descriptor ring
	 * that crosses a 64 KiB boundary (hardware erratum)
	 */
1478 if (hw->mac_type == e1000_82545 ||
1479 hw->mac_type == e1000_ce4100 ||
1480 hw->mac_type == e1000_82546) {
1481 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1482 }
1483
1484 return true;
1485}
1486
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
1494static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1495 struct e1000_tx_ring *txdr)
1496{
1497 struct pci_dev *pdev = adapter->pdev;
1498 int size;
1499
1500 size = sizeof(struct e1000_buffer) * txdr->count;
1501 txdr->buffer_info = vzalloc(size);
1502 if (!txdr->buffer_info)
1503 return -ENOMEM;
1504
1505
1506
1507 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1508 txdr->size = ALIGN(txdr->size, 4096);
1509
1510 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1511 GFP_KERNEL);
1512 if (!txdr->desc) {
1513setup_tx_desc_die:
1514 vfree(txdr->buffer_info);
1515 return -ENOMEM;
1516 }
1517
1518
1519 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1520 void *olddesc = txdr->desc;
1521 dma_addr_t olddma = txdr->dma;
1522 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1523 txdr->size, txdr->desc);
1524
1525 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1526 &txdr->dma, GFP_KERNEL);
1527
1528 if (!txdr->desc) {
1529 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1530 olddma);
1531 goto setup_tx_desc_die;
1532 }
1533
1534 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1535
1536 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1537 txdr->dma);
1538 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1539 olddma);
1540 e_err(probe, "Unable to allocate aligned memory "
1541 "for the transmit descriptor ring\n");
1542 vfree(txdr->buffer_info);
1543 return -ENOMEM;
1544 } else {
1545
1546 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1547 olddma);
1548 }
1549 }
1550 memset(txdr->desc, 0, txdr->size);
1551
1552 txdr->next_to_use = 0;
1553 txdr->next_to_clean = 0;
1554
1555 return 0;
1556}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
1565int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1566{
1567 int i, err = 0;
1568
1569 for (i = 0; i < adapter->num_tx_queues; i++) {
1570 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1571 if (err) {
1572 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
1574 e1000_free_tx_resources(adapter,
1575 &adapter->tx_ring[i]);
1576 break;
1577 }
1578 }
1579
1580 return err;
1581}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
1589static void e1000_configure_tx(struct e1000_adapter *adapter)
1590{
1591 u64 tdba;
1592 struct e1000_hw *hw = &adapter->hw;
1593 u32 tdlen, tctl, tipg;
1594 u32 ipgr1, ipgr2;
1595
1596
1597
1598 switch (adapter->num_tx_queues) {
1599 case 1:
1600 default:
1601 tdba = adapter->tx_ring[0].dma;
1602 tdlen = adapter->tx_ring[0].count *
1603 sizeof(struct e1000_tx_desc);
1604 ew32(TDLEN, tdlen);
1605 ew32(TDBAH, (tdba >> 32));
1606 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1607 ew32(TDT, 0);
1608 ew32(TDH, 0);
1609 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1610 E1000_TDH : E1000_82542_TDH);
1611 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1612 E1000_TDT : E1000_82542_TDT);
1613 break;
1614 }
1615
1616
1617 if ((hw->media_type == e1000_media_type_fiber ||
1618 hw->media_type == e1000_media_type_internal_serdes))
1619 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1620 else
1621 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1622
1623 switch (hw->mac_type) {
1624 case e1000_82542_rev2_0:
1625 case e1000_82542_rev2_1:
1626 tipg = DEFAULT_82542_TIPG_IPGT;
1627 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1628 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1629 break;
1630 default:
1631 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1632 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1633 break;
1634 }
1635 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1636 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1637 ew32(TIPG, tipg);
1638
1639
1640
1641 ew32(TIDV, adapter->tx_int_delay);
1642 if (hw->mac_type >= e1000_82540)
1643 ew32(TADV, adapter->tx_abs_int_delay);
1644
1645
1646
1647 tctl = er32(TCTL);
1648 tctl &= ~E1000_TCTL_CT;
1649 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1650 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1651
1652 e1000_config_collision_dist(hw);
1653
1654
1655 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1656
1657
1658 if (adapter->tx_int_delay)
1659 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1660
1661 if (hw->mac_type < e1000_82543)
1662 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1663 else
1664 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1665
1666
1667
1668
1669 if (hw->mac_type == e1000_82544 &&
1670 hw->bus_type == e1000_bus_type_pcix)
1671 adapter->pcix_82544 = true;
1672
1673 ew32(TCTL, tctl);
1674
1675}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
1684static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1685 struct e1000_rx_ring *rxdr)
1686{
1687 struct pci_dev *pdev = adapter->pdev;
1688 int size, desc_len;
1689
1690 size = sizeof(struct e1000_buffer) * rxdr->count;
1691 rxdr->buffer_info = vzalloc(size);
1692 if (!rxdr->buffer_info)
1693 return -ENOMEM;
1694
1695 desc_len = sizeof(struct e1000_rx_desc);
1696
1697
1698
1699 rxdr->size = rxdr->count * desc_len;
1700 rxdr->size = ALIGN(rxdr->size, 4096);
1701
1702 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1703 GFP_KERNEL);
1704 if (!rxdr->desc) {
1705setup_rx_desc_die:
1706 vfree(rxdr->buffer_info);
1707 return -ENOMEM;
1708 }
1709
1710
1711 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1712 void *olddesc = rxdr->desc;
1713 dma_addr_t olddma = rxdr->dma;
1714 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1715 rxdr->size, rxdr->desc);
1716
1717 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1718 &rxdr->dma, GFP_KERNEL);
1719
1720 if (!rxdr->desc) {
1721 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1722 olddma);
1723 goto setup_rx_desc_die;
1724 }
1725
1726 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1727
1728 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1729 rxdr->dma);
1730 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1731 olddma);
1732 e_err(probe, "Unable to allocate aligned memory for "
1733 "the Rx descriptor ring\n");
1734 goto setup_rx_desc_die;
1735 } else {
1736
1737 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1738 olddma);
1739 }
1740 }
1741 memset(rxdr->desc, 0, rxdr->size);
1742
1743 rxdr->next_to_clean = 0;
1744 rxdr->next_to_use = 0;
1745 rxdr->rx_skb_top = NULL;
1746
1747 return 0;
1748}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
1757int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1758{
1759 int i, err = 0;
1760
1761 for (i = 0; i < adapter->num_rx_queues; i++) {
1762 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1763 if (err) {
1764 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
1766 e1000_free_rx_resources(adapter,
1767 &adapter->rx_ring[i]);
1768 break;
1769 }
1770 }
1771
1772 return err;
1773}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
1779static void e1000_setup_rctl(struct e1000_adapter *adapter)
1780{
1781 struct e1000_hw *hw = &adapter->hw;
1782 u32 rctl;
1783
1784 rctl = er32(RCTL);
1785
1786 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1787
1788 rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1789 E1000_RCTL_RDMTS_HALF |
1790 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1791
1792 if (hw->tbi_compatibility_on == 1)
1793 rctl |= E1000_RCTL_SBP;
1794 else
1795 rctl &= ~E1000_RCTL_SBP;
1796
1797 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1798 rctl &= ~E1000_RCTL_LPE;
1799 else
1800 rctl |= E1000_RCTL_LPE;
1801
1802
1803 rctl &= ~E1000_RCTL_SZ_4096;
1804 rctl |= E1000_RCTL_BSEX;
1805 switch (adapter->rx_buffer_len) {
1806 case E1000_RXBUFFER_2048:
1807 default:
1808 rctl |= E1000_RCTL_SZ_2048;
1809 rctl &= ~E1000_RCTL_BSEX;
1810 break;
1811 case E1000_RXBUFFER_4096:
1812 rctl |= E1000_RCTL_SZ_4096;
1813 break;
1814 case E1000_RXBUFFER_8192:
1815 rctl |= E1000_RCTL_SZ_8192;
1816 break;
1817 case E1000_RXBUFFER_16384:
1818 rctl |= E1000_RCTL_SZ_16384;
1819 break;
1820 }
1821
1822
1823 if (adapter->netdev->features & NETIF_F_RXALL) {
1824
1825
1826
1827 rctl |= (E1000_RCTL_SBP |
1828 E1000_RCTL_BAM |
1829 E1000_RCTL_PMCF);
1830
1831 rctl &= ~(E1000_RCTL_VFE |
1832 E1000_RCTL_DPF |
1833 E1000_RCTL_CFIEN);
1834
1835
1836
1837 }
1838
1839 ew32(RCTL, rctl);
1840}
1841
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
1848static void e1000_configure_rx(struct e1000_adapter *adapter)
1849{
1850 u64 rdba;
1851 struct e1000_hw *hw = &adapter->hw;
1852 u32 rdlen, rctl, rxcsum;
1853
1854 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1855 rdlen = adapter->rx_ring[0].count *
1856 sizeof(struct e1000_rx_desc);
1857 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1858 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1859 } else {
1860 rdlen = adapter->rx_ring[0].count *
1861 sizeof(struct e1000_rx_desc);
1862 adapter->clean_rx = e1000_clean_rx_irq;
1863 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1864 }
1865
1866
1867 rctl = er32(RCTL);
1868 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1869
1870
1871 ew32(RDTR, adapter->rx_int_delay);
1872
1873 if (hw->mac_type >= e1000_82540) {
1874 ew32(RADV, adapter->rx_abs_int_delay);
1875 if (adapter->itr_setting != 0)
1876 ew32(ITR, 1000000000 / (adapter->itr * 256));
1877 }
1878
1879
1880
1881
1882 switch (adapter->num_rx_queues) {
1883 case 1:
1884 default:
1885 rdba = adapter->rx_ring[0].dma;
1886 ew32(RDLEN, rdlen);
1887 ew32(RDBAH, (rdba >> 32));
1888 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1889 ew32(RDT, 0);
1890 ew32(RDH, 0);
1891 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1892 E1000_RDH : E1000_82542_RDH);
1893 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1894 E1000_RDT : E1000_82542_RDT);
1895 break;
1896 }
1897
1898
1899 if (hw->mac_type >= e1000_82543) {
1900 rxcsum = er32(RXCSUM);
1901 if (adapter->rx_csum)
1902 rxcsum |= E1000_RXCSUM_TUOFL;
1903 else
1904
1905 rxcsum &= ~E1000_RXCSUM_TUOFL;
1906 ew32(RXCSUM, rxcsum);
1907 }
1908
1909
1910 ew32(RCTL, rctl | E1000_RCTL_EN);
1911}
1912
/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
1920static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1921 struct e1000_tx_ring *tx_ring)
1922{
1923 struct pci_dev *pdev = adapter->pdev;
1924
1925 e1000_clean_tx_ring(adapter, tx_ring);
1926
1927 vfree(tx_ring->buffer_info);
1928 tx_ring->buffer_info = NULL;
1929
1930 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1931 tx_ring->dma);
1932
1933 tx_ring->desc = NULL;
1934}
1935
/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
1942void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1943{
1944 int i;
1945
1946 for (i = 0; i < adapter->num_tx_queues; i++)
1947 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1948}
1949
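/* Release whatever a Tx buffer_info slot holds: unmap its DMA mapping
 * (page or single) and free the attached skb, leaving the slot reusable.
 */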
1950static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1951 struct e1000_buffer *buffer_info)
1952{
1953 if (buffer_info->dma) {
1954 if (buffer_info->mapped_as_page)
1955 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1956 buffer_info->length, DMA_TO_DEVICE);
1957 else
1958 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1959 buffer_info->length,
1960 DMA_TO_DEVICE);
1961 buffer_info->dma = 0;
1962 }
1963 if (buffer_info->skb) {
1964 dev_kfree_skb_any(buffer_info->skb);
1965 buffer_info->skb = NULL;
1966 }
1967 buffer_info->time_stamp = 0;
1968
1969}
1970
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
1976static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1977 struct e1000_tx_ring *tx_ring)
1978{
1979 struct e1000_hw *hw = &adapter->hw;
1980 struct e1000_buffer *buffer_info;
1981 unsigned long size;
1982 unsigned int i;
1983
1984
1985
1986 for (i = 0; i < tx_ring->count; i++) {
1987 buffer_info = &tx_ring->buffer_info[i];
1988 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1989 }
1990
1991 netdev_reset_queue(adapter->netdev);
1992 size = sizeof(struct e1000_buffer) * tx_ring->count;
1993 memset(tx_ring->buffer_info, 0, size);
1994
1995
1996
1997 memset(tx_ring->desc, 0, tx_ring->size);
1998
1999 tx_ring->next_to_use = 0;
2000 tx_ring->next_to_clean = 0;
2001 tx_ring->last_tx_tso = false;
2002
2003 writel(0, hw->hw_addr + tx_ring->tdh);
2004 writel(0, hw->hw_addr + tx_ring->tdt);
2005}
2006
/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
2011static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2012{
2013 int i;
2014
2015 for (i = 0; i < adapter->num_tx_queues; i++)
2016 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2017}
2018
/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
2026static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2027 struct e1000_rx_ring *rx_ring)
2028{
2029 struct pci_dev *pdev = adapter->pdev;
2030
2031 e1000_clean_rx_ring(adapter, rx_ring);
2032
2033 vfree(rx_ring->buffer_info);
2034 rx_ring->buffer_info = NULL;
2035
2036 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2037 rx_ring->dma);
2038
2039 rx_ring->desc = NULL;
2040}
2041
/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
2048void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2049{
2050 int i;
2051
2052 for (i = 0; i < adapter->num_rx_queues; i++)
2053 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2054}
2055
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
2061static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2062 struct e1000_rx_ring *rx_ring)
2063{
2064 struct e1000_hw *hw = &adapter->hw;
2065 struct e1000_buffer *buffer_info;
2066 struct pci_dev *pdev = adapter->pdev;
2067 unsigned long size;
2068 unsigned int i;
2069
2070
2071 for (i = 0; i < rx_ring->count; i++) {
2072 buffer_info = &rx_ring->buffer_info[i];
2073 if (buffer_info->dma &&
2074 adapter->clean_rx == e1000_clean_rx_irq) {
2075 dma_unmap_single(&pdev->dev, buffer_info->dma,
2076 buffer_info->length,
2077 DMA_FROM_DEVICE);
2078 } else if (buffer_info->dma &&
2079 adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2080 dma_unmap_page(&pdev->dev, buffer_info->dma,
2081 buffer_info->length,
2082 DMA_FROM_DEVICE);
2083 }
2084
2085 buffer_info->dma = 0;
2086 if (buffer_info->page) {
2087 put_page(buffer_info->page);
2088 buffer_info->page = NULL;
2089 }
2090 if (buffer_info->skb) {
2091 dev_kfree_skb(buffer_info->skb);
2092 buffer_info->skb = NULL;
2093 }
2094 }
2095
2096
2097 if (rx_ring->rx_skb_top) {
2098 dev_kfree_skb(rx_ring->rx_skb_top);
2099 rx_ring->rx_skb_top = NULL;
2100 }
2101
2102 size = sizeof(struct e1000_buffer) * rx_ring->count;
2103 memset(rx_ring->buffer_info, 0, size);
2104
2105
2106 memset(rx_ring->desc, 0, rx_ring->size);
2107
2108 rx_ring->next_to_clean = 0;
2109 rx_ring->next_to_use = 0;
2110
2111 writel(0, hw->hw_addr + rx_ring->rdh);
2112 writel(0, hw->hw_addr + rx_ring->rdt);
2113}
2114
/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
2119static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2120{
2121 int i;
2122
2123 for (i = 0; i < adapter->num_rx_queues; i++)
2124 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2125}
2126
/* The 82542 rev 2.0 part needs the receive unit held in reset and MWI
 * (memory write and invalidate) disabled while certain registers, such as
 * the receive address registers, are written.
 */
2130static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2131{
2132 struct e1000_hw *hw = &adapter->hw;
2133 struct net_device *netdev = adapter->netdev;
2134 u32 rctl;
2135
2136 e1000_pci_clear_mwi(hw);
2137
2138 rctl = er32(RCTL);
2139 rctl |= E1000_RCTL_RST;
2140 ew32(RCTL, rctl);
2141 E1000_WRITE_FLUSH();
2142 mdelay(5);
2143
2144 if (netif_running(netdev))
2145 e1000_clean_all_rx_rings(adapter);
2146}
2147
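/* Take the 82542 rev 2.0 receive unit back out of reset, restore MWI if it
 * was enabled in PCI config space, and reprogram/refill the Rx ring.
 */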
2148static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2149{
2150 struct e1000_hw *hw = &adapter->hw;
2151 struct net_device *netdev = adapter->netdev;
2152 u32 rctl;
2153
2154 rctl = er32(RCTL);
2155 rctl &= ~E1000_RCTL_RST;
2156 ew32(RCTL, rctl);
2157 E1000_WRITE_FLUSH();
2158 mdelay(5);
2159
2160 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2161 e1000_pci_set_mwi(hw);
2162
2163 if (netif_running(netdev)) {
2164
2165 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2166 e1000_configure_rx(adapter);
2167 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2168 }
2169}
2170
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
2178static int e1000_set_mac(struct net_device *netdev, void *p)
2179{
2180 struct e1000_adapter *adapter = netdev_priv(netdev);
2181 struct e1000_hw *hw = &adapter->hw;
2182 struct sockaddr *addr = p;
2183
2184 if (!is_valid_ether_addr(addr->sa_data))
2185 return -EADDRNOTAVAIL;
2186
2187
2188
2189 if (hw->mac_type == e1000_82542_rev2_0)
2190 e1000_enter_82542_rst(adapter);
2191
2192 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2193 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2194
2195 e1000_rar_set(hw, hw->mac_addr, 0);
2196
2197 if (hw->mac_type == e1000_82542_rev2_0)
2198 e1000_leave_82542_rst(adapter);
2199
2200 return 0;
2201}
2202
/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and VLAN filtering.
 **/
2212static void e1000_set_rx_mode(struct net_device *netdev)
2213{
2214 struct e1000_adapter *adapter = netdev_priv(netdev);
2215 struct e1000_hw *hw = &adapter->hw;
2216 struct netdev_hw_addr *ha;
2217 bool use_uc = false;
2218 u32 rctl;
2219 u32 hash_value;
2220 int i, rar_entries = E1000_RAR_ENTRIES;
2221 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2222 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2223
2224 if (!mcarray)
2225 return;
2226
2227
2228
2229 rctl = er32(RCTL);
2230
2231 if (netdev->flags & IFF_PROMISC) {
2232 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2233 rctl &= ~E1000_RCTL_VFE;
2234 } else {
2235 if (netdev->flags & IFF_ALLMULTI)
2236 rctl |= E1000_RCTL_MPE;
2237 else
2238 rctl &= ~E1000_RCTL_MPE;
2239
2240 if (e1000_vlan_used(adapter))
2241 rctl |= E1000_RCTL_VFE;
2242 }
2243
2244 if (netdev_uc_count(netdev) > rar_entries - 1) {
2245 rctl |= E1000_RCTL_UPE;
2246 } else if (!(netdev->flags & IFF_PROMISC)) {
2247 rctl &= ~E1000_RCTL_UPE;
2248 use_uc = true;
2249 }
2250
2251 ew32(RCTL, rctl);
2252
2253
2254
2255 if (hw->mac_type == e1000_82542_rev2_0)
2256 e1000_enter_82542_rst(adapter);
2257
2258
2259
2260
2261
2262
2263
2264
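	/* Unicast, then multicast, addresses are written into the remaining
	 * Receive Address Registers (RAR 0 holds the station MAC address);
	 * whatever does not fit falls back to the multicast hash table (MTA)
	 * filled in below.
	 */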
2265 i = 1;
2266 if (use_uc)
2267 netdev_for_each_uc_addr(ha, netdev) {
2268 if (i == rar_entries)
2269 break;
2270 e1000_rar_set(hw, ha->addr, i++);
2271 }
2272
2273 netdev_for_each_mc_addr(ha, netdev) {
2274 if (i == rar_entries) {
2275
2276 u32 hash_reg, hash_bit, mta;
2277 hash_value = e1000_hash_mc_addr(hw, ha->addr);
2278 hash_reg = (hash_value >> 5) & 0x7F;
2279 hash_bit = hash_value & 0x1F;
2280 mta = (1 << hash_bit);
2281 mcarray[hash_reg] |= mta;
2282 } else {
2283 e1000_rar_set(hw, ha->addr, i++);
2284 }
2285 }
2286
2287 for (; i < rar_entries; i++) {
2288 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2289 E1000_WRITE_FLUSH();
2290 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2291 E1000_WRITE_FLUSH();
2292 }
2293
2294
2295
2296
2297 for (i = mta_reg_count - 1; i >= 0 ; i--) {
2298
2299
2300
2301
2302
2303 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2304 }
2305 E1000_WRITE_FLUSH();
2306
2307 if (hw->mac_type == e1000_82542_rev2_0)
2308 e1000_leave_82542_rst(adapter);
2309
2310 kfree(mcarray);
2311}
2312
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information
 * from the phy.
 **/
2320static void e1000_update_phy_info_task(struct work_struct *work)
2321{
2322 struct e1000_adapter *adapter = container_of(work,
2323 struct e1000_adapter,
2324 phy_info_task.work);
2325
2326 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2327}
2328
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 **/
2333static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2334{
2335 struct e1000_adapter *adapter = container_of(work,
2336 struct e1000_adapter,
2337 fifo_stall_task.work);
2338 struct e1000_hw *hw = &adapter->hw;
2339 struct net_device *netdev = adapter->netdev;
2340 u32 tctl;
2341
2342 if (atomic_read(&adapter->tx_fifo_stall)) {
2343 if ((er32(TDT) == er32(TDH)) &&
2344 (er32(TDFT) == er32(TDFH)) &&
2345 (er32(TDFTS) == er32(TDFHS))) {
2346 tctl = er32(TCTL);
2347 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2348 ew32(TDFT, adapter->tx_head_addr);
2349 ew32(TDFH, adapter->tx_head_addr);
2350 ew32(TDFTS, adapter->tx_head_addr);
2351 ew32(TDFHS, adapter->tx_head_addr);
2352 ew32(TCTL, tctl);
2353 E1000_WRITE_FLUSH();
2354
2355 adapter->tx_fifo_head = 0;
2356 atomic_set(&adapter->tx_fifo_stall, 0);
2357 netif_wake_queue(netdev);
2358 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2359 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2360 }
2361 }
2362}
2363
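/**
 * e1000_has_link - report whether link is up
 * @adapter: board private structure
 *
 * Copper links are only polled after a link-status-change event has set
 * get_link_status; fiber and serdes links are checked directly each call.
 **/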
2364bool e1000_has_link(struct e1000_adapter *adapter)
2365{
2366 struct e1000_hw *hw = &adapter->hw;
2367 bool link_active = false;
2368
2369
2370
2371
2372
2373
2374
2375 switch (hw->media_type) {
2376 case e1000_media_type_copper:
2377 if (hw->mac_type == e1000_ce4100)
2378 hw->get_link_status = 1;
2379 if (hw->get_link_status) {
2380 e1000_check_for_link(hw);
2381 link_active = !hw->get_link_status;
2382 } else {
2383 link_active = true;
2384 }
2385 break;
2386 case e1000_media_type_fiber:
2387 e1000_check_for_link(hw);
2388 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2389 break;
2390 case e1000_media_type_internal_serdes:
2391 e1000_check_for_link(hw);
2392 link_active = hw->serdes_has_link;
2393 break;
2394 default:
2395 break;
2396 }
2397
2398 return link_active;
2399}
2400
/**
 * e1000_watchdog - work function to check link and update statistics
 * @work: work struct contained inside adapter struct
 **/
2405static void e1000_watchdog(struct work_struct *work)
2406{
2407 struct e1000_adapter *adapter = container_of(work,
2408 struct e1000_adapter,
2409 watchdog_task.work);
2410 struct e1000_hw *hw = &adapter->hw;
2411 struct net_device *netdev = adapter->netdev;
2412 struct e1000_tx_ring *txdr = adapter->tx_ring;
2413 u32 link, tctl;
2414
2415 link = e1000_has_link(adapter);
	if (netif_carrier_ok(netdev) && link)
2417 goto link_up;
2418
2419 if (link) {
2420 if (!netif_carrier_ok(netdev)) {
2421 u32 ctrl;
2422 bool txb2b = true;
2423
2424 e1000_get_speed_and_duplex(hw,
2425 &adapter->link_speed,
2426 &adapter->link_duplex);
2427
2428 ctrl = er32(CTRL);
2429 pr_info("%s NIC Link is Up %d Mbps %s, "
2430 "Flow Control: %s\n",
2431 netdev->name,
2432 adapter->link_speed,
2433 adapter->link_duplex == FULL_DUPLEX ?
2434 "Full Duplex" : "Half Duplex",
2435 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2436 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2437 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2438 E1000_CTRL_TFCE) ? "TX" : "None")));
2439
2440
2441 adapter->tx_timeout_factor = 1;
2442 switch (adapter->link_speed) {
2443 case SPEED_10:
2444 txb2b = false;
2445 adapter->tx_timeout_factor = 16;
2446 break;
2447 case SPEED_100:
2448 txb2b = false;
2449
2450 break;
2451 }
2452
2453
2454 tctl = er32(TCTL);
2455 tctl |= E1000_TCTL_EN;
2456 ew32(TCTL, tctl);
2457
2458 netif_carrier_on(netdev);
2459 if (!test_bit(__E1000_DOWN, &adapter->flags))
2460 schedule_delayed_work(&adapter->phy_info_task,
2461 2 * HZ);
2462 adapter->smartspeed = 0;
2463 }
2464 } else {
2465 if (netif_carrier_ok(netdev)) {
2466 adapter->link_speed = 0;
2467 adapter->link_duplex = 0;
2468 pr_info("%s NIC Link is Down\n",
2469 netdev->name);
2470 netif_carrier_off(netdev);
2471
2472 if (!test_bit(__E1000_DOWN, &adapter->flags))
2473 schedule_delayed_work(&adapter->phy_info_task,
2474 2 * HZ);
2475 }
2476
2477 e1000_smartspeed(adapter);
2478 }
2479
2480link_up:
2481 e1000_update_stats(adapter);
2482
2483 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2484 adapter->tpt_old = adapter->stats.tpt;
2485 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2486 adapter->colc_old = adapter->stats.colc;
2487
2488 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2489 adapter->gorcl_old = adapter->stats.gorcl;
2490 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2491 adapter->gotcl_old = adapter->stats.gotcl;
2492
2493 e1000_update_adaptive(hw);
2494
2495 if (!netif_carrier_ok(netdev)) {
2496 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2497
2498
2499
2500
2501
2502 adapter->tx_timeout_count++;
2503 schedule_work(&adapter->reset_task);
2504
2505 return;
2506 }
2507 }
2508
2509
2510 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
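		/* Simple mode for Interrupt Throttle Rate: symmetric Tx/Rx
		 * load gets roughly 2000 ints/s, fully asymmetric load gets
		 * 8000, everything else lands in between.
		 */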
2515 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2516 u32 dif = (adapter->gotcl > adapter->gorcl ?
2517 adapter->gotcl - adapter->gorcl :
2518 adapter->gorcl - adapter->gotcl) / 10000;
2519 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2520
2521 ew32(ITR, 1000000000 / (itr * 256));
2522 }
2523
2524
2525 ew32(ICS, E1000_ICS_RXDMT0);
2526
2527
2528 adapter->detect_tx_hung = true;
2529
2530
2531 if (!test_bit(__E1000_DOWN, &adapter->flags))
2532 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2533}
2534
2535enum latency_range {
2536 lowest_latency = 0,
2537 low_latency = 1,
2538 bulk_latency = 2,
2539 latency_invalid = 255
2540};
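/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on the packet and byte counts seen during
 * the last interrupt; per-interrupt computation gives faster updates and a
 * more accurate ITR for the current traffic pattern.  The thresholds below
 * trade response time against bulk throughput.
 */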
2559static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2560 u16 itr_setting, int packets, int bytes)
2561{
2562 unsigned int retval = itr_setting;
2563 struct e1000_hw *hw = &adapter->hw;
2564
2565 if (unlikely(hw->mac_type < e1000_82540))
2566 goto update_itr_done;
2567
2568 if (packets == 0)
2569 goto update_itr_done;
2570
2571 switch (itr_setting) {
2572 case lowest_latency:
2573
2574 if (bytes/packets > 8000)
2575 retval = bulk_latency;
2576 else if ((packets < 5) && (bytes > 512))
2577 retval = low_latency;
2578 break;
2579 case low_latency:
2580 if (bytes > 10000) {
2581
2582 if (bytes/packets > 8000)
2583 retval = bulk_latency;
2584 else if ((packets < 10) || ((bytes/packets) > 1200))
2585 retval = bulk_latency;
			else if (packets > 35)
2587 retval = lowest_latency;
2588 } else if (bytes/packets > 2000)
2589 retval = bulk_latency;
2590 else if (packets <= 2 && bytes < 512)
2591 retval = lowest_latency;
2592 break;
2593 case bulk_latency:
2594 if (bytes > 25000) {
2595 if (packets > 35)
2596 retval = low_latency;
2597 } else if (bytes < 6000) {
2598 retval = low_latency;
2599 }
2600 break;
2601 }
2602
2603update_itr_done:
2604 return retval;
2605}
2606
2607static void e1000_set_itr(struct e1000_adapter *adapter)
2608{
2609 struct e1000_hw *hw = &adapter->hw;
2610 u16 current_itr;
2611 u32 new_itr = adapter->itr;
2612
2613 if (unlikely(hw->mac_type < e1000_82540))
2614 return;
2615
2616
2617 if (unlikely(adapter->link_speed != SPEED_1000)) {
2618 current_itr = 0;
2619 new_itr = 4000;
2620 goto set_itr_now;
2621 }
2622
2623 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2624 adapter->total_tx_packets,
2625 adapter->total_tx_bytes);
2626
2627 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2628 adapter->tx_itr = low_latency;
2629
2630 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2631 adapter->total_rx_packets,
2632 adapter->total_rx_bytes);
2633
2634 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2635 adapter->rx_itr = low_latency;
2636
2637 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2638
2639 switch (current_itr) {
2640
2641 case lowest_latency:
2642 new_itr = 70000;
2643 break;
2644 case low_latency:
2645 new_itr = 20000;
2646 break;
2647 case bulk_latency:
2648 new_itr = 4000;
2649 break;
2650 default:
2651 break;
2652 }
2653
2654set_itr_now:
2655 if (new_itr != adapter->itr) {
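		/* Bias towards bulk traffic: when raising the rate, step up by
		 * at most a quarter of the new value at a time so increases
		 * happen gradually; decreases take effect immediately.
		 */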
2660 new_itr = new_itr > adapter->itr ?
2661 min(adapter->itr + (new_itr >> 2), new_itr) :
2662 new_itr;
2663 adapter->itr = new_itr;
2664 ew32(ITR, 1000000000 / (new_itr * 256));
2665 }
2666}
2667
2668#define E1000_TX_FLAGS_CSUM 0x00000001
2669#define E1000_TX_FLAGS_VLAN 0x00000002
2670#define E1000_TX_FLAGS_TSO 0x00000004
2671#define E1000_TX_FLAGS_IPV4 0x00000008
2672#define E1000_TX_FLAGS_NO_FCS 0x00000010
2673#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2674#define E1000_TX_FLAGS_VLAN_SHIFT 16
2675
2676static int e1000_tso(struct e1000_adapter *adapter,
2677 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2678{
2679 struct e1000_context_desc *context_desc;
2680 struct e1000_buffer *buffer_info;
2681 unsigned int i;
2682 u32 cmd_length = 0;
2683 u16 ipcse = 0, tucse, mss;
2684 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2685 int err;
2686
2687 if (skb_is_gso(skb)) {
2688 if (skb_header_cloned(skb)) {
2689 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2690 if (err)
2691 return err;
2692 }
2693
2694 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2695 mss = skb_shinfo(skb)->gso_size;
2696 if (skb->protocol == htons(ETH_P_IP)) {
2697 struct iphdr *iph = ip_hdr(skb);
2698 iph->tot_len = 0;
2699 iph->check = 0;
2700 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2701 iph->daddr, 0,
2702 IPPROTO_TCP,
2703 0);
2704 cmd_length = E1000_TXD_CMD_IP;
2705 ipcse = skb_transport_offset(skb) - 1;
2706 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2707 ipv6_hdr(skb)->payload_len = 0;
2708 tcp_hdr(skb)->check =
2709 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2710 &ipv6_hdr(skb)->daddr,
2711 0, IPPROTO_TCP, 0);
2712 ipcse = 0;
2713 }
2714 ipcss = skb_network_offset(skb);
2715 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2716 tucss = skb_transport_offset(skb);
2717 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2718 tucse = 0;
2719
2720 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2721 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2722
2723 i = tx_ring->next_to_use;
2724 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2725 buffer_info = &tx_ring->buffer_info[i];
2726
2727 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2728 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2729 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2730 context_desc->upper_setup.tcp_fields.tucss = tucss;
2731 context_desc->upper_setup.tcp_fields.tucso = tucso;
2732 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2733 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2734 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2735 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2736
2737 buffer_info->time_stamp = jiffies;
2738 buffer_info->next_to_watch = i;
2739
		if (++i == tx_ring->count)
			i = 0;
2741 tx_ring->next_to_use = i;
2742
2743 return true;
2744 }
2745 return false;
2746}
2747
2748static bool e1000_tx_csum(struct e1000_adapter *adapter,
2749 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2750{
2751 struct e1000_context_desc *context_desc;
2752 struct e1000_buffer *buffer_info;
2753 unsigned int i;
2754 u8 css;
2755 u32 cmd_len = E1000_TXD_CMD_DEXT;
2756
2757 if (skb->ip_summed != CHECKSUM_PARTIAL)
2758 return false;
2759
2760 switch (skb->protocol) {
2761 case cpu_to_be16(ETH_P_IP):
2762 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2763 cmd_len |= E1000_TXD_CMD_TCP;
2764 break;
2765 case cpu_to_be16(ETH_P_IPV6):
2766
2767 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2768 cmd_len |= E1000_TXD_CMD_TCP;
2769 break;
2770 default:
2771 if (unlikely(net_ratelimit()))
2772 e_warn(drv, "checksum_partial proto=%x!\n",
2773 skb->protocol);
2774 break;
2775 }
2776
2777 css = skb_checksum_start_offset(skb);
2778
2779 i = tx_ring->next_to_use;
2780 buffer_info = &tx_ring->buffer_info[i];
2781 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2782
2783 context_desc->lower_setup.ip_config = 0;
2784 context_desc->upper_setup.tcp_fields.tucss = css;
2785 context_desc->upper_setup.tcp_fields.tucso =
2786 css + skb->csum_offset;
2787 context_desc->upper_setup.tcp_fields.tucse = 0;
2788 context_desc->tcp_seg_setup.data = 0;
2789 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2790
2791 buffer_info->time_stamp = jiffies;
2792 buffer_info->next_to_watch = i;
2793
	if (unlikely(++i == tx_ring->count))
		i = 0;
2795 tx_ring->next_to_use = i;
2796
2797 return true;
2798}
2799
2800#define E1000_MAX_TXD_PWR 12
2801#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2802
2803static int e1000_tx_map(struct e1000_adapter *adapter,
2804 struct e1000_tx_ring *tx_ring,
2805 struct sk_buff *skb, unsigned int first,
2806 unsigned int max_per_txd, unsigned int nr_frags,
2807 unsigned int mss)
2808{
2809 struct e1000_hw *hw = &adapter->hw;
2810 struct pci_dev *pdev = adapter->pdev;
2811 struct e1000_buffer *buffer_info;
2812 unsigned int len = skb_headlen(skb);
2813 unsigned int offset = 0, size, count = 0, i;
2814 unsigned int f, bytecount, segs;
2815
2816 i = tx_ring->next_to_use;
2817
2818 while (len) {
2819 buffer_info = &tx_ring->buffer_info[i];
2820 size = min(len, max_per_txd);
2821
2822
2823
2824
2825
2826 if (!skb->data_len && tx_ring->last_tx_tso &&
2827 !skb_is_gso(skb)) {
2828 tx_ring->last_tx_tso = false;
2829 size -= 4;
2830 }
2831
2832
2833
2834
2835 if (unlikely(mss && !nr_frags && size == len && size > 8))
2836 size -= 4;
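		/* Work-around for a PCI-X controller erratum: the first
		 * descriptor of a packet must be no larger than 2015 bytes.
		 */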
2842 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2843 (size > 2015) && count == 0))
2844 size = 2015;
2845
2846
2847
2848
2849 if (unlikely(adapter->pcix_82544 &&
2850 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2851 size > 4))
2852 size -= 4;
2853
2854 buffer_info->length = size;
2855
2856 buffer_info->time_stamp = jiffies;
2857 buffer_info->mapped_as_page = false;
2858 buffer_info->dma = dma_map_single(&pdev->dev,
2859 skb->data + offset,
2860 size, DMA_TO_DEVICE);
2861 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2862 goto dma_error;
2863 buffer_info->next_to_watch = i;
2864
2865 len -= size;
2866 offset += size;
2867 count++;
2868 if (len) {
2869 i++;
2870 if (unlikely(i == tx_ring->count))
2871 i = 0;
2872 }
2873 }
2874
2875 for (f = 0; f < nr_frags; f++) {
2876 const struct skb_frag_struct *frag;
2877
2878 frag = &skb_shinfo(skb)->frags[f];
2879 len = skb_frag_size(frag);
2880 offset = 0;
2881
2882 while (len) {
2883 unsigned long bufend;
2884 i++;
2885 if (unlikely(i == tx_ring->count))
2886 i = 0;
2887
2888 buffer_info = &tx_ring->buffer_info[i];
2889 size = min(len, max_per_txd);
2890
2891
2892
2893 if (unlikely(mss && f == (nr_frags-1) &&
2894 size == len && size > 8))
2895 size -= 4;
2896
2897
2898
2899
2900 bufend = (unsigned long)
2901 page_to_phys(skb_frag_page(frag));
2902 bufend += offset + size - 1;
2903 if (unlikely(adapter->pcix_82544 &&
2904 !(bufend & 4) &&
2905 size > 4))
2906 size -= 4;
2907
2908 buffer_info->length = size;
2909 buffer_info->time_stamp = jiffies;
2910 buffer_info->mapped_as_page = true;
2911 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2912 offset, size, DMA_TO_DEVICE);
2913 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2914 goto dma_error;
2915 buffer_info->next_to_watch = i;
2916
2917 len -= size;
2918 offset += size;
2919 count++;
2920 }
2921 }
2922
2923 segs = skb_shinfo(skb)->gso_segs ?: 1;
2924
2925 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2926
2927 tx_ring->buffer_info[i].skb = skb;
2928 tx_ring->buffer_info[i].segs = segs;
2929 tx_ring->buffer_info[i].bytecount = bytecount;
2930 tx_ring->buffer_info[first].next_to_watch = i;
2931
2932 return count;
2933
2934dma_error:
2935 dev_err(&pdev->dev, "TX DMA map failed\n");
2936 buffer_info->dma = 0;
2937 if (count)
2938 count--;
2939
2940 while (count--) {
		if (i == 0)
2942 i += tx_ring->count;
2943 i--;
2944 buffer_info = &tx_ring->buffer_info[i];
2945 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2946 }
2947
2948 return 0;
2949}
2950
2951static void e1000_tx_queue(struct e1000_adapter *adapter,
2952 struct e1000_tx_ring *tx_ring, int tx_flags,
2953 int count)
2954{
2955 struct e1000_hw *hw = &adapter->hw;
2956 struct e1000_tx_desc *tx_desc = NULL;
2957 struct e1000_buffer *buffer_info;
2958 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2959 unsigned int i;
2960
2961 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2962 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2963 E1000_TXD_CMD_TSE;
2964 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2965
2966 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2967 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2968 }
2969
2970 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2971 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2972 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2973 }
2974
2975 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2976 txd_lower |= E1000_TXD_CMD_VLE;
2977 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2978 }
2979
2980 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2981 txd_lower &= ~(E1000_TXD_CMD_IFCS);
2982
2983 i = tx_ring->next_to_use;
2984
2985 while (count--) {
2986 buffer_info = &tx_ring->buffer_info[i];
2987 tx_desc = E1000_TX_DESC(*tx_ring, i);
2988 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2989 tx_desc->lower.data =
2990 cpu_to_le32(txd_lower | buffer_info->length);
2991 tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count))
			i = 0;
2993 }
2994
2995 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
2996
2997
2998 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2999 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3000
3001
3002
3003
3004
3005
3006 wmb();
3007
3008 tx_ring->next_to_use = i;
3009 writel(i, hw->hw_addr + tx_ring->tdt);
3010
3011
3012
3013 mmiowb();
3014}
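/* 82547 workaround to avoid a controller hang in half-duplex operation.
 * Avoid queuing a packet that would wrap the internal Tx FIFO by asking the
 * stack to retry later; once the FIFO drains, the pointers are reset in
 * e1000_82547_tx_fifo_stall_task() and transmission resumes.
 */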
3024#define E1000_FIFO_HDR 0x10
3025#define E1000_82547_PAD_LEN 0x3E0
3026
3027static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3028 struct sk_buff *skb)
3029{
3030 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3031 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3032
3033 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3034
3035 if (adapter->link_duplex != HALF_DUPLEX)
3036 goto no_fifo_stall_required;
3037
3038 if (atomic_read(&adapter->tx_fifo_stall))
3039 return 1;
3040
3041 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3042 atomic_set(&adapter->tx_fifo_stall, 1);
3043 return 1;
3044 }
3045
3046no_fifo_stall_required:
3047 adapter->tx_fifo_head += skb_fifo_len;
3048 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3049 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3050 return 0;
3051}
3052
3053static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3054{
3055 struct e1000_adapter *adapter = netdev_priv(netdev);
3056 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3057
3058 netif_stop_queue(netdev);
3059
3060
3061
3062
3063 smp_mb();
3064
3065
3066
3067
3068 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3069 return -EBUSY;
3070
3071
3072 netif_start_queue(netdev);
3073 ++adapter->restart_queue;
3074 return 0;
3075}
3076
3077static int e1000_maybe_stop_tx(struct net_device *netdev,
3078 struct e1000_tx_ring *tx_ring, int size)
3079{
3080 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3081 return 0;
3082 return __e1000_maybe_stop_tx(netdev, size);
3083}
3084
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
3086static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3087 struct net_device *netdev)
3088{
3089 struct e1000_adapter *adapter = netdev_priv(netdev);
3090 struct e1000_hw *hw = &adapter->hw;
3091 struct e1000_tx_ring *tx_ring;
3092 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3093 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3094 unsigned int tx_flags = 0;
3095 unsigned int len = skb_headlen(skb);
3096 unsigned int nr_frags;
3097 unsigned int mss;
3098 int count = 0;
3099 int tso;
3100 unsigned int f;
3101
3102
3103
3104
3105
3106
3107 tx_ring = adapter->tx_ring;
3108
3109 if (unlikely(skb->len <= 0)) {
3110 dev_kfree_skb_any(skb);
3111 return NETDEV_TX_OK;
3112 }
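	/* On PCI/PCI-X hardware, frames shorter than ETH_ZLEN can be
	 * corrupted while the hardware pads them, so pad short frames
	 * manually in software first.
	 */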
3118 if (skb->len < ETH_ZLEN) {
3119 if (skb_pad(skb, ETH_ZLEN - skb->len))
3120 return NETDEV_TX_OK;
3121 skb->len = ETH_ZLEN;
3122 skb_set_tail_pointer(skb, ETH_ZLEN);
3123 }
3124
3125 mss = skb_shinfo(skb)->gso_size;
3126
3127
3128
3129
3130
3131
3132
3133 if (mss) {
3134 u8 hdr_len;
3135 max_per_txd = min(mss << 2, max_per_txd);
3136 max_txd_pwr = fls(max_per_txd) - 1;
3137
3138 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3139 if (skb->data_len && hdr_len == len) {
3140 switch (hw->mac_type) {
3141 unsigned int pull_size;
3142 case e1000_82544:
3143
3144
3145
3146
3147
3148
3149
3150 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3151 & 4)
3152 break;
3153
3154 pull_size = min((unsigned int)4, skb->data_len);
3155 if (!__pskb_pull_tail(skb, pull_size)) {
3156 e_err(drv, "__pskb_pull_tail "
3157 "failed.\n");
3158 dev_kfree_skb_any(skb);
3159 return NETDEV_TX_OK;
3160 }
3161 len = skb_headlen(skb);
3162 break;
3163 default:
3164
3165 break;
3166 }
3167 }
3168 }
3169
3170
3171 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3172 count++;
3173 count++;
3174
3175
3176 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3177 count++;
3178
3179 count += TXD_USE_COUNT(len, max_txd_pwr);
3180
3181 if (adapter->pcix_82544)
3182 count++;
3183
3184
3185
3186
3187 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3188 (len > 2015)))
3189 count++;
3190
3191 nr_frags = skb_shinfo(skb)->nr_frags;
3192 for (f = 0; f < nr_frags; f++)
3193 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3194 max_txd_pwr);
3195 if (adapter->pcix_82544)
3196 count += nr_frags;
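	/* Need: count descriptors plus two more to keep the tail from
	 * touching the head; otherwise tell the stack to try again later.
	 */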
3201 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3202 return NETDEV_TX_BUSY;
3203
3204 if (unlikely((hw->mac_type == e1000_82547) &&
3205 (e1000_82547_fifo_workaround(adapter, skb)))) {
3206 netif_stop_queue(netdev);
3207 if (!test_bit(__E1000_DOWN, &adapter->flags))
3208 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3209 return NETDEV_TX_BUSY;
3210 }
3211
3212 if (vlan_tx_tag_present(skb)) {
3213 tx_flags |= E1000_TX_FLAGS_VLAN;
3214 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3215 }
3216
3217 first = tx_ring->next_to_use;
3218
3219 tso = e1000_tso(adapter, tx_ring, skb);
3220 if (tso < 0) {
3221 dev_kfree_skb_any(skb);
3222 return NETDEV_TX_OK;
3223 }
3224
3225 if (likely(tso)) {
3226 if (likely(hw->mac_type != e1000_82544))
3227 tx_ring->last_tx_tso = true;
3228 tx_flags |= E1000_TX_FLAGS_TSO;
3229 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3230 tx_flags |= E1000_TX_FLAGS_CSUM;
3231
3232 if (likely(skb->protocol == htons(ETH_P_IP)))
3233 tx_flags |= E1000_TX_FLAGS_IPV4;
3234
3235 if (unlikely(skb->no_fcs))
3236 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3237
3238 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3239 nr_frags, mss);
3240
3241 if (count) {
3242 netdev_sent_queue(netdev, skb->len);
3243 skb_tx_timestamp(skb);
3244
3245 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3246
3247 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3248
3249 } else {
3250 dev_kfree_skb_any(skb);
3251 tx_ring->buffer_info[first].time_stamp = 0;
3252 tx_ring->next_to_use = first;
3253 }
3254
3255 return NETDEV_TX_OK;
3256}
3257
3258#define NUM_REGS 38
3259static void e1000_regdump(struct e1000_adapter *adapter)
3260{
3261 struct e1000_hw *hw = &adapter->hw;
3262 u32 regs[NUM_REGS];
3263 u32 *regs_buff = regs;
3264 int i = 0;
3265
3266 static const char * const reg_name[] = {
3267 "CTRL", "STATUS",
3268 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3269 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3270 "TIDV", "TXDCTL", "TADV", "TARC0",
3271 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3272 "TXDCTL1", "TARC1",
3273 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3274 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3275 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3276 };
3277
3278 regs_buff[0] = er32(CTRL);
3279 regs_buff[1] = er32(STATUS);
3280
3281 regs_buff[2] = er32(RCTL);
3282 regs_buff[3] = er32(RDLEN);
3283 regs_buff[4] = er32(RDH);
3284 regs_buff[5] = er32(RDT);
3285 regs_buff[6] = er32(RDTR);
3286
3287 regs_buff[7] = er32(TCTL);
3288 regs_buff[8] = er32(TDBAL);
3289 regs_buff[9] = er32(TDBAH);
3290 regs_buff[10] = er32(TDLEN);
3291 regs_buff[11] = er32(TDH);
3292 regs_buff[12] = er32(TDT);
3293 regs_buff[13] = er32(TIDV);
3294 regs_buff[14] = er32(TXDCTL);
3295 regs_buff[15] = er32(TADV);
3296 regs_buff[16] = er32(TARC0);
3297
3298 regs_buff[17] = er32(TDBAL1);
3299 regs_buff[18] = er32(TDBAH1);
3300 regs_buff[19] = er32(TDLEN1);
3301 regs_buff[20] = er32(TDH1);
3302 regs_buff[21] = er32(TDT1);
3303 regs_buff[22] = er32(TXDCTL1);
3304 regs_buff[23] = er32(TARC1);
3305 regs_buff[24] = er32(CTRL_EXT);
3306 regs_buff[25] = er32(ERT);
3307 regs_buff[26] = er32(RDBAL0);
3308 regs_buff[27] = er32(RDBAH0);
3309 regs_buff[28] = er32(TDFH);
3310 regs_buff[29] = er32(TDFT);
3311 regs_buff[30] = er32(TDFHS);
3312 regs_buff[31] = er32(TDFTS);
3313 regs_buff[32] = er32(TDFPC);
3314 regs_buff[33] = er32(RDFH);
3315 regs_buff[34] = er32(RDFT);
3316 regs_buff[35] = er32(RDFHS);
3317 regs_buff[36] = er32(RDFTS);
3318 regs_buff[37] = er32(RDFPC);
3319
3320 pr_info("Register dump\n");
3321 for (i = 0; i < NUM_REGS; i++)
3322 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3323}
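/**
 * e1000_dump - print registers, Tx ring and Rx ring for debugging
 * @adapter: board private structure
 */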
3328static void e1000_dump(struct e1000_adapter *adapter)
3329{
3330
3331 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3332 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3333 int i;
3334
3335 if (!netif_msg_hw(adapter))
3336 return;
3337
3338
3339 e1000_regdump(adapter);
3340
3341
3342 pr_info("TX Desc ring0 dump\n");
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3372 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3373
3374 if (!netif_msg_tx_done(adapter))
3375 goto rx_ring_summary;
3376
3377 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3378 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3379 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3380 struct my_u { __le64 a; __le64 b; };
3381 struct my_u *u = (struct my_u *)tx_desc;
3382 const char *type;
3383
3384 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3385 type = "NTC/U";
3386 else if (i == tx_ring->next_to_use)
3387 type = "NTU";
3388 else if (i == tx_ring->next_to_clean)
3389 type = "NTC";
3390 else
3391 type = "";
3392
3393 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3394 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3395 le64_to_cpu(u->a), le64_to_cpu(u->b),
3396 (u64)buffer_info->dma, buffer_info->length,
3397 buffer_info->next_to_watch,
3398 (u64)buffer_info->time_stamp, buffer_info->skb, type);
3399 }
3400
3401rx_ring_summary:
3402
3403 pr_info("\nRX Desc ring dump\n");
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
3415
3416 if (!netif_msg_rx_status(adapter))
3417 goto exit;
3418
3419 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3420 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3421 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3422 struct my_u { __le64 a; __le64 b; };
3423 struct my_u *u = (struct my_u *)rx_desc;
3424 const char *type;
3425
3426 if (i == rx_ring->next_to_use)
3427 type = "NTU";
3428 else if (i == rx_ring->next_to_clean)
3429 type = "NTC";
3430 else
3431 type = "";
3432
3433 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3434 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3435 (u64)buffer_info->dma, buffer_info->skb, type);
3436 }
3437
3438
3439
3440 pr_info("Rx descriptor cache in 64bit format\n");
3441 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3442 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3443 i,
3444 readl(adapter->hw.hw_addr + i+4),
3445 readl(adapter->hw.hw_addr + i),
3446 readl(adapter->hw.hw_addr + i+12),
3447 readl(adapter->hw.hw_addr + i+8));
3448 }
3449
3450 pr_info("Tx descriptor cache in 64bit format\n");
3451 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3452 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3453 i,
3454 readl(adapter->hw.hw_addr + i+4),
3455 readl(adapter->hw.hw_addr + i),
3456 readl(adapter->hw.hw_addr + i+12),
3457 readl(adapter->hw.hw_addr + i+8));
3458 }
3459exit:
3460 return;
3461}
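/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 */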
3467static void e1000_tx_timeout(struct net_device *netdev)
3468{
3469 struct e1000_adapter *adapter = netdev_priv(netdev);
3470
3471
3472 adapter->tx_timeout_count++;
3473 schedule_work(&adapter->reset_task);
3474}
3475
3476static void e1000_reset_task(struct work_struct *work)
3477{
3478 struct e1000_adapter *adapter =
3479 container_of(work, struct e1000_adapter, reset_task);
3480
3481 e_err(drv, "Reset adapter\n");
3482 e1000_reinit_locked(adapter);
3483}
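/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the watchdog.
 */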
3492static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3493{
3494
3495 return &netdev->stats;
3496}
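/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */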
3505static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3506{
3507 struct e1000_adapter *adapter = netdev_priv(netdev);
3508 struct e1000_hw *hw = &adapter->hw;
3509 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3510
3511 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3512 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3513 e_err(probe, "Invalid MTU setting\n");
3514 return -EINVAL;
3515 }
3516
3517
3518 switch (hw->mac_type) {
3519 case e1000_undefined ... e1000_82542_rev2_1:
3520 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3521 e_err(probe, "Jumbo Frames not supported.\n");
3522 return -EINVAL;
3523 }
3524 break;
3525 default:
3526
3527 break;
3528 }
3529
3530 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3531 msleep(1);
3532
3533 hw->max_frame_size = max_frame;
3534 if (netif_running(netdev))
3535 e1000_down(adapter);
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545 if (max_frame <= E1000_RXBUFFER_2048)
3546 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3547 else
3548#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3549 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3550#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3551 adapter->rx_buffer_len = PAGE_SIZE;
3552#endif
3553
3554
3555 if (!hw->tbi_compatibility_on &&
3556 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3557 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3558 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3559
3560 pr_info("%s changing MTU from %d to %d\n",
3561 netdev->name, netdev->mtu, new_mtu);
3562 netdev->mtu = new_mtu;
3563
3564 if (netif_running(netdev))
3565 e1000_up(adapter);
3566 else
3567 e1000_reset(adapter);
3568
3569 clear_bit(__E1000_RESETTING, &adapter->flags);
3570
3571 return 0;
3572}
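/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 */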
3578void e1000_update_stats(struct e1000_adapter *adapter)
3579{
3580 struct net_device *netdev = adapter->netdev;
3581 struct e1000_hw *hw = &adapter->hw;
3582 struct pci_dev *pdev = adapter->pdev;
3583 unsigned long flags;
3584 u16 phy_tmp;
3585
3586#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3587
3588
3589
3590
3591 if (adapter->link_speed == 0)
3592 return;
3593 if (pci_channel_offline(pdev))
3594 return;
3595
3596 spin_lock_irqsave(&adapter->stats_lock, flags);
3597
3598
3599
3600
3601
3602
3603 adapter->stats.crcerrs += er32(CRCERRS);
3604 adapter->stats.gprc += er32(GPRC);
3605 adapter->stats.gorcl += er32(GORCL);
3606 adapter->stats.gorch += er32(GORCH);
3607 adapter->stats.bprc += er32(BPRC);
3608 adapter->stats.mprc += er32(MPRC);
3609 adapter->stats.roc += er32(ROC);
3610
3611 adapter->stats.prc64 += er32(PRC64);
3612 adapter->stats.prc127 += er32(PRC127);
3613 adapter->stats.prc255 += er32(PRC255);
3614 adapter->stats.prc511 += er32(PRC511);
3615 adapter->stats.prc1023 += er32(PRC1023);
3616 adapter->stats.prc1522 += er32(PRC1522);
3617
3618 adapter->stats.symerrs += er32(SYMERRS);
3619 adapter->stats.mpc += er32(MPC);
3620 adapter->stats.scc += er32(SCC);
3621 adapter->stats.ecol += er32(ECOL);
3622 adapter->stats.mcc += er32(MCC);
3623 adapter->stats.latecol += er32(LATECOL);
3624 adapter->stats.dc += er32(DC);
3625 adapter->stats.sec += er32(SEC);
3626 adapter->stats.rlec += er32(RLEC);
3627 adapter->stats.xonrxc += er32(XONRXC);
3628 adapter->stats.xontxc += er32(XONTXC);
3629 adapter->stats.xoffrxc += er32(XOFFRXC);
3630 adapter->stats.xofftxc += er32(XOFFTXC);
3631 adapter->stats.fcruc += er32(FCRUC);
3632 adapter->stats.gptc += er32(GPTC);
3633 adapter->stats.gotcl += er32(GOTCL);
3634 adapter->stats.gotch += er32(GOTCH);
3635 adapter->stats.rnbc += er32(RNBC);
3636 adapter->stats.ruc += er32(RUC);
3637 adapter->stats.rfc += er32(RFC);
3638 adapter->stats.rjc += er32(RJC);
3639 adapter->stats.torl += er32(TORL);
3640 adapter->stats.torh += er32(TORH);
3641 adapter->stats.totl += er32(TOTL);
3642 adapter->stats.toth += er32(TOTH);
3643 adapter->stats.tpr += er32(TPR);
3644
3645 adapter->stats.ptc64 += er32(PTC64);
3646 adapter->stats.ptc127 += er32(PTC127);
3647 adapter->stats.ptc255 += er32(PTC255);
3648 adapter->stats.ptc511 += er32(PTC511);
3649 adapter->stats.ptc1023 += er32(PTC1023);
3650 adapter->stats.ptc1522 += er32(PTC1522);
3651
3652 adapter->stats.mptc += er32(MPTC);
3653 adapter->stats.bptc += er32(BPTC);
3654
3655
3656
3657 hw->tx_packet_delta = er32(TPT);
3658 adapter->stats.tpt += hw->tx_packet_delta;
3659 hw->collision_delta = er32(COLC);
3660 adapter->stats.colc += hw->collision_delta;
3661
3662 if (hw->mac_type >= e1000_82543) {
3663 adapter->stats.algnerrc += er32(ALGNERRC);
3664 adapter->stats.rxerrc += er32(RXERRC);
3665 adapter->stats.tncrs += er32(TNCRS);
3666 adapter->stats.cexterr += er32(CEXTERR);
3667 adapter->stats.tsctc += er32(TSCTC);
3668 adapter->stats.tsctfc += er32(TSCTFC);
3669 }
3670
3671
3672 netdev->stats.multicast = adapter->stats.mprc;
3673 netdev->stats.collisions = adapter->stats.colc;
3674
3675
3676
3677
3678
3679
3680 netdev->stats.rx_errors = adapter->stats.rxerrc +
3681 adapter->stats.crcerrs + adapter->stats.algnerrc +
3682 adapter->stats.ruc + adapter->stats.roc +
3683 adapter->stats.cexterr;
3684 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3685 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3686 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3687 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3688 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3689
3690
3691 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3692 netdev->stats.tx_errors = adapter->stats.txerrc;
3693 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3694 netdev->stats.tx_window_errors = adapter->stats.latecol;
3695 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3696 if (hw->bad_tx_carr_stats_fd &&
3697 adapter->link_duplex == FULL_DUPLEX) {
3698 netdev->stats.tx_carrier_errors = 0;
3699 adapter->stats.tncrs = 0;
3700 }
3701
3702
3703
3704
3705 if (hw->media_type == e1000_media_type_copper) {
3706 if ((adapter->link_speed == SPEED_1000) &&
3707 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3708 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3709 adapter->phy_stats.idle_errors += phy_tmp;
3710 }
3711
3712 if ((hw->mac_type <= e1000_82546) &&
3713 (hw->phy_type == e1000_phy_m88) &&
3714 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3715 adapter->phy_stats.receive_errors += phy_tmp;
3716 }
3717
3718
3719 if (hw->has_smbus) {
3720 adapter->stats.mgptc += er32(MGTPTC);
3721 adapter->stats.mgprc += er32(MGTPRC);
3722 adapter->stats.mgpdc += er32(MGTPDC);
3723 }
3724
3725 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3726}
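/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */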
3733static irqreturn_t e1000_intr(int irq, void *data)
3734{
3735 struct net_device *netdev = data;
3736 struct e1000_adapter *adapter = netdev_priv(netdev);
3737 struct e1000_hw *hw = &adapter->hw;
3738 u32 icr = er32(ICR);
3739
	if (unlikely(!icr))
3741 return IRQ_NONE;
3742
3743
3744
3745
3746
3747 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3748 return IRQ_HANDLED;
3749
3750 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3751 hw->get_link_status = 1;
3752
3753 if (!test_bit(__E1000_DOWN, &adapter->flags))
3754 schedule_delayed_work(&adapter->watchdog_task, 1);
3755 }
3756
3757
3758 ew32(IMC, ~0);
3759 E1000_WRITE_FLUSH();
3760
3761 if (likely(napi_schedule_prep(&adapter->napi))) {
3762 adapter->total_tx_bytes = 0;
3763 adapter->total_tx_packets = 0;
3764 adapter->total_rx_bytes = 0;
3765 adapter->total_rx_packets = 0;
3766 __napi_schedule(&adapter->napi);
3767 } else {
3768
3769
3770
3771 if (!test_bit(__E1000_DOWN, &adapter->flags))
3772 e1000_irq_enable(adapter);
3773 }
3774
3775 return IRQ_HANDLED;
3776}
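/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: napi struct contained inside adapter struct
 * @budget: amount of work the driver is allowed to do this pass
 */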
3782static int e1000_clean(struct napi_struct *napi, int budget)
3783{
3784 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3785 napi);
3786 int tx_clean_complete = 0, work_done = 0;
3787
3788 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3789
3790 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3791
3792 if (!tx_clean_complete)
3793 work_done = budget;
3794
3795
3796 if (work_done < budget) {
3797 if (likely(adapter->itr_setting & 3))
3798 e1000_set_itr(adapter);
3799 napi_complete(napi);
3800 if (!test_bit(__E1000_DOWN, &adapter->flags))
3801 e1000_irq_enable(adapter);
3802 }
3803
3804 return work_done;
3805}
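/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
 */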
3811static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3812 struct e1000_tx_ring *tx_ring)
3813{
3814 struct e1000_hw *hw = &adapter->hw;
3815 struct net_device *netdev = adapter->netdev;
3816 struct e1000_tx_desc *tx_desc, *eop_desc;
3817 struct e1000_buffer *buffer_info;
3818 unsigned int i, eop;
3819 unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3821 unsigned int bytes_compl = 0, pkts_compl = 0;
3822
3823 i = tx_ring->next_to_clean;
3824 eop = tx_ring->buffer_info[i].next_to_watch;
3825 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3826
3827 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3828 (count < tx_ring->count)) {
3829 bool cleaned = false;
3830 rmb();
3831 for ( ; !cleaned; count++) {
3832 tx_desc = E1000_TX_DESC(*tx_ring, i);
3833 buffer_info = &tx_ring->buffer_info[i];
3834 cleaned = (i == eop);
3835
3836 if (cleaned) {
3837 total_tx_packets += buffer_info->segs;
3838 total_tx_bytes += buffer_info->bytecount;
3839 if (buffer_info->skb) {
3840 bytes_compl += buffer_info->skb->len;
3841 pkts_compl++;
3842 }
3843
3844 }
3845 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3846 tx_desc->upper.data = 0;
3847
			if (unlikely(++i == tx_ring->count))
				i = 0;
3849 }
3850
3851 eop = tx_ring->buffer_info[i].next_to_watch;
3852 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3853 }
3854
3855 tx_ring->next_to_clean = i;
3856
3857 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3858
3859#define TX_WAKE_THRESHOLD 32
3860 if (unlikely(count && netif_carrier_ok(netdev) &&
3861 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3862
3863
3864
3865 smp_mb();
3866
3867 if (netif_queue_stopped(netdev) &&
3868 !(test_bit(__E1000_DOWN, &adapter->flags))) {
3869 netif_wake_queue(netdev);
3870 ++adapter->restart_queue;
3871 }
3872 }
3873
3874 if (adapter->detect_tx_hung) {
3875
3876
3877
3878 adapter->detect_tx_hung = false;
3879 if (tx_ring->buffer_info[eop].time_stamp &&
3880 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3881 (adapter->tx_timeout_factor * HZ)) &&
3882 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3883
3884
3885 e_err(drv, "Detected Tx Unit Hang\n"
3886 " Tx Queue <%lu>\n"
3887 " TDH <%x>\n"
3888 " TDT <%x>\n"
3889 " next_to_use <%x>\n"
3890 " next_to_clean <%x>\n"
3891 "buffer_info[next_to_clean]\n"
3892 " time_stamp <%lx>\n"
3893 " next_to_watch <%x>\n"
3894 " jiffies <%lx>\n"
3895 " next_to_watch.status <%x>\n",
3896 (unsigned long)(tx_ring - adapter->tx_ring),
3897 readl(hw->hw_addr + tx_ring->tdh),
3898 readl(hw->hw_addr + tx_ring->tdt),
3899 tx_ring->next_to_use,
3900 tx_ring->next_to_clean,
3901 tx_ring->buffer_info[eop].time_stamp,
3902 eop,
3903 jiffies,
3904 eop_desc->upper.fields.status);
3905 e1000_dump(adapter);
3906 netif_stop_queue(netdev);
3907 }
3908 }
3909 adapter->total_tx_bytes += total_tx_bytes;
3910 adapter->total_tx_packets += total_tx_packets;
3911 netdev->stats.tx_bytes += total_tx_bytes;
3912 netdev->stats.tx_packets += total_tx_packets;
3913 return count < tx_ring->count;
3914}
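/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 */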
3923static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3924 u32 csum, struct sk_buff *skb)
3925{
3926 struct e1000_hw *hw = &adapter->hw;
3927 u16 status = (u16)status_err;
3928 u8 errors = (u8)(status_err >> 24);
3929
3930 skb_checksum_none_assert(skb);
3931
3932
	if (unlikely(hw->mac_type < e1000_82543))
		return;

	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
3936
3937 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3938
3939 adapter->hw_csum_err++;
3940 return;
3941 }
3942
3943 if (!(status & E1000_RXD_STAT_TCPCS))
3944 return;
3945
3946
3947 if (likely(status & E1000_RXD_STAT_TCPCS)) {
3948
3949 skb->ip_summed = CHECKSUM_UNNECESSARY;
3950 }
3951 adapter->hw_csum_good++;
3952}
3953
3954
3955
3956
3957static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3958 u16 length)
3959{
3960 bi->page = NULL;
3961 skb->len += length;
3962 skb->data_len += length;
3963 skb->truesize += PAGE_SIZE;
3964}
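/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to the stack
 */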
3973static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3974 __le16 vlan, struct sk_buff *skb)
3975{
3976 skb->protocol = eth_type_trans(skb, adapter->netdev);
3977
3978 if (status & E1000_RXD_STAT_VP) {
3979 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3980
3981 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
3982 }
3983 napi_gro_receive(&adapter->napi, skb);
3984}
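/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 */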
3996static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3997 struct e1000_rx_ring *rx_ring,
3998 int *work_done, int work_to_do)
3999{
4000 struct e1000_hw *hw = &adapter->hw;
4001 struct net_device *netdev = adapter->netdev;
4002 struct pci_dev *pdev = adapter->pdev;
4003 struct e1000_rx_desc *rx_desc, *next_rxd;
4004 struct e1000_buffer *buffer_info, *next_buffer;
4005 unsigned long irq_flags;
4006 u32 length;
4007 unsigned int i;
4008 int cleaned_count = 0;
4009 bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4011
4012 i = rx_ring->next_to_clean;
4013 rx_desc = E1000_RX_DESC(*rx_ring, i);
4014 buffer_info = &rx_ring->buffer_info[i];
4015
4016 while (rx_desc->status & E1000_RXD_STAT_DD) {
4017 struct sk_buff *skb;
4018 u8 status;
4019
4020 if (*work_done >= work_to_do)
4021 break;
4022 (*work_done)++;
4023 rmb();
4024
4025 status = rx_desc->status;
4026 skb = buffer_info->skb;
4027 buffer_info->skb = NULL;
4028
		if (++i == rx_ring->count)
			i = 0;
4030 next_rxd = E1000_RX_DESC(*rx_ring, i);
4031 prefetch(next_rxd);
4032
4033 next_buffer = &rx_ring->buffer_info[i];
4034
4035 cleaned = true;
4036 cleaned_count++;
4037 dma_unmap_page(&pdev->dev, buffer_info->dma,
4038 buffer_info->length, DMA_FROM_DEVICE);
4039 buffer_info->dma = 0;
4040
4041 length = le16_to_cpu(rx_desc->length);
4042
4043
4044 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4045 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4046 u8 *mapped;
4047 u8 last_byte;
4048
4049 mapped = page_address(buffer_info->page);
4050 last_byte = *(mapped + length - 1);
4051 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4052 last_byte)) {
4053 spin_lock_irqsave(&adapter->stats_lock,
4054 irq_flags);
4055 e1000_tbi_adjust_stats(hw, &adapter->stats,
4056 length, mapped);
4057 spin_unlock_irqrestore(&adapter->stats_lock,
4058 irq_flags);
4059 length--;
4060 } else {
4061 if (netdev->features & NETIF_F_RXALL)
4062 goto process_skb;
4063
4064 buffer_info->skb = skb;
4065
4066
4067
4068 if (rx_ring->rx_skb_top)
4069 dev_kfree_skb(rx_ring->rx_skb_top);
4070 rx_ring->rx_skb_top = NULL;
4071 goto next_desc;
4072 }
4073 }
4074
4075#define rxtop rx_ring->rx_skb_top
4076process_skb:
4077 if (!(status & E1000_RXD_STAT_EOP)) {
4078
4079 if (!rxtop) {
4080
4081 rxtop = skb;
4082 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4083 0, length);
4084 } else {
4085
4086 skb_fill_page_desc(rxtop,
4087 skb_shinfo(rxtop)->nr_frags,
4088 buffer_info->page, 0, length);
4089
4090 buffer_info->skb = skb;
4091 }
4092 e1000_consume_page(buffer_info, rxtop, length);
4093 goto next_desc;
4094 } else {
4095 if (rxtop) {
4096
4097 skb_fill_page_desc(rxtop,
4098 skb_shinfo(rxtop)->nr_frags,
4099 buffer_info->page, 0, length);
4100
4101
4102
4103 buffer_info->skb = skb;
4104 skb = rxtop;
4105 rxtop = NULL;
4106 e1000_consume_page(buffer_info, skb, length);
4107 } else {
4108
4109
4110
4111 if (length <= copybreak &&
4112 skb_tailroom(skb) >= length) {
4113 u8 *vaddr;
4114 vaddr = kmap_atomic(buffer_info->page);
4115 memcpy(skb_tail_pointer(skb), vaddr,
4116 length);
4117 kunmap_atomic(vaddr);
4118
4119
4120
4121 skb_put(skb, length);
4122 } else {
4123 skb_fill_page_desc(skb, 0,
4124 buffer_info->page, 0,
4125 length);
4126 e1000_consume_page(buffer_info, skb,
4127 length);
4128 }
4129 }
4130 }
4131
4132
4133 e1000_rx_checksum(adapter,
4134 (u32)(status) |
4135 ((u32)(rx_desc->errors) << 24),
4136 le16_to_cpu(rx_desc->csum), skb);
4137
4138 total_rx_bytes += (skb->len - 4);
4139 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4140 pskb_trim(skb, skb->len - 4);
4141 total_rx_packets++;
4142
4143
4144 if (!pskb_may_pull(skb, ETH_HLEN)) {
4145 e_err(drv, "pskb_may_pull failed.\n");
4146 dev_kfree_skb(skb);
4147 goto next_desc;
4148 }
4149
4150 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4151
4152next_desc:
4153 rx_desc->status = 0;
4154
4155
4156 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4157 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4158 cleaned_count = 0;
4159 }
4160
4161
4162 rx_desc = next_rxd;
4163 buffer_info = next_buffer;
4164 }
4165 rx_ring->next_to_clean = i;
4166
4167 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4168 if (cleaned_count)
4169 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4170
4171 adapter->total_rx_packets += total_rx_packets;
4172 adapter->total_rx_bytes += total_rx_bytes;
4173 netdev->stats.rx_bytes += total_rx_bytes;
4174 netdev->stats.rx_packets += total_rx_packets;
4175 return cleaned;
4176}
4177
4178
4179
4180
4181static void e1000_check_copybreak(struct net_device *netdev,
4182 struct e1000_buffer *buffer_info,
4183 u32 length, struct sk_buff **skb)
4184{
4185 struct sk_buff *new_skb;
4186
4187 if (length > copybreak)
4188 return;
4189
4190 new_skb = netdev_alloc_skb_ip_align(netdev, length);
4191 if (!new_skb)
4192 return;
4193
4194 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4195 (*skb)->data - NET_IP_ALIGN,
4196 length + NET_IP_ALIGN);
4197
4198 buffer_info->skb = *skb;
4199 *skb = new_skb;
4200}
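/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 */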
4209static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4210 struct e1000_rx_ring *rx_ring,
4211 int *work_done, int work_to_do)
4212{
4213 struct e1000_hw *hw = &adapter->hw;
4214 struct net_device *netdev = adapter->netdev;
4215 struct pci_dev *pdev = adapter->pdev;
4216 struct e1000_rx_desc *rx_desc, *next_rxd;
4217 struct e1000_buffer *buffer_info, *next_buffer;
4218 unsigned long flags;
4219 u32 length;
4220 unsigned int i;
4221 int cleaned_count = 0;
4222 bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4224
4225 i = rx_ring->next_to_clean;
4226 rx_desc = E1000_RX_DESC(*rx_ring, i);
4227 buffer_info = &rx_ring->buffer_info[i];
4228
4229 while (rx_desc->status & E1000_RXD_STAT_DD) {
4230 struct sk_buff *skb;
4231 u8 status;
4232
4233 if (*work_done >= work_to_do)
4234 break;
4235 (*work_done)++;
4236 rmb();
4237
4238 status = rx_desc->status;
4239 skb = buffer_info->skb;
4240 buffer_info->skb = NULL;
4241
4242 prefetch(skb->data - NET_IP_ALIGN);
4243
		if (++i == rx_ring->count)
			i = 0;
4245 next_rxd = E1000_RX_DESC(*rx_ring, i);
4246 prefetch(next_rxd);
4247
4248 next_buffer = &rx_ring->buffer_info[i];
4249
4250 cleaned = true;
4251 cleaned_count++;
4252 dma_unmap_single(&pdev->dev, buffer_info->dma,
4253 buffer_info->length, DMA_FROM_DEVICE);
4254 buffer_info->dma = 0;
4255
4256 length = le16_to_cpu(rx_desc->length);
4257
4258
4259
4260
4261
4262
4263 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4264 adapter->discarding = true;
4265
4266 if (adapter->discarding) {
4267
4268 e_dbg("Receive packet consumed multiple buffers\n");
4269
4270 buffer_info->skb = skb;
4271 if (status & E1000_RXD_STAT_EOP)
4272 adapter->discarding = false;
4273 goto next_desc;
4274 }
4275
4276 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4277 u8 last_byte = *(skb->data + length - 1);
4278 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4279 last_byte)) {
4280 spin_lock_irqsave(&adapter->stats_lock, flags);
4281 e1000_tbi_adjust_stats(hw, &adapter->stats,
4282 length, skb->data);
4283 spin_unlock_irqrestore(&adapter->stats_lock,
4284 flags);
4285 length--;
4286 } else {
4287 if (netdev->features & NETIF_F_RXALL)
4288 goto process_skb;
4289
4290 buffer_info->skb = skb;
4291 goto next_desc;
4292 }
4293 }
4294
4295process_skb:
4296 total_rx_bytes += (length - 4);
4297 total_rx_packets++;
4298
4299 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4300
4301
4302
4303 length -= 4;
4304
4305 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4306
4307 skb_put(skb, length);
4308
4309
4310 e1000_rx_checksum(adapter,
4311 (u32)(status) |
4312 ((u32)(rx_desc->errors) << 24),
4313 le16_to_cpu(rx_desc->csum), skb);
4314
4315 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4316
4317next_desc:
4318 rx_desc->status = 0;
4319
4320
4321 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4322 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4323 cleaned_count = 0;
4324 }
4325
4326
4327 rx_desc = next_rxd;
4328 buffer_info = next_buffer;
4329 }
4330 rx_ring->next_to_clean = i;
4331
4332 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4333 if (cleaned_count)
4334 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4335
4336 adapter->total_rx_packets += total_rx_packets;
4337 adapter->total_rx_bytes += total_rx_bytes;
4338 netdev->stats.rx_bytes += total_rx_bytes;
4339 netdev->stats.rx_packets += total_rx_packets;
4340 return cleaned;
4341}
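/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 */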
4349static void
4350e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4351 struct e1000_rx_ring *rx_ring, int cleaned_count)
4352{
4353 struct net_device *netdev = adapter->netdev;
4354 struct pci_dev *pdev = adapter->pdev;
4355 struct e1000_rx_desc *rx_desc;
4356 struct e1000_buffer *buffer_info;
4357 struct sk_buff *skb;
4358 unsigned int i;
	unsigned int bufsz = 256 - 16;
4360
4361 i = rx_ring->next_to_use;
4362 buffer_info = &rx_ring->buffer_info[i];
4363
4364 while (cleaned_count--) {
4365 skb = buffer_info->skb;
4366 if (skb) {
4367 skb_trim(skb, 0);
4368 goto check_page;
4369 }
4370
4371 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4372 if (unlikely(!skb)) {
4373
4374 adapter->alloc_rx_buff_failed++;
4375 break;
4376 }
4377
4378 buffer_info->skb = skb;
4379 buffer_info->length = adapter->rx_buffer_len;
4380check_page:
4381
4382 if (!buffer_info->page) {
4383 buffer_info->page = alloc_page(GFP_ATOMIC);
4384 if (unlikely(!buffer_info->page)) {
4385 adapter->alloc_rx_buff_failed++;
4386 break;
4387 }
4388 }
4389
4390 if (!buffer_info->dma) {
4391 buffer_info->dma = dma_map_page(&pdev->dev,
4392 buffer_info->page, 0,
4393 buffer_info->length,
4394 DMA_FROM_DEVICE);
4395 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4396 put_page(buffer_info->page);
4397 dev_kfree_skb(skb);
4398 buffer_info->page = NULL;
4399 buffer_info->skb = NULL;
4400 buffer_info->dma = 0;
4401 adapter->alloc_rx_buff_failed++;
4402 break;
4403 }
4404 }
4405
4406 rx_desc = E1000_RX_DESC(*rx_ring, i);
4407 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4408
4409 if (unlikely(++i == rx_ring->count))
4410 i = 0;
4411 buffer_info = &rx_ring->buffer_info[i];
4412 }
4413
4414 if (likely(rx_ring->next_to_use != i)) {
4415 rx_ring->next_to_use = i;
4416 if (unlikely(i-- == 0))
4417 i = (rx_ring->count - 1);
4418
4419
4420
4421
4422
4423
4424 wmb();
4425 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4426 }
4427}
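/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 */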
4433static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4434 struct e1000_rx_ring *rx_ring,
4435 int cleaned_count)
4436{
4437 struct e1000_hw *hw = &adapter->hw;
4438 struct net_device *netdev = adapter->netdev;
4439 struct pci_dev *pdev = adapter->pdev;
4440 struct e1000_rx_desc *rx_desc;
4441 struct e1000_buffer *buffer_info;
4442 struct sk_buff *skb;
4443 unsigned int i;
4444 unsigned int bufsz = adapter->rx_buffer_len;
4445
4446 i = rx_ring->next_to_use;
4447 buffer_info = &rx_ring->buffer_info[i];
4448
4449 while (cleaned_count--) {
4450 skb = buffer_info->skb;
4451 if (skb) {
4452 skb_trim(skb, 0);
4453 goto map_skb;
4454 }
4455
4456 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4457 if (unlikely(!skb)) {
4458
4459 adapter->alloc_rx_buff_failed++;
4460 break;
4461 }
4462
4463
4464 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4465 struct sk_buff *oldskb = skb;
4466 e_err(rx_err, "skb align check failed: %u bytes at "
4467 "%p\n", bufsz, skb->data);
4468
4469 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4470
4471 if (!skb) {
4472 dev_kfree_skb(oldskb);
4473 adapter->alloc_rx_buff_failed++;
4474 break;
4475 }
4476
4477 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4478
4479 dev_kfree_skb(skb);
4480 dev_kfree_skb(oldskb);
4481 adapter->alloc_rx_buff_failed++;
4482 break;
4483 }
4484
4485
4486 dev_kfree_skb(oldskb);
4487 }
4488 buffer_info->skb = skb;
4489 buffer_info->length = adapter->rx_buffer_len;
4490map_skb:
4491 buffer_info->dma = dma_map_single(&pdev->dev,
4492 skb->data,
4493 buffer_info->length,
4494 DMA_FROM_DEVICE);
4495 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4496 dev_kfree_skb(skb);
4497 buffer_info->skb = NULL;
4498 buffer_info->dma = 0;
4499 adapter->alloc_rx_buff_failed++;
4500 break;
4501 }
4502
4503
4504
4505
4506
4507
4508 if (!e1000_check_64k_bound(adapter,
4509 (void *)(unsigned long)buffer_info->dma,
4510 adapter->rx_buffer_len)) {
4511 e_err(rx_err, "dma align check failed: %u bytes at "
4512 "%p\n", adapter->rx_buffer_len,
4513 (void *)(unsigned long)buffer_info->dma);
4514 dev_kfree_skb(skb);
4515 buffer_info->skb = NULL;
4516
4517 dma_unmap_single(&pdev->dev, buffer_info->dma,
4518 adapter->rx_buffer_len,
4519 DMA_FROM_DEVICE);
4520 buffer_info->dma = 0;
4521
4522 adapter->alloc_rx_buff_failed++;
4523 break;
4524 }
4525 rx_desc = E1000_RX_DESC(*rx_ring, i);
4526 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4527
4528 if (unlikely(++i == rx_ring->count))
4529 i = 0;
4530 buffer_info = &rx_ring->buffer_info[i];
4531 }
4532
4533 if (likely(rx_ring->next_to_use != i)) {
4534 rx_ring->next_to_use = i;
4535 if (unlikely(i-- == 0))
4536 i = (rx_ring->count - 1);
4537
4538
4539
4540
4541
4542
4543 wmb();
4544 writel(i, hw->hw_addr + rx_ring->rdt);
4545 }
4546}
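/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers
 * @adapter: board private structure
 *
 * If a 1000T master/slave configuration fault is seen, drop the manual
 * master/slave setting and restart autonegotiation; restore it again once
 * the downshift counter expires.
 */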
4552static void e1000_smartspeed(struct e1000_adapter *adapter)
4553{
4554 struct e1000_hw *hw = &adapter->hw;
4555 u16 phy_status;
4556 u16 phy_ctrl;
4557
4558 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4559 !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4560 return;
4561
4562 if (adapter->smartspeed == 0) {
4563
4564
4565
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
4570 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4571 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4572 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4573 e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4574 phy_ctrl);
4575 adapter->smartspeed++;
4576 if (!e1000_phy_setup_autoneg(hw) &&
4577 !e1000_read_phy_reg(hw, PHY_CTRL,
4578 &phy_ctrl)) {
4579 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4580 MII_CR_RESTART_AUTO_NEG);
4581 e1000_write_phy_reg(hw, PHY_CTRL,
4582 phy_ctrl);
4583 }
4584 }
4585 return;
4586 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4587
4588 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4589 phy_ctrl |= CR_1000T_MS_ENABLE;
4590 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4591 if (!e1000_phy_setup_autoneg(hw) &&
4592 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4593 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4594 MII_CR_RESTART_AUTO_NEG);
4595 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4596 }
4597 }
4598
4599 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4600 adapter->smartspeed = 0;
4601}
4602
4603
4604
4605
4606
4607
4608
4609static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4610{
4611 switch (cmd) {
4612 case SIOCGMIIPHY:
4613 case SIOCGMIIREG:
4614 case SIOCSMIIREG:
4615 return e1000_mii_ioctl(netdev, ifr, cmd);
4616 default:
4617 return -EOPNOTSUPP;
4618 }
4619}
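/**
 * e1000_mii_ioctl - perform MII ioctls on the PHY
 * @netdev: network interface device structure
 * @ifr: pointer to interface request structure
 * @cmd: ioctl command to execute
 */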
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num, mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					/* advertise all speed/duplex modes */
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;

					/* decode the forced speed from the
					 * MII control register bits
					 */
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

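/* PCI(-X) helpers that operate on the adapter's pci_dev; they take the
 * e1000_hw pointer and reach the adapter through hw->back.
 */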
void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;

	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}

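/* e1000_vlan_used - report whether any VLAN ID is currently registered */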
static bool e1000_vlan_used(struct e1000_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		return true;
	return false;
}

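/* __e1000_vlan_mode - set hardware VLAN tag stripping (CTRL.VME) to match
 * the requested netdev features
 */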
static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = er32(CTRL);
	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		/* enable VLAN tag insert/strip */
		ctrl |= E1000_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~E1000_CTRL_VME;
	}
	ew32(CTRL, ctrl);
}

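/* e1000_vlan_filter_on_off - enable or disable receive VLAN filtering
 * (RCTL.VFE), with interrupts masked while the registers are updated
 */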
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

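/* e1000_vlan_mode - apply a new VLAN stripping setting from the netdev
 * features, with interrupts masked during the change
 */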
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

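/* e1000_vlan_rx_add_vid - add a VLAN ID to the hardware filter table (VFTA)
 * and remember it in the adapter's active_vlans bitmap
 */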
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

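/* e1000_vlan_rx_kill_vid - remove a VLAN ID from the hardware filter table
 * and from the adapter's active_vlans bitmap
 */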
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}

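/* e1000_restore_vlan - re-program the VLAN filter table from active_vlans,
 * e.g. after a reset
 */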
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

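/* e1000_set_spd_dplx - force link speed and duplex; 1000 Mb/s full duplex
 * can only be reached by advertising it through autonegotiation
 */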
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mb/s full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF:
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	hw->mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

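/* __e1000_shutdown - common suspend/shutdown path: bring the interface down,
 * program the wake-up filters if Wake-on-LAN is configured, and report
 * through @enable_wake whether PCI wake should be armed by the caller
 */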
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
			usleep_range(10000, 20000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}


#ifdef CONFIG_PM
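/* e1000_suspend - PM suspend hook: run the common shutdown path and arm
 * (or disarm) PCI wake before entering D3
 */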
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

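/* e1000_resume - PM resume hook: restore PCI state, re-enable the device,
 * reset the hardware and bring the interface back up
 */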
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

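/* e1000_shutdown - reboot/poweroff hook: run the common shutdown path and
 * arm D3 wake only when the system is powering off
 */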
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.  Implementation
 * resembles the first half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.  Implementation resembles the
 * second half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}