// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */
#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
#ifdef CONFIG_PM
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - helper to return the net_device for an e1000_hw
 * @hw: pointer to the HW struct
 *
 * used by the hardware layer to print debugging information
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err)
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
329
330static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
331{
332 struct e1000_hw *hw = &adapter->hw;
333 struct net_device *netdev = adapter->netdev;
334 u16 vid = hw->mng_cookie.vlan_id;
335 u16 old_vid = adapter->mng_vlan_id;
336
337 if (!e1000_vlan_used(adapter))
338 return;
339
340 if (!test_bit(vid, adapter->active_vlans)) {
341 if (hw->mng_cookie.status &
342 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
343 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
344 adapter->mng_vlan_id = vid;
345 } else {
346 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
347 }
348 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
349 (vid != old_vid) &&
350 !test_bit(old_vid, adapter->active_vlans))
351 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
352 old_vid);
353 } else {
354 adapter->mng_vlan_id = vid;
355 }
356}
357
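/* When management pass-through is enabled, let the host handle ARP while the
 * interface is up (clear MANC_ARP_EN); e1000_release_manageability() below
 * hands ARP interception back to the firmware on teardown.
 */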
358static void e1000_init_manageability(struct e1000_adapter *adapter)
359{
360 struct e1000_hw *hw = &adapter->hw;
361
362 if (adapter->en_mng_pt) {
363 u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
366 manc &= ~(E1000_MANC_ARP_EN);
367
368 ew32(MANC, manc);
369 }
370}
371
372static void e1000_release_manageability(struct e1000_adapter *adapter)
373{
374 struct e1000_hw *hw = &adapter->hw;
375
376 if (adapter->en_mng_pt) {
377 u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
380 manc |= E1000_MANC_ARP_EN;
381
382 ew32(MANC, manc);
383 }
384}
385
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
390static void e1000_configure(struct e1000_adapter *adapter)
391{
392 struct net_device *netdev = adapter->netdev;
393 int i;
394
395 e1000_set_rx_mode(netdev);
396
397 e1000_restore_vlan(adapter);
398 e1000_init_manageability(adapter);
399
400 e1000_configure_tx(adapter);
401 e1000_setup_rctl(adapter);
402 e1000_configure_rx(adapter);

	/* call E1000_DESC_UNUSED which always leaves at least 1 descriptor
	 * unused to make sure next_to_use != next_to_clean
	 */
407 for (i = 0; i < adapter->num_rx_queues; i++) {
408 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
409 adapter->alloc_rx_buf(adapter, ring,
410 E1000_DESC_UNUSED(ring));
411 }
412}
413
414int e1000_up(struct e1000_adapter *adapter)
415{
416 struct e1000_hw *hw = &adapter->hw;
417
418
419 e1000_configure(adapter);
420
421 clear_bit(__E1000_DOWN, &adapter->flags);
422
423 napi_enable(&adapter->napi);
424
425 e1000_irq_enable(adapter);
426
427 netif_wake_queue(adapter->netdev);
428
429
430 ew32(ICS, E1000_ICS_LSC);
431 return 0;
432}
433
/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
442void e1000_power_up_phy(struct e1000_adapter *adapter)
443{
444 struct e1000_hw *hw = &adapter->hw;
445 u16 mii_reg = 0;
446
447
448 if (hw->media_type == e1000_media_type_copper) {
449
450
451
452 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
453 mii_reg &= ~MII_CR_POWER_DOWN;
454 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
455 }
456}
457
458static void e1000_power_down_phy(struct e1000_adapter *adapter)
459{
460 struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
468 if (!adapter->wol && hw->mac_type >= e1000_82540 &&
469 hw->media_type == e1000_media_type_copper) {
470 u16 mii_reg = 0;
471
472 switch (hw->mac_type) {
473 case e1000_82540:
474 case e1000_82545:
475 case e1000_82545_rev_3:
476 case e1000_82546:
477 case e1000_ce4100:
478 case e1000_82546_rev_3:
479 case e1000_82541:
480 case e1000_82541_rev_2:
481 case e1000_82547:
482 case e1000_82547_rev_2:
483 if (er32(MANC) & E1000_MANC_SMBUS_EN)
484 goto out;
485 break;
486 default:
487 goto out;
488 }
489 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
490 mii_reg |= MII_CR_POWER_DOWN;
491 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
492 msleep(1);
493 }
494out:
495 return;
496}
497
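/* Mark the adapter down and cancel all deferred work.  The watchdog task is
 * cancelled first because it can reschedule the other tasks; the reset task
 * is only cancelled when we are not already in the middle of a reset.
 */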
498static void e1000_down_and_stop(struct e1000_adapter *adapter)
499{
500 set_bit(__E1000_DOWN, &adapter->flags);
501
502 cancel_delayed_work_sync(&adapter->watchdog_task);
503
504
505
506
507
508
509
510 cancel_delayed_work_sync(&adapter->phy_info_task);
511 cancel_delayed_work_sync(&adapter->fifo_stall_task);
512
513
514 if (!test_bit(__E1000_RESETTING, &adapter->flags))
515 cancel_work_sync(&adapter->reset_task);
516}
517
518void e1000_down(struct e1000_adapter *adapter)
519{
520 struct e1000_hw *hw = &adapter->hw;
521 struct net_device *netdev = adapter->netdev;
522 u32 rctl, tctl;
523
524 netif_carrier_off(netdev);
525
526
527 rctl = er32(RCTL);
528 ew32(RCTL, rctl & ~E1000_RCTL_EN);
529
530
531 netif_tx_disable(netdev);
532
533
534 tctl = er32(TCTL);
535 tctl &= ~E1000_TCTL_EN;
536 ew32(TCTL, tctl);
537
538 E1000_WRITE_FLUSH();
539 msleep(10);
540
541 napi_disable(&adapter->napi);
542
543 e1000_irq_disable(adapter);
544
545
546
547
548
549 e1000_down_and_stop(adapter);
550
551 adapter->link_speed = 0;
552 adapter->link_duplex = 0;
553
554 e1000_reset(adapter);
555 e1000_clean_all_tx_rings(adapter);
556 e1000_clean_all_rx_rings(adapter);
557}
558
559void e1000_reinit_locked(struct e1000_adapter *adapter)
560{
561 WARN_ON(in_interrupt());
562 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
563 msleep(1);
564 e1000_down(adapter);
565 e1000_up(adapter);
566 clear_bit(__E1000_RESETTING, &adapter->flags);
567}
568
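/* e1000_reset - bring the hardware back to a known good state.  Repartitions
 * the packet buffer allocation (PBA) between Tx and Rx for larger frames,
 * recomputes the flow-control high/low watermarks from the resulting Rx
 * space, then resets and re-initializes the MAC.
 */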
569void e1000_reset(struct e1000_adapter *adapter)
570{
571 struct e1000_hw *hw = &adapter->hw;
572 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
573 bool legacy_pba_adjust = false;
574 u16 hwm;
575
576
577
578
579
580 switch (hw->mac_type) {
581 case e1000_82542_rev2_0:
582 case e1000_82542_rev2_1:
583 case e1000_82543:
584 case e1000_82544:
585 case e1000_82540:
586 case e1000_82541:
587 case e1000_82541_rev_2:
588 legacy_pba_adjust = true;
589 pba = E1000_PBA_48K;
590 break;
591 case e1000_82545:
592 case e1000_82545_rev_3:
593 case e1000_82546:
594 case e1000_ce4100:
595 case e1000_82546_rev_3:
596 pba = E1000_PBA_48K;
597 break;
598 case e1000_82547:
599 case e1000_82547_rev_2:
600 legacy_pba_adjust = true;
601 pba = E1000_PBA_30K;
602 break;
603 case e1000_undefined:
604 case e1000_num_macs:
605 break;
606 }
607
608 if (legacy_pba_adjust) {
609 if (hw->max_frame_size > E1000_RXBUFFER_8192)
610 pba -= 8;
611
612 if (hw->mac_type == e1000_82547) {
613 adapter->tx_fifo_head = 0;
614 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
615 adapter->tx_fifo_size =
616 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
617 atomic_set(&adapter->tx_fifo_stall, 0);
618 }
619 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
620
621 ew32(PBA, pba);
622
623
624
625
626
627
628
629
630 pba = er32(PBA);
631
632 tx_space = pba >> 16;
633
634 pba &= 0xffff;
635
636
637
638 min_tx_space = (hw->max_frame_size +
639 sizeof(struct e1000_tx_desc) -
640 ETH_FCS_LEN) * 2;
641 min_tx_space = ALIGN(min_tx_space, 1024);
642 min_tx_space >>= 10;
643
644 min_rx_space = hw->max_frame_size;
645 min_rx_space = ALIGN(min_rx_space, 1024);
646 min_rx_space >>= 10;
647
648
649
650
651
652 if (tx_space < min_tx_space &&
653 ((min_tx_space - tx_space) < pba)) {
654 pba = pba - (min_tx_space - tx_space);
655
656
657 switch (hw->mac_type) {
658 case e1000_82545 ... e1000_82546_rev_3:
659 pba &= ~(E1000_PBA_8K - 1);
660 break;
661 default:
662 break;
663 }
664
665
666
667
668 if (pba < min_rx_space)
669 pba = min_rx_space;
670 }
671 }
672
673 ew32(PBA, pba);
674
675
676
677
678
679
680
681
682
683
684 hwm = min(((pba << 10) * 9 / 10),
685 ((pba << 10) - hw->max_frame_size));
686
687 hw->fc_high_water = hwm & 0xFFF8;
688 hw->fc_low_water = hw->fc_high_water - 8;
689 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
690 hw->fc_send_xon = 1;
691 hw->fc = hw->original_fc;
692
693
694 e1000_reset_hw(hw);
695 if (hw->mac_type >= e1000_82544)
696 ew32(WUC, 0);
697
698 if (e1000_init_hw(hw))
699 e_dev_err("Hardware Error\n");
700 e1000_update_mng_vlan(adapter);
701
702
703 if (hw->mac_type >= e1000_82544 &&
704 hw->autoneg == 1 &&
705 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
706 u32 ctrl = er32(CTRL);
707
708
709
710
711 ctrl &= ~E1000_CTRL_SWDPIN3;
712 ew32(CTRL, ctrl);
713 }
714
715
716 ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
717
718 e1000_reset_adaptive(hw);
719 e1000_phy_get_info(hw, &adapter->phy_info);
720
721 e1000_release_manageability(adapter);
722}
723
/* Dump the eeprom for users having checksum issues */
725static void e1000_dump_eeprom(struct e1000_adapter *adapter)
726{
727 struct net_device *netdev = adapter->netdev;
728 struct ethtool_eeprom eeprom;
729 const struct ethtool_ops *ops = netdev->ethtool_ops;
730 u8 *data;
731 int i;
732 u16 csum_old, csum_new = 0;
733
734 eeprom.len = ops->get_eeprom_len(netdev);
735 eeprom.offset = 0;
736
737 data = kmalloc(eeprom.len, GFP_KERNEL);
738 if (!data)
739 return;
740
741 ops->get_eeprom(netdev, &eeprom, data);
742
743 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
744 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
745 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
746 csum_new += data[i] + (data[i + 1] << 8);
747 csum_new = EEPROM_SUM - csum_new;
748
749 pr_err("/*********************/\n");
750 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
751 pr_err("Calculated : 0x%04x\n", csum_new);
752
753 pr_err("Offset Values\n");
754 pr_err("======== ======\n");
755 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
756
757 pr_err("Include this output when contacting your support provider.\n");
758 pr_err("This is not a software error! Something bad happened to\n");
759 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
760 pr_err("result in further problems, possibly loss of data,\n");
761 pr_err("corruption or system hangs!\n");
762 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
763 pr_err("which is invalid and requires you to set the proper MAC\n");
764 pr_err("address manually before continuing to enable this network\n");
765 pr_err("device. Please inspect the EEPROM dump and report the\n");
766 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
767 pr_err("/*********************/\n");
768
769 kfree(data);
770}
771
/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
778static int e1000_is_need_ioport(struct pci_dev *pdev)
779{
780 switch (pdev->device) {
781 case E1000_DEV_ID_82540EM:
782 case E1000_DEV_ID_82540EM_LOM:
783 case E1000_DEV_ID_82540EP:
784 case E1000_DEV_ID_82540EP_LOM:
785 case E1000_DEV_ID_82540EP_LP:
786 case E1000_DEV_ID_82541EI:
787 case E1000_DEV_ID_82541EI_MOBILE:
788 case E1000_DEV_ID_82541ER:
789 case E1000_DEV_ID_82541ER_LOM:
790 case E1000_DEV_ID_82541GI:
791 case E1000_DEV_ID_82541GI_LF:
792 case E1000_DEV_ID_82541GI_MOBILE:
793 case E1000_DEV_ID_82544EI_COPPER:
794 case E1000_DEV_ID_82544EI_FIBER:
795 case E1000_DEV_ID_82544GC_COPPER:
796 case E1000_DEV_ID_82544GC_LOM:
797 case E1000_DEV_ID_82545EM_COPPER:
798 case E1000_DEV_ID_82545EM_FIBER:
799 case E1000_DEV_ID_82546EB_COPPER:
800 case E1000_DEV_ID_82546EB_FIBER:
801 case E1000_DEV_ID_82546EB_QUAD_COPPER:
802 return true;
803 default:
804 return false;
805 }
806}
807
808static netdev_features_t e1000_fix_features(struct net_device *netdev,
809 netdev_features_t features)
810{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
814 if (features & NETIF_F_HW_VLAN_CTAG_RX)
815 features |= NETIF_F_HW_VLAN_CTAG_TX;
816 else
817 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
818
819 return features;
820}
821
822static int e1000_set_features(struct net_device *netdev,
823 netdev_features_t features)
824{
825 struct e1000_adapter *adapter = netdev_priv(netdev);
826 netdev_features_t changed = features ^ netdev->features;
827
828 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
829 e1000_vlan_mode(netdev, features);
830
831 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
832 return 0;
833
834 netdev->features = features;
835 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
836
837 if (netif_running(netdev))
838 e1000_reinit_locked(adapter);
839 else
840 e1000_reset(adapter);
841
842 return 0;
843}
844
845static const struct net_device_ops e1000_netdev_ops = {
846 .ndo_open = e1000_open,
847 .ndo_stop = e1000_close,
848 .ndo_start_xmit = e1000_xmit_frame,
849 .ndo_get_stats = e1000_get_stats,
850 .ndo_set_rx_mode = e1000_set_rx_mode,
851 .ndo_set_mac_address = e1000_set_mac,
852 .ndo_tx_timeout = e1000_tx_timeout,
853 .ndo_change_mtu = e1000_change_mtu,
854 .ndo_do_ioctl = e1000_ioctl,
855 .ndo_validate_addr = eth_validate_addr,
856 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
857 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
858#ifdef CONFIG_NET_POLL_CONTROLLER
859 .ndo_poll_controller = e1000_netpoll,
860#endif
861 .ndo_fix_features = e1000_fix_features,
862 .ndo_set_features = e1000_set_features,
863};
864
/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fills out hw hardware information and sets up boot time settings
 * correctly, only after they are fixed to probe setup we don't run into
 * problems later.  Returns negative error codes only, or zero on success.
 **/
876static int e1000_init_hw_struct(struct e1000_adapter *adapter,
877 struct e1000_hw *hw)
878{
879 struct pci_dev *pdev = adapter->pdev;
880
881
882 hw->vendor_id = pdev->vendor;
883 hw->device_id = pdev->device;
884 hw->subsystem_vendor_id = pdev->subsystem_vendor;
885 hw->subsystem_id = pdev->subsystem_device;
886 hw->revision_id = pdev->revision;
887
888 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
889
890 hw->max_frame_size = adapter->netdev->mtu +
891 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
892 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
893
894
895 if (e1000_set_mac_type(hw)) {
896 e_err(probe, "Unknown MAC Type\n");
897 return -EIO;
898 }
899
900 switch (hw->mac_type) {
901 default:
902 break;
903 case e1000_82541:
904 case e1000_82547:
905 case e1000_82541_rev_2:
906 case e1000_82547_rev_2:
907 hw->phy_init_script = 1;
908 break;
909 }
910
911 e1000_set_media_type(hw);
912 e1000_get_bus_info(hw);
913
914 hw->wait_autoneg_complete = false;
915 hw->tbi_compatibility_en = true;
916 hw->adaptive_ifs = true;
917
918
919
920 if (hw->media_type == e1000_media_type_copper) {
921 hw->mdix = AUTO_ALL_MODES;
922 hw->disable_polarity_correction = false;
923 hw->master_slave = E1000_MASTER_SLAVE;
924 }
925
926 return 0;
927}
928
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
940static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
941{
942 struct net_device *netdev;
943 struct e1000_adapter *adapter;
944 struct e1000_hw *hw;
945
946 static int cards_found;
947 static int global_quad_port_a;
948 int i, err, pci_using_dac;
949 u16 eeprom_data = 0;
950 u16 tmp = 0;
951 u16 eeprom_apme_mask = E1000_EEPROM_APME;
952 int bars, need_ioport;
953
954
955 need_ioport = e1000_is_need_ioport(pdev);
956 if (need_ioport) {
957 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
958 err = pci_enable_device(pdev);
959 } else {
960 bars = pci_select_bars(pdev, IORESOURCE_MEM);
961 err = pci_enable_device_mem(pdev);
962 }
963 if (err)
964 return err;
965
966 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
967 if (err)
968 goto err_pci_reg;
969
970 pci_set_master(pdev);
971 err = pci_save_state(pdev);
972 if (err)
973 goto err_alloc_etherdev;
974
975 err = -ENOMEM;
976 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
977 if (!netdev)
978 goto err_alloc_etherdev;
979
980 SET_NETDEV_DEV(netdev, &pdev->dev);
981
982 pci_set_drvdata(pdev, netdev);
983 adapter = netdev_priv(netdev);
984 adapter->netdev = netdev;
985 adapter->pdev = pdev;
986 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
987 adapter->bars = bars;
988 adapter->need_ioport = need_ioport;
989
990 hw = &adapter->hw;
991 hw->back = adapter;
992
993 err = -EIO;
994 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
995 if (!hw->hw_addr)
996 goto err_ioremap;
997
998 if (adapter->need_ioport) {
999 for (i = BAR_1; i <= BAR_5; i++) {
1000 if (pci_resource_len(pdev, i) == 0)
1001 continue;
1002 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1003 hw->io_base = pci_resource_start(pdev, i);
1004 break;
1005 }
1006 }
1007 }
1008
1009
1010 err = e1000_init_hw_struct(adapter, hw);
1011 if (err)
1012 goto err_sw_init;
1013
1014
1015
1016
1017
1018 pci_using_dac = 0;
1019 if ((hw->bus_type == e1000_bus_type_pcix) &&
1020 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1021 pci_using_dac = 1;
1022 } else {
1023 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1024 if (err) {
1025 pr_err("No usable DMA config, aborting\n");
1026 goto err_dma;
1027 }
1028 }
1029
1030 netdev->netdev_ops = &e1000_netdev_ops;
1031 e1000_set_ethtool_ops(netdev);
1032 netdev->watchdog_timeo = 5 * HZ;
1033 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1034
1035 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1036
1037 adapter->bd_number = cards_found;
1038
1039
1040
1041 err = e1000_sw_init(adapter);
1042 if (err)
1043 goto err_sw_init;
1044
1045 err = -EIO;
1046 if (hw->mac_type == e1000_ce4100) {
1047 hw->ce4100_gbe_mdio_base_virt =
1048 ioremap(pci_resource_start(pdev, BAR_1),
1049 pci_resource_len(pdev, BAR_1));
1050
1051 if (!hw->ce4100_gbe_mdio_base_virt)
1052 goto err_mdio_ioremap;
1053 }
1054
1055 if (hw->mac_type >= e1000_82543) {
1056 netdev->hw_features = NETIF_F_SG |
1057 NETIF_F_HW_CSUM |
1058 NETIF_F_HW_VLAN_CTAG_RX;
1059 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1060 NETIF_F_HW_VLAN_CTAG_FILTER;
1061 }
1062
1063 if ((hw->mac_type >= e1000_82544) &&
1064 (hw->mac_type != e1000_82547))
1065 netdev->hw_features |= NETIF_F_TSO;
1066
1067 netdev->priv_flags |= IFF_SUPP_NOFCS;
1068
1069 netdev->features |= netdev->hw_features;
1070 netdev->hw_features |= (NETIF_F_RXCSUM |
1071 NETIF_F_RXALL |
1072 NETIF_F_RXFCS);
1073
1074 if (pci_using_dac) {
1075 netdev->features |= NETIF_F_HIGHDMA;
1076 netdev->vlan_features |= NETIF_F_HIGHDMA;
1077 }
1078
1079 netdev->vlan_features |= (NETIF_F_TSO |
1080 NETIF_F_HW_CSUM |
1081 NETIF_F_SG);
1082
1083
1084 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1085 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1086 netdev->priv_flags |= IFF_UNICAST_FLT;
1087
1088 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1089
1090
1091 if (e1000_init_eeprom_params(hw)) {
1092 e_err(probe, "EEPROM initialization failed\n");
1093 goto err_eeprom;
1094 }
1095
1096
1097
1098
1099
1100 e1000_reset_hw(hw);
1101
1102
1103 if (e1000_validate_eeprom_checksum(hw) < 0) {
1104 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1105 e1000_dump_eeprom(adapter);
1106
1107
1108
1109
1110
1111
1112
1113 memset(hw->mac_addr, 0, netdev->addr_len);
1114 } else {
1115
1116 if (e1000_read_mac_addr(hw))
1117 e_err(probe, "EEPROM Read Error\n");
1118 }
1119
1120 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1121
1122 if (!is_valid_ether_addr(netdev->dev_addr))
1123 e_err(probe, "Invalid MAC Address\n");
1124
1125
1126 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1127 INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1128 e1000_82547_tx_fifo_stall_task);
1129 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1130 INIT_WORK(&adapter->reset_task, e1000_reset_task);
1131
1132 e1000_check_options(adapter);
1133
1134
1135
1136
1137
1138
1139 switch (hw->mac_type) {
1140 case e1000_82542_rev2_0:
1141 case e1000_82542_rev2_1:
1142 case e1000_82543:
1143 break;
1144 case e1000_82544:
1145 e1000_read_eeprom(hw,
1146 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1147 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1148 break;
1149 case e1000_82546:
1150 case e1000_82546_rev_3:
1151 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1152 e1000_read_eeprom(hw,
1153 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1154 break;
1155 }
1156
1157 default:
1158 e1000_read_eeprom(hw,
1159 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1160 break;
1161 }
1162 if (eeprom_data & eeprom_apme_mask)
1163 adapter->eeprom_wol |= E1000_WUFC_MAG;
1164
1165
1166
1167
1168
1169 switch (pdev->device) {
1170 case E1000_DEV_ID_82546GB_PCIE:
1171 adapter->eeprom_wol = 0;
1172 break;
1173 case E1000_DEV_ID_82546EB_FIBER:
1174 case E1000_DEV_ID_82546GB_FIBER:
1175
1176
1177
1178 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1179 adapter->eeprom_wol = 0;
1180 break;
1181 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1182
1183 if (global_quad_port_a != 0)
1184 adapter->eeprom_wol = 0;
1185 else
1186 adapter->quad_port_a = true;
1187
1188 if (++global_quad_port_a == 4)
1189 global_quad_port_a = 0;
1190 break;
1191 }
1192
1193
1194 adapter->wol = adapter->eeprom_wol;
1195 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1196
1197
1198 if (hw->mac_type == e1000_ce4100) {
1199 for (i = 0; i < 32; i++) {
1200 hw->phy_addr = i;
1201 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1202
1203 if (tmp != 0 && tmp != 0xFF)
1204 break;
1205 }
1206
1207 if (i >= 32)
1208 goto err_eeprom;
1209 }
1210
1211
1212 e1000_reset(adapter);
1213
1214 strcpy(netdev->name, "eth%d");
1215 err = register_netdev(netdev);
1216 if (err)
1217 goto err_register;
1218
1219 e1000_vlan_filter_on_off(adapter, false);
1220
1221
1222 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1223 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1224 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1225 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1226 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1227 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1228 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1229 netdev->dev_addr);
1230
1231
1232 netif_carrier_off(netdev);
1233
1234 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1235
1236 cards_found++;
1237 return 0;
1238
1239err_register:
1240err_eeprom:
1241 e1000_phy_hw_reset(hw);
1242
1243 if (hw->flash_address)
1244 iounmap(hw->flash_address);
1245 kfree(adapter->tx_ring);
1246 kfree(adapter->rx_ring);
1247err_dma:
1248err_sw_init:
1249err_mdio_ioremap:
1250 iounmap(hw->ce4100_gbe_mdio_base_virt);
1251 iounmap(hw->hw_addr);
1252err_ioremap:
1253 free_netdev(netdev);
1254err_alloc_etherdev:
1255 pci_release_selected_regions(pdev, bars);
1256err_pci_reg:
1257 pci_disable_device(pdev);
1258 return err;
1259}
1260
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
1270static void e1000_remove(struct pci_dev *pdev)
1271{
1272 struct net_device *netdev = pci_get_drvdata(pdev);
1273 struct e1000_adapter *adapter = netdev_priv(netdev);
1274 struct e1000_hw *hw = &adapter->hw;
1275
1276 e1000_down_and_stop(adapter);
1277 e1000_release_manageability(adapter);
1278
1279 unregister_netdev(netdev);
1280
1281 e1000_phy_hw_reset(hw);
1282
1283 kfree(adapter->tx_ring);
1284 kfree(adapter->rx_ring);
1285
1286 if (hw->mac_type == e1000_ce4100)
1287 iounmap(hw->ce4100_gbe_mdio_base_virt);
1288 iounmap(hw->hw_addr);
1289 if (hw->flash_address)
1290 iounmap(hw->flash_address);
1291 pci_release_selected_regions(pdev, adapter->bars);
1292
1293 free_netdev(netdev);
1294
1295 pci_disable_device(pdev);
1296}
1297
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function.
 **/
1305static int e1000_sw_init(struct e1000_adapter *adapter)
1306{
1307 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1308
1309 adapter->num_tx_queues = 1;
1310 adapter->num_rx_queues = 1;
1311
1312 if (e1000_alloc_queues(adapter)) {
1313 e_err(probe, "Unable to allocate memory for queues\n");
1314 return -ENOMEM;
1315 }
1316
1317
1318 e1000_irq_disable(adapter);
1319
1320 spin_lock_init(&adapter->stats_lock);
1321
1322 set_bit(__E1000_DOWN, &adapter->flags);
1323
1324 return 0;
1325}
1326
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
1334static int e1000_alloc_queues(struct e1000_adapter *adapter)
1335{
1336 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1337 sizeof(struct e1000_tx_ring), GFP_KERNEL);
1338 if (!adapter->tx_ring)
1339 return -ENOMEM;
1340
1341 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1342 sizeof(struct e1000_rx_ring), GFP_KERNEL);
1343 if (!adapter->rx_ring) {
1344 kfree(adapter->tx_ring);
1345 return -ENOMEM;
1346 }
1347
1348 return E1000_SUCCESS;
1349}
1350
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
1363static int e1000_open(struct net_device *netdev)
1364{
1365 struct e1000_adapter *adapter = netdev_priv(netdev);
1366 struct e1000_hw *hw = &adapter->hw;
1367 int err;
1368
1369
1370 if (test_bit(__E1000_TESTING, &adapter->flags))
1371 return -EBUSY;
1372
1373 netif_carrier_off(netdev);
1374
1375
1376 err = e1000_setup_all_tx_resources(adapter);
1377 if (err)
1378 goto err_setup_tx;
1379
1380
1381 err = e1000_setup_all_rx_resources(adapter);
1382 if (err)
1383 goto err_setup_rx;
1384
1385 e1000_power_up_phy(adapter);
1386
1387 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1388 if ((hw->mng_cookie.status &
1389 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1390 e1000_update_mng_vlan(adapter);
1391 }
1392
1393
1394
1395
1396
1397
1398 e1000_configure(adapter);
1399
1400 err = e1000_request_irq(adapter);
1401 if (err)
1402 goto err_req_irq;
1403
1404
1405 clear_bit(__E1000_DOWN, &adapter->flags);
1406
1407 napi_enable(&adapter->napi);
1408
1409 e1000_irq_enable(adapter);
1410
1411 netif_start_queue(netdev);
1412
1413
1414 ew32(ICS, E1000_ICS_LSC);
1415
1416 return E1000_SUCCESS;
1417
1418err_req_irq:
1419 e1000_power_down_phy(adapter);
1420 e1000_free_all_rx_resources(adapter);
1421err_setup_rx:
1422 e1000_free_all_tx_resources(adapter);
1423err_setup_tx:
1424 e1000_reset(adapter);
1425
1426 return err;
1427}
1428
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
1440static int e1000_close(struct net_device *netdev)
1441{
1442 struct e1000_adapter *adapter = netdev_priv(netdev);
1443 struct e1000_hw *hw = &adapter->hw;
1444 int count = E1000_CHECK_RESET_COUNT;
1445
1446 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1447 usleep_range(10000, 20000);
1448
1449 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1450 e1000_down(adapter);
1451 e1000_power_down_phy(adapter);
1452 e1000_free_irq(adapter);
1453
1454 e1000_free_all_tx_resources(adapter);
1455 e1000_free_all_rx_resources(adapter);
1456
1457
1458
1459
1460 if ((hw->mng_cookie.status &
1461 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1462 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1463 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1464 adapter->mng_vlan_id);
1465 }
1466
1467 return 0;
1468}
1469
/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
1476static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1477 unsigned long len)
1478{
1479 struct e1000_hw *hw = &adapter->hw;
1480 unsigned long begin = (unsigned long)start;
1481 unsigned long end = begin + len;
1482
1483
1484
1485
1486 if (hw->mac_type == e1000_82545 ||
1487 hw->mac_type == e1000_ce4100 ||
1488 hw->mac_type == e1000_82546) {
1489 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1490 }
1491
1492 return true;
1493}
1494
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
1502static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1503 struct e1000_tx_ring *txdr)
1504{
1505 struct pci_dev *pdev = adapter->pdev;
1506 int size;
1507
1508 size = sizeof(struct e1000_tx_buffer) * txdr->count;
1509 txdr->buffer_info = vzalloc(size);
1510 if (!txdr->buffer_info)
1511 return -ENOMEM;
1512
1513
1514
1515 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1516 txdr->size = ALIGN(txdr->size, 4096);
1517
1518 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1519 GFP_KERNEL);
1520 if (!txdr->desc) {
1521setup_tx_desc_die:
1522 vfree(txdr->buffer_info);
1523 return -ENOMEM;
1524 }
1525
1526
1527 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1528 void *olddesc = txdr->desc;
1529 dma_addr_t olddma = txdr->dma;
1530 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1531 txdr->size, txdr->desc);
1532
1533 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1534 &txdr->dma, GFP_KERNEL);
1535
1536 if (!txdr->desc) {
1537 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1538 olddma);
1539 goto setup_tx_desc_die;
1540 }
1541
1542 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1543
1544 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1545 txdr->dma);
1546 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1547 olddma);
1548 e_err(probe, "Unable to allocate aligned memory "
1549 "for the transmit descriptor ring\n");
1550 vfree(txdr->buffer_info);
1551 return -ENOMEM;
1552 } else {
1553
1554 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1555 olddma);
1556 }
1557 }
1558 memset(txdr->desc, 0, txdr->size);
1559
1560 txdr->next_to_use = 0;
1561 txdr->next_to_clean = 0;
1562
1563 return 0;
1564}
1565
/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
1573int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1574{
1575 int i, err = 0;
1576
1577 for (i = 0; i < adapter->num_tx_queues; i++) {
1578 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1579 if (err) {
1580 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1581 for (i-- ; i >= 0; i--)
1582 e1000_free_tx_resources(adapter,
1583 &adapter->tx_ring[i]);
1584 break;
1585 }
1586 }
1587
1588 return err;
1589}
1590
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
1597static void e1000_configure_tx(struct e1000_adapter *adapter)
1598{
1599 u64 tdba;
1600 struct e1000_hw *hw = &adapter->hw;
1601 u32 tdlen, tctl, tipg;
1602 u32 ipgr1, ipgr2;
1603
1604
1605
1606 switch (adapter->num_tx_queues) {
1607 case 1:
1608 default:
1609 tdba = adapter->tx_ring[0].dma;
1610 tdlen = adapter->tx_ring[0].count *
1611 sizeof(struct e1000_tx_desc);
1612 ew32(TDLEN, tdlen);
1613 ew32(TDBAH, (tdba >> 32));
1614 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1615 ew32(TDT, 0);
1616 ew32(TDH, 0);
1617 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1618 E1000_TDH : E1000_82542_TDH);
1619 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1620 E1000_TDT : E1000_82542_TDT);
1621 break;
1622 }
1623
1624
1625 if ((hw->media_type == e1000_media_type_fiber ||
1626 hw->media_type == e1000_media_type_internal_serdes))
1627 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1628 else
1629 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1630
1631 switch (hw->mac_type) {
1632 case e1000_82542_rev2_0:
1633 case e1000_82542_rev2_1:
1634 tipg = DEFAULT_82542_TIPG_IPGT;
1635 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1636 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1637 break;
1638 default:
1639 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1640 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1641 break;
1642 }
1643 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1644 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1645 ew32(TIPG, tipg);
1646
1647
1648
1649 ew32(TIDV, adapter->tx_int_delay);
1650 if (hw->mac_type >= e1000_82540)
1651 ew32(TADV, adapter->tx_abs_int_delay);
1652
1653
1654
1655 tctl = er32(TCTL);
1656 tctl &= ~E1000_TCTL_CT;
1657 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1658 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1659
1660 e1000_config_collision_dist(hw);
1661
1662
1663 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1664
1665
1666 if (adapter->tx_int_delay)
1667 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1668
1669 if (hw->mac_type < e1000_82543)
1670 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1671 else
1672 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1673
1674
1675
1676
1677 if (hw->mac_type == e1000_82544 &&
1678 hw->bus_type == e1000_bus_type_pcix)
1679 adapter->pcix_82544 = true;
1680
1681 ew32(TCTL, tctl);
1682
1683}
1684
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
1692static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1693 struct e1000_rx_ring *rxdr)
1694{
1695 struct pci_dev *pdev = adapter->pdev;
1696 int size, desc_len;
1697
1698 size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1699 rxdr->buffer_info = vzalloc(size);
1700 if (!rxdr->buffer_info)
1701 return -ENOMEM;
1702
1703 desc_len = sizeof(struct e1000_rx_desc);
1704
1705
1706
1707 rxdr->size = rxdr->count * desc_len;
1708 rxdr->size = ALIGN(rxdr->size, 4096);
1709
1710 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1711 GFP_KERNEL);
1712 if (!rxdr->desc) {
1713setup_rx_desc_die:
1714 vfree(rxdr->buffer_info);
1715 return -ENOMEM;
1716 }
1717
1718
1719 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1720 void *olddesc = rxdr->desc;
1721 dma_addr_t olddma = rxdr->dma;
1722 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1723 rxdr->size, rxdr->desc);
1724
1725 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1726 &rxdr->dma, GFP_KERNEL);
1727
1728 if (!rxdr->desc) {
1729 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1730 olddma);
1731 goto setup_rx_desc_die;
1732 }
1733
1734 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1735
1736 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1737 rxdr->dma);
1738 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1739 olddma);
1740 e_err(probe, "Unable to allocate aligned memory for "
1741 "the Rx descriptor ring\n");
1742 goto setup_rx_desc_die;
1743 } else {
1744
1745 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1746 olddma);
1747 }
1748 }
1749 memset(rxdr->desc, 0, rxdr->size);
1750
1751 rxdr->next_to_clean = 0;
1752 rxdr->next_to_use = 0;
1753 rxdr->rx_skb_top = NULL;
1754
1755 return 0;
1756}
1757
/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
1765int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1766{
1767 int i, err = 0;
1768
1769 for (i = 0; i < adapter->num_rx_queues; i++) {
1770 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1771 if (err) {
1772 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1773 for (i-- ; i >= 0; i--)
1774 e1000_free_rx_resources(adapter,
1775 &adapter->rx_ring[i]);
1776 break;
1777 }
1778 }
1779
1780 return err;
1781}
1782
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
1787static void e1000_setup_rctl(struct e1000_adapter *adapter)
1788{
1789 struct e1000_hw *hw = &adapter->hw;
1790 u32 rctl;
1791
1792 rctl = er32(RCTL);
1793
1794 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1795
1796 rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1797 E1000_RCTL_RDMTS_HALF |
1798 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1799
1800 if (hw->tbi_compatibility_on == 1)
1801 rctl |= E1000_RCTL_SBP;
1802 else
1803 rctl &= ~E1000_RCTL_SBP;
1804
1805 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1806 rctl &= ~E1000_RCTL_LPE;
1807 else
1808 rctl |= E1000_RCTL_LPE;
1809
1810
1811 rctl &= ~E1000_RCTL_SZ_4096;
1812 rctl |= E1000_RCTL_BSEX;
1813 switch (adapter->rx_buffer_len) {
1814 case E1000_RXBUFFER_2048:
1815 default:
1816 rctl |= E1000_RCTL_SZ_2048;
1817 rctl &= ~E1000_RCTL_BSEX;
1818 break;
1819 case E1000_RXBUFFER_4096:
1820 rctl |= E1000_RCTL_SZ_4096;
1821 break;
1822 case E1000_RXBUFFER_8192:
1823 rctl |= E1000_RCTL_SZ_8192;
1824 break;
1825 case E1000_RXBUFFER_16384:
1826 rctl |= E1000_RCTL_SZ_16384;
1827 break;
1828 }
1829
1830
1831 if (adapter->netdev->features & NETIF_F_RXALL) {
1832
1833
1834
1835 rctl |= (E1000_RCTL_SBP |
1836 E1000_RCTL_BAM |
1837 E1000_RCTL_PMCF);
1838
1839 rctl &= ~(E1000_RCTL_VFE |
1840 E1000_RCTL_DPF |
1841 E1000_RCTL_CFIEN);
1842
1843
1844
1845 }
1846
1847 ew32(RCTL, rctl);
1848}
1849
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
1856static void e1000_configure_rx(struct e1000_adapter *adapter)
1857{
1858 u64 rdba;
1859 struct e1000_hw *hw = &adapter->hw;
1860 u32 rdlen, rctl, rxcsum;
1861
1862 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1863 rdlen = adapter->rx_ring[0].count *
1864 sizeof(struct e1000_rx_desc);
1865 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1866 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1867 } else {
1868 rdlen = adapter->rx_ring[0].count *
1869 sizeof(struct e1000_rx_desc);
1870 adapter->clean_rx = e1000_clean_rx_irq;
1871 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1872 }
1873
1874
1875 rctl = er32(RCTL);
1876 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1877
1878
1879 ew32(RDTR, adapter->rx_int_delay);
1880
1881 if (hw->mac_type >= e1000_82540) {
1882 ew32(RADV, adapter->rx_abs_int_delay);
1883 if (adapter->itr_setting != 0)
1884 ew32(ITR, 1000000000 / (adapter->itr * 256));
1885 }
1886
1887
1888
1889
1890 switch (adapter->num_rx_queues) {
1891 case 1:
1892 default:
1893 rdba = adapter->rx_ring[0].dma;
1894 ew32(RDLEN, rdlen);
1895 ew32(RDBAH, (rdba >> 32));
1896 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1897 ew32(RDT, 0);
1898 ew32(RDH, 0);
1899 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1900 E1000_RDH : E1000_82542_RDH);
1901 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1902 E1000_RDT : E1000_82542_RDT);
1903 break;
1904 }
1905
1906
1907 if (hw->mac_type >= e1000_82543) {
1908 rxcsum = er32(RXCSUM);
1909 if (adapter->rx_csum)
1910 rxcsum |= E1000_RXCSUM_TUOFL;
1911 else
1912
1913 rxcsum &= ~E1000_RXCSUM_TUOFL;
1914 ew32(RXCSUM, rxcsum);
1915 }
1916
1917
1918 ew32(RCTL, rctl | E1000_RCTL_EN);
1919}
1920
/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
1928static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1929 struct e1000_tx_ring *tx_ring)
1930{
1931 struct pci_dev *pdev = adapter->pdev;
1932
1933 e1000_clean_tx_ring(adapter, tx_ring);
1934
1935 vfree(tx_ring->buffer_info);
1936 tx_ring->buffer_info = NULL;
1937
1938 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1939 tx_ring->dma);
1940
1941 tx_ring->desc = NULL;
1942}
1943
/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
1950void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1951{
1952 int i;
1953
1954 for (i = 0; i < adapter->num_tx_queues; i++)
1955 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1956}
1957
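/* Unmap a transmitted buffer (page or single DMA mapping) and free its skb
 * so the descriptor slot can be reused.
 */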
1958static void
1959e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1960 struct e1000_tx_buffer *buffer_info)
1961{
1962 if (buffer_info->dma) {
1963 if (buffer_info->mapped_as_page)
1964 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1965 buffer_info->length, DMA_TO_DEVICE);
1966 else
1967 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1968 buffer_info->length,
1969 DMA_TO_DEVICE);
1970 buffer_info->dma = 0;
1971 }
1972 if (buffer_info->skb) {
1973 dev_kfree_skb_any(buffer_info->skb);
1974 buffer_info->skb = NULL;
1975 }
1976 buffer_info->time_stamp = 0;
1977
1978}
1979
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
1985static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1986 struct e1000_tx_ring *tx_ring)
1987{
1988 struct e1000_hw *hw = &adapter->hw;
1989 struct e1000_tx_buffer *buffer_info;
1990 unsigned long size;
1991 unsigned int i;
1992
1993
1994
1995 for (i = 0; i < tx_ring->count; i++) {
1996 buffer_info = &tx_ring->buffer_info[i];
1997 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1998 }
1999
2000 netdev_reset_queue(adapter->netdev);
2001 size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2002 memset(tx_ring->buffer_info, 0, size);
2003
2004
2005
2006 memset(tx_ring->desc, 0, tx_ring->size);
2007
2008 tx_ring->next_to_use = 0;
2009 tx_ring->next_to_clean = 0;
2010 tx_ring->last_tx_tso = false;
2011
2012 writel(0, hw->hw_addr + tx_ring->tdh);
2013 writel(0, hw->hw_addr + tx_ring->tdt);
2014}
2015
/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
2020static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2021{
2022 int i;
2023
2024 for (i = 0; i < adapter->num_tx_queues; i++)
2025 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2026}
2027
/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
2035static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2036 struct e1000_rx_ring *rx_ring)
2037{
2038 struct pci_dev *pdev = adapter->pdev;
2039
2040 e1000_clean_rx_ring(adapter, rx_ring);
2041
2042 vfree(rx_ring->buffer_info);
2043 rx_ring->buffer_info = NULL;
2044
2045 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2046 rx_ring->dma);
2047
2048 rx_ring->desc = NULL;
2049}
2050
/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
2057void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2058{
2059 int i;
2060
2061 for (i = 0; i < adapter->num_rx_queues; i++)
2062 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2063}
2064
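/* Rx buffers come from the page-fragment allocator: each fragment carries
 * NET_SKB_PAD + NET_IP_ALIGN of headroom, the Rx buffer itself, and a
 * trailing struct skb_shared_info, each cache-line aligned.
 */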
2065#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2066static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2067{
2068 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2069 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2070}
2071
2072static void *e1000_alloc_frag(const struct e1000_adapter *a)
2073{
2074 unsigned int len = e1000_frag_len(a);
2075 u8 *data = netdev_alloc_frag(len);
2076
2077 if (likely(data))
2078 data += E1000_HEADROOM;
2079 return data;
2080}
2081
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
2087static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2088 struct e1000_rx_ring *rx_ring)
2089{
2090 struct e1000_hw *hw = &adapter->hw;
2091 struct e1000_rx_buffer *buffer_info;
2092 struct pci_dev *pdev = adapter->pdev;
2093 unsigned long size;
2094 unsigned int i;
2095
2096
2097 for (i = 0; i < rx_ring->count; i++) {
2098 buffer_info = &rx_ring->buffer_info[i];
2099 if (adapter->clean_rx == e1000_clean_rx_irq) {
2100 if (buffer_info->dma)
2101 dma_unmap_single(&pdev->dev, buffer_info->dma,
2102 adapter->rx_buffer_len,
2103 DMA_FROM_DEVICE);
2104 if (buffer_info->rxbuf.data) {
2105 skb_free_frag(buffer_info->rxbuf.data);
2106 buffer_info->rxbuf.data = NULL;
2107 }
2108 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2109 if (buffer_info->dma)
2110 dma_unmap_page(&pdev->dev, buffer_info->dma,
2111 adapter->rx_buffer_len,
2112 DMA_FROM_DEVICE);
2113 if (buffer_info->rxbuf.page) {
2114 put_page(buffer_info->rxbuf.page);
2115 buffer_info->rxbuf.page = NULL;
2116 }
2117 }
2118
2119 buffer_info->dma = 0;
2120 }
2121
2122
2123 napi_free_frags(&adapter->napi);
2124 rx_ring->rx_skb_top = NULL;
2125
2126 size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2127 memset(rx_ring->buffer_info, 0, size);
2128
2129
2130 memset(rx_ring->desc, 0, rx_ring->size);
2131
2132 rx_ring->next_to_clean = 0;
2133 rx_ring->next_to_use = 0;
2134
2135 writel(0, hw->hw_addr + rx_ring->rdh);
2136 writel(0, hw->hw_addr + rx_ring->rdt);
2137}
2138
/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
2143static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2144{
2145 int i;
2146
2147 for (i = 0; i < adapter->num_rx_queues; i++)
2148 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2149}
2150
/* The 82542 2.0 (revision A) hardware needs the receive unit held in reset
 * (and MWI disabled) while the RAR and multicast table registers are being
 * updated; these two helpers bracket such updates.
 */
2154static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2155{
2156 struct e1000_hw *hw = &adapter->hw;
2157 struct net_device *netdev = adapter->netdev;
2158 u32 rctl;
2159
2160 e1000_pci_clear_mwi(hw);
2161
2162 rctl = er32(RCTL);
2163 rctl |= E1000_RCTL_RST;
2164 ew32(RCTL, rctl);
2165 E1000_WRITE_FLUSH();
2166 mdelay(5);
2167
2168 if (netif_running(netdev))
2169 e1000_clean_all_rx_rings(adapter);
2170}
2171
2172static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2173{
2174 struct e1000_hw *hw = &adapter->hw;
2175 struct net_device *netdev = adapter->netdev;
2176 u32 rctl;
2177
2178 rctl = er32(RCTL);
2179 rctl &= ~E1000_RCTL_RST;
2180 ew32(RCTL, rctl);
2181 E1000_WRITE_FLUSH();
2182 mdelay(5);
2183
2184 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2185 e1000_pci_set_mwi(hw);
2186
2187 if (netif_running(netdev)) {
2188
2189 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2190 e1000_configure_rx(adapter);
2191 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2192 }
2193}
2194
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
2202static int e1000_set_mac(struct net_device *netdev, void *p)
2203{
2204 struct e1000_adapter *adapter = netdev_priv(netdev);
2205 struct e1000_hw *hw = &adapter->hw;
2206 struct sockaddr *addr = p;
2207
2208 if (!is_valid_ether_addr(addr->sa_data))
2209 return -EADDRNOTAVAIL;
2210
2211
2212
2213 if (hw->mac_type == e1000_82542_rev2_0)
2214 e1000_enter_82542_rst(adapter);
2215
2216 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2217 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2218
2219 e1000_rar_set(hw, hw->mac_addr, 0);
2220
2221 if (hw->mac_type == e1000_82542_rev2_0)
2222 e1000_leave_82542_rst(adapter);
2223
2224 return 0;
2225}
2226
/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and VLAN filtering.
 **/
2236static void e1000_set_rx_mode(struct net_device *netdev)
2237{
2238 struct e1000_adapter *adapter = netdev_priv(netdev);
2239 struct e1000_hw *hw = &adapter->hw;
2240 struct netdev_hw_addr *ha;
2241 bool use_uc = false;
2242 u32 rctl;
2243 u32 hash_value;
2244 int i, rar_entries = E1000_RAR_ENTRIES;
2245 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2246 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2247
2248 if (!mcarray)
2249 return;
2250
2251
2252
2253 rctl = er32(RCTL);
2254
2255 if (netdev->flags & IFF_PROMISC) {
2256 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2257 rctl &= ~E1000_RCTL_VFE;
2258 } else {
2259 if (netdev->flags & IFF_ALLMULTI)
2260 rctl |= E1000_RCTL_MPE;
2261 else
2262 rctl &= ~E1000_RCTL_MPE;
2263
2264 if (e1000_vlan_used(adapter))
2265 rctl |= E1000_RCTL_VFE;
2266 }
2267
2268 if (netdev_uc_count(netdev) > rar_entries - 1) {
2269 rctl |= E1000_RCTL_UPE;
2270 } else if (!(netdev->flags & IFF_PROMISC)) {
2271 rctl &= ~E1000_RCTL_UPE;
2272 use_uc = true;
2273 }
2274
2275 ew32(RCTL, rctl);
2276
2277
2278
2279 if (hw->mac_type == e1000_82542_rev2_0)
2280 e1000_enter_82542_rst(adapter);
2281
2282
2283
2284
2285
2286
2287
2288
2289 i = 1;
2290 if (use_uc)
2291 netdev_for_each_uc_addr(ha, netdev) {
2292 if (i == rar_entries)
2293 break;
2294 e1000_rar_set(hw, ha->addr, i++);
2295 }
2296
2297 netdev_for_each_mc_addr(ha, netdev) {
2298 if (i == rar_entries) {
2299
2300 u32 hash_reg, hash_bit, mta;
2301 hash_value = e1000_hash_mc_addr(hw, ha->addr);
2302 hash_reg = (hash_value >> 5) & 0x7F;
2303 hash_bit = hash_value & 0x1F;
2304 mta = (1 << hash_bit);
2305 mcarray[hash_reg] |= mta;
2306 } else {
2307 e1000_rar_set(hw, ha->addr, i++);
2308 }
2309 }
2310
2311 for (; i < rar_entries; i++) {
2312 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2313 E1000_WRITE_FLUSH();
2314 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2315 E1000_WRITE_FLUSH();
2316 }
2317
2318
2319
2320
2321 for (i = mta_reg_count - 1; i >= 0 ; i--) {
2322
2323
2324
2325
2326
2327 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2328 }
2329 E1000_WRITE_FLUSH();
2330
2331 if (hw->mac_type == e1000_82542_rev2_0)
2332 e1000_leave_82542_rst(adapter);
2333
2334 kfree(mcarray);
2335}
2336
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information
 * from the phy
 **/
2344static void e1000_update_phy_info_task(struct work_struct *work)
2345{
2346 struct e1000_adapter *adapter = container_of(work,
2347 struct e1000_adapter,
2348 phy_info_task.work);
2349
2350 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2351}
2352
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 **/
2357static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2358{
2359 struct e1000_adapter *adapter = container_of(work,
2360 struct e1000_adapter,
2361 fifo_stall_task.work);
2362 struct e1000_hw *hw = &adapter->hw;
2363 struct net_device *netdev = adapter->netdev;
2364 u32 tctl;
2365
2366 if (atomic_read(&adapter->tx_fifo_stall)) {
2367 if ((er32(TDT) == er32(TDH)) &&
2368 (er32(TDFT) == er32(TDFH)) &&
2369 (er32(TDFTS) == er32(TDFHS))) {
2370 tctl = er32(TCTL);
2371 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2372 ew32(TDFT, adapter->tx_head_addr);
2373 ew32(TDFH, adapter->tx_head_addr);
2374 ew32(TDFTS, adapter->tx_head_addr);
2375 ew32(TDFHS, adapter->tx_head_addr);
2376 ew32(TCTL, tctl);
2377 E1000_WRITE_FLUSH();
2378
2379 adapter->tx_fifo_head = 0;
2380 atomic_set(&adapter->tx_fifo_stall, 0);
2381 netif_wake_queue(netdev);
2382 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2383 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2384 }
2385 }
2386}
2387
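/* Report whether the link is up, polling the PHY/SerDes only when the
 * hardware has flagged a link status change (CE4100 always polls).
 */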
2388bool e1000_has_link(struct e1000_adapter *adapter)
2389{
2390 struct e1000_hw *hw = &adapter->hw;
2391 bool link_active = false;
2392
 /* get_link_status is set on LSC (link status) interrupt or rx
  * sequence error interrupt.  get_link_status will stay false until
  * e1000_check_for_link establishes link for copper adapters ONLY
  */
2399 switch (hw->media_type) {
2400 case e1000_media_type_copper:
2401 if (hw->mac_type == e1000_ce4100)
2402 hw->get_link_status = 1;
2403 if (hw->get_link_status) {
2404 e1000_check_for_link(hw);
2405 link_active = !hw->get_link_status;
2406 } else {
2407 link_active = true;
2408 }
2409 break;
2410 case e1000_media_type_fiber:
2411 e1000_check_for_link(hw);
2412 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2413 break;
2414 case e1000_media_type_internal_serdes:
2415 e1000_check_for_link(hw);
2416 link_active = hw->serdes_has_link;
2417 break;
2418 default:
2419 break;
2420 }
2421
2422 return link_active;
2423}
2424
/**
 * e1000_watchdog - work function
 * @work: work struct contained inside adapter structure
 **/
2429static void e1000_watchdog(struct work_struct *work)
2430{
2431 struct e1000_adapter *adapter = container_of(work,
2432 struct e1000_adapter,
2433 watchdog_task.work);
2434 struct e1000_hw *hw = &adapter->hw;
2435 struct net_device *netdev = adapter->netdev;
2436 struct e1000_tx_ring *txdr = adapter->tx_ring;
2437 u32 link, tctl;
2438
2439 link = e1000_has_link(adapter);
2440 if ((netif_carrier_ok(netdev)) && link)
2441 goto link_up;
2442
2443 if (link) {
2444 if (!netif_carrier_ok(netdev)) {
2445 u32 ctrl;
2446 bool txb2b = true;
2447
2448 e1000_get_speed_and_duplex(hw,
2449 &adapter->link_speed,
2450 &adapter->link_duplex);
2451
2452 ctrl = er32(CTRL);
2453 pr_info("%s NIC Link is Up %d Mbps %s, "
2454 "Flow Control: %s\n",
2455 netdev->name,
2456 adapter->link_speed,
2457 adapter->link_duplex == FULL_DUPLEX ?
2458 "Full Duplex" : "Half Duplex",
2459 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2460 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2461 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2462 E1000_CTRL_TFCE) ? "TX" : "None")));
2463
2464
2465 adapter->tx_timeout_factor = 1;
2466 switch (adapter->link_speed) {
2467 case SPEED_10:
2468 txb2b = false;
2469 adapter->tx_timeout_factor = 16;
2470 break;
2471 case SPEED_100:
2472 txb2b = false;
2473
2474 break;
2475 }
2476
2477
2478 tctl = er32(TCTL);
2479 tctl |= E1000_TCTL_EN;
2480 ew32(TCTL, tctl);
2481
2482 netif_carrier_on(netdev);
2483 if (!test_bit(__E1000_DOWN, &adapter->flags))
2484 schedule_delayed_work(&adapter->phy_info_task,
2485 2 * HZ);
2486 adapter->smartspeed = 0;
2487 }
2488 } else {
2489 if (netif_carrier_ok(netdev)) {
2490 adapter->link_speed = 0;
2491 adapter->link_duplex = 0;
2492 pr_info("%s NIC Link is Down\n",
2493 netdev->name);
2494 netif_carrier_off(netdev);
2495
2496 if (!test_bit(__E1000_DOWN, &adapter->flags))
2497 schedule_delayed_work(&adapter->phy_info_task,
2498 2 * HZ);
2499 }
2500
2501 e1000_smartspeed(adapter);
2502 }
2503
2504link_up:
2505 e1000_update_stats(adapter);
2506
2507 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2508 adapter->tpt_old = adapter->stats.tpt;
2509 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2510 adapter->colc_old = adapter->stats.colc;
2511
2512 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2513 adapter->gorcl_old = adapter->stats.gorcl;
2514 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2515 adapter->gotcl_old = adapter->stats.gotcl;
2516
2517 e1000_update_adaptive(hw);
2518
2519 if (!netif_carrier_ok(netdev)) {
2520 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
 /* We've lost link, so the controller stops DMA, but we've got
  * queued Tx work that's never going to get done, so reset
  * controller to flush Tx.
  * (Do the reset outside of interrupt context.)
  */
2526 adapter->tx_timeout_count++;
2527 schedule_work(&adapter->reset_task);
2528
2529 return;
2530 }
2531 }
2532
 /* Simple mode for Interrupt Throttle Rate (ITR) */
2534 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
 /* Symmetric Tx/Rx gets a reduced ITR=2000;
  * Total asymmetrical Tx or Rx gets ITR=8000;
  * everyone else is between 2000-8000
  */
2539 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2540 u32 dif = (adapter->gotcl > adapter->gorcl ?
2541 adapter->gotcl - adapter->gorcl :
2542 adapter->gorcl - adapter->gotcl) / 10000;
2543 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2544
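 /* the ITR register counts in 256 ns units, so convert the target
  * interrupts/sec into that interval
  */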
2545 ew32(ITR, 1000000000 / (itr * 256));
2546 }
2547
 /* Cause software interrupt to ensure rx ring is cleaned */
2549 ew32(ICS, E1000_ICS_RXDMT0);
2550
 /* Force detection of hung controller every watchdog period */
2552 adapter->detect_tx_hung = true;
2553
2554
2555 if (!test_bit(__E1000_DOWN, &adapter->flags))
2556 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2557}
2558
2559enum latency_range {
2560 lowest_latency = 0,
2561 low_latency = 1,
2562 bulk_latency = 2,
2563 latency_invalid = 255
2564};
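
/* e1000_set_itr() maps these ranges to interrupt rates: lowest_latency ->
 * 70000, low_latency -> 20000 and bulk_latency -> 4000 interrupts per second
 */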
2565
/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see e1000_param.c)
 **/
2583static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2584 u16 itr_setting, int packets, int bytes)
2585{
2586 unsigned int retval = itr_setting;
2587 struct e1000_hw *hw = &adapter->hw;
2588
2589 if (unlikely(hw->mac_type < e1000_82540))
2590 goto update_itr_done;
2591
2592 if (packets == 0)
2593 goto update_itr_done;
2594
2595 switch (itr_setting) {
2596 case lowest_latency:
2597
2598 if (bytes/packets > 8000)
2599 retval = bulk_latency;
2600 else if ((packets < 5) && (bytes > 512))
2601 retval = low_latency;
2602 break;
2603 case low_latency:
2604 if (bytes > 10000) {
2605
2606 if (bytes/packets > 8000)
2607 retval = bulk_latency;
2608 else if ((packets < 10) || ((bytes/packets) > 1200))
2609 retval = bulk_latency;
2610 else if ((packets > 35))
2611 retval = lowest_latency;
2612 } else if (bytes/packets > 2000)
2613 retval = bulk_latency;
2614 else if (packets <= 2 && bytes < 512)
2615 retval = lowest_latency;
2616 break;
2617 case bulk_latency:
2618 if (bytes > 25000) {
2619 if (packets > 35)
2620 retval = low_latency;
2621 } else if (bytes < 6000) {
2622 retval = low_latency;
2623 }
2624 break;
2625 }
2626
2627update_itr_done:
2628 return retval;
2629}
2630
2631static void e1000_set_itr(struct e1000_adapter *adapter)
2632{
2633 struct e1000_hw *hw = &adapter->hw;
2634 u16 current_itr;
2635 u32 new_itr = adapter->itr;
2636
2637 if (unlikely(hw->mac_type < e1000_82540))
2638 return;
2639
2640
2641 if (unlikely(adapter->link_speed != SPEED_1000)) {
2642 current_itr = 0;
2643 new_itr = 4000;
2644 goto set_itr_now;
2645 }
2646
2647 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2648 adapter->total_tx_packets,
2649 adapter->total_tx_bytes);
2650
2651 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2652 adapter->tx_itr = low_latency;
2653
2654 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2655 adapter->total_rx_packets,
2656 adapter->total_rx_bytes);
2657
2658 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2659 adapter->rx_itr = low_latency;
2660
2661 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2662
2663 switch (current_itr) {
2664
2665 case lowest_latency:
2666 new_itr = 70000;
2667 break;
2668 case low_latency:
2669 new_itr = 20000;
2670 break;
2671 case bulk_latency:
2672 new_itr = 4000;
2673 break;
2674 default:
2675 break;
2676 }
2677
2678set_itr_now:
2679 if (new_itr != adapter->itr) {
 /* this attempts to bias the interrupt rate towards Bulk by adding
  * intermediate steps when the interrupt rate is increasing
  */
2684 new_itr = new_itr > adapter->itr ?
2685 min(adapter->itr + (new_itr >> 2), new_itr) :
2686 new_itr;
2687 adapter->itr = new_itr;
2688 ew32(ITR, 1000000000 / (new_itr * 256));
2689 }
2690}
2691
2692#define E1000_TX_FLAGS_CSUM 0x00000001
2693#define E1000_TX_FLAGS_VLAN 0x00000002
2694#define E1000_TX_FLAGS_TSO 0x00000004
2695#define E1000_TX_FLAGS_IPV4 0x00000008
2696#define E1000_TX_FLAGS_NO_FCS 0x00000010
2697#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2698#define E1000_TX_FLAGS_VLAN_SHIFT 16
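
/* when E1000_TX_FLAGS_VLAN is set, the VLAN tag to insert is carried in the
 * upper 16 bits of tx_flags
 */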
2699
2700static int e1000_tso(struct e1000_adapter *adapter,
2701 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2702 __be16 protocol)
2703{
2704 struct e1000_context_desc *context_desc;
2705 struct e1000_tx_buffer *buffer_info;
2706 unsigned int i;
2707 u32 cmd_length = 0;
2708 u16 ipcse = 0, tucse, mss;
2709 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2710
2711 if (skb_is_gso(skb)) {
2712 int err;
2713
2714 err = skb_cow_head(skb, 0);
2715 if (err < 0)
2716 return err;
2717
2718 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2719 mss = skb_shinfo(skb)->gso_size;
2720 if (protocol == htons(ETH_P_IP)) {
2721 struct iphdr *iph = ip_hdr(skb);
2722 iph->tot_len = 0;
2723 iph->check = 0;
2724 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2725 iph->daddr, 0,
2726 IPPROTO_TCP,
2727 0);
2728 cmd_length = E1000_TXD_CMD_IP;
2729 ipcse = skb_transport_offset(skb) - 1;
2730 } else if (skb_is_gso_v6(skb)) {
2731 ipv6_hdr(skb)->payload_len = 0;
2732 tcp_hdr(skb)->check =
2733 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2734 &ipv6_hdr(skb)->daddr,
2735 0, IPPROTO_TCP, 0);
2736 ipcse = 0;
2737 }
2738 ipcss = skb_network_offset(skb);
2739 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2740 tucss = skb_transport_offset(skb);
2741 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2742 tucse = 0;
2743
2744 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2745 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2746
2747 i = tx_ring->next_to_use;
2748 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2749 buffer_info = &tx_ring->buffer_info[i];
2750
2751 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2752 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2753 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2754 context_desc->upper_setup.tcp_fields.tucss = tucss;
2755 context_desc->upper_setup.tcp_fields.tucso = tucso;
2756 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2757 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2758 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2759 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2760
2761 buffer_info->time_stamp = jiffies;
2762 buffer_info->next_to_watch = i;
2763
2764 if (++i == tx_ring->count)
2765 i = 0;
2766
2767 tx_ring->next_to_use = i;
2768
2769 return true;
2770 }
2771 return false;
2772}
2773
2774static bool e1000_tx_csum(struct e1000_adapter *adapter,
2775 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2776 __be16 protocol)
2777{
2778 struct e1000_context_desc *context_desc;
2779 struct e1000_tx_buffer *buffer_info;
2780 unsigned int i;
2781 u8 css;
2782 u32 cmd_len = E1000_TXD_CMD_DEXT;
2783
2784 if (skb->ip_summed != CHECKSUM_PARTIAL)
2785 return false;
2786
2787 switch (protocol) {
2788 case cpu_to_be16(ETH_P_IP):
2789 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2790 cmd_len |= E1000_TXD_CMD_TCP;
2791 break;
2792 case cpu_to_be16(ETH_P_IPV6):
2793
2794 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2795 cmd_len |= E1000_TXD_CMD_TCP;
2796 break;
2797 default:
2798 if (unlikely(net_ratelimit()))
2799 e_warn(drv, "checksum_partial proto=%x!\n",
2800 skb->protocol);
2801 break;
2802 }
2803
2804 css = skb_checksum_start_offset(skb);
2805
2806 i = tx_ring->next_to_use;
2807 buffer_info = &tx_ring->buffer_info[i];
2808 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2809
2810 context_desc->lower_setup.ip_config = 0;
2811 context_desc->upper_setup.tcp_fields.tucss = css;
2812 context_desc->upper_setup.tcp_fields.tucso =
2813 css + skb->csum_offset;
2814 context_desc->upper_setup.tcp_fields.tucse = 0;
2815 context_desc->tcp_seg_setup.data = 0;
2816 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2817
2818 buffer_info->time_stamp = jiffies;
2819 buffer_info->next_to_watch = i;
2820
2821 if (unlikely(++i == tx_ring->count))
2822 i = 0;
2823
2824 tx_ring->next_to_use = i;
2825
2826 return true;
2827}
2828
2829#define E1000_MAX_TXD_PWR 12
2830#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
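
/* a single data descriptor can carry at most 4096 bytes; larger buffers are
 * split across several descriptors in e1000_tx_map()
 */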
2831
2832static int e1000_tx_map(struct e1000_adapter *adapter,
2833 struct e1000_tx_ring *tx_ring,
2834 struct sk_buff *skb, unsigned int first,
2835 unsigned int max_per_txd, unsigned int nr_frags,
2836 unsigned int mss)
2837{
2838 struct e1000_hw *hw = &adapter->hw;
2839 struct pci_dev *pdev = adapter->pdev;
2840 struct e1000_tx_buffer *buffer_info;
2841 unsigned int len = skb_headlen(skb);
2842 unsigned int offset = 0, size, count = 0, i;
2843 unsigned int f, bytecount, segs;
2844
2845 i = tx_ring->next_to_use;
2846
2847 while (len) {
2848 buffer_info = &tx_ring->buffer_info[i];
2849 size = min(len, max_per_txd);
2850
 /* Workaround for Controller erratum --
  * descriptor for non-tso packet in a linear SKB that follows a
  * tso gets written back prematurely before the data is fully
  * DMA'd to the controller
  */
2855 if (!skb->data_len && tx_ring->last_tx_tso &&
2856 !skb_is_gso(skb)) {
2857 tx_ring->last_tx_tso = false;
2858 size -= 4;
2859 }
2860
 /* Workaround for premature desc write-backs
  * in TSO mode.  Append 4-byte sentinel desc
  */
2864 if (unlikely(mss && !nr_frags && size == len && size > 8))
2865 size -= 4;
2866
 /* work-around for errata 10 and it applies
  * to all controllers in PCI-X mode
  * The fix is to make sure that the first descriptor of a
  * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
  */
2871 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2872 (size > 2015) && count == 0))
2873 size = 2015;
2874
 /* Workaround for potential 82544 hang in PCI-X.  Avoid
  * terminating buffers within evenly-aligned dwords.
  */
2878 if (unlikely(adapter->pcix_82544 &&
2879 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2880 size > 4))
2881 size -= 4;
2882
2883 buffer_info->length = size;
 /* set time_stamp *before* dma to help avoid a possible race */
2885 buffer_info->time_stamp = jiffies;
2886 buffer_info->mapped_as_page = false;
2887 buffer_info->dma = dma_map_single(&pdev->dev,
2888 skb->data + offset,
2889 size, DMA_TO_DEVICE);
2890 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2891 goto dma_error;
2892 buffer_info->next_to_watch = i;
2893
2894 len -= size;
2895 offset += size;
2896 count++;
2897 if (len) {
2898 i++;
2899 if (unlikely(i == tx_ring->count))
2900 i = 0;
2901 }
2902 }
2903
2904 for (f = 0; f < nr_frags; f++) {
2905 const struct skb_frag_struct *frag;
2906
2907 frag = &skb_shinfo(skb)->frags[f];
2908 len = skb_frag_size(frag);
2909 offset = 0;
2910
2911 while (len) {
2912 unsigned long bufend;
2913 i++;
2914 if (unlikely(i == tx_ring->count))
2915 i = 0;
2916
2917 buffer_info = &tx_ring->buffer_info[i];
2918 size = min(len, max_per_txd);
2919
2920
2921
2922 if (unlikely(mss && f == (nr_frags-1) &&
2923 size == len && size > 8))
2924 size -= 4;
2925
2926
2927
2928
2929 bufend = (unsigned long)
2930 page_to_phys(skb_frag_page(frag));
2931 bufend += offset + size - 1;
2932 if (unlikely(adapter->pcix_82544 &&
2933 !(bufend & 4) &&
2934 size > 4))
2935 size -= 4;
2936
2937 buffer_info->length = size;
2938 buffer_info->time_stamp = jiffies;
2939 buffer_info->mapped_as_page = true;
2940 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2941 offset, size, DMA_TO_DEVICE);
2942 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2943 goto dma_error;
2944 buffer_info->next_to_watch = i;
2945
2946 len -= size;
2947 offset += size;
2948 count++;
2949 }
2950 }
2951
2952 segs = skb_shinfo(skb)->gso_segs ?: 1;
2953
2954 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2955
2956 tx_ring->buffer_info[i].skb = skb;
2957 tx_ring->buffer_info[i].segs = segs;
2958 tx_ring->buffer_info[i].bytecount = bytecount;
2959 tx_ring->buffer_info[first].next_to_watch = i;
2960
2961 return count;
2962
2963dma_error:
2964 dev_err(&pdev->dev, "TX DMA map failed\n");
2965 buffer_info->dma = 0;
2966 if (count)
2967 count--;
2968
2969 while (count--) {
2970 if (i == 0)
2971 i += tx_ring->count;
2972 i--;
2973 buffer_info = &tx_ring->buffer_info[i];
2974 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2975 }
2976
2977 return 0;
2978}
2979
2980static void e1000_tx_queue(struct e1000_adapter *adapter,
2981 struct e1000_tx_ring *tx_ring, int tx_flags,
2982 int count)
2983{
2984 struct e1000_tx_desc *tx_desc = NULL;
2985 struct e1000_tx_buffer *buffer_info;
2986 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2987 unsigned int i;
2988
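 /* E1000_TXD_CMD_IFCS makes the hardware append the Ethernet FCS; the
  * flag tests below add the TSO, checksum-offload and VLAN-insertion
  * bits that every descriptor of this packet will carry
  */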
2989 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2990 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2991 E1000_TXD_CMD_TSE;
2992 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2993
2994 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2995 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2996 }
2997
2998 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2999 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3000 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3001 }
3002
3003 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3004 txd_lower |= E1000_TXD_CMD_VLE;
3005 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3006 }
3007
3008 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3009 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3010
3011 i = tx_ring->next_to_use;
3012
3013 while (count--) {
3014 buffer_info = &tx_ring->buffer_info[i];
3015 tx_desc = E1000_TX_DESC(*tx_ring, i);
3016 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3017 tx_desc->lower.data =
3018 cpu_to_le32(txd_lower | buffer_info->length);
3019 tx_desc->upper.data = cpu_to_le32(txd_upper);
3020 if (unlikely(++i == tx_ring->count))
3021 i = 0;
3022 }
3023
3024 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3025
 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired */
3027 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3028 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3029
 /* Force memory writes to complete before letting h/w
  * know there are new descriptors to fetch.  (Only
  * applicable for weak-ordered memory model archs,
  * such as IA-64).
  */
3035 wmb();
3036
3037 tx_ring->next_to_use = i;
3038}
3039
3040
/* 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time.  This gives the Tx FIFO an opportunity to
 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 */
3048#define E1000_FIFO_HDR 0x10
3049#define E1000_82547_PAD_LEN 0x3E0
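
/* used by e1000_82547_fifo_workaround() below to decide whether a packet
 * must be held back until the Tx FIFO has drained
 */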
3050
3051static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3052 struct sk_buff *skb)
3053{
3054 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3055 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3056
3057 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
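
 /* skb_fifo_len is the FIFO space this frame will consume (frame length
  * plus FIFO header, rounded up); fifo_space is what remains ahead of
  * the current FIFO head
  */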
3058
3059 if (adapter->link_duplex != HALF_DUPLEX)
3060 goto no_fifo_stall_required;
3061
3062 if (atomic_read(&adapter->tx_fifo_stall))
3063 return 1;
3064
3065 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3066 atomic_set(&adapter->tx_fifo_stall, 1);
3067 return 1;
3068 }
3069
3070no_fifo_stall_required:
3071 adapter->tx_fifo_head += skb_fifo_len;
3072 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3073 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3074 return 0;
3075}
3076
3077static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3078{
3079 struct e1000_adapter *adapter = netdev_priv(netdev);
3080 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3081
3082 netif_stop_queue(netdev);
3083
3084
3085
3086
3087 smp_mb();
3088
 /* We need to check again in case another CPU has just
  * made room available.
  */
3092 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3093 return -EBUSY;
3094
3095
3096 netif_start_queue(netdev);
3097 ++adapter->restart_queue;
3098 return 0;
3099}
3100
3101static int e1000_maybe_stop_tx(struct net_device *netdev,
3102 struct e1000_tx_ring *tx_ring, int size)
3103{
3104 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3105 return 0;
3106 return __e1000_maybe_stop_tx(netdev, size);
3107}
3108
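/* number of descriptors needed to map S bytes when one descriptor holds at
 * most 2^X bytes; the +1 allows for a final partial descriptor
 */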
3109#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3111 struct net_device *netdev)
3112{
3113 struct e1000_adapter *adapter = netdev_priv(netdev);
3114 struct e1000_hw *hw = &adapter->hw;
3115 struct e1000_tx_ring *tx_ring;
3116 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3117 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3118 unsigned int tx_flags = 0;
3119 unsigned int len = skb_headlen(skb);
3120 unsigned int nr_frags;
3121 unsigned int mss;
3122 int count = 0;
3123 int tso;
3124 unsigned int f;
3125 __be16 protocol = vlan_get_protocol(skb);
3126
3127
3128
3129
3130
3131
3132 tx_ring = adapter->tx_ring;
3133
 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
  * packets may get corrupted during padding by HW.
  * To WA this issue, pad all small packets manually.
  */
3138 if (eth_skb_pad(skb))
3139 return NETDEV_TX_OK;
3140
3141 mss = skb_shinfo(skb)->gso_size;
3142
 /* The controller does a simple calculation to
  * make sure there is enough room in the FIFO before
  * initiating the DMA for each buffer.  The calc is
  * 4 = ceil(buffer len/mss).  To make sure we don't
  * overrun the FIFO, adjust the max buffer len if mss increases.
  */
3149 if (mss) {
3150 u8 hdr_len;
3151 max_per_txd = min(mss << 2, max_per_txd);
3152 max_txd_pwr = fls(max_per_txd) - 1;
3153
3154 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3155 if (skb->data_len && hdr_len == len) {
3156 switch (hw->mac_type) {
3157 unsigned int pull_size;
3158 case e1000_82544:
 /* Make sure we have room to chop off 4 bytes,
  * and that the end alignment will work out to
  * this hardware's requirements
  * NOTE: this is a TSO only workaround
  * if end byte alignment not correct move us
  * into the next dword
  */
3166 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3167 & 4)
3168 break;
3169
3170 pull_size = min((unsigned int)4, skb->data_len);
3171 if (!__pskb_pull_tail(skb, pull_size)) {
3172 e_err(drv, "__pskb_pull_tail "
3173 "failed.\n");
3174 dev_kfree_skb_any(skb);
3175 return NETDEV_TX_OK;
3176 }
3177 len = skb_headlen(skb);
3178 break;
3179 default:
3180
3181 break;
3182 }
3183 }
3184 }
3185
 /* reserve a descriptor for the offload context */
3187 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3188 count++;
3189 count++;
3190
3191
3192 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3193 count++;
3194
3195 count += TXD_USE_COUNT(len, max_txd_pwr);
3196
3197 if (adapter->pcix_82544)
3198 count++;
3199
 /* work-around for errata 10 and it applies to all controllers
  * in PCI-X mode, so add one more descriptor to the count
  */
3203 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3204 (len > 2015)))
3205 count++;
3206
3207 nr_frags = skb_shinfo(skb)->nr_frags;
3208 for (f = 0; f < nr_frags; f++)
3209 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3210 max_txd_pwr);
3211 if (adapter->pcix_82544)
3212 count += nr_frags;
3213
 /* need: count + 2 desc gap to keep tail from touching
  * head, otherwise try next time
  */
3217 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3218 return NETDEV_TX_BUSY;
3219
3220 if (unlikely((hw->mac_type == e1000_82547) &&
3221 (e1000_82547_fifo_workaround(adapter, skb)))) {
3222 netif_stop_queue(netdev);
3223 if (!test_bit(__E1000_DOWN, &adapter->flags))
3224 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3225 return NETDEV_TX_BUSY;
3226 }
3227
3228 if (skb_vlan_tag_present(skb)) {
3229 tx_flags |= E1000_TX_FLAGS_VLAN;
3230 tx_flags |= (skb_vlan_tag_get(skb) <<
3231 E1000_TX_FLAGS_VLAN_SHIFT);
3232 }
3233
3234 first = tx_ring->next_to_use;
3235
3236 tso = e1000_tso(adapter, tx_ring, skb, protocol);
3237 if (tso < 0) {
3238 dev_kfree_skb_any(skb);
3239 return NETDEV_TX_OK;
3240 }
3241
3242 if (likely(tso)) {
3243 if (likely(hw->mac_type != e1000_82544))
3244 tx_ring->last_tx_tso = true;
3245 tx_flags |= E1000_TX_FLAGS_TSO;
3246 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3247 tx_flags |= E1000_TX_FLAGS_CSUM;
3248
3249 if (protocol == htons(ETH_P_IP))
3250 tx_flags |= E1000_TX_FLAGS_IPV4;
3251
3252 if (unlikely(skb->no_fcs))
3253 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3254
3255 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3256 nr_frags, mss);
3257
3258 if (count) {
3259 netdev_sent_queue(netdev, skb->len);
3260 skb_tx_timestamp(skb);
3261
3262 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3263
3264 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3265
3266 if (!skb->xmit_more ||
3267 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3268 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
 /* we need this if more than one processor can write
  * to our tail at a time, it synchronizes IO on
  * IA64/Altix systems
  */
3273 mmiowb();
3274 }
3275 } else {
3276 dev_kfree_skb_any(skb);
3277 tx_ring->buffer_info[first].time_stamp = 0;
3278 tx_ring->next_to_use = first;
3279 }
3280
3281 return NETDEV_TX_OK;
3282}
3283
3284#define NUM_REGS 38
3285static void e1000_regdump(struct e1000_adapter *adapter)
3286{
3287 struct e1000_hw *hw = &adapter->hw;
3288 u32 regs[NUM_REGS];
3289 u32 *regs_buff = regs;
3290 int i = 0;
3291
3292 static const char * const reg_name[] = {
3293 "CTRL", "STATUS",
3294 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3295 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3296 "TIDV", "TXDCTL", "TADV", "TARC0",
3297 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3298 "TXDCTL1", "TARC1",
3299 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3300 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3301 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3302 };
3303
3304 regs_buff[0] = er32(CTRL);
3305 regs_buff[1] = er32(STATUS);
3306
3307 regs_buff[2] = er32(RCTL);
3308 regs_buff[3] = er32(RDLEN);
3309 regs_buff[4] = er32(RDH);
3310 regs_buff[5] = er32(RDT);
3311 regs_buff[6] = er32(RDTR);
3312
3313 regs_buff[7] = er32(TCTL);
3314 regs_buff[8] = er32(TDBAL);
3315 regs_buff[9] = er32(TDBAH);
3316 regs_buff[10] = er32(TDLEN);
3317 regs_buff[11] = er32(TDH);
3318 regs_buff[12] = er32(TDT);
3319 regs_buff[13] = er32(TIDV);
3320 regs_buff[14] = er32(TXDCTL);
3321 regs_buff[15] = er32(TADV);
3322 regs_buff[16] = er32(TARC0);
3323
3324 regs_buff[17] = er32(TDBAL1);
3325 regs_buff[18] = er32(TDBAH1);
3326 regs_buff[19] = er32(TDLEN1);
3327 regs_buff[20] = er32(TDH1);
3328 regs_buff[21] = er32(TDT1);
3329 regs_buff[22] = er32(TXDCTL1);
3330 regs_buff[23] = er32(TARC1);
3331 regs_buff[24] = er32(CTRL_EXT);
3332 regs_buff[25] = er32(ERT);
3333 regs_buff[26] = er32(RDBAL0);
3334 regs_buff[27] = er32(RDBAH0);
3335 regs_buff[28] = er32(TDFH);
3336 regs_buff[29] = er32(TDFT);
3337 regs_buff[30] = er32(TDFHS);
3338 regs_buff[31] = er32(TDFTS);
3339 regs_buff[32] = er32(TDFPC);
3340 regs_buff[33] = er32(RDFH);
3341 regs_buff[34] = er32(RDFT);
3342 regs_buff[35] = er32(RDFHS);
3343 regs_buff[36] = er32(RDFTS);
3344 regs_buff[37] = er32(RDFPC);
3345
3346 pr_info("Register dump\n");
3347 for (i = 0; i < NUM_REGS; i++)
3348 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3349}
3350
/*
 * e1000_dump: Print registers, tx ring and rx ring
 */
3354static void e1000_dump(struct e1000_adapter *adapter)
3355{
3356
3357 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3358 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3359 int i;
3360
3361 if (!netif_msg_hw(adapter))
3362 return;
3363
3364
3365 e1000_regdump(adapter);
3366
3367
3368 pr_info("TX Desc ring0 dump\n");
 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
  *
  * Legacy Transmit Descriptor
  *   +--------------------------------------------------------------+
  * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
  *   +--------------------------------------------------------------+
  * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
  *   +--------------------------------------------------------------+
  *   63       48 47        36 35    32 31     24 23    16 15        0
  *
  * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
  *   63      48 47    40 39       32 31             16 15    8 7      0
  *   +----------------------------------------------------------------+
  * 0 |  TUCSE  | TUCS0  |   TUCSS  |     IPCSE       | IPCS0 |  IPCSS |
  *   +----------------------------------------------------------------+
  * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
  *   +----------------------------------------------------------------+
  *   63      48 47    40 39 36 35 32 31   24 23  20 19              0
  *
  * Extended Data Descriptor (DTYP=0x1)
  *   +----------------------------------------------------------------+
  * 0 |                     Buffer Address [63:0]                      |
  *   +----------------------------------------------------------------+
  * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
  *   +----------------------------------------------------------------+
  *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
  */
3397 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3398 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3399
3400 if (!netif_msg_tx_done(adapter))
3401 goto rx_ring_summary;
3402
3403 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3404 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3405 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3406 struct my_u { __le64 a; __le64 b; };
3407 struct my_u *u = (struct my_u *)tx_desc;
3408 const char *type;
3409
3410 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3411 type = "NTC/U";
3412 else if (i == tx_ring->next_to_use)
3413 type = "NTU";
3414 else if (i == tx_ring->next_to_clean)
3415 type = "NTC";
3416 else
3417 type = "";
3418
3419 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3420 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3421 le64_to_cpu(u->a), le64_to_cpu(u->b),
3422 (u64)buffer_info->dma, buffer_info->length,
3423 buffer_info->next_to_watch,
3424 (u64)buffer_info->time_stamp, buffer_info->skb, type);
3425 }
3426
3427rx_ring_summary:
3428
3429 pr_info("\nRX Desc ring dump\n");
3430
 /* Legacy Receive Descriptor Format
  *
  * +-----------------------------------------------------+
  * |                Buffer Address [63:0]                 |
  * +-----------------------------------------------------+
  * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
  * +-----------------------------------------------------+
  * 63       48 47    40 39      32 31         16 15      0
  */
3440 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
3441
3442 if (!netif_msg_rx_status(adapter))
3443 goto exit;
3444
3445 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3446 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3447 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3448 struct my_u { __le64 a; __le64 b; };
3449 struct my_u *u = (struct my_u *)rx_desc;
3450 const char *type;
3451
3452 if (i == rx_ring->next_to_use)
3453 type = "NTU";
3454 else if (i == rx_ring->next_to_clean)
3455 type = "NTC";
3456 else
3457 type = "";
3458
3459 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3460 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3461 (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3462 }
3463
3464
3465
3466 pr_info("Rx descriptor cache in 64bit format\n");
3467 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3468 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3469 i,
3470 readl(adapter->hw.hw_addr + i+4),
3471 readl(adapter->hw.hw_addr + i),
3472 readl(adapter->hw.hw_addr + i+12),
3473 readl(adapter->hw.hw_addr + i+8));
3474 }
3475
3476 pr_info("Tx descriptor cache in 64bit format\n");
3477 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3478 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3479 i,
3480 readl(adapter->hw.hw_addr + i+4),
3481 readl(adapter->hw.hw_addr + i),
3482 readl(adapter->hw.hw_addr + i+12),
3483 readl(adapter->hw.hw_addr + i+8));
3484 }
3485exit:
3486 return;
3487}
3488
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
3493static void e1000_tx_timeout(struct net_device *netdev)
3494{
3495 struct e1000_adapter *adapter = netdev_priv(netdev);
3496
3497
3498 adapter->tx_timeout_count++;
3499 schedule_work(&adapter->reset_task);
3500}
3501
3502static void e1000_reset_task(struct work_struct *work)
3503{
3504 struct e1000_adapter *adapter =
3505 container_of(work, struct e1000_adapter, reset_task);
3506
3507 e_err(drv, "Reset adapter\n");
3508 e1000_reinit_locked(adapter);
3509}
3510
/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the watchdog.
 **/
3518static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3519{
3520
3521 return &netdev->stats;
3522}
3523
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
3531static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3532{
3533 struct e1000_adapter *adapter = netdev_priv(netdev);
3534 struct e1000_hw *hw = &adapter->hw;
3535 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3536
3537 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3538 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3539 e_err(probe, "Invalid MTU setting\n");
3540 return -EINVAL;
3541 }
3542
3543
3544 switch (hw->mac_type) {
3545 case e1000_undefined ... e1000_82542_rev2_1:
3546 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3547 e_err(probe, "Jumbo Frames not supported.\n");
3548 return -EINVAL;
3549 }
3550 break;
3551 default:
3552
3553 break;
3554 }
3555
3556 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3557 msleep(1);
3558
3559 hw->max_frame_size = max_frame;
3560 if (netif_running(netdev)) {
 /* prevent buffers from being reallocated */
3562 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3563 e1000_down(adapter);
3564 }
3565
 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
  * means we reserve 2 more, this pushes us to allocate from the next
  * larger slab size.
  * i.e. RXBUFFER_2048 --> size-4096 slab
  * however with the new *_jumbo_rx* routines, jumbo receives will use
  * fragmented skbs
  */
3574 if (max_frame <= E1000_RXBUFFER_2048)
3575 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3576 else
3577#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3578 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3579#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3580 adapter->rx_buffer_len = PAGE_SIZE;
3581#endif
3582
3583
3584 if (!hw->tbi_compatibility_on &&
3585 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3586 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3587 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3588
3589 pr_info("%s changing MTU from %d to %d\n",
3590 netdev->name, netdev->mtu, new_mtu);
3591 netdev->mtu = new_mtu;
3592
3593 if (netif_running(netdev))
3594 e1000_up(adapter);
3595 else
3596 e1000_reset(adapter);
3597
3598 clear_bit(__E1000_RESETTING, &adapter->flags);
3599
3600 return 0;
3601}
3602
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
3607void e1000_update_stats(struct e1000_adapter *adapter)
3608{
3609 struct net_device *netdev = adapter->netdev;
3610 struct e1000_hw *hw = &adapter->hw;
3611 struct pci_dev *pdev = adapter->pdev;
3612 unsigned long flags;
3613 u16 phy_tmp;
3614
3615#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
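 /* the low byte of PHY_1000T_STATUS holds the PHY idle-error count that is
  * accumulated into adapter->phy_stats.idle_errors below
  */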
3616
 /* Prevent stats update while adapter is being reset, or if the pci
  * connection is down.
  */
3620 if (adapter->link_speed == 0)
3621 return;
3622 if (pci_channel_offline(pdev))
3623 return;
3624
3625 spin_lock_irqsave(&adapter->stats_lock, flags);
3626
 /* these counters are modified from e1000_tbi_adjust_stats,
  * called from the interrupt context, so they must only
  * be written while holding adapter->stats_lock
  */

3632 adapter->stats.crcerrs += er32(CRCERRS);
3633 adapter->stats.gprc += er32(GPRC);
3634 adapter->stats.gorcl += er32(GORCL);
3635 adapter->stats.gorch += er32(GORCH);
3636 adapter->stats.bprc += er32(BPRC);
3637 adapter->stats.mprc += er32(MPRC);
3638 adapter->stats.roc += er32(ROC);
3639
3640 adapter->stats.prc64 += er32(PRC64);
3641 adapter->stats.prc127 += er32(PRC127);
3642 adapter->stats.prc255 += er32(PRC255);
3643 adapter->stats.prc511 += er32(PRC511);
3644 adapter->stats.prc1023 += er32(PRC1023);
3645 adapter->stats.prc1522 += er32(PRC1522);
3646
3647 adapter->stats.symerrs += er32(SYMERRS);
3648 adapter->stats.mpc += er32(MPC);
3649 adapter->stats.scc += er32(SCC);
3650 adapter->stats.ecol += er32(ECOL);
3651 adapter->stats.mcc += er32(MCC);
3652 adapter->stats.latecol += er32(LATECOL);
3653 adapter->stats.dc += er32(DC);
3654 adapter->stats.sec += er32(SEC);
3655 adapter->stats.rlec += er32(RLEC);
3656 adapter->stats.xonrxc += er32(XONRXC);
3657 adapter->stats.xontxc += er32(XONTXC);
3658 adapter->stats.xoffrxc += er32(XOFFRXC);
3659 adapter->stats.xofftxc += er32(XOFFTXC);
3660 adapter->stats.fcruc += er32(FCRUC);
3661 adapter->stats.gptc += er32(GPTC);
3662 adapter->stats.gotcl += er32(GOTCL);
3663 adapter->stats.gotch += er32(GOTCH);
3664 adapter->stats.rnbc += er32(RNBC);
3665 adapter->stats.ruc += er32(RUC);
3666 adapter->stats.rfc += er32(RFC);
3667 adapter->stats.rjc += er32(RJC);
3668 adapter->stats.torl += er32(TORL);
3669 adapter->stats.torh += er32(TORH);
3670 adapter->stats.totl += er32(TOTL);
3671 adapter->stats.toth += er32(TOTH);
3672 adapter->stats.tpr += er32(TPR);
3673
3674 adapter->stats.ptc64 += er32(PTC64);
3675 adapter->stats.ptc127 += er32(PTC127);
3676 adapter->stats.ptc255 += er32(PTC255);
3677 adapter->stats.ptc511 += er32(PTC511);
3678 adapter->stats.ptc1023 += er32(PTC1023);
3679 adapter->stats.ptc1522 += er32(PTC1522);
3680
3681 adapter->stats.mptc += er32(MPTC);
3682 adapter->stats.bptc += er32(BPTC);
3683
3684
3685
3686 hw->tx_packet_delta = er32(TPT);
3687 adapter->stats.tpt += hw->tx_packet_delta;
3688 hw->collision_delta = er32(COLC);
3689 adapter->stats.colc += hw->collision_delta;
3690
3691 if (hw->mac_type >= e1000_82543) {
3692 adapter->stats.algnerrc += er32(ALGNERRC);
3693 adapter->stats.rxerrc += er32(RXERRC);
3694 adapter->stats.tncrs += er32(TNCRS);
3695 adapter->stats.cexterr += er32(CEXTERR);
3696 adapter->stats.tsctc += er32(TSCTC);
3697 adapter->stats.tsctfc += er32(TSCTFC);
3698 }
3699
3700
3701 netdev->stats.multicast = adapter->stats.mprc;
3702 netdev->stats.collisions = adapter->stats.colc;
3703
 /* Rx Errors */

 /* RLEC on some newer hardware can be incorrect so build
  * our own version based on RUC and ROC
  */
3709 netdev->stats.rx_errors = adapter->stats.rxerrc +
3710 adapter->stats.crcerrs + adapter->stats.algnerrc +
3711 adapter->stats.ruc + adapter->stats.roc +
3712 adapter->stats.cexterr;
3713 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3714 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3715 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3716 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3717 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3718
3719
3720 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3721 netdev->stats.tx_errors = adapter->stats.txerrc;
3722 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3723 netdev->stats.tx_window_errors = adapter->stats.latecol;
3724 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3725 if (hw->bad_tx_carr_stats_fd &&
3726 adapter->link_duplex == FULL_DUPLEX) {
3727 netdev->stats.tx_carrier_errors = 0;
3728 adapter->stats.tncrs = 0;
3729 }
3730
3731
3732
3733
3734 if (hw->media_type == e1000_media_type_copper) {
3735 if ((adapter->link_speed == SPEED_1000) &&
3736 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3737 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3738 adapter->phy_stats.idle_errors += phy_tmp;
3739 }
3740
3741 if ((hw->mac_type <= e1000_82546) &&
3742 (hw->phy_type == e1000_phy_m88) &&
3743 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3744 adapter->phy_stats.receive_errors += phy_tmp;
3745 }
3746
3747
3748 if (hw->has_smbus) {
3749 adapter->stats.mgptc += er32(MGTPTC);
3750 adapter->stats.mgprc += er32(MGTPRC);
3751 adapter->stats.mgpdc += er32(MGTPDC);
3752 }
3753
3754 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3755}
3756
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
3762static irqreturn_t e1000_intr(int irq, void *data)
3763{
3764 struct net_device *netdev = data;
3765 struct e1000_adapter *adapter = netdev_priv(netdev);
3766 struct e1000_hw *hw = &adapter->hw;
3767 u32 icr = er32(ICR);
3768
3769 if (unlikely((!icr)))
3770 return IRQ_NONE;
3771
 /* we might have caused the interrupt, but the above
  * read cleared it, and just in case the driver is
  * down there is nothing to do so return handled
  */
3776 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3777 return IRQ_HANDLED;
3778
3779 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3780 hw->get_link_status = 1;
3781
3782 if (!test_bit(__E1000_DOWN, &adapter->flags))
3783 schedule_delayed_work(&adapter->watchdog_task, 1);
3784 }
3785
3786
3787 ew32(IMC, ~0);
3788 E1000_WRITE_FLUSH();
3789
3790 if (likely(napi_schedule_prep(&adapter->napi))) {
3791 adapter->total_tx_bytes = 0;
3792 adapter->total_tx_packets = 0;
3793 adapter->total_rx_bytes = 0;
3794 adapter->total_rx_packets = 0;
3795 __napi_schedule(&adapter->napi);
3796 } else {
 /* this really should not happen! if it does it is basically a
  * bug, but not a hard error, so enable ints and continue
  */
3800 if (!test_bit(__E1000_DOWN, &adapter->flags))
3801 e1000_irq_enable(adapter);
3802 }
3803
3804 return IRQ_HANDLED;
3805}
3806
/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: napi struct containing references to driver info
 * @budget: amount of work driver is allowed to do this pass, in packets
 **/
3811static int e1000_clean(struct napi_struct *napi, int budget)
3812{
3813 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3814 napi);
3815 int tx_clean_complete = 0, work_done = 0;
3816
3817 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3818
3819 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3820
3821 if (!tx_clean_complete)
3822 work_done = budget;
3823
3824
3825 if (work_done < budget) {
3826 if (likely(adapter->itr_setting & 3))
3827 e1000_set_itr(adapter);
3828 napi_complete_done(napi, work_done);
3829 if (!test_bit(__E1000_DOWN, &adapter->flags))
3830 e1000_irq_enable(adapter);
3831 }
3832
3833 return work_done;
3834}
3835
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
 **/
3840static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3841 struct e1000_tx_ring *tx_ring)
3842{
3843 struct e1000_hw *hw = &adapter->hw;
3844 struct net_device *netdev = adapter->netdev;
3845 struct e1000_tx_desc *tx_desc, *eop_desc;
3846 struct e1000_tx_buffer *buffer_info;
3847 unsigned int i, eop;
3848 unsigned int count = 0;
3849 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3850 unsigned int bytes_compl = 0, pkts_compl = 0;
3851
3852 i = tx_ring->next_to_clean;
3853 eop = tx_ring->buffer_info[i].next_to_watch;
3854 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3855
3856 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3857 (count < tx_ring->count)) {
3858 bool cleaned = false;
3859 dma_rmb();
3860 for ( ; !cleaned; count++) {
3861 tx_desc = E1000_TX_DESC(*tx_ring, i);
3862 buffer_info = &tx_ring->buffer_info[i];
3863 cleaned = (i == eop);
3864
3865 if (cleaned) {
3866 total_tx_packets += buffer_info->segs;
3867 total_tx_bytes += buffer_info->bytecount;
3868 if (buffer_info->skb) {
3869 bytes_compl += buffer_info->skb->len;
3870 pkts_compl++;
3871 }
3872
3873 }
3874 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3875 tx_desc->upper.data = 0;
3876
3877 if (unlikely(++i == tx_ring->count))
3878 i = 0;
3879 }
3880
3881 eop = tx_ring->buffer_info[i].next_to_watch;
3882 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3883 }
3884
 /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
  * which will reuse the cleaned buffers.
  */
3888 smp_store_release(&tx_ring->next_to_clean, i);
3889
3890 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3891
3892#define TX_WAKE_THRESHOLD 32
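 /* only wake the queue once a reasonable number of descriptors is free
  * again, to avoid bouncing it on every reclaimed descriptor
  */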
3893 if (unlikely(count && netif_carrier_ok(netdev) &&
3894 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
 /* Make sure that anybody stopping the queue after this
  * sees the new next_to_clean.
  */
3898 smp_mb();
3899
3900 if (netif_queue_stopped(netdev) &&
3901 !(test_bit(__E1000_DOWN, &adapter->flags))) {
3902 netif_wake_queue(netdev);
3903 ++adapter->restart_queue;
3904 }
3905 }
3906
3907 if (adapter->detect_tx_hung) {
 /* Detect a transmit hang in hardware, this serializes the
  * check with the clearing of time_stamp and movement of i
  */
3911 adapter->detect_tx_hung = false;
3912 if (tx_ring->buffer_info[eop].time_stamp &&
3913 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3914 (adapter->tx_timeout_factor * HZ)) &&
3915 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3916
3917
3918 e_err(drv, "Detected Tx Unit Hang\n"
3919 " Tx Queue <%lu>\n"
3920 " TDH <%x>\n"
3921 " TDT <%x>\n"
3922 " next_to_use <%x>\n"
3923 " next_to_clean <%x>\n"
3924 "buffer_info[next_to_clean]\n"
3925 " time_stamp <%lx>\n"
3926 " next_to_watch <%x>\n"
3927 " jiffies <%lx>\n"
3928 " next_to_watch.status <%x>\n",
3929 (unsigned long)(tx_ring - adapter->tx_ring),
3930 readl(hw->hw_addr + tx_ring->tdh),
3931 readl(hw->hw_addr + tx_ring->tdt),
3932 tx_ring->next_to_use,
3933 tx_ring->next_to_clean,
3934 tx_ring->buffer_info[eop].time_stamp,
3935 eop,
3936 jiffies,
3937 eop_desc->upper.fields.status);
3938 e1000_dump(adapter);
3939 netif_stop_queue(netdev);
3940 }
3941 }
3942 adapter->total_tx_bytes += total_tx_bytes;
3943 adapter->total_tx_packets += total_tx_packets;
3944 netdev->stats.tx_bytes += total_tx_bytes;
3945 netdev->stats.tx_packets += total_tx_packets;
3946 return count < tx_ring->count;
3947}
3948
/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter:     board private structure
 * @status_err:  receive descriptor status and error fields
 * @csum:        receive descriptor csum field
 * @skb:         socket buffer with received data
 **/
3956static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3957 u32 csum, struct sk_buff *skb)
3958{
3959 struct e1000_hw *hw = &adapter->hw;
3960 u16 status = (u16)status_err;
3961 u8 errors = (u8)(status_err >> 24);
3962
3963 skb_checksum_none_assert(skb);
3964
 /* 82543 or newer only */
 if (unlikely(hw->mac_type < e1000_82543))
 return;

 /* Ignore Checksum bit is set */
 if (unlikely(status & E1000_RXD_STAT_IXSM))
 return;

 /* TCP/UDP checksum error bit is set */
 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
 /* let the stack verify checksum errors */
 adapter->hw_csum_err++;
 return;
 }

 /* TCP/UDP Checksum has not been calculated */
 if (!(status & E1000_RXD_STAT_TCPCS))
 return;

 /* It must be a TCP or UDP packet with a valid checksum */
 if (likely(status & E1000_RXD_STAT_TCPCS)) {
 /* TCP checksum is good */
 skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 adapter->hw_csum_good++;
3987}
3988
3989
3990
3991
3992static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3993 u16 length)
3994{
3995 bi->rxbuf.page = NULL;
3996 skb->len += length;
3997 skb->data_len += length;
3998 skb->truesize += PAGE_SIZE;
3999}
4000
/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 */
4008static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4009 __le16 vlan, struct sk_buff *skb)
4010{
4011 skb->protocol = eth_type_trans(skb, adapter->netdev);
4012
4013 if (status & E1000_RXD_STAT_VP) {
4014 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4015
4016 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4017 }
4018 napi_gro_receive(&adapter->napi, skb);
4019}
4020
/**
 * e1000_tbi_adjust_stats
 * @hw: Struct containing variables accessed by shared code
 * @stats: the hw statistics structure to adjust
 * @frame_len: The length of the frame in question
 * @mac_addr: The Ethernet destination address of the frame in question
 *
 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
 */
4029static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4030 struct e1000_hw_stats *stats,
4031 u32 frame_len, const u8 *mac_addr)
4032{
4033 u64 carry_bit;
4034
4035
4036 frame_len--;
4037
 /* We need to adjust the statistics counters, since the hardware
  * counters overcount this packet as a CRC error and undercount
  * the packet as a good packet
  */
 /* This packet should not be counted as a CRC error */
4042 stats->crcerrs--;
4043
4044 stats->gprc++;
4045
4046
4047 carry_bit = 0x80000000 & stats->gorcl;
4048 stats->gorcl += frame_len;
4049
 /* Is the gorcl crossing the 4GB boundary?  If the top bit was set
  * before the add and is clear afterwards, the low 32-bit counter
  * overflowed, so carry one into the high counter (gorch).
  */
4056 if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4057 stats->gorch++;
4058
 /* Is this a broadcast or multicast?  Check broadcast first,
  * since the test for a multicast frame will test positive on
  * a broadcast frame.
  */
4062 if (is_broadcast_ether_addr(mac_addr))
4063 stats->bprc++;
4064 else if (is_multicast_ether_addr(mac_addr))
4065 stats->mprc++;
4066
4067 if (frame_len == hw->max_frame_size) {
4068
4069
4070
4071 if (stats->roc > 0)
4072 stats->roc--;
4073 }
4074
 /* Adjust the bin counters when the extra byte adjusted frame_len
  * counted the frame as oversized.  Remember that the frame_len was
  * adjusted above.
  */
4078 if (frame_len == 64) {
4079 stats->prc64++;
4080 stats->prc127--;
4081 } else if (frame_len == 127) {
4082 stats->prc127++;
4083 stats->prc255--;
4084 } else if (frame_len == 255) {
4085 stats->prc255++;
4086 stats->prc511--;
4087 } else if (frame_len == 511) {
4088 stats->prc511++;
4089 stats->prc1023--;
4090 } else if (frame_len == 1023) {
4091 stats->prc1023++;
4092 stats->prc1522--;
4093 } else if (frame_len == 1522) {
4094 stats->prc1522++;
4095 }
4096}
4097
4098static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4099 u8 status, u8 errors,
4100 u32 length, const u8 *data)
4101{
4102 struct e1000_hw *hw = &adapter->hw;
4103 u8 last_byte = *(data + length - 1);
4104
4105 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4106 unsigned long irq_flags;
4107
4108 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4109 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4110 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4111
4112 return true;
4113 }
4114
4115 return false;
4116}
4117
4118static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4119 unsigned int bufsz)
4120{
4121 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4122
4123 if (unlikely(!skb))
4124 adapter->alloc_rx_buff_failed++;
4125 return skb;
4126}
4127
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
4138static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4139 struct e1000_rx_ring *rx_ring,
4140 int *work_done, int work_to_do)
4141{
4142 struct net_device *netdev = adapter->netdev;
4143 struct pci_dev *pdev = adapter->pdev;
4144 struct e1000_rx_desc *rx_desc, *next_rxd;
4145 struct e1000_rx_buffer *buffer_info, *next_buffer;
4146 u32 length;
4147 unsigned int i;
4148 int cleaned_count = 0;
4149 bool cleaned = false;
4150 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4151
4152 i = rx_ring->next_to_clean;
4153 rx_desc = E1000_RX_DESC(*rx_ring, i);
4154 buffer_info = &rx_ring->buffer_info[i];
4155
4156 while (rx_desc->status & E1000_RXD_STAT_DD) {
4157 struct sk_buff *skb;
4158 u8 status;
4159
4160 if (*work_done >= work_to_do)
4161 break;
4162 (*work_done)++;
4163 dma_rmb();
4164
4165 status = rx_desc->status;
4166
4167 if (++i == rx_ring->count)
4168 i = 0;
4169
4170 next_rxd = E1000_RX_DESC(*rx_ring, i);
4171 prefetch(next_rxd);
4172
4173 next_buffer = &rx_ring->buffer_info[i];
4174
4175 cleaned = true;
4176 cleaned_count++;
4177 dma_unmap_page(&pdev->dev, buffer_info->dma,
4178 adapter->rx_buffer_len, DMA_FROM_DEVICE);
4179 buffer_info->dma = 0;
4180
4181 length = le16_to_cpu(rx_desc->length);
4182
4183
4184 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4185 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4186 u8 *mapped = page_address(buffer_info->rxbuf.page);
4187
4188 if (e1000_tbi_should_accept(adapter, status,
4189 rx_desc->errors,
4190 length, mapped)) {
4191 length--;
4192 } else if (netdev->features & NETIF_F_RXALL) {
4193 goto process_skb;
4194 } else {
4195
4196
4197
4198 if (rx_ring->rx_skb_top)
4199 dev_kfree_skb(rx_ring->rx_skb_top);
4200 rx_ring->rx_skb_top = NULL;
4201 goto next_desc;
4202 }
4203 }
4204
4205#define rxtop rx_ring->rx_skb_top
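/* rxtop is shorthand for the skb that accumulates the page fragments of a
 * jumbo frame spanning several receive descriptors
 */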
4206process_skb:
4207 if (!(status & E1000_RXD_STAT_EOP)) {
4208
4209 if (!rxtop) {
4210
4211 rxtop = napi_get_frags(&adapter->napi);
4212 if (!rxtop)
4213 break;
4214
4215 skb_fill_page_desc(rxtop, 0,
4216 buffer_info->rxbuf.page,
4217 0, length);
4218 } else {
4219
4220 skb_fill_page_desc(rxtop,
4221 skb_shinfo(rxtop)->nr_frags,
4222 buffer_info->rxbuf.page, 0, length);
4223 }
4224 e1000_consume_page(buffer_info, rxtop, length);
4225 goto next_desc;
4226 } else {
4227 if (rxtop) {
4228
4229 skb_fill_page_desc(rxtop,
4230 skb_shinfo(rxtop)->nr_frags,
4231 buffer_info->rxbuf.page, 0, length);
4232 skb = rxtop;
4233 rxtop = NULL;
4234 e1000_consume_page(buffer_info, skb, length);
4235 } else {
4236 struct page *p;
4237
4238
4239
4240 p = buffer_info->rxbuf.page;
4241 if (length <= copybreak) {
4242 u8 *vaddr;
4243
4244 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4245 length -= 4;
4246 skb = e1000_alloc_rx_skb(adapter,
4247 length);
4248 if (!skb)
4249 break;
4250
4251 vaddr = kmap_atomic(p);
4252 memcpy(skb_tail_pointer(skb), vaddr,
4253 length);
4254 kunmap_atomic(vaddr);
4255
4256
4257
4258 skb_put(skb, length);
4259 e1000_rx_checksum(adapter,
4260 status | rx_desc->errors << 24,
4261 le16_to_cpu(rx_desc->csum), skb);
4262
4263 total_rx_bytes += skb->len;
4264 total_rx_packets++;
4265
4266 e1000_receive_skb(adapter, status,
4267 rx_desc->special, skb);
4268 goto next_desc;
4269 } else {
4270 skb = napi_get_frags(&adapter->napi);
4271 if (!skb) {
4272 adapter->alloc_rx_buff_failed++;
4273 break;
4274 }
4275 skb_fill_page_desc(skb, 0, p, 0,
4276 length);
4277 e1000_consume_page(buffer_info, skb,
4278 length);
4279 }
4280 }
4281 }
4282
4283
4284 e1000_rx_checksum(adapter,
4285 (u32)(status) |
4286 ((u32)(rx_desc->errors) << 24),
4287 le16_to_cpu(rx_desc->csum), skb);
4288
4289 total_rx_bytes += (skb->len - 4);
4290 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4291 pskb_trim(skb, skb->len - 4);
4292 total_rx_packets++;
4293
4294 if (status & E1000_RXD_STAT_VP) {
4295 __le16 vlan = rx_desc->special;
4296 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4297
4298 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4299 }
4300
4301 napi_gro_frags(&adapter->napi);
4302
4303next_desc:
4304 rx_desc->status = 0;
4305
4306
4307 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4308 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4309 cleaned_count = 0;
4310 }
4311
4312
4313 rx_desc = next_rxd;
4314 buffer_info = next_buffer;
4315 }
4316 rx_ring->next_to_clean = i;
4317
4318 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4319 if (cleaned_count)
4320 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4321
4322 adapter->total_rx_packets += total_rx_packets;
4323 adapter->total_rx_bytes += total_rx_bytes;
4324 netdev->stats.rx_bytes += total_rx_bytes;
4325 netdev->stats.rx_packets += total_rx_packets;
4326 return cleaned;
4327}
4328
/* this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
4332static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4333 struct e1000_rx_buffer *buffer_info,
4334 u32 length, const void *data)
4335{
4336 struct sk_buff *skb;
4337
4338 if (length > copybreak)
4339 return NULL;
4340
4341 skb = e1000_alloc_rx_skb(adapter, length);
4342 if (!skb)
4343 return NULL;
4344
4345 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4346 length, DMA_FROM_DEVICE);
4347
4348 memcpy(skb_put(skb, length), data, length);
4349
4350 return skb;
4351}
4352
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
4360static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4361 struct e1000_rx_ring *rx_ring,
4362 int *work_done, int work_to_do)
4363{
4364 struct net_device *netdev = adapter->netdev;
4365 struct pci_dev *pdev = adapter->pdev;
4366 struct e1000_rx_desc *rx_desc, *next_rxd;
4367 struct e1000_rx_buffer *buffer_info, *next_buffer;
4368 u32 length;
4369 unsigned int i;
4370 int cleaned_count = 0;
4371 bool cleaned = false;
4372 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4373
4374 i = rx_ring->next_to_clean;
4375 rx_desc = E1000_RX_DESC(*rx_ring, i);
4376 buffer_info = &rx_ring->buffer_info[i];
4377
4378 while (rx_desc->status & E1000_RXD_STAT_DD) {
4379 struct sk_buff *skb;
4380 u8 *data;
4381 u8 status;
4382
4383 if (*work_done >= work_to_do)
4384 break;
4385 (*work_done)++;
4386 dma_rmb();
4387
4388 status = rx_desc->status;
4389 length = le16_to_cpu(rx_desc->length);
4390
4391 data = buffer_info->rxbuf.data;
4392 prefetch(data);
4393 skb = e1000_copybreak(adapter, buffer_info, length, data);
4394 if (!skb) {
4395 unsigned int frag_len = e1000_frag_len(adapter);
4396
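 /* wrap the existing receive buffer in an skb without copying;
  * the frag was allocated with headroom in front of the packet data
  */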
4397 skb = build_skb(data - E1000_HEADROOM, frag_len);
4398 if (!skb) {
4399 adapter->alloc_rx_buff_failed++;
4400 break;
4401 }
4402
4403 skb_reserve(skb, E1000_HEADROOM);
4404 dma_unmap_single(&pdev->dev, buffer_info->dma,
4405 adapter->rx_buffer_len,
4406 DMA_FROM_DEVICE);
4407 buffer_info->dma = 0;
4408 buffer_info->rxbuf.data = NULL;
4409 }
4410
4411 if (++i == rx_ring->count)
4412 i = 0;
4413
4414 next_rxd = E1000_RX_DESC(*rx_ring, i);
4415 prefetch(next_rxd);
4416
4417 next_buffer = &rx_ring->buffer_info[i];
4418
4419 cleaned = true;
4420 cleaned_count++;
4421
 /* !EOP means multiple descriptors were used to store a single
  * packet, if that's the case we need to toss it.  In fact, we
  * need to toss every packet with the EOP bit clear and the next
  * frame that _does_ have the EOP bit set, as it is by
  * definition only a frame fragment
  */
4428 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4429 adapter->discarding = true;
4430
4431 if (adapter->discarding) {
4432
4433 netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4434 dev_kfree_skb(skb);
4435 if (status & E1000_RXD_STAT_EOP)
4436 adapter->discarding = false;
4437 goto next_desc;
4438 }
4439
4440 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4441 if (e1000_tbi_should_accept(adapter, status,
4442 rx_desc->errors,
4443 length, data)) {
4444 length--;
4445 } else if (netdev->features & NETIF_F_RXALL) {
4446 goto process_skb;
4447 } else {
4448 dev_kfree_skb(skb);
4449 goto next_desc;
4450 }
4451 }
4452
4453process_skb:
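 /* probably a little skewed due to removing CRC */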
4454 total_rx_bytes += (length - 4);
4455 total_rx_packets++;
4456
4457 if (likely(!(netdev->features & NETIF_F_RXFCS)))
 /* adjust length to remove Ethernet CRC, this must be
  * done after the TBI_ACCEPT workaround above
  */
4461 length -= 4;
4462
4463 if (buffer_info->rxbuf.data == NULL)
4464 skb_put(skb, length);
4465 else
4466 skb_trim(skb, length);
4467
4468
4469 e1000_rx_checksum(adapter,
4470 (u32)(status) |
4471 ((u32)(rx_desc->errors) << 24),
4472 le16_to_cpu(rx_desc->csum), skb);
4473
4474 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4475
4476next_desc:
4477 rx_desc->status = 0;
4478
4479
4480 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4481 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4482 cleaned_count = 0;
4483 }
4484
4485
4486 rx_desc = next_rxd;
4487 buffer_info = next_buffer;
4488 }
4489 rx_ring->next_to_clean = i;
4490
4491 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4492 if (cleaned_count)
4493 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4494
4495 adapter->total_rx_packets += total_rx_packets;
4496 adapter->total_rx_bytes += total_rx_bytes;
4497 netdev->stats.rx_bytes += total_rx_bytes;
4498 netdev->stats.rx_packets += total_rx_packets;
4499 return cleaned;
4500}
4501
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
4508static void
4509e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4510 struct e1000_rx_ring *rx_ring, int cleaned_count)
4511{
4512 struct pci_dev *pdev = adapter->pdev;
4513 struct e1000_rx_desc *rx_desc;
4514 struct e1000_rx_buffer *buffer_info;
4515 unsigned int i;
4516
4517 i = rx_ring->next_to_use;
4518 buffer_info = &rx_ring->buffer_info[i];
4519
4520 while (cleaned_count--) {
4521
4522 if (!buffer_info->rxbuf.page) {
4523 buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4524 if (unlikely(!buffer_info->rxbuf.page)) {
4525 adapter->alloc_rx_buff_failed++;
4526 break;
4527 }
4528 }
4529
4530 if (!buffer_info->dma) {
4531 buffer_info->dma = dma_map_page(&pdev->dev,
4532 buffer_info->rxbuf.page, 0,
4533 adapter->rx_buffer_len,
4534 DMA_FROM_DEVICE);
4535 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4536 put_page(buffer_info->rxbuf.page);
4537 buffer_info->rxbuf.page = NULL;
4538 buffer_info->dma = 0;
4539 adapter->alloc_rx_buff_failed++;
4540 break;
4541 }
4542 }
4543
4544 rx_desc = E1000_RX_DESC(*rx_ring, i);
4545 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4546
4547 if (unlikely(++i == rx_ring->count))
4548 i = 0;
4549 buffer_info = &rx_ring->buffer_info[i];
4550 }
4551
4552 if (likely(rx_ring->next_to_use != i)) {
4553 rx_ring->next_to_use = i;
4554 if (unlikely(i-- == 0))
4555 i = (rx_ring->count - 1);
 /* Force memory writes to complete before letting h/w
  * know there are new descriptors to fetch.  (Only
  * applicable for weak-ordered memory model archs,
  * such as IA-64).
  */
4562 wmb();
4563 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4564 }
4565}
4566
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
4571static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4572 struct e1000_rx_ring *rx_ring,
4573 int cleaned_count)
4574{
4575 struct e1000_hw *hw = &adapter->hw;
4576 struct pci_dev *pdev = adapter->pdev;
4577 struct e1000_rx_desc *rx_desc;
4578 struct e1000_rx_buffer *buffer_info;
4579 unsigned int i;
4580 unsigned int bufsz = adapter->rx_buffer_len;
4581
4582 i = rx_ring->next_to_use;
4583 buffer_info = &rx_ring->buffer_info[i];
4584
4585 while (cleaned_count--) {
4586 void *data;
4587
4588 if (buffer_info->rxbuf.data)
4589 goto skip;
4590
4591 data = e1000_alloc_frag(adapter);
4592 if (!data) {
4593
4594 adapter->alloc_rx_buff_failed++;
4595 break;
4596 }
4597
4598
4599 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4600 void *olddata = data;
4601 e_err(rx_err, "skb align check failed: %u bytes at "
4602 "%p\n", bufsz, data);
4603
4604 data = e1000_alloc_frag(adapter);
4605
4606 if (!data) {
4607 skb_free_frag(olddata);
4608 adapter->alloc_rx_buff_failed++;
4609 break;
4610 }
4611
4612 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4613
4614 skb_free_frag(data);
4615 skb_free_frag(olddata);
4616 adapter->alloc_rx_buff_failed++;
4617 break;
4618 }
4619
4620
4621 skb_free_frag(olddata);
4622 }
4623 buffer_info->dma = dma_map_single(&pdev->dev,
4624 data,
4625 adapter->rx_buffer_len,
4626 DMA_FROM_DEVICE);
4627 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4628 skb_free_frag(data);
4629 buffer_info->dma = 0;
4630 adapter->alloc_rx_buff_failed++;
4631 break;
4632 }
4633
 /* XXX if it was allocated cleanly it will never map to a
  * boundary crossing
  */

 /* Fix for errata 23, can't cross 64kB boundary */
4639 if (!e1000_check_64k_bound(adapter,
4640 (void *)(unsigned long)buffer_info->dma,
4641 adapter->rx_buffer_len)) {
4642 e_err(rx_err, "dma align check failed: %u bytes at "
4643 "%p\n", adapter->rx_buffer_len,
4644 (void *)(unsigned long)buffer_info->dma);
4645
4646 dma_unmap_single(&pdev->dev, buffer_info->dma,
4647 adapter->rx_buffer_len,
4648 DMA_FROM_DEVICE);
4649
4650 skb_free_frag(data);
4651 buffer_info->rxbuf.data = NULL;
4652 buffer_info->dma = 0;
4653
4654 adapter->alloc_rx_buff_failed++;
4655 break;
4656 }
4657 buffer_info->rxbuf.data = data;
4658 skip:
4659 rx_desc = E1000_RX_DESC(*rx_ring, i);
4660 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4661
4662 if (unlikely(++i == rx_ring->count))
4663 i = 0;
4664 buffer_info = &rx_ring->buffer_info[i];
4665 }
4666
4667 if (likely(rx_ring->next_to_use != i)) {
4668 rx_ring->next_to_use = i;
4669 if (unlikely(i-- == 0))
4670 i = (rx_ring->count - 1);
4671

 /* Force memory writes to complete before letting h/w
  * know there are new descriptors to fetch.  (Only
  * applicable for weak-ordered memory model archs,
  * such as IA-64).
  */
4677 wmb();
4678 writel(i, hw->hw_addr + rx_ring->rdt);
4679 }
4680}
4681
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers
 * @adapter: address of board private structure
 **/
4686static void e1000_smartspeed(struct e1000_adapter *adapter)
4687{
4688 struct e1000_hw *hw = &adapter->hw;
4689 u16 phy_status;
4690 u16 phy_ctrl;
4691
4692 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4693 !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4694 return;
4695
4696 if (adapter->smartspeed == 0) {
 /* If Master/Slave config fault is asserted twice,
  * we assume back-to-back
  */
4700 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4701 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4702 return;
4703 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4704 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4705 return;
4706 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4707 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4708 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4709 e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4710 phy_ctrl);
4711 adapter->smartspeed++;
4712 if (!e1000_phy_setup_autoneg(hw) &&
4713 !e1000_read_phy_reg(hw, PHY_CTRL,
4714 &phy_ctrl)) {
4715 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4716 MII_CR_RESTART_AUTO_NEG);
4717 e1000_write_phy_reg(hw, PHY_CTRL,
4718 phy_ctrl);
4719 }
4720 }
4721 return;
4722 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4723 /* If still no link, perhaps using 2/3 pair cable */
4724 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4725 phy_ctrl |= CR_1000T_MS_ENABLE;
4726 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4727 if (!e1000_phy_setup_autoneg(hw) &&
4728 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4729 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4730 MII_CR_RESTART_AUTO_NEG);
4731 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4732 }
4733 }
4734 /* Restart the workaround cycle after E1000_SMARTSPEED_MAX iterations */
4735 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4736 adapter->smartspeed = 0;
4737}
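
/* Rough sequence of the workaround above (a summary of the visible state
 * machine, not additional driver logic): while the link is down with
 * 1000BASE-T advertised, a master/slave configuration fault seen on two
 * consecutive status reads clears CR_1000T_MS_ENABLE and restarts
 * autonegotiation; if there is still no link after E1000_SMARTSPEED_DOWNSHIFT
 * polls, manual master/slave mode is switched back on, and the counter wraps
 * at E1000_SMARTSPEED_MAX so the cycle can repeat.
 */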
4738
4739/**
4740 * e1000_ioctl - handle device-specific ioctl requests
4741 * @netdev: pointer to the network interface device structure
4742 * @ifr: interface request data
4743 * @cmd: ioctl command
4744 **/
4745static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4746{
4747 switch (cmd) {
4748 case SIOCGMIIPHY:
4749 case SIOCGMIIREG:
4750 case SIOCSMIIREG:
4751 return e1000_mii_ioctl(netdev, ifr, cmd);
4752 default:
4753 return -EOPNOTSUPP;
4754 }
4755}
4756
4757/**
4758 * e1000_mii_ioctl - perform MII register accesses on the PHY
4759 * @netdev: pointer to the network interface device structure
4760 * @ifr: interface request data describing the MII register access
4761 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4762 **/
4763static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4764 int cmd)
4765{
4766 struct e1000_adapter *adapter = netdev_priv(netdev);
4767 struct e1000_hw *hw = &adapter->hw;
4768 struct mii_ioctl_data *data = if_mii(ifr);
4769 int retval;
4770 u16 mii_reg;
4771 unsigned long flags;
4772
4773 if (hw->media_type != e1000_media_type_copper)
4774 return -EOPNOTSUPP;
4775
4776 switch (cmd) {
4777 case SIOCGMIIPHY:
4778 data->phy_id = hw->phy_addr;
4779 break;
4780 case SIOCGMIIREG:
4781 spin_lock_irqsave(&adapter->stats_lock, flags);
4782 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4783 &data->val_out)) {
4784 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4785 return -EIO;
4786 }
4787 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4788 break;
4789 case SIOCSMIIREG:
4790 if (data->reg_num & ~(0x1F))
4791 return -EFAULT;
4792 mii_reg = data->val_in;
4793 spin_lock_irqsave(&adapter->stats_lock, flags);
4794 if (e1000_write_phy_reg(hw, data->reg_num,
4795 mii_reg)) {
4796 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4797 return -EIO;
4798 }
4799 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4800 if (hw->media_type == e1000_media_type_copper) {
4801 switch (data->reg_num) {
4802 case PHY_CTRL:
4803 if (mii_reg & MII_CR_POWER_DOWN)
4804 break;
4805 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4806 hw->autoneg = 1;
4807 hw->autoneg_advertised = 0x2F;
4808 } else {
4809 u32 speed;
4810 if (mii_reg & 0x40) /* speed select MSB: 1000 Mb/s */
4811 speed = SPEED_1000;
4812 else if (mii_reg & 0x2000) /* speed select LSB: 100 Mb/s */
4813 speed = SPEED_100;
4814 else
4815 speed = SPEED_10;
4816 retval = e1000_set_spd_dplx(
4817 adapter, speed,
4818 ((mii_reg & 0x100)
4819 ? DUPLEX_FULL :
4820 DUPLEX_HALF));
4821 if (retval)
4822 return retval;
4823 }
4824 if (netif_running(adapter->netdev))
4825 e1000_reinit_locked(adapter);
4826 else
4827 e1000_reset(adapter);
4828 break;
4829 case M88E1000_PHY_SPEC_CTRL:
4830 case M88E1000_EXT_PHY_SPEC_CTRL:
4831 if (e1000_phy_reset(hw))
4832 return -EIO;
4833 break;
4834 }
4835 } else {
4836 switch (data->reg_num) {
4837 case PHY_CTRL:
4838 if (mii_reg & MII_CR_POWER_DOWN)
4839 break;
4840 if (netif_running(adapter->netdev))
4841 e1000_reinit_locked(adapter);
4842 else
4843 e1000_reset(adapter);
4844 break;
4845 }
4846 }
4847 break;
4848 default:
4849 return -EOPNOTSUPP;
4850 }
4851 return E1000_SUCCESS;
4852}
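
/* Worked example of the SIOCSMIIREG path above, using a hypothetical value
 * written to the PHY control register (register 0, PHY_CTRL):
 *
 *	data->reg_num = PHY_CTRL;
 *	data->val_in  = 0x2100;	// 100 Mb/s bit (0x2000) + full-duplex bit (0x100)
 *
 * Autonegotiation is not requested (MII_CR_AUTO_NEG_EN is clear), so the
 * handler calls e1000_set_spd_dplx(adapter, SPEED_100, DUPLEX_FULL) and then
 * reinitializes or resets the adapter depending on netif_running().
 */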
4853
4854void e1000_pci_set_mwi(struct e1000_hw *hw)
4855{
4856 struct e1000_adapter *adapter = hw->back;
4857 int ret_val = pci_set_mwi(adapter->pdev);
4858
4859 if (ret_val)
4860 e_err(probe, "Error in setting MWI\n");
4861}
4862
4863void e1000_pci_clear_mwi(struct e1000_hw *hw)
4864{
4865 struct e1000_adapter *adapter = hw->back;
4866
4867 pci_clear_mwi(adapter->pdev);
4868}
4869
4870int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4871{
4872 struct e1000_adapter *adapter = hw->back;
4873 return pcix_get_mmrbc(adapter->pdev);
4874}
4875
4876void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4877{
4878 struct e1000_adapter *adapter = hw->back;
4879 pcix_set_mmrbc(adapter->pdev, mmrbc);
4880}
4881
4882void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4883{
4884 outl(value, port);
4885}
4886
4887static bool e1000_vlan_used(struct e1000_adapter *adapter)
4888{
4889 u16 vid;
4890
4891 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4892 return true;
4893 return false;
4894}
4895
4896static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4897 netdev_features_t features)
4898{
4899 struct e1000_hw *hw = &adapter->hw;
4900 u32 ctrl;
4901
4902 ctrl = er32(CTRL);
4903 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4904 /* enable VLAN tag insert/strip */
4905 ctrl |= E1000_CTRL_VME;
4906 } else {
4907 /* disable VLAN tag insert/strip */
4908 ctrl &= ~E1000_CTRL_VME;
4909 }
4910 ew32(CTRL, ctrl);
4911}
4912static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4913 bool filter_on)
4914{
4915 struct e1000_hw *hw = &adapter->hw;
4916 u32 rctl;
4917
4918 if (!test_bit(__E1000_DOWN, &adapter->flags))
4919 e1000_irq_disable(adapter);
4920
4921 __e1000_vlan_mode(adapter, adapter->netdev->features);
4922 if (filter_on) {
4923 /* enable VLAN receive filtering */
4924 rctl = er32(RCTL);
4925 rctl &= ~E1000_RCTL_CFIEN;
4926 if (!(adapter->netdev->flags & IFF_PROMISC))
4927 rctl |= E1000_RCTL_VFE;
4928 ew32(RCTL, rctl);
4929 e1000_update_mng_vlan(adapter);
4930 } else {
4931 /* disable VLAN receive filtering */
4932 rctl = er32(RCTL);
4933 rctl &= ~E1000_RCTL_VFE;
4934 ew32(RCTL, rctl);
4935 }
4936
4937 if (!test_bit(__E1000_DOWN, &adapter->flags))
4938 e1000_irq_enable(adapter);
4939}
4940
4941static void e1000_vlan_mode(struct net_device *netdev,
4942 netdev_features_t features)
4943{
4944 struct e1000_adapter *adapter = netdev_priv(netdev);
4945
4946 if (!test_bit(__E1000_DOWN, &adapter->flags))
4947 e1000_irq_disable(adapter);
4948
4949 __e1000_vlan_mode(adapter, features);
4950
4951 if (!test_bit(__E1000_DOWN, &adapter->flags))
4952 e1000_irq_enable(adapter);
4953}
4954
4955static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4956 __be16 proto, u16 vid)
4957{
4958 struct e1000_adapter *adapter = netdev_priv(netdev);
4959 struct e1000_hw *hw = &adapter->hw;
4960 u32 vfta, index;
4961
4962 if ((hw->mng_cookie.status &
4963 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4964 (vid == adapter->mng_vlan_id))
4965 return 0;
4966
4967 if (!e1000_vlan_used(adapter))
4968 e1000_vlan_filter_on_off(adapter, true);
4969
4970 /* add VID to filter table */
4971 index = (vid >> 5) & 0x7F;
4972 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4973 vfta |= (1 << (vid & 0x1F));
4974 e1000_write_vfta(hw, index, vfta);
4975
4976 set_bit(vid, adapter->active_vlans);
4977
4978 return 0;
4979}
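
/* The VFTA is a flat 128 x 32-bit bitmap indexed directly by VLAN ID:
 * bits 11:5 of the VID select the register, bits 4:0 select the bit within
 * it. Illustrative arithmetic for vid = 100:
 *
 *	index = (100 >> 5) & 0x7F;	// 3
 *	vfta |= 1 << (100 & 0x1F);	// bit 4 of VFTA[3]
 */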
4980
4981static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4982 __be16 proto, u16 vid)
4983{
4984 struct e1000_adapter *adapter = netdev_priv(netdev);
4985 struct e1000_hw *hw = &adapter->hw;
4986 u32 vfta, index;
4987
4988 if (!test_bit(__E1000_DOWN, &adapter->flags))
4989 e1000_irq_disable(adapter);
4990 if (!test_bit(__E1000_DOWN, &adapter->flags))
4991 e1000_irq_enable(adapter);
4992
4993 /* remove VID from filter table */
4994 index = (vid >> 5) & 0x7F;
4995 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4996 vfta &= ~(1 << (vid & 0x1F));
4997 e1000_write_vfta(hw, index, vfta);
4998
4999 clear_bit(vid, adapter->active_vlans);
5000
5001 if (!e1000_vlan_used(adapter))
5002 e1000_vlan_filter_on_off(adapter, false);
5003
5004 return 0;
5005}
5006
5007static void e1000_restore_vlan(struct e1000_adapter *adapter)
5008{
5009 u16 vid;
5010
5011 if (!e1000_vlan_used(adapter))
5012 return;
5013
5014 e1000_vlan_filter_on_off(adapter, true);
5015 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5016 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5017}
5018
5019int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5020{
5021 struct e1000_hw *hw = &adapter->hw;
5022
5023 hw->autoneg = 0;
5024
5025 /* Make sure dplx is at most 1 bit and lsb of speed is not set
5026 * for the switch() below to work
5027 */
5028 if ((spd & 1) || (dplx & ~1))
5029 goto err_inval;
5030
5031 /* Fiber NICs only allow 1000 Mbps Full duplex */
5032 if ((hw->media_type == e1000_media_type_fiber) &&
5033 spd != SPEED_1000 &&
5034 dplx != DUPLEX_FULL)
5035 goto err_inval;
5036
5037 switch (spd + dplx) {
5038 case SPEED_10 + DUPLEX_HALF:
5039 hw->forced_speed_duplex = e1000_10_half;
5040 break;
5041 case SPEED_10 + DUPLEX_FULL:
5042 hw->forced_speed_duplex = e1000_10_full;
5043 break;
5044 case SPEED_100 + DUPLEX_HALF:
5045 hw->forced_speed_duplex = e1000_100_half;
5046 break;
5047 case SPEED_100 + DUPLEX_FULL:
5048 hw->forced_speed_duplex = e1000_100_full;
5049 break;
5050 case SPEED_1000 + DUPLEX_FULL:
5051 hw->autoneg = 1;
5052 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5053 break;
5054 case SPEED_1000 + DUPLEX_HALF:
5055 default:
5056 goto err_inval;
5057 }
5058
5059 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5060 hw->mdix = AUTO_ALL_MODES;
5061
5062 return 0;
5063
5064err_inval:
5065 e_err(probe, "Unsupported Speed/Duplex configuration\n");
5066 return -EINVAL;
5067}
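
/* Because SPEED_10/100/1000 are even and DUPLEX_HALF/DUPLEX_FULL are 0/1,
 * the sum spd + dplx uniquely identifies every supported combination, e.g.
 * SPEED_100 + DUPLEX_FULL == 101 selects e1000_100_full. 1000 Mbps is only
 * offered full duplex, and only by advertising it through autonegotiation,
 * since forcing gigabit speed is not defined for 1000BASE-T.
 */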
5068
5069static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5070{
5071 struct net_device *netdev = pci_get_drvdata(pdev);
5072 struct e1000_adapter *adapter = netdev_priv(netdev);
5073 struct e1000_hw *hw = &adapter->hw;
5074 u32 ctrl, ctrl_ext, rctl, status;
5075 u32 wufc = adapter->wol;
5076#ifdef CONFIG_PM
5077 int retval = 0;
5078#endif
5079
5080 netif_device_detach(netdev);
5081
5082 if (netif_running(netdev)) {
5083 int count = E1000_CHECK_RESET_COUNT;
5084
5085 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5086 usleep_range(10000, 20000);
5087
5088 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5089 e1000_down(adapter);
5090 }
5091
5092#ifdef CONFIG_PM
5093 retval = pci_save_state(pdev);
5094 if (retval)
5095 return retval;
5096#endif
5097
5098 status = er32(STATUS);
5099 if (status & E1000_STATUS_LU)
5100 wufc &= ~E1000_WUFC_LNKC;
5101
5102 if (wufc) {
5103 e1000_setup_rctl(adapter);
5104 e1000_set_rx_mode(netdev);
5105
5106 rctl = er32(RCTL);
5107
5108 /* turn on all-multi mode if wake on multicast is enabled */
5109 if (wufc & E1000_WUFC_MC)
5110 rctl |= E1000_RCTL_MPE;
5111
5112 /* enable receives in the hardware */
5113 ew32(RCTL, rctl | E1000_RCTL_EN);
5114
5115 if (hw->mac_type >= e1000_82540) {
5116 ctrl = er32(CTRL);
5117 /* advertise wake from D3Cold */
5118 #define E1000_CTRL_ADVD3WUC 0x00100000
5119 /* phy power management enable */
5120 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5121 ctrl |= E1000_CTRL_ADVD3WUC |
5122 E1000_CTRL_EN_PHY_PWR_MGMT;
5123 ew32(CTRL, ctrl);
5124 }
5125
5126 if (hw->media_type == e1000_media_type_fiber ||
5127 hw->media_type == e1000_media_type_internal_serdes) {
5128 /* keep the laser running in D3 */
5129 ctrl_ext = er32(CTRL_EXT);
5130 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5131 ew32(CTRL_EXT, ctrl_ext);
5132 }
5133
5134 ew32(WUC, E1000_WUC_PME_EN);
5135 ew32(WUFC, wufc);
5136 } else {
5137 ew32(WUC, 0);
5138 ew32(WUFC, 0);
5139 }
5140
5141 e1000_release_manageability(adapter);
5142
5143 *enable_wake = !!wufc;
5144
5145 /* make sure adapter isn't asleep if manageability is enabled */
5146 if (adapter->en_mng_pt)
5147 *enable_wake = true;
5148
5149 if (netif_running(netdev))
5150 e1000_free_irq(adapter);
5151
5152 pci_disable_device(pdev);
5153
5154 return 0;
5155}
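
/* Summary of the wake decision above: *enable_wake ends up true when any
 * Wake-on-LAN filter bits remain in wufc (link-change wake is dropped while
 * the link is already up) or when manageability (en_mng_pt) must keep the
 * adapter reachable; the callers below then choose between preparing the
 * device to wake from D3 and simply powering it down.
 */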
5156
5157#ifdef CONFIG_PM
5158static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5159{
5160 int retval;
5161 bool wake;
5162
5163 retval = __e1000_shutdown(pdev, &wake);
5164 if (retval)
5165 return retval;
5166
5167 if (wake) {
5168 pci_prepare_to_sleep(pdev);
5169 } else {
5170 pci_wake_from_d3(pdev, false);
5171 pci_set_power_state(pdev, PCI_D3hot);
5172 }
5173
5174 return 0;
5175}
5176
5177static int e1000_resume(struct pci_dev *pdev)
5178{
5179 struct net_device *netdev = pci_get_drvdata(pdev);
5180 struct e1000_adapter *adapter = netdev_priv(netdev);
5181 struct e1000_hw *hw = &adapter->hw;
5182 int err;
5183
5184 pci_set_power_state(pdev, PCI_D0);
5185 pci_restore_state(pdev);
5186 pci_save_state(pdev);
5187
5188 if (adapter->need_ioport)
5189 err = pci_enable_device(pdev);
5190 else
5191 err = pci_enable_device_mem(pdev);
5192 if (err) {
5193 pr_err("Cannot enable PCI device from suspend\n");
5194 return err;
5195 }
5196 pci_set_master(pdev);
5197
5198 pci_enable_wake(pdev, PCI_D3hot, 0);
5199 pci_enable_wake(pdev, PCI_D3cold, 0);
5200
5201 if (netif_running(netdev)) {
5202 err = e1000_request_irq(adapter);
5203 if (err)
5204 return err;
5205 }
5206
5207 e1000_power_up_phy(adapter);
5208 e1000_reset(adapter);
5209 ew32(WUS, ~0);
5210
5211 e1000_init_manageability(adapter);
5212
5213 if (netif_running(netdev))
5214 e1000_up(adapter);
5215
5216 netif_device_attach(netdev);
5217
5218 return 0;
5219}
5220#endif
5221
5222static void e1000_shutdown(struct pci_dev *pdev)
5223{
5224 bool wake;
5225
5226 __e1000_shutdown(pdev, &wake);
5227
5228 if (system_state == SYSTEM_POWER_OFF) {
5229 pci_wake_from_d3(pdev, wake);
5230 pci_set_power_state(pdev, PCI_D3hot);
5231 }
5232}
5233
5234#ifdef CONFIG_NET_POLL_CONTROLLER
5235/* Polling 'interrupt' - used by things like netconsole to send skbs
5236 * without having to re-enable interrupts. It's not called while
5237 * the interrupt routine is executing.
5238 */
5239static void e1000_netpoll(struct net_device *netdev)
5240{
5241 struct e1000_adapter *adapter = netdev_priv(netdev);
5242
5243 disable_irq(adapter->pdev->irq);
5244 e1000_intr(adapter->pdev->irq, netdev);
5245 enable_irq(adapter->pdev->irq);
5246}
5247#endif
5248
5249/**
5250 * e1000_io_error_detected - called when PCI error is detected
5251 * @pdev: Pointer to PCI device
5252 * @state: The current pci connection state
5253 *
5254 * This function is called after a PCI bus error affecting
5255 * this device has been detected.
5256 */
5257static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5258 pci_channel_state_t state)
5259{
5260 struct net_device *netdev = pci_get_drvdata(pdev);
5261 struct e1000_adapter *adapter = netdev_priv(netdev);
5262
5263 netif_device_detach(netdev);
5264
5265 if (state == pci_channel_io_perm_failure)
5266 return PCI_ERS_RESULT_DISCONNECT;
5267
5268 if (netif_running(netdev))
5269 e1000_down(adapter);
5270 pci_disable_device(pdev);
5271
5272 /* Request a slot reset. */
5273 return PCI_ERS_RESULT_NEED_RESET;
5274}
5275
5276/**
5277 * e1000_io_slot_reset - called after the pci bus has been reset.
5278 * @pdev: Pointer to PCI device
5279 *
5280 * Restart the card from scratch, as if from a cold-boot. Implementation
5281 * resembles the first half of the e1000_resume routine.
5282 */
5283static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5284{
5285 struct net_device *netdev = pci_get_drvdata(pdev);
5286 struct e1000_adapter *adapter = netdev_priv(netdev);
5287 struct e1000_hw *hw = &adapter->hw;
5288 int err;
5289
5290 if (adapter->need_ioport)
5291 err = pci_enable_device(pdev);
5292 else
5293 err = pci_enable_device_mem(pdev);
5294 if (err) {
5295 pr_err("Cannot re-enable PCI device after reset.\n");
5296 return PCI_ERS_RESULT_DISCONNECT;
5297 }
5298 pci_set_master(pdev);
5299
5300 pci_enable_wake(pdev, PCI_D3hot, 0);
5301 pci_enable_wake(pdev, PCI_D3cold, 0);
5302
5303 e1000_reset(adapter);
5304 ew32(WUS, ~0);
5305
5306 return PCI_ERS_RESULT_RECOVERED;
5307}
5308
5309/**
5310 * e1000_io_resume - called when traffic can start flowing again.
5311 * @pdev: Pointer to PCI device
5312 *
5313 * This callback is called when the error recovery driver tells us that
5314 * it is OK to resume normal operation. Implementation resembles the
5315 * second half of the e1000_resume routine.
5316 */
5317static void e1000_io_resume(struct pci_dev *pdev)
5318{
5319 struct net_device *netdev = pci_get_drvdata(pdev);
5320 struct e1000_adapter *adapter = netdev_priv(netdev);
5321
5322 e1000_init_manageability(adapter);
5323
5324 if (netif_running(netdev)) {
5325 if (e1000_up(adapter)) {
5326 pr_info("can't bring device back up after reset\n");
5327 return;
5328 }
5329 }
5330
5331 netif_device_attach(netdev);
5332}
5333
5334
5335