linux/drivers/net/ethernet/intel/e1000/e1000_main.c
   1/*******************************************************************************
   2
   3  Intel PRO/1000 Linux driver
   4  Copyright(c) 1999 - 2006 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29#include "e1000.h"
  30#include <net/ip6_checksum.h>
  31#include <linux/io.h>
  32#include <linux/prefetch.h>
  33#include <linux/bitops.h>
  34#include <linux/if_vlan.h>
  35
  36char e1000_driver_name[] = "e1000";
  37static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
  38#define DRV_VERSION "7.3.21-k8-NAPI"
  39const char e1000_driver_version[] = DRV_VERSION;
  40static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  41
  42/* e1000_pci_tbl - PCI Device ID Table
  43 *
  44 * Last entry must be all 0s
  45 *
  46 * Macro expands to...
  47 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  48 */
  49static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
  50        INTEL_E1000_ETHERNET_DEVICE(0x1000),
  51        INTEL_E1000_ETHERNET_DEVICE(0x1001),
  52        INTEL_E1000_ETHERNET_DEVICE(0x1004),
  53        INTEL_E1000_ETHERNET_DEVICE(0x1008),
  54        INTEL_E1000_ETHERNET_DEVICE(0x1009),
  55        INTEL_E1000_ETHERNET_DEVICE(0x100C),
  56        INTEL_E1000_ETHERNET_DEVICE(0x100D),
  57        INTEL_E1000_ETHERNET_DEVICE(0x100E),
  58        INTEL_E1000_ETHERNET_DEVICE(0x100F),
  59        INTEL_E1000_ETHERNET_DEVICE(0x1010),
  60        INTEL_E1000_ETHERNET_DEVICE(0x1011),
  61        INTEL_E1000_ETHERNET_DEVICE(0x1012),
  62        INTEL_E1000_ETHERNET_DEVICE(0x1013),
  63        INTEL_E1000_ETHERNET_DEVICE(0x1014),
  64        INTEL_E1000_ETHERNET_DEVICE(0x1015),
  65        INTEL_E1000_ETHERNET_DEVICE(0x1016),
  66        INTEL_E1000_ETHERNET_DEVICE(0x1017),
  67        INTEL_E1000_ETHERNET_DEVICE(0x1018),
  68        INTEL_E1000_ETHERNET_DEVICE(0x1019),
  69        INTEL_E1000_ETHERNET_DEVICE(0x101A),
  70        INTEL_E1000_ETHERNET_DEVICE(0x101D),
  71        INTEL_E1000_ETHERNET_DEVICE(0x101E),
  72        INTEL_E1000_ETHERNET_DEVICE(0x1026),
  73        INTEL_E1000_ETHERNET_DEVICE(0x1027),
  74        INTEL_E1000_ETHERNET_DEVICE(0x1028),
  75        INTEL_E1000_ETHERNET_DEVICE(0x1075),
  76        INTEL_E1000_ETHERNET_DEVICE(0x1076),
  77        INTEL_E1000_ETHERNET_DEVICE(0x1077),
  78        INTEL_E1000_ETHERNET_DEVICE(0x1078),
  79        INTEL_E1000_ETHERNET_DEVICE(0x1079),
  80        INTEL_E1000_ETHERNET_DEVICE(0x107A),
  81        INTEL_E1000_ETHERNET_DEVICE(0x107B),
  82        INTEL_E1000_ETHERNET_DEVICE(0x107C),
  83        INTEL_E1000_ETHERNET_DEVICE(0x108A),
  84        INTEL_E1000_ETHERNET_DEVICE(0x1099),
  85        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
  86        INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
  87        /* required last entry */
  88        {0,}
  89};
  90
  91MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  92
  93int e1000_up(struct e1000_adapter *adapter);
  94void e1000_down(struct e1000_adapter *adapter);
  95void e1000_reinit_locked(struct e1000_adapter *adapter);
  96void e1000_reset(struct e1000_adapter *adapter);
  97int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
  98int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
  99void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
 100void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
 101static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 102                             struct e1000_tx_ring *txdr);
 103static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 104                             struct e1000_rx_ring *rxdr);
 105static void e1000_free_tx_resources(struct e1000_adapter *adapter,
 106                             struct e1000_tx_ring *tx_ring);
 107static void e1000_free_rx_resources(struct e1000_adapter *adapter,
 108                             struct e1000_rx_ring *rx_ring);
 109void e1000_update_stats(struct e1000_adapter *adapter);
 110
 111static int e1000_init_module(void);
 112static void e1000_exit_module(void);
 113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 114static void e1000_remove(struct pci_dev *pdev);
 115static int e1000_alloc_queues(struct e1000_adapter *adapter);
 116static int e1000_sw_init(struct e1000_adapter *adapter);
 117static int e1000_open(struct net_device *netdev);
 118static int e1000_close(struct net_device *netdev);
 119static void e1000_configure_tx(struct e1000_adapter *adapter);
 120static void e1000_configure_rx(struct e1000_adapter *adapter);
 121static void e1000_setup_rctl(struct e1000_adapter *adapter);
 122static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
 123static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
 124static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 125                                struct e1000_tx_ring *tx_ring);
 126static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 127                                struct e1000_rx_ring *rx_ring);
 128static void e1000_set_rx_mode(struct net_device *netdev);
 129static void e1000_update_phy_info_task(struct work_struct *work);
 130static void e1000_watchdog(struct work_struct *work);
 131static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 132static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 133                                    struct net_device *netdev);
 134static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 136static int e1000_set_mac(struct net_device *netdev, void *p);
 137static irqreturn_t e1000_intr(int irq, void *data);
 138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 139                               struct e1000_tx_ring *tx_ring);
 140static int e1000_clean(struct napi_struct *napi, int budget);
 141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 142                               struct e1000_rx_ring *rx_ring,
 143                               int *work_done, int work_to_do);
 144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 145                                     struct e1000_rx_ring *rx_ring,
 146                                     int *work_done, int work_to_do);
 147static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 148                                   struct e1000_rx_ring *rx_ring,
 149                                   int cleaned_count);
 150static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 151                                         struct e1000_rx_ring *rx_ring,
 152                                         int cleaned_count);
 153static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 154static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 155                           int cmd);
 156static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 157static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 158static void e1000_tx_timeout(struct net_device *dev);
 159static void e1000_reset_task(struct work_struct *work);
 160static void e1000_smartspeed(struct e1000_adapter *adapter);
 161static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 162                                       struct sk_buff *skb);
 163
 164static bool e1000_vlan_used(struct e1000_adapter *adapter);
 165static void e1000_vlan_mode(struct net_device *netdev,
 166                            netdev_features_t features);
 167static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
 168                                     bool filter_on);
 169static int e1000_vlan_rx_add_vid(struct net_device *netdev,
 170                                 __be16 proto, u16 vid);
 171static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
 172                                  __be16 proto, u16 vid);
 173static void e1000_restore_vlan(struct e1000_adapter *adapter);
 174
 175#ifdef CONFIG_PM
 176static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 177static int e1000_resume(struct pci_dev *pdev);
 178#endif
 179static void e1000_shutdown(struct pci_dev *pdev);
 180
 181#ifdef CONFIG_NET_POLL_CONTROLLER
 182/* for netdump / net console */
 183static void e1000_netpoll(struct net_device *netdev);
 184#endif
 185
 186#define COPYBREAK_DEFAULT 256
 187static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
 188module_param(copybreak, uint, 0644);
 189MODULE_PARM_DESC(copybreak,
 190        "Maximum size of packet that is copied to a new buffer on receive");
 191
 192static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 193                     pci_channel_state_t state);
 194static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
 195static void e1000_io_resume(struct pci_dev *pdev);
 196
 197static const struct pci_error_handlers e1000_err_handler = {
 198        .error_detected = e1000_io_error_detected,
 199        .slot_reset = e1000_io_slot_reset,
 200        .resume = e1000_io_resume,
 201};
 202
 203static struct pci_driver e1000_driver = {
 204        .name     = e1000_driver_name,
 205        .id_table = e1000_pci_tbl,
 206        .probe    = e1000_probe,
 207        .remove   = e1000_remove,
 208#ifdef CONFIG_PM
 209        /* Power Management Hooks */
 210        .suspend  = e1000_suspend,
 211        .resume   = e1000_resume,
 212#endif
 213        .shutdown = e1000_shutdown,
 214        .err_handler = &e1000_err_handler
 215};
 216
 217MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 218MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 219MODULE_LICENSE("GPL");
 220MODULE_VERSION(DRV_VERSION);
 221
 222#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 223static int debug = -1;
 224module_param(debug, int, 0);
 225MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
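/* A note on the default: netif_msg_init() treats a negative debug value as
 * "use the driver default", so msg_enable falls back to DEFAULT_MSG_ENABLE
 * (driver, probe and link messages).  A value in the 0..16 range is treated
 * as a level and expanded to roughly (1 << debug) - 1 message-type bits.
 */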
 226
 227/**
 228 * e1000_get_hw_dev - return device
 229 * @hw: board's hardware structure
 230 * used by the hardware layer to print debugging information
 231 **/
 232struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
 233{
 234        struct e1000_adapter *adapter = hw->back;
 235        return adapter->netdev;
 236}
 237
 238/**
 239 * e1000_init_module - Driver Registration Routine
 240 *
 241 * e1000_init_module is the first routine called when the driver is
 242 * loaded. All it does is register with the PCI subsystem.
 243 **/
 244static int __init e1000_init_module(void)
 245{
 246        int ret;
 247        pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
 248
 249        pr_info("%s\n", e1000_copyright);
 250
 251        ret = pci_register_driver(&e1000_driver);
 252        if (copybreak != COPYBREAK_DEFAULT) {
 253                if (copybreak == 0)
 254                        pr_info("copybreak disabled\n");
 255                else
 256                        pr_info("copybreak enabled for "
 257                                   "packets <= %u bytes\n", copybreak);
 258        }
 259        return ret;
 260}
 261
 262module_init(e1000_init_module);
 263
 264/**
 265 * e1000_exit_module - Driver Exit Cleanup Routine
 266 *
 267 * e1000_exit_module is called just before the driver is removed
 268 * from memory.
 269 **/
 270static void __exit e1000_exit_module(void)
 271{
 272        pci_unregister_driver(&e1000_driver);
 273}
 274
 275module_exit(e1000_exit_module);
 276
 277static int e1000_request_irq(struct e1000_adapter *adapter)
 278{
 279        struct net_device *netdev = adapter->netdev;
 280        irq_handler_t handler = e1000_intr;
 281        int irq_flags = IRQF_SHARED;
 282        int err;
 283
 284        err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 285                          netdev);
 286        if (err) {
 287                e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
 288        }
 289
 290        return err;
 291}
 292
 293static void e1000_free_irq(struct e1000_adapter *adapter)
 294{
 295        struct net_device *netdev = adapter->netdev;
 296
 297        free_irq(adapter->pdev->irq, netdev);
 298}
 299
 300/**
 301 * e1000_irq_disable - Mask off interrupt generation on the NIC
 302 * @adapter: board private structure
 303 **/
 304static void e1000_irq_disable(struct e1000_adapter *adapter)
 305{
 306        struct e1000_hw *hw = &adapter->hw;
 307
 308        ew32(IMC, ~0);
 309        E1000_WRITE_FLUSH();
 310        synchronize_irq(adapter->pdev->irq);
 311}
 312
 313/**
 314 * e1000_irq_enable - Enable default interrupt generation settings
 315 * @adapter: board private structure
 316 **/
 317static void e1000_irq_enable(struct e1000_adapter *adapter)
 318{
 319        struct e1000_hw *hw = &adapter->hw;
 320
 321        ew32(IMS, IMS_ENABLE_MASK);
 322        E1000_WRITE_FLUSH();
 323}
 324
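/**
 * e1000_update_mng_vlan - keep the manageability VLAN in the VLAN filter
 * @adapter: board private structure
 *
 * If VLANs are in use and the manageability (DHCP cookie) VLAN is not yet
 * registered, register it and remember it in adapter->mng_vlan_id; when the
 * manageability VLAN changes, the old id is dropped unless the stack still
 * has it registered.
 */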
 325static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
 326{
 327        struct e1000_hw *hw = &adapter->hw;
 328        struct net_device *netdev = adapter->netdev;
 329        u16 vid = hw->mng_cookie.vlan_id;
 330        u16 old_vid = adapter->mng_vlan_id;
 331
 332        if (!e1000_vlan_used(adapter))
 333                return;
 334
 335        if (!test_bit(vid, adapter->active_vlans)) {
 336                if (hw->mng_cookie.status &
 337                    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 338                        e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
 339                        adapter->mng_vlan_id = vid;
 340                } else {
 341                        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 342                }
 343                if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
 344                    (vid != old_vid) &&
 345                    !test_bit(old_vid, adapter->active_vlans))
 346                        e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
 347                                               old_vid);
 348        } else {
 349                adapter->mng_vlan_id = vid;
 350        }
 351}
 352
 353static void e1000_init_manageability(struct e1000_adapter *adapter)
 354{
 355        struct e1000_hw *hw = &adapter->hw;
 356
 357        if (adapter->en_mng_pt) {
 358                u32 manc = er32(MANC);
 359
 360                /* disable hardware interception of ARP */
 361                manc &= ~(E1000_MANC_ARP_EN);
 362
 363                ew32(MANC, manc);
 364        }
 365}
 366
 367static void e1000_release_manageability(struct e1000_adapter *adapter)
 368{
 369        struct e1000_hw *hw = &adapter->hw;
 370
 371        if (adapter->en_mng_pt) {
 372                u32 manc = er32(MANC);
 373
 374                /* re-enable hardware interception of ARP */
 375                manc |= E1000_MANC_ARP_EN;
 376
 377                ew32(MANC, manc);
 378        }
 379}
 380
 381/**
 382 * e1000_configure - configure the hardware for RX and TX
 383 * @adapter: board private structure
 384 **/
 385static void e1000_configure(struct e1000_adapter *adapter)
 386{
 387        struct net_device *netdev = adapter->netdev;
 388        int i;
 389
 390        e1000_set_rx_mode(netdev);
 391
 392        e1000_restore_vlan(adapter);
 393        e1000_init_manageability(adapter);
 394
 395        e1000_configure_tx(adapter);
 396        e1000_setup_rctl(adapter);
 397        e1000_configure_rx(adapter);
 398        /* call E1000_DESC_UNUSED which always leaves
 399         * at least 1 descriptor unused to make sure
 400         * next_to_use != next_to_clean
 401         */
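        /* For reference, E1000_DESC_UNUSED (e1000.h) evaluates to roughly
         *
         *   (next_to_clean > next_to_use ? 0 : count)
         *       + next_to_clean - next_to_use - 1
         *
         * i.e. the number of free descriptors minus one, so the ring can
         * never be filled completely.
         */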
 402        for (i = 0; i < adapter->num_rx_queues; i++) {
 403                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
 404                adapter->alloc_rx_buf(adapter, ring,
 405                                      E1000_DESC_UNUSED(ring));
 406        }
 407}
 408
 409int e1000_up(struct e1000_adapter *adapter)
 410{
 411        struct e1000_hw *hw = &adapter->hw;
 412
 413        /* hardware has been reset, we need to reload some things */
 414        e1000_configure(adapter);
 415
 416        clear_bit(__E1000_DOWN, &adapter->flags);
 417
 418        napi_enable(&adapter->napi);
 419
 420        e1000_irq_enable(adapter);
 421
 422        netif_wake_queue(adapter->netdev);
 423
 424        /* fire a link change interrupt to start the watchdog */
 425        ew32(ICS, E1000_ICS_LSC);
 426        return 0;
 427}
 428
 429/**
 430 * e1000_power_up_phy - restore link in case the phy was powered down
 431 * @adapter: address of board private structure
 432 *
 433 * The phy may be powered down to save power and turn off link when the
 434 * driver is unloaded and wake on lan is not enabled (among others)
 435 * *** this routine MUST be followed by a call to e1000_reset ***
 436 **/
 437void e1000_power_up_phy(struct e1000_adapter *adapter)
 438{
 439        struct e1000_hw *hw = &adapter->hw;
 440        u16 mii_reg = 0;
 441
 442        /* Just clear the power down bit to wake the phy back up */
 443        if (hw->media_type == e1000_media_type_copper) {
 444                /* according to the manual, the phy will retain its
 445                 * settings across a power-down/up cycle
 446                 */
 447                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 448                mii_reg &= ~MII_CR_POWER_DOWN;
 449                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 450        }
 451}
 452
 453static void e1000_power_down_phy(struct e1000_adapter *adapter)
 454{
 455        struct e1000_hw *hw = &adapter->hw;
 456
 457        /* Power down the PHY so no link is implied when interface is down.
 458         * The PHY cannot be powered down if any of the following is true:
 459         * (a) WoL is enabled
 460         * (b) AMT is active
 461         * (c) SoL/IDER session is active
 462         */
 463        if (!adapter->wol && hw->mac_type >= e1000_82540 &&
 464           hw->media_type == e1000_media_type_copper) {
 465                u16 mii_reg = 0;
 466
 467                switch (hw->mac_type) {
 468                case e1000_82540:
 469                case e1000_82545:
 470                case e1000_82545_rev_3:
 471                case e1000_82546:
 472                case e1000_ce4100:
 473                case e1000_82546_rev_3:
 474                case e1000_82541:
 475                case e1000_82541_rev_2:
 476                case e1000_82547:
 477                case e1000_82547_rev_2:
 478                        if (er32(MANC) & E1000_MANC_SMBUS_EN)
 479                                goto out;
 480                        break;
 481                default:
 482                        goto out;
 483                }
 484                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 485                mii_reg |= MII_CR_POWER_DOWN;
 486                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 487                msleep(1);
 488        }
 489out:
 490        return;
 491}
 492
 493static void e1000_down_and_stop(struct e1000_adapter *adapter)
 494{
 495        set_bit(__E1000_DOWN, &adapter->flags);
 496
 497        cancel_delayed_work_sync(&adapter->watchdog_task);
 498
 499        /*
 500         * Since the watchdog task can reschedule other tasks, we should cancel
 501         * it first, otherwise we can run into the situation when a work is
 502         * still running after the adapter has been turned down.
 503         */
 504
 505        cancel_delayed_work_sync(&adapter->phy_info_task);
 506        cancel_delayed_work_sync(&adapter->fifo_stall_task);
 507
 508        /* Only kill reset task if adapter is not resetting */
 509        if (!test_bit(__E1000_RESETTING, &adapter->flags))
 510                cancel_work_sync(&adapter->reset_task);
 511}
 512
 513void e1000_down(struct e1000_adapter *adapter)
 514{
 515        struct e1000_hw *hw = &adapter->hw;
 516        struct net_device *netdev = adapter->netdev;
 517        u32 rctl, tctl;
 518
 519
 520        /* disable receives in the hardware */
 521        rctl = er32(RCTL);
 522        ew32(RCTL, rctl & ~E1000_RCTL_EN);
 523        /* flush and sleep below */
 524
 525        netif_tx_disable(netdev);
 526
 527        /* disable transmits in the hardware */
 528        tctl = er32(TCTL);
 529        tctl &= ~E1000_TCTL_EN;
 530        ew32(TCTL, tctl);
 531        /* flush both disables and wait for them to finish */
 532        E1000_WRITE_FLUSH();
 533        msleep(10);
 534
 535        napi_disable(&adapter->napi);
 536
 537        e1000_irq_disable(adapter);
 538
 539        /* Setting DOWN must be after irq_disable to prevent
 540         * a screaming interrupt.  Setting DOWN also prevents
 541         * tasks from rescheduling.
 542         */
 543        e1000_down_and_stop(adapter);
 544
 545        adapter->link_speed = 0;
 546        adapter->link_duplex = 0;
 547        netif_carrier_off(netdev);
 548
 549        e1000_reset(adapter);
 550        e1000_clean_all_tx_rings(adapter);
 551        e1000_clean_all_rx_rings(adapter);
 552}
 553
 554void e1000_reinit_locked(struct e1000_adapter *adapter)
 555{
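        /* __E1000_RESETTING acts as a simple sleeping lock here: spin with
         * msleep() until the bit can be taken so that concurrent resets
         * (reset task, ethtool, MTU or feature changes) are serialized.
         */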
 556        WARN_ON(in_interrupt());
 557        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 558                msleep(1);
 559        e1000_down(adapter);
 560        e1000_up(adapter);
 561        clear_bit(__E1000_RESETTING, &adapter->flags);
 562}
 563
 564void e1000_reset(struct e1000_adapter *adapter)
 565{
 566        struct e1000_hw *hw = &adapter->hw;
 567        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 568        bool legacy_pba_adjust = false;
 569        u16 hwm;
 570
 571        /* Repartition the PBA for MTUs greater than 9k.
 572         * CTRL.RST is required for the change to take effect.
 573         */
 574
 575        switch (hw->mac_type) {
 576        case e1000_82542_rev2_0:
 577        case e1000_82542_rev2_1:
 578        case e1000_82543:
 579        case e1000_82544:
 580        case e1000_82540:
 581        case e1000_82541:
 582        case e1000_82541_rev_2:
 583                legacy_pba_adjust = true;
 584                pba = E1000_PBA_48K;
 585                break;
 586        case e1000_82545:
 587        case e1000_82545_rev_3:
 588        case e1000_82546:
 589        case e1000_ce4100:
 590        case e1000_82546_rev_3:
 591                pba = E1000_PBA_48K;
 592                break;
 593        case e1000_82547:
 594        case e1000_82547_rev_2:
 595                legacy_pba_adjust = true;
 596                pba = E1000_PBA_30K;
 597                break;
 598        case e1000_undefined:
 599        case e1000_num_macs:
 600                break;
 601        }
 602
 603        if (legacy_pba_adjust) {
 604                if (hw->max_frame_size > E1000_RXBUFFER_8192)
 605                        pba -= 8; /* allocate more FIFO for Tx */
 606
 607                if (hw->mac_type == e1000_82547) {
 608                        adapter->tx_fifo_head = 0;
 609                        adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 610                        adapter->tx_fifo_size =
 611                                (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
 612                        atomic_set(&adapter->tx_fifo_stall, 0);
 613                }
 614        } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
 615                /* adjust PBA for jumbo frames */
 616                ew32(PBA, pba);
 617
 618                /* To maintain wire speed transmits, the Tx FIFO should be
 619                 * large enough to accommodate two full transmit packets,
 620                 * rounded up to the next 1KB and expressed in KB.  Likewise,
 621                 * the Rx FIFO should be large enough to accommodate at least
 622                 * one full receive packet and is similarly rounded up and
 623                 * expressed in KB.
 624                 */
 625                pba = er32(PBA);
 626                /* upper 16 bits has Tx packet buffer allocation size in KB */
 627                tx_space = pba >> 16;
 628                /* lower 16 bits has Rx packet buffer allocation size in KB */
 629                pba &= 0xffff;
 630                /* the Tx FIFO also stores 16 bytes of information per packet,
 631                 * but don't include the Ethernet FCS because hardware appends it
 632                 */
 633                min_tx_space = (hw->max_frame_size +
 634                                sizeof(struct e1000_tx_desc) -
 635                                ETH_FCS_LEN) * 2;
 636                min_tx_space = ALIGN(min_tx_space, 1024);
 637                min_tx_space >>= 10;
 638                /* software strips receive CRC, so leave room for it */
 639                min_rx_space = hw->max_frame_size;
 640                min_rx_space = ALIGN(min_rx_space, 1024);
 641                min_rx_space >>= 10;
 642
 643                /* If current Tx allocation is less than the min Tx FIFO size,
 644                 * and the min Tx FIFO size is less than the current Rx FIFO
 645                 * allocation, take space away from current Rx allocation
 646                 */
 647                if (tx_space < min_tx_space &&
 648                    ((min_tx_space - tx_space) < pba)) {
 649                        pba = pba - (min_tx_space - tx_space);
 650
 651                        /* PCI/PCIx hardware has PBA alignment constraints */
 652                        switch (hw->mac_type) {
 653                        case e1000_82545 ... e1000_82546_rev_3:
 654                                pba &= ~(E1000_PBA_8K - 1);
 655                                break;
 656                        default:
 657                                break;
 658                        }
 659
 660                        /* if short on Rx space, Rx wins and must trump Tx
 661                         * adjustment or use Early Receive if available
 662                         */
 663                        if (pba < min_rx_space)
 664                                pba = min_rx_space;
 665                }
 666        }
 667
 668        ew32(PBA, pba);
 669
 670        /* flow control settings:
 671         * The high water mark must be low enough to fit one full frame
 672         * (or the size used for early receive) above it in the Rx FIFO.
 673         * Set it to the lower of:
 674         * - 90% of the Rx FIFO size, and
 675         * - the full Rx FIFO size minus the early receive size (for parts
 676         *   with ERT support assuming ERT set to E1000_ERT_2048), or
 677         * - the full Rx FIFO size minus one full frame
 678         */
 679        hwm = min(((pba << 10) * 9 / 10),
 680                  ((pba << 10) - hw->max_frame_size));
 681
 682        hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
 683        hw->fc_low_water = hw->fc_high_water - 8;
 684        hw->fc_pause_time = E1000_FC_PAUSE_TIME;
 685        hw->fc_send_xon = 1;
 686        hw->fc = hw->original_fc;
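        /* Worked example, assuming a 48 KB Rx PBA and a 1522-byte max frame:
         *   pba << 10          = 49152 bytes of Rx FIFO
         *   90% of the FIFO    = 44236
         *   FIFO - max frame   = 47630
         *   hwm = min(...)     = 44236
         *   fc_high_water      = 44236 & 0xFFF8 = 44232
         *   fc_low_water       = 44232 - 8      = 44224
         */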
 687
 688        /* Allow time for pending master requests to run */
 689        e1000_reset_hw(hw);
 690        if (hw->mac_type >= e1000_82544)
 691                ew32(WUC, 0);
 692
 693        if (e1000_init_hw(hw))
 694                e_dev_err("Hardware Error\n");
 695        e1000_update_mng_vlan(adapter);
 696
 697        /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
 698        if (hw->mac_type >= e1000_82544 &&
 699            hw->autoneg == 1 &&
 700            hw->autoneg_advertised == ADVERTISE_1000_FULL) {
 701                u32 ctrl = er32(CTRL);
 702                /* clear phy power management bit if we are in gig only mode,
 703                 * which if enabled will attempt negotiation to 100Mb, which
 704                 * can cause a loss of link at power off or driver unload
 705                 */
 706                ctrl &= ~E1000_CTRL_SWDPIN3;
 707                ew32(CTRL, ctrl);
 708        }
 709
 710        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 711        ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
 712
 713        e1000_reset_adaptive(hw);
 714        e1000_phy_get_info(hw, &adapter->phy_info);
 715
 716        e1000_release_manageability(adapter);
 717}
 718
 719/* Dump the eeprom for users having checksum issues */
 720static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 721{
 722        struct net_device *netdev = adapter->netdev;
 723        struct ethtool_eeprom eeprom;
 724        const struct ethtool_ops *ops = netdev->ethtool_ops;
 725        u8 *data;
 726        int i;
 727        u16 csum_old, csum_new = 0;
 728
 729        eeprom.len = ops->get_eeprom_len(netdev);
 730        eeprom.offset = 0;
 731
 732        data = kmalloc(eeprom.len, GFP_KERNEL);
 733        if (!data)
 734                return;
 735
 736        ops->get_eeprom(netdev, &eeprom, data);
 737
 738        csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
 739                   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
 740        for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
 741                csum_new += data[i] + (data[i + 1] << 8);
 742        csum_new = EEPROM_SUM - csum_new;
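        /* The EEPROM convention is that the 16-bit words from offset 0 up to
         * and including EEPROM_CHECKSUM_REG sum (mod 2^16) to EEPROM_SUM
         * (0xBABA), so csum_new is the checksum the rest of the image calls
         * for, printed alongside the stored value (csum_old) below.
         */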
 743
 744        pr_err("/*********************/\n");
 745        pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
 746        pr_err("Calculated              : 0x%04x\n", csum_new);
 747
 748        pr_err("Offset    Values\n");
 749        pr_err("========  ======\n");
 750        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 751
 752        pr_err("Include this output when contacting your support provider.\n");
 753        pr_err("This is not a software error! Something bad happened to\n");
 754        pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
 755        pr_err("result in further problems, possibly loss of data,\n");
 756        pr_err("corruption or system hangs!\n");
 757        pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
 758        pr_err("which is invalid and requires you to set the proper MAC\n");
 759        pr_err("address manually before continuing to enable this network\n");
 760        pr_err("device. Please inspect the EEPROM dump and report the\n");
 761        pr_err("issue to your hardware vendor or Intel Customer Support.\n");
 762        pr_err("/*********************/\n");
 763
 764        kfree(data);
 765}
 766
 767/**
 768 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 769 * @pdev: PCI device information struct
 770 *
 771 * Return true if an adapter needs ioport resources
 772 **/
 773static int e1000_is_need_ioport(struct pci_dev *pdev)
 774{
 775        switch (pdev->device) {
 776        case E1000_DEV_ID_82540EM:
 777        case E1000_DEV_ID_82540EM_LOM:
 778        case E1000_DEV_ID_82540EP:
 779        case E1000_DEV_ID_82540EP_LOM:
 780        case E1000_DEV_ID_82540EP_LP:
 781        case E1000_DEV_ID_82541EI:
 782        case E1000_DEV_ID_82541EI_MOBILE:
 783        case E1000_DEV_ID_82541ER:
 784        case E1000_DEV_ID_82541ER_LOM:
 785        case E1000_DEV_ID_82541GI:
 786        case E1000_DEV_ID_82541GI_LF:
 787        case E1000_DEV_ID_82541GI_MOBILE:
 788        case E1000_DEV_ID_82544EI_COPPER:
 789        case E1000_DEV_ID_82544EI_FIBER:
 790        case E1000_DEV_ID_82544GC_COPPER:
 791        case E1000_DEV_ID_82544GC_LOM:
 792        case E1000_DEV_ID_82545EM_COPPER:
 793        case E1000_DEV_ID_82545EM_FIBER:
 794        case E1000_DEV_ID_82546EB_COPPER:
 795        case E1000_DEV_ID_82546EB_FIBER:
 796        case E1000_DEV_ID_82546EB_QUAD_COPPER:
 797                return true;
 798        default:
 799                return false;
 800        }
 801}
 802
 803static netdev_features_t e1000_fix_features(struct net_device *netdev,
 804        netdev_features_t features)
 805{
 806        /* Since there is no support for separate Rx/Tx vlan accel
 807         * enable/disable make sure Tx flag is always in same state as Rx.
 808         */
 809        if (features & NETIF_F_HW_VLAN_CTAG_RX)
 810                features |= NETIF_F_HW_VLAN_CTAG_TX;
 811        else
 812                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 813
 814        return features;
 815}
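
/* A small usage sketch of the constraint enforced above: Tx VLAN
 * acceleration always follows the Rx setting, so for example
 *
 *   ethtool -K eth0 rxvlan off
 *
 * also turns off txvlan, while a request to toggle txvlan on its own is
 * overridden here in ndo_fix_features before ndo_set_features runs.
 */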
 816
 817static int e1000_set_features(struct net_device *netdev,
 818        netdev_features_t features)
 819{
 820        struct e1000_adapter *adapter = netdev_priv(netdev);
 821        netdev_features_t changed = features ^ netdev->features;
 822
 823        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 824                e1000_vlan_mode(netdev, features);
 825
 826        if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 827                return 0;
 828
 829        netdev->features = features;
 830        adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 831
 832        if (netif_running(netdev))
 833                e1000_reinit_locked(adapter);
 834        else
 835                e1000_reset(adapter);
 836
 837        return 0;
 838}
 839
 840static const struct net_device_ops e1000_netdev_ops = {
 841        .ndo_open               = e1000_open,
 842        .ndo_stop               = e1000_close,
 843        .ndo_start_xmit         = e1000_xmit_frame,
 844        .ndo_get_stats          = e1000_get_stats,
 845        .ndo_set_rx_mode        = e1000_set_rx_mode,
 846        .ndo_set_mac_address    = e1000_set_mac,
 847        .ndo_tx_timeout         = e1000_tx_timeout,
 848        .ndo_change_mtu         = e1000_change_mtu,
 849        .ndo_do_ioctl           = e1000_ioctl,
 850        .ndo_validate_addr      = eth_validate_addr,
 851        .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
 852        .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
 853#ifdef CONFIG_NET_POLL_CONTROLLER
 854        .ndo_poll_controller    = e1000_netpoll,
 855#endif
 856        .ndo_fix_features       = e1000_fix_features,
 857        .ndo_set_features       = e1000_set_features,
 858};
 859
 860/**
 861 * e1000_init_hw_struct - initialize members of hw struct
 862 * @adapter: board private struct
 863 * @hw: structure used by e1000_hw.c
 864 *
 865 * Factors out initialization of the e1000_hw struct to its own function
 866 * that can be called very early at init (just after struct allocation).
 867 * Fields are initialized based on PCI device information and
 868 * OS network device settings (MTU size).
 869 * Returns negative error codes if MAC type setup fails.
 870 */
 871static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 872                                struct e1000_hw *hw)
 873{
 874        struct pci_dev *pdev = adapter->pdev;
 875
 876        /* PCI config space info */
 877        hw->vendor_id = pdev->vendor;
 878        hw->device_id = pdev->device;
 879        hw->subsystem_vendor_id = pdev->subsystem_vendor;
 880        hw->subsystem_id = pdev->subsystem_device;
 881        hw->revision_id = pdev->revision;
 882
 883        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 884
 885        hw->max_frame_size = adapter->netdev->mtu +
 886                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 887        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
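        /* e.g. with the default 1500-byte MTU this works out to
         * 1500 + 14 (Ethernet header) + 4 (FCS) = 1518 bytes, the classic
         * maximum untagged Ethernet frame size.
         */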
 888
 889        /* identify the MAC */
 890        if (e1000_set_mac_type(hw)) {
 891                e_err(probe, "Unknown MAC Type\n");
 892                return -EIO;
 893        }
 894
 895        switch (hw->mac_type) {
 896        default:
 897                break;
 898        case e1000_82541:
 899        case e1000_82547:
 900        case e1000_82541_rev_2:
 901        case e1000_82547_rev_2:
 902                hw->phy_init_script = 1;
 903                break;
 904        }
 905
 906        e1000_set_media_type(hw);
 907        e1000_get_bus_info(hw);
 908
 909        hw->wait_autoneg_complete = false;
 910        hw->tbi_compatibility_en = true;
 911        hw->adaptive_ifs = true;
 912
 913        /* Copper options */
 914
 915        if (hw->media_type == e1000_media_type_copper) {
 916                hw->mdix = AUTO_ALL_MODES;
 917                hw->disable_polarity_correction = false;
 918                hw->master_slave = E1000_MASTER_SLAVE;
 919        }
 920
 921        return 0;
 922}
 923
 924/**
 925 * e1000_probe - Device Initialization Routine
 926 * @pdev: PCI device information struct
 927 * @ent: entry in e1000_pci_tbl
 928 *
 929 * Returns 0 on success, negative on failure
 930 *
 931 * e1000_probe initializes an adapter identified by a pci_dev structure.
 932 * The OS initialization, configuring of the adapter private structure,
 933 * and a hardware reset occur.
 934 **/
 935static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 936{
 937        struct net_device *netdev;
 938        struct e1000_adapter *adapter;
 939        struct e1000_hw *hw;
 940
 941        static int cards_found = 0;
 942        static int global_quad_port_a = 0; /* global ksp3 port a indication */
 943        int i, err, pci_using_dac;
 944        u16 eeprom_data = 0;
 945        u16 tmp = 0;
 946        u16 eeprom_apme_mask = E1000_EEPROM_APME;
 947        int bars, need_ioport;
 948
 949        /* do not allocate ioport bars when not needed */
 950        need_ioport = e1000_is_need_ioport(pdev);
 951        if (need_ioport) {
 952                bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 953                err = pci_enable_device(pdev);
 954        } else {
 955                bars = pci_select_bars(pdev, IORESOURCE_MEM);
 956                err = pci_enable_device_mem(pdev);
 957        }
 958        if (err)
 959                return err;
 960
 961        err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
 962        if (err)
 963                goto err_pci_reg;
 964
 965        pci_set_master(pdev);
 966        err = pci_save_state(pdev);
 967        if (err)
 968                goto err_alloc_etherdev;
 969
 970        err = -ENOMEM;
 971        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
 972        if (!netdev)
 973                goto err_alloc_etherdev;
 974
 975        SET_NETDEV_DEV(netdev, &pdev->dev);
 976
 977        pci_set_drvdata(pdev, netdev);
 978        adapter = netdev_priv(netdev);
 979        adapter->netdev = netdev;
 980        adapter->pdev = pdev;
 981        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 982        adapter->bars = bars;
 983        adapter->need_ioport = need_ioport;
 984
 985        hw = &adapter->hw;
 986        hw->back = adapter;
 987
 988        err = -EIO;
 989        hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
 990        if (!hw->hw_addr)
 991                goto err_ioremap;
 992
 993        if (adapter->need_ioport) {
 994                for (i = BAR_1; i <= BAR_5; i++) {
 995                        if (pci_resource_len(pdev, i) == 0)
 996                                continue;
 997                        if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 998                                hw->io_base = pci_resource_start(pdev, i);
 999                                break;
1000                        }
1001                }
1002        }
1003
1004        /* make ready for any if (hw->...) below */
1005        err = e1000_init_hw_struct(adapter, hw);
1006        if (err)
1007                goto err_sw_init;
1008
1009        /* there is a workaround being applied below that limits
1010         * 64-bit DMA addresses to 64-bit hardware.  There are some
1011         * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
1012         */
1013        pci_using_dac = 0;
1014        if ((hw->bus_type == e1000_bus_type_pcix) &&
1015            !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1016                pci_using_dac = 1;
1017        } else {
1018                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1019                if (err) {
1020                        pr_err("No usable DMA config, aborting\n");
1021                        goto err_dma;
1022                }
1023        }
1024
1025        netdev->netdev_ops = &e1000_netdev_ops;
1026        e1000_set_ethtool_ops(netdev);
1027        netdev->watchdog_timeo = 5 * HZ;
1028        netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1029
1030        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1031
1032        adapter->bd_number = cards_found;
1033
1034        /* setup the private structure */
1035
1036        err = e1000_sw_init(adapter);
1037        if (err)
1038                goto err_sw_init;
1039
1040        err = -EIO;
1041        if (hw->mac_type == e1000_ce4100) {
1042                hw->ce4100_gbe_mdio_base_virt =
1043                                        ioremap(pci_resource_start(pdev, BAR_1),
1044                                                pci_resource_len(pdev, BAR_1));
1045
1046                if (!hw->ce4100_gbe_mdio_base_virt)
1047                        goto err_mdio_ioremap;
1048        }
1049
1050        if (hw->mac_type >= e1000_82543) {
1051                netdev->hw_features = NETIF_F_SG |
1052                                   NETIF_F_HW_CSUM |
1053                                   NETIF_F_HW_VLAN_CTAG_RX;
1054                netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1055                                   NETIF_F_HW_VLAN_CTAG_FILTER;
1056        }
1057
1058        if ((hw->mac_type >= e1000_82544) &&
1059           (hw->mac_type != e1000_82547))
1060                netdev->hw_features |= NETIF_F_TSO;
1061
1062        netdev->priv_flags |= IFF_SUPP_NOFCS;
1063
1064        netdev->features |= netdev->hw_features;
1065        netdev->hw_features |= (NETIF_F_RXCSUM |
1066                                NETIF_F_RXALL |
1067                                NETIF_F_RXFCS);
1068
1069        if (pci_using_dac) {
1070                netdev->features |= NETIF_F_HIGHDMA;
1071                netdev->vlan_features |= NETIF_F_HIGHDMA;
1072        }
1073
1074        netdev->vlan_features |= (NETIF_F_TSO |
1075                                  NETIF_F_HW_CSUM |
1076                                  NETIF_F_SG);
1077
1078        netdev->priv_flags |= IFF_UNICAST_FLT;
1079
1080        adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1081
1082        /* initialize eeprom parameters */
1083        if (e1000_init_eeprom_params(hw)) {
1084                e_err(probe, "EEPROM initialization failed\n");
1085                goto err_eeprom;
1086        }
1087
1088        /* before reading the EEPROM, reset the controller to
1089         * put the device in a known good starting state
1090         */
1091
1092        e1000_reset_hw(hw);
1093
1094        /* make sure the EEPROM is good */
1095        if (e1000_validate_eeprom_checksum(hw) < 0) {
1096                e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1097                e1000_dump_eeprom(adapter);
1098                /* set MAC address to all zeroes to invalidate and temporarily
1099                 * disable this device for the user. This blocks regular
1100                 * traffic while still permitting ethtool ioctls to reach
1101                 * the hardware as well as allowing the user to run the
1102                 * interface after manually setting a hw addr using
1103                 * `ip set address`
1104                 */
1105                memset(hw->mac_addr, 0, netdev->addr_len);
1106        } else {
1107                /* copy the MAC address out of the EEPROM */
1108                if (e1000_read_mac_addr(hw))
1109                        e_err(probe, "EEPROM Read Error\n");
1110        }
1111        /* don't block initialization here due to a bad MAC address */
1112        memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1113
1114        if (!is_valid_ether_addr(netdev->dev_addr))
1115                e_err(probe, "Invalid MAC Address\n");
1116
1117
1118        INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1119        INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1120                          e1000_82547_tx_fifo_stall_task);
1121        INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1122        INIT_WORK(&adapter->reset_task, e1000_reset_task);
1123
1124        e1000_check_options(adapter);
1125
1126        /* Initial Wake on LAN setting
1127         * If APM wake is enabled in the EEPROM,
1128         * enable the ACPI Magic Packet filter
1129         */
1130
1131        switch (hw->mac_type) {
1132        case e1000_82542_rev2_0:
1133        case e1000_82542_rev2_1:
1134        case e1000_82543:
1135                break;
1136        case e1000_82544:
1137                e1000_read_eeprom(hw,
1138                        EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1139                eeprom_apme_mask = E1000_EEPROM_82544_APM;
1140                break;
1141        case e1000_82546:
1142        case e1000_82546_rev_3:
1143                if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1144                        e1000_read_eeprom(hw,
1145                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1146                        break;
1147                }
1148                /* Fall Through */
1149        default:
1150                e1000_read_eeprom(hw,
1151                        EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1152                break;
1153        }
1154        if (eeprom_data & eeprom_apme_mask)
1155                adapter->eeprom_wol |= E1000_WUFC_MAG;
1156
1157        /* now that we have the eeprom settings, apply the special cases
1158         * where the eeprom may be wrong or the board simply won't support
1159         * wake on lan on a particular port
1160         */
1161        switch (pdev->device) {
1162        case E1000_DEV_ID_82546GB_PCIE:
1163                adapter->eeprom_wol = 0;
1164                break;
1165        case E1000_DEV_ID_82546EB_FIBER:
1166        case E1000_DEV_ID_82546GB_FIBER:
1167                /* Wake events only supported on port A for dual fiber
1168                 * regardless of eeprom setting
1169                 */
1170                if (er32(STATUS) & E1000_STATUS_FUNC_1)
1171                        adapter->eeprom_wol = 0;
1172                break;
1173        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1174                /* if quad port adapter, disable WoL on all but port A */
1175                if (global_quad_port_a != 0)
1176                        adapter->eeprom_wol = 0;
1177                else
1178                        adapter->quad_port_a = true;
1179                /* Reset for multiple quad port adapters */
1180                if (++global_quad_port_a == 4)
1181                        global_quad_port_a = 0;
1182                break;
1183        }
1184
1185        /* initialize the wol settings based on the eeprom settings */
1186        adapter->wol = adapter->eeprom_wol;
1187        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1188
1189        /* Auto detect PHY address */
1190        if (hw->mac_type == e1000_ce4100) {
1191                for (i = 0; i < 32; i++) {
1192                        hw->phy_addr = i;
1193                        e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1194                        if (tmp == 0 || tmp == 0xFF) {
1195                                if (i == 31)
1196                                        goto err_eeprom;
1197                                continue;
1198                        } else
1199                                break;
1200                }
1201        }
1202
1203        /* reset the hardware with the new settings */
1204        e1000_reset(adapter);
1205
1206        strcpy(netdev->name, "eth%d");
1207        err = register_netdev(netdev);
1208        if (err)
1209                goto err_register;
1210
1211        e1000_vlan_filter_on_off(adapter, false);
1212
1213        /* print bus type/speed/width info */
1214        e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1215               ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1216               ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1217                (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1218                (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1219                (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1220               ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1221               netdev->dev_addr);
1222
1223        /* carrier off reporting is important to ethtool even BEFORE open */
1224        netif_carrier_off(netdev);
1225
1226        e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1227
1228        cards_found++;
1229        return 0;
1230
1231err_register:
1232err_eeprom:
1233        e1000_phy_hw_reset(hw);
1234
1235        if (hw->flash_address)
1236                iounmap(hw->flash_address);
1237        kfree(adapter->tx_ring);
1238        kfree(adapter->rx_ring);
1239err_dma:
1240err_sw_init:
1241err_mdio_ioremap:
1242        iounmap(hw->ce4100_gbe_mdio_base_virt);
1243        iounmap(hw->hw_addr);
1244err_ioremap:
1245        free_netdev(netdev);
1246err_alloc_etherdev:
1247        pci_release_selected_regions(pdev, bars);
1248err_pci_reg:
1249        pci_disable_device(pdev);
1250        return err;
1251}
1252
1253/**
1254 * e1000_remove - Device Removal Routine
1255 * @pdev: PCI device information struct
1256 *
1257 * e1000_remove is called by the PCI subsystem to alert the driver
1258 * that it should release a PCI device.  This could be caused by a
1259 * Hot-Plug event, or because the driver is going to be removed from
1260 * memory.
1261 **/
1262static void e1000_remove(struct pci_dev *pdev)
1263{
1264        struct net_device *netdev = pci_get_drvdata(pdev);
1265        struct e1000_adapter *adapter = netdev_priv(netdev);
1266        struct e1000_hw *hw = &adapter->hw;
1267
1268        e1000_down_and_stop(adapter);
1269        e1000_release_manageability(adapter);
1270
1271        unregister_netdev(netdev);
1272
1273        e1000_phy_hw_reset(hw);
1274
1275        kfree(adapter->tx_ring);
1276        kfree(adapter->rx_ring);
1277
1278        if (hw->mac_type == e1000_ce4100)
1279                iounmap(hw->ce4100_gbe_mdio_base_virt);
1280        iounmap(hw->hw_addr);
1281        if (hw->flash_address)
1282                iounmap(hw->flash_address);
1283        pci_release_selected_regions(pdev, adapter->bars);
1284
1285        free_netdev(netdev);
1286
1287        pci_disable_device(pdev);
1288}
1289
1290/**
1291 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1292 * @adapter: board private structure to initialize
1293 *
1294 * e1000_sw_init initializes the Adapter private data structure.
1295 * e1000_init_hw_struct MUST be called before this function
1296 **/
1297static int e1000_sw_init(struct e1000_adapter *adapter)
1298{
1299        adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1300
1301        adapter->num_tx_queues = 1;
1302        adapter->num_rx_queues = 1;
1303
1304        if (e1000_alloc_queues(adapter)) {
1305                e_err(probe, "Unable to allocate memory for queues\n");
1306                return -ENOMEM;
1307        }
1308
1309        /* Explicitly disable IRQ since the NIC can be in any state. */
1310        e1000_irq_disable(adapter);
1311
1312        spin_lock_init(&adapter->stats_lock);
1313
1314        set_bit(__E1000_DOWN, &adapter->flags);
1315
1316        return 0;
1317}
1318
1319/**
1320 * e1000_alloc_queues - Allocate memory for all rings
1321 * @adapter: board private structure to initialize
1322 *
1323 * We allocate one ring per queue at run-time since we don't know the
1324 * number of queues at compile-time.
1325 **/
1326static int e1000_alloc_queues(struct e1000_adapter *adapter)
1327{
1328        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1329                                   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1330        if (!adapter->tx_ring)
1331                return -ENOMEM;
1332
1333        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1334                                   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1335        if (!adapter->rx_ring) {
1336                kfree(adapter->tx_ring);
1337                return -ENOMEM;
1338        }
1339
1340        return E1000_SUCCESS;
1341}
1342
1343/**
1344 * e1000_open - Called when a network interface is made active
1345 * @netdev: network interface device structure
1346 *
1347 * Returns 0 on success, negative value on failure
1348 *
1349 * The open entry point is called when a network interface is made
1350 * active by the system (IFF_UP).  At this point all resources needed
1351 * for transmit and receive operations are allocated, the interrupt
1352 * handler is registered with the OS, the watchdog task is started,
1353 * and the stack is notified that the interface is ready.
1354 **/
1355static int e1000_open(struct net_device *netdev)
1356{
1357        struct e1000_adapter *adapter = netdev_priv(netdev);
1358        struct e1000_hw *hw = &adapter->hw;
1359        int err;
1360
1361        /* disallow open during test */
1362        if (test_bit(__E1000_TESTING, &adapter->flags))
1363                return -EBUSY;
1364
1365        netif_carrier_off(netdev);
1366
1367        /* allocate transmit descriptors */
1368        err = e1000_setup_all_tx_resources(adapter);
1369        if (err)
1370                goto err_setup_tx;
1371
1372        /* allocate receive descriptors */
1373        err = e1000_setup_all_rx_resources(adapter);
1374        if (err)
1375                goto err_setup_rx;
1376
1377        e1000_power_up_phy(adapter);
1378
1379        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1380        if ((hw->mng_cookie.status &
1381                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1382                e1000_update_mng_vlan(adapter);
1383        }
1384
1385        /* before we allocate an interrupt, we must be ready to handle it.
1386         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1387         * as soon as we call request_irq, so we have to set up our
1388         * clean_rx handler before we do so.
1389         */
1390        e1000_configure(adapter);
1391
1392        err = e1000_request_irq(adapter);
1393        if (err)
1394                goto err_req_irq;
1395
1396        /* From here on the code is the same as e1000_up() */
1397        clear_bit(__E1000_DOWN, &adapter->flags);
1398
1399        napi_enable(&adapter->napi);
1400
1401        e1000_irq_enable(adapter);
1402
1403        netif_start_queue(netdev);
1404
1405        /* fire a link status change interrupt to start the watchdog */
1406        ew32(ICS, E1000_ICS_LSC);
1407
1408        return E1000_SUCCESS;
1409
1410err_req_irq:
1411        e1000_power_down_phy(adapter);
1412        e1000_free_all_rx_resources(adapter);
1413err_setup_rx:
1414        e1000_free_all_tx_resources(adapter);
1415err_setup_tx:
1416        e1000_reset(adapter);
1417
1418        return err;
1419}
1420
1421/**
1422 * e1000_close - Disables a network interface
1423 * @netdev: network interface device structure
1424 *
1425 * Returns 0, this is not allowed to fail
1426 *
1427 * The close entry point is called when an interface is de-activated
1428 * by the OS.  The hardware is still under the drivers control, but
1429 * needs to be disabled.  A global MAC reset is issued to stop the
1430 * hardware, and all transmit and receive resources are freed.
1431 **/
1432static int e1000_close(struct net_device *netdev)
1433{
1434        struct e1000_adapter *adapter = netdev_priv(netdev);
1435        struct e1000_hw *hw = &adapter->hw;
1436        int count = E1000_CHECK_RESET_COUNT;
1437
1438        while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1439                usleep_range(10000, 20000);
1440
1441        WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1442        e1000_down(adapter);
1443        e1000_power_down_phy(adapter);
1444        e1000_free_irq(adapter);
1445
1446        e1000_free_all_tx_resources(adapter);
1447        e1000_free_all_rx_resources(adapter);
1448
1449        /* kill manageability vlan ID if supported, but not if a vlan with
1450         * the same ID is registered on the host OS (let 8021q kill it)
1451         */
1452        if ((hw->mng_cookie.status &
1453             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1454            !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1455                e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1456                                       adapter->mng_vlan_id);
1457        }
1458
1459        return 0;
1460}
1461
1462/**
1463 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1464 * @adapter: address of board private structure
1465 * @start: address of beginning of memory
1466 * @len: length of memory
1467 **/
1468static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1469                                  unsigned long len)
1470{
1471        struct e1000_hw *hw = &adapter->hw;
1472        unsigned long begin = (unsigned long)start;
1473        unsigned long end = begin + len;
1474
1475        /* The first revisions of the 82545 and 82546 must not allow any
1476         * memory write location to cross a 64 KB boundary, per errata 23.
1477         */
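            /* The check XORs the first and last byte addresses of the region;
             * any difference in bit 16 or above means the region spans a
             * 64 KB (0x10000) boundary.  For example, begin = 0x1ff00 and
             * len = 0x200 give end - 1 = 0x200ff, so
             * (begin ^ (end - 1)) >> 16 == 0x3 and the buffer is rejected.
             */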
1478        if (hw->mac_type == e1000_82545 ||
1479            hw->mac_type == e1000_ce4100 ||
1480            hw->mac_type == e1000_82546) {
1481                return ((begin ^ (end - 1)) >> 16) == 0;
1482        }
1483
1484        return true;
1485}
1486
1487/**
1488 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1489 * @adapter: board private structure
1490 * @txdr:    tx descriptor ring (for a specific queue) to set up
1491 *
1492 * Return 0 on success, negative on failure
1493 **/
1494static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1495                                    struct e1000_tx_ring *txdr)
1496{
1497        struct pci_dev *pdev = adapter->pdev;
1498        int size;
1499
1500        size = sizeof(struct e1000_buffer) * txdr->count;
1501        txdr->buffer_info = vzalloc(size);
1502        if (!txdr->buffer_info)
1503                return -ENOMEM;
1504
1505        /* round up to nearest 4K */
1506
1507        txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1508        txdr->size = ALIGN(txdr->size, 4096);
1509
1510        txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1511                                        GFP_KERNEL);
1512        if (!txdr->desc) {
1513setup_tx_desc_die:
1514                vfree(txdr->buffer_info);
1515                return -ENOMEM;
1516        }
1517
1518        /* Fix for errata 23, can't cross 64kB boundary */
1519        if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1520                void *olddesc = txdr->desc;
1521                dma_addr_t olddma = txdr->dma;
1522                e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1523                      txdr->size, txdr->desc);
1524                /* Try again, without freeing the previous */
1525                txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1526                                                &txdr->dma, GFP_KERNEL);
1527                /* Failed allocation, critical failure */
1528                if (!txdr->desc) {
1529                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1530                                          olddma);
1531                        goto setup_tx_desc_die;
1532                }
1533
1534                if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1535                        /* give up */
1536                        dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1537                                          txdr->dma);
1538                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1539                                          olddma);
1540                        e_err(probe, "Unable to allocate aligned memory "
1541                              "for the transmit descriptor ring\n");
1542                        vfree(txdr->buffer_info);
1543                        return -ENOMEM;
1544                } else {
1545                        /* Free old allocation, new allocation was successful */
1546                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1547                                          olddma);
1548                }
1549        }
1550        memset(txdr->desc, 0, txdr->size);
1551
1552        txdr->next_to_use = 0;
1553        txdr->next_to_clean = 0;
1554
1555        return 0;
1556}
1557
1558/**
1559 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1560 *                                (Descriptors) for all queues
1561 * @adapter: board private structure
1562 *
1563 * Return 0 on success, negative on failure
1564 **/
1565int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1566{
1567        int i, err = 0;
1568
1569        for (i = 0; i < adapter->num_tx_queues; i++) {
1570                err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1571                if (err) {
1572                        e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1573                        for (i--; i >= 0; i--)
1574                                e1000_free_tx_resources(adapter,
1575                                                        &adapter->tx_ring[i]);
1576                        break;
1577                }
1578        }
1579
1580        return err;
1581}
1582
1583/**
1584 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1585 * @adapter: board private structure
1586 *
1587 * Configure the Tx unit of the MAC after a reset.
1588 **/
1589static void e1000_configure_tx(struct e1000_adapter *adapter)
1590{
1591        u64 tdba;
1592        struct e1000_hw *hw = &adapter->hw;
1593        u32 tdlen, tctl, tipg;
1594        u32 ipgr1, ipgr2;
1595
1596        /* Setup the HW Tx Head and Tail descriptor pointers */
1597
1598        switch (adapter->num_tx_queues) {
1599        case 1:
1600        default:
1601                tdba = adapter->tx_ring[0].dma;
1602                tdlen = adapter->tx_ring[0].count *
1603                        sizeof(struct e1000_tx_desc);
1604                ew32(TDLEN, tdlen);
1605                ew32(TDBAH, (tdba >> 32));
1606                ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1607                ew32(TDT, 0);
1608                ew32(TDH, 0);
1609                adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1610                                           E1000_TDH : E1000_82542_TDH);
1611                adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1612                                           E1000_TDT : E1000_82542_TDT);
1613                break;
1614        }
1615
1616        /* Set the default values for the Tx Inter Packet Gap timer */
1617        if ((hw->media_type == e1000_media_type_fiber ||
1618             hw->media_type == e1000_media_type_internal_serdes))
1619                tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1620        else
1621                tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1622
1623        switch (hw->mac_type) {
1624        case e1000_82542_rev2_0:
1625        case e1000_82542_rev2_1:
1626                tipg = DEFAULT_82542_TIPG_IPGT;
1627                ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1628                ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1629                break;
1630        default:
1631                ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1632                ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1633                break;
1634        }
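            /* TIPG carries the transmit IPG (IPGT) in its low bits, with the
             * receive-to-transmit gaps IPGR1 and IPGR2 OR'd in above it at
             * their shift positions.
             */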
1635        tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1636        tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1637        ew32(TIPG, tipg);
1638
1639        /* Set the Tx Interrupt Delay register */
1640
1641        ew32(TIDV, adapter->tx_int_delay);
1642        if (hw->mac_type >= e1000_82540)
1643                ew32(TADV, adapter->tx_abs_int_delay);
1644
1645        /* Program the Transmit Control Register */
1646
1647        tctl = er32(TCTL);
1648        tctl &= ~E1000_TCTL_CT;
1649        tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1650                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1651
1652        e1000_config_collision_dist(hw);
1653
1654        /* Setup Transmit Descriptor Settings for eop descriptor */
1655        adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1656
1657        /* only set IDE if we are delaying interrupts using the timers */
1658        if (adapter->tx_int_delay)
1659                adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1660
1661        if (hw->mac_type < e1000_82543)
1662                adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1663        else
1664                adapter->txd_cmd |= E1000_TXD_CMD_RS;
1665
1666        /* Cache if we're 82544 running in PCI-X because we'll
1667         * need this to apply a workaround later in the send path.
1668         */
1669        if (hw->mac_type == e1000_82544 &&
1670            hw->bus_type == e1000_bus_type_pcix)
1671                adapter->pcix_82544 = true;
1672
1673        ew32(TCTL, tctl);
1674
1675}
1676
1677/**
1678 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1679 * @adapter: board private structure
1680 * @rxdr:    rx descriptor ring (for a specific queue) to set up
1681 *
1682 * Returns 0 on success, negative on failure
1683 **/
1684static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1685                                    struct e1000_rx_ring *rxdr)
1686{
1687        struct pci_dev *pdev = adapter->pdev;
1688        int size, desc_len;
1689
1690        size = sizeof(struct e1000_buffer) * rxdr->count;
1691        rxdr->buffer_info = vzalloc(size);
1692        if (!rxdr->buffer_info)
1693                return -ENOMEM;
1694
1695        desc_len = sizeof(struct e1000_rx_desc);
1696
1697        /* Round up to nearest 4K */
1698
1699        rxdr->size = rxdr->count * desc_len;
1700        rxdr->size = ALIGN(rxdr->size, 4096);
1701
1702        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1703                                        GFP_KERNEL);
1704        if (!rxdr->desc) {
1705setup_rx_desc_die:
1706                vfree(rxdr->buffer_info);
1707                return -ENOMEM;
1708        }
1709
1710        /* Fix for errata 23, can't cross 64kB boundary */
1711        if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1712                void *olddesc = rxdr->desc;
1713                dma_addr_t olddma = rxdr->dma;
1714                e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1715                      rxdr->size, rxdr->desc);
1716                /* Try again, without freeing the previous */
1717                rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1718                                                &rxdr->dma, GFP_KERNEL);
1719                /* Failed allocation, critical failure */
1720                if (!rxdr->desc) {
1721                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1722                                          olddma);
1723                        goto setup_rx_desc_die;
1724                }
1725
1726                if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1727                        /* give up */
1728                        dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1729                                          rxdr->dma);
1730                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1731                                          olddma);
1732                        e_err(probe, "Unable to allocate aligned memory for "
1733                              "the Rx descriptor ring\n");
1734                        goto setup_rx_desc_die;
1735                } else {
1736                        /* Free old allocation, new allocation was successful */
1737                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1738                                          olddma);
1739                }
1740        }
1741        memset(rxdr->desc, 0, rxdr->size);
1742
1743        rxdr->next_to_clean = 0;
1744        rxdr->next_to_use = 0;
1745        rxdr->rx_skb_top = NULL;
1746
1747        return 0;
1748}
1749
1750/**
1751 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1752 *                                (Descriptors) for all queues
1753 * @adapter: board private structure
1754 *
1755 * Return 0 on success, negative on failure
1756 **/
1757int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1758{
1759        int i, err = 0;
1760
1761        for (i = 0; i < adapter->num_rx_queues; i++) {
1762                err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1763                if (err) {
1764                        e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1765                        for (i--; i >= 0; i--)
1766                                e1000_free_rx_resources(adapter,
1767                                                        &adapter->rx_ring[i]);
1768                        break;
1769                }
1770        }
1771
1772        return err;
1773}
1774
1775/**
1776 * e1000_setup_rctl - configure the receive control registers
1777 * @adapter: Board private structure
1778 **/
1779static void e1000_setup_rctl(struct e1000_adapter *adapter)
1780{
1781        struct e1000_hw *hw = &adapter->hw;
1782        u32 rctl;
1783
1784        rctl = er32(RCTL);
1785
1786        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1787
1788        rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1789                E1000_RCTL_RDMTS_HALF |
1790                (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1791
1792        if (hw->tbi_compatibility_on == 1)
1793                rctl |= E1000_RCTL_SBP;
1794        else
1795                rctl &= ~E1000_RCTL_SBP;
1796
1797        if (adapter->netdev->mtu <= ETH_DATA_LEN)
1798                rctl &= ~E1000_RCTL_LPE;
1799        else
1800                rctl |= E1000_RCTL_LPE;
1801
1802        /* Setup buffer sizes */
1803        rctl &= ~E1000_RCTL_SZ_4096;
1804        rctl |= E1000_RCTL_BSEX;
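            /* BSEX (buffer size extension) selects the larger receive buffer
             * encodings (4096/8192/16384 bytes); it is cleared again for the
             * default 2048-byte case, which uses the standard encoding.
             */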
1805        switch (adapter->rx_buffer_len) {
1806        case E1000_RXBUFFER_2048:
1807        default:
1808                rctl |= E1000_RCTL_SZ_2048;
1809                rctl &= ~E1000_RCTL_BSEX;
1810                break;
1811        case E1000_RXBUFFER_4096:
1812                rctl |= E1000_RCTL_SZ_4096;
1813                break;
1814        case E1000_RXBUFFER_8192:
1815                rctl |= E1000_RCTL_SZ_8192;
1816                break;
1817        case E1000_RXBUFFER_16384:
1818                rctl |= E1000_RCTL_SZ_16384;
1819                break;
1820        }
1821
1822        /* This is useful for sniffing bad packets. */
1823        if (adapter->netdev->features & NETIF_F_RXALL) {
1824                /* UPE and MPE will be handled by normal PROMISC logic
1825                 * in e1000_set_rx_mode
1826                 */
1827                rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1828                         E1000_RCTL_BAM | /* RX All Bcast Pkts */
1829                         E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1830
1831                rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1832                          E1000_RCTL_DPF | /* Allow filtered pause */
1833                          E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1834                /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1835                 * and that breaks VLANs.
1836                 */
1837        }
1838
1839        ew32(RCTL, rctl);
1840}
1841
1842/**
1843 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1844 * @adapter: board private structure
1845 *
1846 * Configure the Rx unit of the MAC after a reset.
1847 **/
1848static void e1000_configure_rx(struct e1000_adapter *adapter)
1849{
1850        u64 rdba;
1851        struct e1000_hw *hw = &adapter->hw;
1852        u32 rdlen, rctl, rxcsum;
1853
1854        if (adapter->netdev->mtu > ETH_DATA_LEN) {
1855                rdlen = adapter->rx_ring[0].count *
1856                        sizeof(struct e1000_rx_desc);
1857                adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1858                adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1859        } else {
1860                rdlen = adapter->rx_ring[0].count *
1861                        sizeof(struct e1000_rx_desc);
1862                adapter->clean_rx = e1000_clean_rx_irq;
1863                adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1864        }
1865
1866        /* disable receives while setting up the descriptors */
1867        rctl = er32(RCTL);
1868        ew32(RCTL, rctl & ~E1000_RCTL_EN);
1869
1870        /* set the Receive Delay Timer Register */
1871        ew32(RDTR, adapter->rx_int_delay);
1872
1873        if (hw->mac_type >= e1000_82540) {
1874                ew32(RADV, adapter->rx_abs_int_delay);
1875                if (adapter->itr_setting != 0)
1876                        ew32(ITR, 1000000000 / (adapter->itr * 256));
1877        }
1878
1879        /* Setup the HW Rx Head and Tail Descriptor Pointers and
1880         * the Base and Length of the Rx Descriptor Ring
1881         */
1882        switch (adapter->num_rx_queues) {
1883        case 1:
1884        default:
1885                rdba = adapter->rx_ring[0].dma;
1886                ew32(RDLEN, rdlen);
1887                ew32(RDBAH, (rdba >> 32));
1888                ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1889                ew32(RDT, 0);
1890                ew32(RDH, 0);
1891                adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1892                                           E1000_RDH : E1000_82542_RDH);
1893                adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1894                                           E1000_RDT : E1000_82542_RDT);
1895                break;
1896        }
1897
1898        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1899        if (hw->mac_type >= e1000_82543) {
1900                rxcsum = er32(RXCSUM);
1901                if (adapter->rx_csum)
1902                        rxcsum |= E1000_RXCSUM_TUOFL;
1903                else
1904                        /* don't need to clear IPPCSE as it defaults to 0 */
1905                        rxcsum &= ~E1000_RXCSUM_TUOFL;
1906                ew32(RXCSUM, rxcsum);
1907        }
1908
1909        /* Enable Receives */
1910        ew32(RCTL, rctl | E1000_RCTL_EN);
1911}
1912
1913/**
1914 * e1000_free_tx_resources - Free Tx Resources per Queue
1915 * @adapter: board private structure
1916 * @tx_ring: Tx descriptor ring for a specific queue
1917 *
1918 * Free all transmit software resources
1919 **/
1920static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1921                                    struct e1000_tx_ring *tx_ring)
1922{
1923        struct pci_dev *pdev = adapter->pdev;
1924
1925        e1000_clean_tx_ring(adapter, tx_ring);
1926
1927        vfree(tx_ring->buffer_info);
1928        tx_ring->buffer_info = NULL;
1929
1930        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1931                          tx_ring->dma);
1932
1933        tx_ring->desc = NULL;
1934}
1935
1936/**
1937 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1938 * @adapter: board private structure
1939 *
1940 * Free all transmit software resources
1941 **/
1942void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1943{
1944        int i;
1945
1946        for (i = 0; i < adapter->num_tx_queues; i++)
1947                e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1948}
1949
1950static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1951                                             struct e1000_buffer *buffer_info)
1952{
1953        if (buffer_info->dma) {
1954                if (buffer_info->mapped_as_page)
1955                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1956                                       buffer_info->length, DMA_TO_DEVICE);
1957                else
1958                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1959                                         buffer_info->length,
1960                                         DMA_TO_DEVICE);
1961                buffer_info->dma = 0;
1962        }
1963        if (buffer_info->skb) {
1964                dev_kfree_skb_any(buffer_info->skb);
1965                buffer_info->skb = NULL;
1966        }
1967        buffer_info->time_stamp = 0;
1968        /* buffer_info must be completely set up in the transmit path */
1969}
1970
1971/**
1972 * e1000_clean_tx_ring - Free Tx Buffers
1973 * @adapter: board private structure
1974 * @tx_ring: ring to be cleaned
1975 **/
1976static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1977                                struct e1000_tx_ring *tx_ring)
1978{
1979        struct e1000_hw *hw = &adapter->hw;
1980        struct e1000_buffer *buffer_info;
1981        unsigned long size;
1982        unsigned int i;
1983
1984        /* Free all the Tx ring sk_buffs */
1985
1986        for (i = 0; i < tx_ring->count; i++) {
1987                buffer_info = &tx_ring->buffer_info[i];
1988                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1989        }
1990
1991        netdev_reset_queue(adapter->netdev);
1992        size = sizeof(struct e1000_buffer) * tx_ring->count;
1993        memset(tx_ring->buffer_info, 0, size);
1994
1995        /* Zero out the descriptor ring */
1996
1997        memset(tx_ring->desc, 0, tx_ring->size);
1998
1999        tx_ring->next_to_use = 0;
2000        tx_ring->next_to_clean = 0;
2001        tx_ring->last_tx_tso = false;
2002
2003        writel(0, hw->hw_addr + tx_ring->tdh);
2004        writel(0, hw->hw_addr + tx_ring->tdt);
2005}
2006
2007/**
2008 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2009 * @adapter: board private structure
2010 **/
2011static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2012{
2013        int i;
2014
2015        for (i = 0; i < adapter->num_tx_queues; i++)
2016                e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2017}
2018
2019/**
2020 * e1000_free_rx_resources - Free Rx Resources
2021 * @adapter: board private structure
2022 * @rx_ring: ring to clean the resources from
2023 *
2024 * Free all receive software resources
2025 **/
2026static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2027                                    struct e1000_rx_ring *rx_ring)
2028{
2029        struct pci_dev *pdev = adapter->pdev;
2030
2031        e1000_clean_rx_ring(adapter, rx_ring);
2032
2033        vfree(rx_ring->buffer_info);
2034        rx_ring->buffer_info = NULL;
2035
2036        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2037                          rx_ring->dma);
2038
2039        rx_ring->desc = NULL;
2040}
2041
2042/**
2043 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2044 * @adapter: board private structure
2045 *
2046 * Free all receive software resources
2047 **/
2048void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2049{
2050        int i;
2051
2052        for (i = 0; i < adapter->num_rx_queues; i++)
2053                e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2054}
2055
2056/**
2057 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2058 * @adapter: board private structure
2059 * @rx_ring: ring to free buffers from
2060 **/
2061static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2062                                struct e1000_rx_ring *rx_ring)
2063{
2064        struct e1000_hw *hw = &adapter->hw;
2065        struct e1000_buffer *buffer_info;
2066        struct pci_dev *pdev = adapter->pdev;
2067        unsigned long size;
2068        unsigned int i;
2069
2070        /* Free all the Rx ring sk_buffs */
2071        for (i = 0; i < rx_ring->count; i++) {
2072                buffer_info = &rx_ring->buffer_info[i];
2073                if (buffer_info->dma &&
2074                    adapter->clean_rx == e1000_clean_rx_irq) {
2075                        dma_unmap_single(&pdev->dev, buffer_info->dma,
2076                                         buffer_info->length,
2077                                         DMA_FROM_DEVICE);
2078                } else if (buffer_info->dma &&
2079                           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2080                        dma_unmap_page(&pdev->dev, buffer_info->dma,
2081                                       buffer_info->length,
2082                                       DMA_FROM_DEVICE);
2083                }
2084
2085                buffer_info->dma = 0;
2086                if (buffer_info->page) {
2087                        put_page(buffer_info->page);
2088                        buffer_info->page = NULL;
2089                }
2090                if (buffer_info->skb) {
2091                        dev_kfree_skb(buffer_info->skb);
2092                        buffer_info->skb = NULL;
2093                }
2094        }
2095
2096        /* there also may be some cached data from a chained receive */
2097        if (rx_ring->rx_skb_top) {
2098                dev_kfree_skb(rx_ring->rx_skb_top);
2099                rx_ring->rx_skb_top = NULL;
2100        }
2101
2102        size = sizeof(struct e1000_buffer) * rx_ring->count;
2103        memset(rx_ring->buffer_info, 0, size);
2104
2105        /* Zero out the descriptor ring */
2106        memset(rx_ring->desc, 0, rx_ring->size);
2107
2108        rx_ring->next_to_clean = 0;
2109        rx_ring->next_to_use = 0;
2110
2111        writel(0, hw->hw_addr + rx_ring->rdh);
2112        writel(0, hw->hw_addr + rx_ring->rdt);
2113}
2114
2115/**
2116 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2117 * @adapter: board private structure
2118 **/
2119static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2120{
2121        int i;
2122
2123        for (i = 0; i < adapter->num_rx_queues; i++)
2124                e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2125}
2126
2127/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2128 * and memory write and invalidate disabled for certain operations
2129 */
2130static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2131{
2132        struct e1000_hw *hw = &adapter->hw;
2133        struct net_device *netdev = adapter->netdev;
2134        u32 rctl;
2135
2136        e1000_pci_clear_mwi(hw);
2137
2138        rctl = er32(RCTL);
2139        rctl |= E1000_RCTL_RST;
2140        ew32(RCTL, rctl);
2141        E1000_WRITE_FLUSH();
2142        mdelay(5);
2143
2144        if (netif_running(netdev))
2145                e1000_clean_all_rx_rings(adapter);
2146}
2147
2148static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2149{
2150        struct e1000_hw *hw = &adapter->hw;
2151        struct net_device *netdev = adapter->netdev;
2152        u32 rctl;
2153
2154        rctl = er32(RCTL);
2155        rctl &= ~E1000_RCTL_RST;
2156        ew32(RCTL, rctl);
2157        E1000_WRITE_FLUSH();
2158        mdelay(5);
2159
2160        if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2161                e1000_pci_set_mwi(hw);
2162
2163        if (netif_running(netdev)) {
2164                /* No need to loop, because 82542 supports only 1 queue */
2165                struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2166                e1000_configure_rx(adapter);
2167                adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2168        }
2169}
2170
2171/**
2172 * e1000_set_mac - Change the Ethernet Address of the NIC
2173 * @netdev: network interface device structure
2174 * @p: pointer to an address structure
2175 *
2176 * Returns 0 on success, negative on failure
2177 **/
2178static int e1000_set_mac(struct net_device *netdev, void *p)
2179{
2180        struct e1000_adapter *adapter = netdev_priv(netdev);
2181        struct e1000_hw *hw = &adapter->hw;
2182        struct sockaddr *addr = p;
2183
2184        if (!is_valid_ether_addr(addr->sa_data))
2185                return -EADDRNOTAVAIL;
2186
2187        /* 82542 2.0 needs to be in reset to write receive address registers */
2188
2189        if (hw->mac_type == e1000_82542_rev2_0)
2190                e1000_enter_82542_rst(adapter);
2191
2192        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2193        memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2194
2195        e1000_rar_set(hw, hw->mac_addr, 0);
2196
2197        if (hw->mac_type == e1000_82542_rev2_0)
2198                e1000_leave_82542_rst(adapter);
2199
2200        return 0;
2201}
2202
2203/**
2204 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2205 * @netdev: network interface device structure
2206 *
2207 * The set_rx_mode entry point is called whenever the unicast or multicast
2208 * address lists or the network interface flags are updated. This routine is
2209 * responsible for configuring the hardware for proper unicast, multicast,
2210 * promiscuous mode, and all-multi behavior.
2211 **/
2212static void e1000_set_rx_mode(struct net_device *netdev)
2213{
2214        struct e1000_adapter *adapter = netdev_priv(netdev);
2215        struct e1000_hw *hw = &adapter->hw;
2216        struct netdev_hw_addr *ha;
2217        bool use_uc = false;
2218        u32 rctl;
2219        u32 hash_value;
2220        int i, rar_entries = E1000_RAR_ENTRIES;
2221        int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2222        u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2223
2224        if (!mcarray)
2225                return;
2226
2227        /* Check for Promiscuous and All Multicast modes */
2228
2229        rctl = er32(RCTL);
2230
2231        if (netdev->flags & IFF_PROMISC) {
2232                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2233                rctl &= ~E1000_RCTL_VFE;
2234        } else {
2235                if (netdev->flags & IFF_ALLMULTI)
2236                        rctl |= E1000_RCTL_MPE;
2237                else
2238                        rctl &= ~E1000_RCTL_MPE;
2239                /* Enable VLAN filter if there is a VLAN */
2240                if (e1000_vlan_used(adapter))
2241                        rctl |= E1000_RCTL_VFE;
2242        }
2243
2244        if (netdev_uc_count(netdev) > rar_entries - 1) {
2245                rctl |= E1000_RCTL_UPE;
2246        } else if (!(netdev->flags & IFF_PROMISC)) {
2247                rctl &= ~E1000_RCTL_UPE;
2248                use_uc = true;
2249        }
2250
2251        ew32(RCTL, rctl);
2252
2253        /* 82542 2.0 needs to be in reset to write receive address registers */
2254
2255        if (hw->mac_type == e1000_82542_rev2_0)
2256                e1000_enter_82542_rst(adapter);
2257
2258        /* load the first 14 addresses into the exact filters 1-14. Unicast
2259         * addresses take precedence to avoid disabling unicast filtering
2260         * when possible.
2261         *
2262         * RAR 0 is used for the station MAC address.
2263         * If there are fewer than 14 addresses, go ahead and clear the filters.
2264         */
2265        i = 1;
2266        if (use_uc)
2267                netdev_for_each_uc_addr(ha, netdev) {
2268                        if (i == rar_entries)
2269                                break;
2270                        e1000_rar_set(hw, ha->addr, i++);
2271                }
2272
2273        netdev_for_each_mc_addr(ha, netdev) {
2274                if (i == rar_entries) {
2275                        /* load any remaining addresses into the hash table */
2276                        u32 hash_reg, hash_bit, mta;
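                            /* The hash picks one bit out of the 128 x 32-bit
                             * Multicast Table Array: the upper bits select
                             * the MTA register (hash_reg), the low 5 bits the
                             * bit within it (hash_bit).
                             */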
2277                        hash_value = e1000_hash_mc_addr(hw, ha->addr);
2278                        hash_reg = (hash_value >> 5) & 0x7F;
2279                        hash_bit = hash_value & 0x1F;
2280                        mta = (1 << hash_bit);
2281                        mcarray[hash_reg] |= mta;
2282                } else {
2283                        e1000_rar_set(hw, ha->addr, i++);
2284                }
2285        }
2286
2287        for (; i < rar_entries; i++) {
2288                E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2289                E1000_WRITE_FLUSH();
2290                E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2291                E1000_WRITE_FLUSH();
2292        }
2293
2294        /* write the hash table completely, writing from the bottom up to
2295         * work around write-combining chipsets and to avoid flushing each write
2296         */
2297        for (i = mta_reg_count - 1; i >= 0 ; i--) {
2298                /* The 82544 has an erratum where writing odd offsets
2299                 * overwrites the previous even offset, but writing
2300                 * backwards over the range works around the issue by
2301                 * always writing the odd offset first.
2302                 */
2303                E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2304        }
2305        E1000_WRITE_FLUSH();
2306
2307        if (hw->mac_type == e1000_82542_rev2_0)
2308                e1000_leave_82542_rst(adapter);
2309
2310        kfree(mcarray);
2311}
2312
2313/**
2314 * e1000_update_phy_info_task - get phy info
2315 * @work: work struct contained inside adapter struct
2316 *
2317 * Need to wait a few seconds after link up to get diagnostic information from
2318 * the phy
2319 */
2320static void e1000_update_phy_info_task(struct work_struct *work)
2321{
2322        struct e1000_adapter *adapter = container_of(work,
2323                                                     struct e1000_adapter,
2324                                                     phy_info_task.work);
2325
2326        e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2327}
2328
2329/**
2330 * e1000_82547_tx_fifo_stall_task - delayed work to clear an 82547 Tx FIFO stall
2331 * @work: work struct contained inside adapter struct
2332 **/
2333static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2334{
2335        struct e1000_adapter *adapter = container_of(work,
2336                                                     struct e1000_adapter,
2337                                                     fifo_stall_task.work);
2338        struct e1000_hw *hw = &adapter->hw;
2339        struct net_device *netdev = adapter->netdev;
2340        u32 tctl;
2341
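            /* Once a stall is flagged, wait until both the descriptor ring and
             * the on-chip Tx FIFO have drained (head == tail for each), then
             * briefly disable transmits, reset the FIFO head/tail pointers to
             * the start of the FIFO, and re-enable transmits.
             */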
2342        if (atomic_read(&adapter->tx_fifo_stall)) {
2343                if ((er32(TDT) == er32(TDH)) &&
2344                   (er32(TDFT) == er32(TDFH)) &&
2345                   (er32(TDFTS) == er32(TDFHS))) {
2346                        tctl = er32(TCTL);
2347                        ew32(TCTL, tctl & ~E1000_TCTL_EN);
2348                        ew32(TDFT, adapter->tx_head_addr);
2349                        ew32(TDFH, adapter->tx_head_addr);
2350                        ew32(TDFTS, adapter->tx_head_addr);
2351                        ew32(TDFHS, adapter->tx_head_addr);
2352                        ew32(TCTL, tctl);
2353                        E1000_WRITE_FLUSH();
2354
2355                        adapter->tx_fifo_head = 0;
2356                        atomic_set(&adapter->tx_fifo_stall, 0);
2357                        netif_wake_queue(netdev);
2358                } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2359                        schedule_delayed_work(&adapter->fifo_stall_task, 1);
2360                }
2361        }
2362}
2363
2364bool e1000_has_link(struct e1000_adapter *adapter)
2365{
2366        struct e1000_hw *hw = &adapter->hw;
2367        bool link_active = false;
2368
2369        /* get_link_status is set on an LSC (link status change) interrupt
2370         * or an rx sequence error interrupt (except on the Intel ce4100).
2371         * It is cleared, and stays false, once e1000_check_for_link
2372         * establishes link; this applies to copper adapters
2373         * ONLY.
2374         */
2375        switch (hw->media_type) {
2376        case e1000_media_type_copper:
2377                if (hw->mac_type == e1000_ce4100)
2378                        hw->get_link_status = 1;
2379                if (hw->get_link_status) {
2380                        e1000_check_for_link(hw);
2381                        link_active = !hw->get_link_status;
2382                } else {
2383                        link_active = true;
2384                }
2385                break;
2386        case e1000_media_type_fiber:
2387                e1000_check_for_link(hw);
2388                link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2389                break;
2390        case e1000_media_type_internal_serdes:
2391                e1000_check_for_link(hw);
2392                link_active = hw->serdes_has_link;
2393                break;
2394        default:
2395                break;
2396        }
2397
2398        return link_active;
2399}
2400
2401/**
2402 * e1000_watchdog - work function
2403 * @work: work struct contained inside adapter struct
2404 **/
2405static void e1000_watchdog(struct work_struct *work)
2406{
2407        struct e1000_adapter *adapter = container_of(work,
2408                                                     struct e1000_adapter,
2409                                                     watchdog_task.work);
2410        struct e1000_hw *hw = &adapter->hw;
2411        struct net_device *netdev = adapter->netdev;
2412        struct e1000_tx_ring *txdr = adapter->tx_ring;
2413        u32 link, tctl;
2414
2415        link = e1000_has_link(adapter);
2416        if ((netif_carrier_ok(netdev)) && link)
2417                goto link_up;
2418
2419        if (link) {
2420                if (!netif_carrier_ok(netdev)) {
2421                        u32 ctrl;
2422                        bool txb2b = true;
2423                        /* update snapshot of PHY registers on LSC */
2424                        e1000_get_speed_and_duplex(hw,
2425                                                   &adapter->link_speed,
2426                                                   &adapter->link_duplex);
2427
2428                        ctrl = er32(CTRL);
2429                        pr_info("%s NIC Link is Up %d Mbps %s, "
2430                                "Flow Control: %s\n",
2431                                netdev->name,
2432                                adapter->link_speed,
2433                                adapter->link_duplex == FULL_DUPLEX ?
2434                                "Full Duplex" : "Half Duplex",
2435                                ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2436                                E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2437                                E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2438                                E1000_CTRL_TFCE) ? "TX" : "None")));
2439
2440                        /* adjust timeout factor according to speed/duplex */
2441                        adapter->tx_timeout_factor = 1;
2442                        switch (adapter->link_speed) {
2443                        case SPEED_10:
2444                                txb2b = false;
2445                                adapter->tx_timeout_factor = 16;
2446                                break;
2447                        case SPEED_100:
2448                                txb2b = false;
2449                                /* maybe add some timeout factor ? */
2450                                break;
2451                        }
2452
2453                        /* enable transmits in the hardware */
2454                        tctl = er32(TCTL);
2455                        tctl |= E1000_TCTL_EN;
2456                        ew32(TCTL, tctl);
2457
2458                        netif_carrier_on(netdev);
2459                        if (!test_bit(__E1000_DOWN, &adapter->flags))
2460                                schedule_delayed_work(&adapter->phy_info_task,
2461                                                      2 * HZ);
2462                        adapter->smartspeed = 0;
2463                }
2464        } else {
2465                if (netif_carrier_ok(netdev)) {
2466                        adapter->link_speed = 0;
2467                        adapter->link_duplex = 0;
2468                        pr_info("%s NIC Link is Down\n",
2469                                netdev->name);
2470                        netif_carrier_off(netdev);
2471
2472                        if (!test_bit(__E1000_DOWN, &adapter->flags))
2473                                schedule_delayed_work(&adapter->phy_info_task,
2474                                                      2 * HZ);
2475                }
2476
2477                e1000_smartspeed(adapter);
2478        }
2479
2480link_up:
2481        e1000_update_stats(adapter);
2482
2483        hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2484        adapter->tpt_old = adapter->stats.tpt;
2485        hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2486        adapter->colc_old = adapter->stats.colc;
2487
2488        adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2489        adapter->gorcl_old = adapter->stats.gorcl;
2490        adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2491        adapter->gotcl_old = adapter->stats.gotcl;
2492
2493        e1000_update_adaptive(hw);
2494
2495        if (!netif_carrier_ok(netdev)) {
2496                if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2497                        /* We've lost link, so the controller stops DMA,
2498                         * but we've got queued Tx work that's never going
2499                         * to get done, so reset controller to flush Tx.
2500                         * (Do the reset outside of interrupt context).
2501                         */
2502                        adapter->tx_timeout_count++;
2503                        schedule_work(&adapter->reset_task);
2504                        /* exit immediately since reset is imminent */
2505                        return;
2506                }
2507        }
2508
2509        /* Simple mode for Interrupt Throttle Rate (ITR) */
2510        if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2511                /* Symmetric Tx/Rx gets a reduced ITR=2000;
2512                 * Total asymmetrical Tx or Rx gets ITR=8000;
2513                 * everyone else is between 2000-8000.
2514                 */
2515                u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2516                u32 dif = (adapter->gotcl > adapter->gorcl ?
2517                            adapter->gotcl - adapter->gorcl :
2518                            adapter->gorcl - adapter->gotcl) / 10000;
2519                u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2520
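                    /* The ITR register counts in 256 ns units, so a target of
                     * itr interrupts/sec becomes (10^9 / itr) ns per
                     * interrupt, divided by 256.
                     */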
2521                ew32(ITR, 1000000000 / (itr * 256));
2522        }
2523
2524        /* Cause software interrupt to ensure rx ring is cleaned */
2525        ew32(ICS, E1000_ICS_RXDMT0);
2526
2527        /* Force detection of hung controller every watchdog period */
2528        adapter->detect_tx_hung = true;
2529
2530        /* Reschedule the task */
2531        if (!test_bit(__E1000_DOWN, &adapter->flags))
2532                schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2533}
2534
2535enum latency_range {
2536        lowest_latency = 0,
2537        low_latency = 1,
2538        bulk_latency = 2,
2539        latency_invalid = 255
2540};
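    /* These ranges map to the target interrupt rates used in e1000_set_itr():
     * lowest_latency -> 70000 ints/s, low_latency -> 20000, bulk_latency -> 4000.
     */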
2541
2542/**
2543 * e1000_update_itr - update the dynamic ITR value based on statistics
2544 * @adapter: pointer to adapter
2545 * @itr_setting: current adapter->itr
2546 * @packets: the number of packets during this measurement interval
2547 * @bytes: the number of bytes during this measurement interval
2548 *
2549 *      Stores a new ITR value based on packets and byte
2550 *      counts during the last interrupt.  The advantage of per interrupt
2551 *      computation is faster updates and more accurate ITR for the current
2552 *      traffic pattern.  Constants in this function were computed
2553 *      based on theoretical maximum wire speed and thresholds were set based
2554 *      on testing data as well as attempting to minimize response time
2555 *      while increasing bulk throughput.
2556 *      This functionality is controlled by the InterruptThrottleRate module
2557 *      parameter (see e1000_param.c).
2558 **/
2559static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2560                                     u16 itr_setting, int packets, int bytes)
2561{
2562        unsigned int retval = itr_setting;
2563        struct e1000_hw *hw = &adapter->hw;
2564
2565        if (unlikely(hw->mac_type < e1000_82540))
2566                goto update_itr_done;
2567
2568        if (packets == 0)
2569                goto update_itr_done;
2570
2571        switch (itr_setting) {
2572        case lowest_latency:
2573                /* jumbo frames get bulk treatment */
2574                if (bytes/packets > 8000)
2575                        retval = bulk_latency;
2576                else if ((packets < 5) && (bytes > 512))
2577                        retval = low_latency;
2578                break;
2579        case low_latency:  /* 50 usec aka 20000 ints/s */
2580                if (bytes > 10000) {
2581                        /* jumbo frames need bulk latency setting */
2582                        if (bytes/packets > 8000)
2583                                retval = bulk_latency;
2584                        else if ((packets < 10) || ((bytes/packets) > 1200))
2585                                retval = bulk_latency;
2586                        else if (packets > 35)
2587                                retval = lowest_latency;
2588                } else if (bytes/packets > 2000)
2589                        retval = bulk_latency;
2590                else if (packets <= 2 && bytes < 512)
2591                        retval = lowest_latency;
2592                break;
2593        case bulk_latency: /* 250 usec aka 4000 ints/s */
2594                if (bytes > 25000) {
2595                        if (packets > 35)
2596                                retval = low_latency;
2597                } else if (bytes < 6000) {
2598                        retval = low_latency;
2599                }
2600                break;
2601        }
2602
2603update_itr_done:
2604        return retval;
2605}
2606
2607static void e1000_set_itr(struct e1000_adapter *adapter)
2608{
2609        struct e1000_hw *hw = &adapter->hw;
2610        u16 current_itr;
2611        u32 new_itr = adapter->itr;
2612
2613        if (unlikely(hw->mac_type < e1000_82540))
2614                return;
2615
2616        /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2617        if (unlikely(adapter->link_speed != SPEED_1000)) {
2618                current_itr = 0;
2619                new_itr = 4000;
2620                goto set_itr_now;
2621        }
2622
2623        adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2624                                           adapter->total_tx_packets,
2625                                           adapter->total_tx_bytes);
2626        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2627        if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2628                adapter->tx_itr = low_latency;
2629
2630        adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2631                                           adapter->total_rx_packets,
2632                                           adapter->total_rx_bytes);
2633        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2634        if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2635                adapter->rx_itr = low_latency;
2636
2637        current_itr = max(adapter->rx_itr, adapter->tx_itr);
2638
2639        switch (current_itr) {
2640        /* counts and packets in update_itr are dependent on these numbers */
2641        case lowest_latency:
2642                new_itr = 70000;
2643                break;
2644        case low_latency:
2645                new_itr = 20000; /* aka hwitr = ~200 */
2646                break;
2647        case bulk_latency:
2648                new_itr = 4000;
2649                break;
2650        default:
2651                break;
2652        }
2653
2654set_itr_now:
2655        if (new_itr != adapter->itr) {
2656                /* this attempts to bias the interrupt rate towards Bulk
2657                 * by adding intermediate steps when interrupt rate is
2658                 * increasing
2659                 */
2660                new_itr = new_itr > adapter->itr ?
2661                          min(adapter->itr + (new_itr >> 2), new_itr) :
2662                          new_itr;
2663                adapter->itr = new_itr;
2664                ew32(ITR, 1000000000 / (new_itr * 256));
2665        }
2666}
2667
2668#define E1000_TX_FLAGS_CSUM             0x00000001
2669#define E1000_TX_FLAGS_VLAN             0x00000002
2670#define E1000_TX_FLAGS_TSO              0x00000004
2671#define E1000_TX_FLAGS_IPV4             0x00000008
2672#define E1000_TX_FLAGS_NO_FCS           0x00000010
2673#define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2674#define E1000_TX_FLAGS_VLAN_SHIFT       16
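    /* When E1000_TX_FLAGS_VLAN is set, the 802.1Q tag to insert is carried in
     * the upper 16 bits of tx_flags (E1000_TX_FLAGS_VLAN_MASK/_SHIFT).
     */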
2675
2676static int e1000_tso(struct e1000_adapter *adapter,
2677                     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2678{
2679        struct e1000_context_desc *context_desc;
2680        struct e1000_buffer *buffer_info;
2681        unsigned int i;
2682        u32 cmd_length = 0;
2683        u16 ipcse = 0, tucse, mss;
2684        u8 ipcss, ipcso, tucss, tucso, hdr_len;
2685        int err;
2686
2687        if (skb_is_gso(skb)) {
2688                if (skb_header_cloned(skb)) {
2689                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2690                        if (err)
2691                                return err;
2692                }
2693
2694                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2695                mss = skb_shinfo(skb)->gso_size;
2696                if (skb->protocol == htons(ETH_P_IP)) {
2697                        struct iphdr *iph = ip_hdr(skb);
2698                        iph->tot_len = 0;
2699                        iph->check = 0;
2700                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2701                                                                 iph->daddr, 0,
2702                                                                 IPPROTO_TCP,
2703                                                                 0);
2704                        cmd_length = E1000_TXD_CMD_IP;
2705                        ipcse = skb_transport_offset(skb) - 1;
2706                } else if (skb->protocol == htons(ETH_P_IPV6)) {
2707                        ipv6_hdr(skb)->payload_len = 0;
2708                        tcp_hdr(skb)->check =
2709                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2710                                                 &ipv6_hdr(skb)->daddr,
2711                                                 0, IPPROTO_TCP, 0);
2712                        ipcse = 0;
2713                }
2714                ipcss = skb_network_offset(skb);
2715                ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2716                tucss = skb_transport_offset(skb);
2717                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2718                tucse = 0;
2719
2720                cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2721                               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2722
2723                i = tx_ring->next_to_use;
2724                context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2725                buffer_info = &tx_ring->buffer_info[i];
2726
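                    /* In the context descriptor, ipcss/tucss are the offsets
                     * where IP and TCP checksumming start, ipcso/tucso the
                     * offsets of the checksum fields to fill in, and
                     * ipcse/tucse the end offsets (0 means "to the end of the
                     * packet").
                     */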
2727                context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2728                context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2729                context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2730                context_desc->upper_setup.tcp_fields.tucss = tucss;
2731                context_desc->upper_setup.tcp_fields.tucso = tucso;
2732                context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2733                context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2734                context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2735                context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2736
2737                buffer_info->time_stamp = jiffies;
2738                buffer_info->next_to_watch = i;
2739
2740                if (++i == tx_ring->count)
                            i = 0;
2741                tx_ring->next_to_use = i;
2742
2743                return true;
2744        }
2745        return false;
2746}
2747
2748static bool e1000_tx_csum(struct e1000_adapter *adapter,
2749                          struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2750{
2751        struct e1000_context_desc *context_desc;
2752        struct e1000_buffer *buffer_info;
2753        unsigned int i;
2754        u8 css;
2755        u32 cmd_len = E1000_TXD_CMD_DEXT;
2756
2757        if (skb->ip_summed != CHECKSUM_PARTIAL)
2758                return false;
2759
2760        switch (skb->protocol) {
2761        case cpu_to_be16(ETH_P_IP):
2762                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2763                        cmd_len |= E1000_TXD_CMD_TCP;
2764                break;
2765        case cpu_to_be16(ETH_P_IPV6):
2766                /* XXX not handling all IPV6 headers */
2767                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2768                        cmd_len |= E1000_TXD_CMD_TCP;
2769                break;
2770        default:
2771                if (unlikely(net_ratelimit()))
2772                        e_warn(drv, "checksum_partial proto=%x!\n",
2773                               skb->protocol);
2774                break;
2775        }
2776
2777        css = skb_checksum_start_offset(skb);
2778
2779        i = tx_ring->next_to_use;
2780        buffer_info = &tx_ring->buffer_info[i];
2781        context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2782
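            /* The hardware sums the packet from tucss to the end (tucse == 0)
             * and stores the result at offset tucso; ip_config is left zero
             * because only the TCP/UDP checksum is offloaded here.
             */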
2783        context_desc->lower_setup.ip_config = 0;
2784        context_desc->upper_setup.tcp_fields.tucss = css;
2785        context_desc->upper_setup.tcp_fields.tucso =
2786                css + skb->csum_offset;
2787        context_desc->upper_setup.tcp_fields.tucse = 0;
2788        context_desc->tcp_seg_setup.data = 0;
2789        context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2790
2791        buffer_info->time_stamp = jiffies;
2792        buffer_info->next_to_watch = i;
2793
2794        if (unlikely(++i == tx_ring->count))
                    i = 0;
2795        tx_ring->next_to_use = i;
2796
2797        return true;
2798}
2799
2800#define E1000_MAX_TXD_PWR       12
2801#define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
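    /* Each data descriptor may carry at most 4096 (1 << 12) bytes, so larger
     * buffers are split across multiple descriptors in e1000_tx_map().
     */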
2802
2803static int e1000_tx_map(struct e1000_adapter *adapter,
2804                        struct e1000_tx_ring *tx_ring,
2805                        struct sk_buff *skb, unsigned int first,
2806                        unsigned int max_per_txd, unsigned int nr_frags,
2807                        unsigned int mss)
2808{
2809        struct e1000_hw *hw = &adapter->hw;
2810        struct pci_dev *pdev = adapter->pdev;
2811        struct e1000_buffer *buffer_info;
2812        unsigned int len = skb_headlen(skb);
2813        unsigned int offset = 0, size, count = 0, i;
2814        unsigned int f, bytecount, segs;
2815
2816        i = tx_ring->next_to_use;
2817
2818        while (len) {
2819                buffer_info = &tx_ring->buffer_info[i];
2820                size = min(len, max_per_txd);
2821                /* Workaround for a controller erratum: the descriptor for a
2822                 * non-TSO packet in a linear skb that follows a TSO packet
2823                 * gets written back prematurely, before the data is fully
2824                 * DMA'd to the controller.
2825                 */
2826                if (!skb->data_len && tx_ring->last_tx_tso &&
2827                    !skb_is_gso(skb)) {
2828                        tx_ring->last_tx_tso = false;
2829                        size -= 4;
2830                }
2831
2832                /* Workaround for premature desc write-backs
2833                 * in TSO mode.  Append 4-byte sentinel desc
2834                 */
2835                if (unlikely(mss && !nr_frags && size == len && size > 8))
2836                        size -= 4;
2837                /* Workaround for errata 10, which applies
2838                 * to all controllers in PCI-X mode.
2839                 * The fix is to make sure that the first descriptor of a
2840                 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2841                 */
2842                if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2843                                (size > 2015) && count == 0))
2844                        size = 2015;
2845
2846                /* Workaround for potential 82544 hang in PCI-X.  Avoid
2847                 * terminating buffers within evenly-aligned dwords.
2848                 */
2849                if (unlikely(adapter->pcix_82544 &&
2850                   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2851                   size > 4))
2852                        size -= 4;
2853
2854                buffer_info->length = size;
2855                /* set time_stamp *before* dma to help avoid a possible race */
2856                buffer_info->time_stamp = jiffies;
2857                buffer_info->mapped_as_page = false;
2858                buffer_info->dma = dma_map_single(&pdev->dev,
2859                                                  skb->data + offset,
2860                                                  size, DMA_TO_DEVICE);
2861                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2862                        goto dma_error;
2863                buffer_info->next_to_watch = i;
2864
2865                len -= size;
2866                offset += size;
2867                count++;
2868                if (len) {
2869                        i++;
2870                        if (unlikely(i == tx_ring->count))
2871                                i = 0;
2872                }
2873        }
2874
2875        for (f = 0; f < nr_frags; f++) {
2876                const struct skb_frag_struct *frag;
2877
2878                frag = &skb_shinfo(skb)->frags[f];
2879                len = skb_frag_size(frag);
2880                offset = 0;
2881
2882                while (len) {
2883                        unsigned long bufend;
2884                        i++;
2885                        if (unlikely(i == tx_ring->count))
2886                                i = 0;
2887
2888                        buffer_info = &tx_ring->buffer_info[i];
2889                        size = min(len, max_per_txd);
2890                        /* Workaround for premature desc write-backs
2891                         * in TSO mode.  Append 4-byte sentinel desc
2892                         */
2893                        if (unlikely(mss && f == (nr_frags-1) &&
2894                            size == len && size > 8))
2895                                size -= 4;
2896                        /* Workaround for potential 82544 hang in PCI-X.
2897                         * Avoid terminating buffers within evenly-aligned
2898                         * dwords.
2899                         */
2900                        bufend = (unsigned long)
2901                                page_to_phys(skb_frag_page(frag));
2902                        bufend += offset + size - 1;
2903                        if (unlikely(adapter->pcix_82544 &&
2904                                     !(bufend & 4) &&
2905                                     size > 4))
2906                                size -= 4;
2907
2908                        buffer_info->length = size;
2909                        buffer_info->time_stamp = jiffies;
2910                        buffer_info->mapped_as_page = true;
2911                        buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2912                                                offset, size, DMA_TO_DEVICE);
2913                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2914                                goto dma_error;
2915                        buffer_info->next_to_watch = i;
2916
2917                        len -= size;
2918                        offset += size;
2919                        count++;
2920                }
2921        }
2922
2923        segs = skb_shinfo(skb)->gso_segs ?: 1;
2924        /* multiply data chunks by size of headers */
2925        bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2926
2927        tx_ring->buffer_info[i].skb = skb;
2928        tx_ring->buffer_info[i].segs = segs;
2929        tx_ring->buffer_info[i].bytecount = bytecount;
2930        tx_ring->buffer_info[first].next_to_watch = i;
2931
2932        return count;
2933
2934dma_error:
2935        dev_err(&pdev->dev, "TX DMA map failed\n");
2936        buffer_info->dma = 0;
2937        if (count)
2938                count--;
2939
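            /* Unwind: walk i backwards over the descriptors mapped so far
             * and release their DMA mappings before reporting the failure.
             */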
2940        while (count--) {
2941                if (i == 0)
2942                        i += tx_ring->count;
2943                i--;
2944                buffer_info = &tx_ring->buffer_info[i];
2945                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2946        }
2947
2948        return 0;
2949}
2950
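    /**
     * e1000_tx_queue - write the prepared descriptors and advance the tail
     * @adapter: board private structure
     * @tx_ring: ring holding the already-mapped buffers
     * @tx_flags: E1000_TX_FLAGS_* bits describing this frame's offloads
     * @count: number of descriptors to hand to the hardware
     *
     * Turns the buffer_info entries into Tx descriptors and writes the new
     * tail (TDT) so the hardware starts fetching them.
     **/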
2951static void e1000_tx_queue(struct e1000_adapter *adapter,
2952                           struct e1000_tx_ring *tx_ring, int tx_flags,
2953                           int count)
2954{
2955        struct e1000_hw *hw = &adapter->hw;
2956        struct e1000_tx_desc *tx_desc = NULL;
2957        struct e1000_buffer *buffer_info;
2958        u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2959        unsigned int i;
2960
2961        if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2962                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2963                             E1000_TXD_CMD_TSE;
2964                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2965
2966                if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2967                        txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2968        }
2969
2970        if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2971                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2972                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2973        }
2974
2975        if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2976                txd_lower |= E1000_TXD_CMD_VLE;
2977                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2978        }
2979
2980        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2981                txd_lower &= ~(E1000_TXD_CMD_IFCS);
2982
2983        i = tx_ring->next_to_use;
2984
2985        while (count--) {
2986                buffer_info = &tx_ring->buffer_info[i];
2987                tx_desc = E1000_TX_DESC(*tx_ring, i);
2988                tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2989                tx_desc->lower.data =
2990                        cpu_to_le32(txd_lower | buffer_info->length);
2991                tx_desc->upper.data = cpu_to_le32(txd_upper);
2992                if (unlikely(++i == tx_ring->count)) i = 0;
2993        }
2994
2995        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
2996
2997        /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
2998        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2999                tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3000
3001        /* Force memory writes to complete before letting h/w
3002         * know there are new descriptors to fetch.  (Only
3003         * applicable for weak-ordered memory model archs,
3004         * such as IA-64).
3005         */
3006        wmb();
3007
3008        tx_ring->next_to_use = i;
3009        writel(i, hw->hw_addr + tx_ring->tdt);
3010        /* We need this if more than one processor can write to our tail
3011         * at a time; it synchronizes IO on IA64/Altix systems.
3012         */
3013        mmiowb();
3014}
3015
3016/* 82547 workaround to avoid controller hang in half-duplex environment.
3017 * The workaround is to avoid queuing a large packet that would span
3018 * the internal Tx FIFO ring boundary by notifying the stack to resend
3019 * the packet at a later time.  This gives the Tx FIFO an opportunity to
3020 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3021 * to the beginning of the Tx FIFO.
3022 */
3023
3024#define E1000_FIFO_HDR                  0x10
3025#define E1000_82547_PAD_LEN             0x3E0
3026
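    /* Example: a 1518-byte frame plus the 16-byte FIFO header rounds up to
     * 1536 bytes; with E1000_82547_PAD_LEN = 0x3E0 (992), the workaround
     * below stalls that frame whenever 544 bytes or less remain between
     * tx_fifo_head and the end of the FIFO.
     */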
3027static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3028                                       struct sk_buff *skb)
3029{
3030        u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3031        u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3032
3033        skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3034
3035        if (adapter->link_duplex != HALF_DUPLEX)
3036                goto no_fifo_stall_required;
3037
3038        if (atomic_read(&adapter->tx_fifo_stall))
3039                return 1;
3040
3041        if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3042                atomic_set(&adapter->tx_fifo_stall, 1);
3043                return 1;
3044        }
3045
3046no_fifo_stall_required:
3047        adapter->tx_fifo_head += skb_fifo_len;
3048        if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3049                adapter->tx_fifo_head -= adapter->tx_fifo_size;
3050        return 0;
3051}
3052
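    /* Stop the queue, then re-check for free descriptors: another CPU may
     * have cleaned the ring between the caller's check and netif_stop_queue().
     * Returns -EBUSY if the queue must stay stopped, 0 if it was restarted.
     */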
3053static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3054{
3055        struct e1000_adapter *adapter = netdev_priv(netdev);
3056        struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3057
3058        netif_stop_queue(netdev);
3059        /* Herbert's original patch had:
3060         *  smp_mb__after_netif_stop_queue();
3061         * but since that doesn't exist yet, just open code it.
3062         */
3063        smp_mb();
3064
3065        /* We need to check again in case another CPU has just
3066         * made room available.
3067         */
3068        if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3069                return -EBUSY;
3070
3071        /* A reprieve! */
3072        netif_start_queue(netdev);
3073        ++adapter->restart_queue;
3074        return 0;
3075}
3076
3077static int e1000_maybe_stop_tx(struct net_device *netdev,
3078                               struct e1000_tx_ring *tx_ring, int size)
3079{
3080        if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3081                return 0;
3082        return __e1000_maybe_stop_tx(netdev, size);
3083}
3084
3085#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
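    /* TXD_USE_COUNT(S, X) estimates how many descriptors are needed for S
     * bytes when one descriptor carries at most 2^X bytes: (S >> X) + 1.
     * For example, an 1800-byte linear head with max_txd_pwr = 12 (4096
     * bytes per descriptor) reserves (1800 >> 12) + 1 = 1 descriptor; the
     * +1 over-reserves by one descriptor when S is an exact multiple of 2^X.
     */

    /**
     * e1000_xmit_frame - transmit entry point called by the network stack
     * @skb: frame to transmit
     * @netdev: network interface device structure
     *
     * Returns NETDEV_TX_OK when the frame has been consumed (queued or
     * dropped) and NETDEV_TX_BUSY when the ring is full and the stack
     * should retry later.
     **/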
3086static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3087                                    struct net_device *netdev)
3088{
3089        struct e1000_adapter *adapter = netdev_priv(netdev);
3090        struct e1000_hw *hw = &adapter->hw;
3091        struct e1000_tx_ring *tx_ring;
3092        unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3093        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3094        unsigned int tx_flags = 0;
3095        unsigned int len = skb_headlen(skb);
3096        unsigned int nr_frags;
3097        unsigned int mss;
3098        int count = 0;
3099        int tso;
3100        unsigned int f;
3101
3102        /* This goes back to the question of how to logically map a Tx queue
3103         * to a flow.  Right now, performance is impacted slightly negatively
3104         * if using multiple Tx queues.  If the stack breaks away from a
3105         * single qdisc implementation, we can look at this again.
3106         */
3107        tx_ring = adapter->tx_ring;
3108
3109        if (unlikely(skb->len <= 0)) {
3110                dev_kfree_skb_any(skb);
3111                return NETDEV_TX_OK;
3112        }
3113
3114        /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3115         * packets may get corrupted during padding by HW.
3116         * To work around this issue, pad all small packets manually.
3117         */
3118        if (skb->len < ETH_ZLEN) {
3119                if (skb_pad(skb, ETH_ZLEN - skb->len))
3120                        return NETDEV_TX_OK;
3121                skb->len = ETH_ZLEN;
3122                skb_set_tail_pointer(skb, ETH_ZLEN);
3123        }
3124
3125        mss = skb_shinfo(skb)->gso_size;
3126        /* The controller does a simple calculation to
3127         * make sure there is enough room in the FIFO before
3128         * initiating the DMA for each buffer.  The calc it uses is
3129         * 4 = ceil(buffer len / mss).  To make sure we don't
3130         * overrun the FIFO, cap the max buffer length at 4 * mss
3131         * when mss drops.
3132         */
3133        if (mss) {
3134                u8 hdr_len;
3135                max_per_txd = min(mss << 2, max_per_txd);
3136                max_txd_pwr = fls(max_per_txd) - 1;
3137
3138                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3139                if (skb->data_len && hdr_len == len) {
3140                        switch (hw->mac_type) {
3141                                unsigned int pull_size;
3142                        case e1000_82544:
3143                                /* Make sure we have room to chop off 4 bytes,
3144                                 * and that the end alignment will work out to
3145                                 * this hardware's requirements.
3146                                 * NOTE: this is a TSO-only workaround;
3147                                 * if the end byte alignment is not correct,
3148                                 * move us into the next dword.
3149                                 */
3150                                if ((unsigned long)(skb_tail_pointer(skb) - 1)
3151                                    & 4)
3152                                        break;
3153                                /* fall through */
3154                                pull_size = min((unsigned int)4, skb->data_len);
3155                                if (!__pskb_pull_tail(skb, pull_size)) {
3156                                        e_err(drv, "__pskb_pull_tail "
3157                                              "failed.\n");
3158                                        dev_kfree_skb_any(skb);
3159                                        return NETDEV_TX_OK;
3160                                }
3161                                len = skb_headlen(skb);
3162                                break;
3163                        default:
3164                                /* do nothing */
3165                                break;
3166                        }
3167                }
3168        }
3169
3170        /* reserve a descriptor for the offload context */
3171        if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3172                count++;
3173        count++;
3174
3175        /* Controller Erratum workaround */
3176        if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3177                count++;
3178
3179        count += TXD_USE_COUNT(len, max_txd_pwr);
3180
3181        if (adapter->pcix_82544)
3182                count++;
3183
3184        /* Workaround for errata 10, which applies to all controllers
3185         * in PCI-X mode, so add one more descriptor to the count
3186         */
3187        if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3188                        (len > 2015)))
3189                count++;
3190
3191        nr_frags = skb_shinfo(skb)->nr_frags;
3192        for (f = 0; f < nr_frags; f++)
3193                count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3194                                       max_txd_pwr);
3195        if (adapter->pcix_82544)
3196                count += nr_frags;
3197
3198        /* need: count + 2 desc gap to keep tail from touching
3199         * head, otherwise try next time
3200         */
3201        if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3202                return NETDEV_TX_BUSY;
3203
3204        if (unlikely((hw->mac_type == e1000_82547) &&
3205                     (e1000_82547_fifo_workaround(adapter, skb)))) {
3206                netif_stop_queue(netdev);
3207                if (!test_bit(__E1000_DOWN, &adapter->flags))
3208                        schedule_delayed_work(&adapter->fifo_stall_task, 1);
3209                return NETDEV_TX_BUSY;
3210        }
3211
3212        if (vlan_tx_tag_present(skb)) {
3213                tx_flags |= E1000_TX_FLAGS_VLAN;
3214                tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3215        }
3216
3217        first = tx_ring->next_to_use;
3218
3219        tso = e1000_tso(adapter, tx_ring, skb);
3220        if (tso < 0) {
3221                dev_kfree_skb_any(skb);
3222                return NETDEV_TX_OK;
3223        }
3224
3225        if (likely(tso)) {
3226                if (likely(hw->mac_type != e1000_82544))
3227                        tx_ring->last_tx_tso = true;
3228                tx_flags |= E1000_TX_FLAGS_TSO;
3229        } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3230                tx_flags |= E1000_TX_FLAGS_CSUM;
3231
3232        if (likely(skb->protocol == htons(ETH_P_IP)))
3233                tx_flags |= E1000_TX_FLAGS_IPV4;
3234
3235        if (unlikely(skb->no_fcs))
3236                tx_flags |= E1000_TX_FLAGS_NO_FCS;
3237
3238        count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3239                             nr_frags, mss);
3240
3241        if (count) {
3242                netdev_sent_queue(netdev, skb->len);
3243                skb_tx_timestamp(skb);
3244
3245                e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3246                /* Make sure there is space in the ring for the next send. */
3247                e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3248
3249        } else {
3250                dev_kfree_skb_any(skb);
3251                tx_ring->buffer_info[first].time_stamp = 0;
3252                tx_ring->next_to_use = first;
3253        }
3254
3255        return NETDEV_TX_OK;
3256}
3257
3258#define NUM_REGS 38 /* 1 based count */
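    /**
     * e1000_regdump - dump the main MAC registers to the kernel log
     * @adapter: board private structure
     **/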
3259static void e1000_regdump(struct e1000_adapter *adapter)
3260{
3261        struct e1000_hw *hw = &adapter->hw;
3262        u32 regs[NUM_REGS];
3263        u32 *regs_buff = regs;
3264        int i = 0;
3265
3266        static const char * const reg_name[] = {
3267                "CTRL",  "STATUS",
3268                "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3269                "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3270                "TIDV", "TXDCTL", "TADV", "TARC0",
3271                "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3272                "TXDCTL1", "TARC1",
3273                "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3274                "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3275                "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3276        };
3277
3278        regs_buff[0]  = er32(CTRL);
3279        regs_buff[1]  = er32(STATUS);
3280
3281        regs_buff[2]  = er32(RCTL);
3282        regs_buff[3]  = er32(RDLEN);
3283        regs_buff[4]  = er32(RDH);
3284        regs_buff[5]  = er32(RDT);
3285        regs_buff[6]  = er32(RDTR);
3286
3287        regs_buff[7]  = er32(TCTL);
3288        regs_buff[8]  = er32(TDBAL);
3289        regs_buff[9]  = er32(TDBAH);
3290        regs_buff[10] = er32(TDLEN);
3291        regs_buff[11] = er32(TDH);
3292        regs_buff[12] = er32(TDT);
3293        regs_buff[13] = er32(TIDV);
3294        regs_buff[14] = er32(TXDCTL);
3295        regs_buff[15] = er32(TADV);
3296        regs_buff[16] = er32(TARC0);
3297
3298        regs_buff[17] = er32(TDBAL1);
3299        regs_buff[18] = er32(TDBAH1);
3300        regs_buff[19] = er32(TDLEN1);
3301        regs_buff[20] = er32(TDH1);
3302        regs_buff[21] = er32(TDT1);
3303        regs_buff[22] = er32(TXDCTL1);
3304        regs_buff[23] = er32(TARC1);
3305        regs_buff[24] = er32(CTRL_EXT);
3306        regs_buff[25] = er32(ERT);
3307        regs_buff[26] = er32(RDBAL0);
3308        regs_buff[27] = er32(RDBAH0);
3309        regs_buff[28] = er32(TDFH);
3310        regs_buff[29] = er32(TDFT);
3311        regs_buff[30] = er32(TDFHS);
3312        regs_buff[31] = er32(TDFTS);
3313        regs_buff[32] = er32(TDFPC);
3314        regs_buff[33] = er32(RDFH);
3315        regs_buff[34] = er32(RDFT);
3316        regs_buff[35] = er32(RDFHS);
3317        regs_buff[36] = er32(RDFTS);
3318        regs_buff[37] = er32(RDFPC);
3319
3320        pr_info("Register dump\n");
3321        for (i = 0; i < NUM_REGS; i++)
3322                pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3323}
3324
3325/**
3326 * e1000_dump - Print registers, Tx ring and Rx ring
3327 **/
3328static void e1000_dump(struct e1000_adapter *adapter)
3329{
3330        /* this code doesn't handle multiple rings */
3331        struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3332        struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3333        int i;
3334
3335        if (!netif_msg_hw(adapter))
3336                return;
3337
3338        /* Print Registers */
3339        e1000_regdump(adapter);
3340
3341        /* transmit dump */
3342        pr_info("TX Desc ring0 dump\n");
3343
3344        /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3345         *
3346         * Legacy Transmit Descriptor
3347         *   +--------------------------------------------------------------+
3348         * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3349         *   +--------------------------------------------------------------+
3350         * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3351         *   +--------------------------------------------------------------+
3352         *   63       48 47        36 35    32 31     24 23    16 15        0
3353         *
3354         * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3355         *   63      48 47    40 39       32 31             16 15    8 7      0
3356         *   +----------------------------------------------------------------+
3357         * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3358         *   +----------------------------------------------------------------+
3359         * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3360         *   +----------------------------------------------------------------+
3361         *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3362         *
3363         * Extended Data Descriptor (DTYP=0x1)
3364         *   +----------------------------------------------------------------+
3365         * 0 |                     Buffer Address [63:0]                      |
3366         *   +----------------------------------------------------------------+
3367         * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3368         *   +----------------------------------------------------------------+
3369         *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3370         */
3371        pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3372        pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3373
3374        if (!netif_msg_tx_done(adapter))
3375                goto rx_ring_summary;
3376
3377        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3378                struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3379                struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3380                struct my_u { __le64 a; __le64 b; };
3381                struct my_u *u = (struct my_u *)tx_desc;
3382                const char *type;
3383
3384                if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3385                        type = "NTC/U";
3386                else if (i == tx_ring->next_to_use)
3387                        type = "NTU";
3388                else if (i == tx_ring->next_to_clean)
3389                        type = "NTC";
3390                else
3391                        type = "";
3392
3393                pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3394                        ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3395                        le64_to_cpu(u->a), le64_to_cpu(u->b),
3396                        (u64)buffer_info->dma, buffer_info->length,
3397                        buffer_info->next_to_watch,
3398                        (u64)buffer_info->time_stamp, buffer_info->skb, type);
3399        }
3400
3401rx_ring_summary:
3402        /* receive dump */
3403        pr_info("\nRX Desc ring dump\n");
3404
3405        /* Legacy Receive Descriptor Format
3406         *
3407         * +-----------------------------------------------------+
3408         * |                Buffer Address [63:0]                |
3409         * +-----------------------------------------------------+
3410         * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3411         * +-----------------------------------------------------+
3412         * 63       48 47    40 39      32 31         16 15      0
3413         */
3414        pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3415
3416        if (!netif_msg_rx_status(adapter))
3417                goto exit;
3418
3419        for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3420                struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3421                struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3422                struct my_u { __le64 a; __le64 b; };
3423                struct my_u *u = (struct my_u *)rx_desc;
3424                const char *type;
3425
3426                if (i == rx_ring->next_to_use)
3427                        type = "NTU";
3428                else if (i == rx_ring->next_to_clean)
3429                        type = "NTC";
3430                else
3431                        type = "";
3432
3433                pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3434                        i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3435                        (u64)buffer_info->dma, buffer_info->skb, type);
3436        } /* for */
3437
3438        /* dump the descriptor caches */
3439        /* rx */
3440        pr_info("Rx descriptor cache in 64bit format\n");
3441        for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3442                pr_info("R%04X: %08X|%08X %08X|%08X\n",
3443                        i,
3444                        readl(adapter->hw.hw_addr + i+4),
3445                        readl(adapter->hw.hw_addr + i),
3446                        readl(adapter->hw.hw_addr + i+12),
3447                        readl(adapter->hw.hw_addr + i+8));
3448        }
3449        /* tx */
3450        pr_info("Tx descriptor cache in 64bit format\n");
3451        for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3452                pr_info("T%04X: %08X|%08X %08X|%08X\n",
3453                        i,
3454                        readl(adapter->hw.hw_addr + i+4),
3455                        readl(adapter->hw.hw_addr + i),
3456                        readl(adapter->hw.hw_addr + i+12),
3457                        readl(adapter->hw.hw_addr + i+8));
3458        }
3459exit:
3460        return;
3461}
3462
3463/**
3464 * e1000_tx_timeout - Respond to a Tx Hang
3465 * @netdev: network interface device structure
3466 **/
3467static void e1000_tx_timeout(struct net_device *netdev)
3468{
3469        struct e1000_adapter *adapter = netdev_priv(netdev);
3470
3471        /* Do the reset outside of interrupt context */
3472        adapter->tx_timeout_count++;
3473        schedule_work(&adapter->reset_task);
3474}
3475
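    /**
     * e1000_reset_task - reset the adapter outside of interrupt context
     * @work: work item embedded in the adapter structure
     **/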
3476static void e1000_reset_task(struct work_struct *work)
3477{
3478        struct e1000_adapter *adapter =
3479                container_of(work, struct e1000_adapter, reset_task);
3480
3481        e_err(drv, "Reset adapter\n");
3482        e1000_reinit_locked(adapter);
3483}
3484
3485/**
3486 * e1000_get_stats - Get System Network Statistics
3487 * @netdev: network interface device structure
3488 *
3489 * Returns the address of the device statistics structure.
3490 * The statistics are actually updated from the watchdog.
3491 **/
3492static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3493{
3494        /* only return the current stats */
3495        return &netdev->stats;
3496}
3497
3498/**
3499 * e1000_change_mtu - Change the Maximum Transfer Unit
3500 * @netdev: network interface device structure
3501 * @new_mtu: new value for maximum frame size
3502 *
3503 * Returns 0 on success, negative on failure
3504 **/
3505static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3506{
3507        struct e1000_adapter *adapter = netdev_priv(netdev);
3508        struct e1000_hw *hw = &adapter->hw;
3509        int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3510
3511        if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3512            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3513                e_err(probe, "Invalid MTU setting\n");
3514                return -EINVAL;
3515        }
3516
3517        /* Adapter-specific max frame size limits. */
3518        switch (hw->mac_type) {
3519        case e1000_undefined ... e1000_82542_rev2_1:
3520                if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3521                        e_err(probe, "Jumbo Frames not supported.\n");
3522                        return -EINVAL;
3523                }
3524                break;
3525        default:
3526                /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3527                break;
3528        }
3529
3530        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3531                msleep(1);
3532        /* e1000_down has a dependency on max_frame_size */
3533        hw->max_frame_size = max_frame;
3534        if (netif_running(netdev))
3535                e1000_down(adapter);
3536
3537        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3538         * means we reserve 2 more; this pushes us to allocate from the next
3539         * larger slab size,
3540         * i.e. RXBUFFER_2048 --> size-4096 slab.
3541         * However, with the new *_jumbo_rx* routines, jumbo receives will use
3542         * fragmented skbs.
3543         */
3544
3545        if (max_frame <= E1000_RXBUFFER_2048)
3546                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3547        else
3548#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3549                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3550#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3551                adapter->rx_buffer_len = PAGE_SIZE;
3552#endif
3553
3554        /* adjust allocation if LPE protects us, and we aren't using SBP */
3555        if (!hw->tbi_compatibility_on &&
3556            ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3557             (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3558                adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3559
3560        pr_info("%s changing MTU from %d to %d\n",
3561                netdev->name, netdev->mtu, new_mtu);
3562        netdev->mtu = new_mtu;
3563
3564        if (netif_running(netdev))
3565                e1000_up(adapter);
3566        else
3567                e1000_reset(adapter);
3568
3569        clear_bit(__E1000_RESETTING, &adapter->flags);
3570
3571        return 0;
3572}
3573
3574/**
3575 * e1000_update_stats - Update the board statistics counters
3576 * @adapter: board private structure
3577 **/
3578void e1000_update_stats(struct e1000_adapter *adapter)
3579{
3580        struct net_device *netdev = adapter->netdev;
3581        struct e1000_hw *hw = &adapter->hw;
3582        struct pci_dev *pdev = adapter->pdev;
3583        unsigned long flags;
3584        u16 phy_tmp;
3585
3586#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3587
3588        /* Prevent stats update while adapter is being reset, or if the pci
3589         * connection is down.
3590         */
3591        if (adapter->link_speed == 0)
3592                return;
3593        if (pci_channel_offline(pdev))
3594                return;
3595
3596        spin_lock_irqsave(&adapter->stats_lock, flags);
3597
3598        /* these counters are modified from e1000_tbi_adjust_stats,
3599         * called from the interrupt context, so they must only
3600         * be written while holding adapter->stats_lock
3601         */
3602
3603        adapter->stats.crcerrs += er32(CRCERRS);
3604        adapter->stats.gprc += er32(GPRC);
3605        adapter->stats.gorcl += er32(GORCL);
3606        adapter->stats.gorch += er32(GORCH);
3607        adapter->stats.bprc += er32(BPRC);
3608        adapter->stats.mprc += er32(MPRC);
3609        adapter->stats.roc += er32(ROC);
3610
3611        adapter->stats.prc64 += er32(PRC64);
3612        adapter->stats.prc127 += er32(PRC127);
3613        adapter->stats.prc255 += er32(PRC255);
3614        adapter->stats.prc511 += er32(PRC511);
3615        adapter->stats.prc1023 += er32(PRC1023);
3616        adapter->stats.prc1522 += er32(PRC1522);
3617
3618        adapter->stats.symerrs += er32(SYMERRS);
3619        adapter->stats.mpc += er32(MPC);
3620        adapter->stats.scc += er32(SCC);
3621        adapter->stats.ecol += er32(ECOL);
3622        adapter->stats.mcc += er32(MCC);
3623        adapter->stats.latecol += er32(LATECOL);
3624        adapter->stats.dc += er32(DC);
3625        adapter->stats.sec += er32(SEC);
3626        adapter->stats.rlec += er32(RLEC);
3627        adapter->stats.xonrxc += er32(XONRXC);
3628        adapter->stats.xontxc += er32(XONTXC);
3629        adapter->stats.xoffrxc += er32(XOFFRXC);
3630        adapter->stats.xofftxc += er32(XOFFTXC);
3631        adapter->stats.fcruc += er32(FCRUC);
3632        adapter->stats.gptc += er32(GPTC);
3633        adapter->stats.gotcl += er32(GOTCL);
3634        adapter->stats.gotch += er32(GOTCH);
3635        adapter->stats.rnbc += er32(RNBC);
3636        adapter->stats.ruc += er32(RUC);
3637        adapter->stats.rfc += er32(RFC);
3638        adapter->stats.rjc += er32(RJC);
3639        adapter->stats.torl += er32(TORL);
3640        adapter->stats.torh += er32(TORH);
3641        adapter->stats.totl += er32(TOTL);
3642        adapter->stats.toth += er32(TOTH);
3643        adapter->stats.tpr += er32(TPR);
3644
3645        adapter->stats.ptc64 += er32(PTC64);
3646        adapter->stats.ptc127 += er32(PTC127);
3647        adapter->stats.ptc255 += er32(PTC255);
3648        adapter->stats.ptc511 += er32(PTC511);
3649        adapter->stats.ptc1023 += er32(PTC1023);
3650        adapter->stats.ptc1522 += er32(PTC1522);
3651
3652        adapter->stats.mptc += er32(MPTC);
3653        adapter->stats.bptc += er32(BPTC);
3654
3655        /* used for adaptive IFS */
3656
3657        hw->tx_packet_delta = er32(TPT);
3658        adapter->stats.tpt += hw->tx_packet_delta;
3659        hw->collision_delta = er32(COLC);
3660        adapter->stats.colc += hw->collision_delta;
3661
3662        if (hw->mac_type >= e1000_82543) {
3663                adapter->stats.algnerrc += er32(ALGNERRC);
3664                adapter->stats.rxerrc += er32(RXERRC);
3665                adapter->stats.tncrs += er32(TNCRS);
3666                adapter->stats.cexterr += er32(CEXTERR);
3667                adapter->stats.tsctc += er32(TSCTC);
3668                adapter->stats.tsctfc += er32(TSCTFC);
3669        }
3670
3671        /* Fill out the OS statistics structure */
3672        netdev->stats.multicast = adapter->stats.mprc;
3673        netdev->stats.collisions = adapter->stats.colc;
3674
3675        /* Rx Errors */
3676
3677        /* RLEC on some newer hardware can be incorrect so build
3678         * our own version based on RUC and ROC
3679         */
3680        netdev->stats.rx_errors = adapter->stats.rxerrc +
3681                adapter->stats.crcerrs + adapter->stats.algnerrc +
3682                adapter->stats.ruc + adapter->stats.roc +
3683                adapter->stats.cexterr;
3684        adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3685        netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3686        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3687        netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3688        netdev->stats.rx_missed_errors = adapter->stats.mpc;
3689
3690        /* Tx Errors */
3691        adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3692        netdev->stats.tx_errors = adapter->stats.txerrc;
3693        netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3694        netdev->stats.tx_window_errors = adapter->stats.latecol;
3695        netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3696        if (hw->bad_tx_carr_stats_fd &&
3697            adapter->link_duplex == FULL_DUPLEX) {
3698                netdev->stats.tx_carrier_errors = 0;
3699                adapter->stats.tncrs = 0;
3700        }
3701
3702        /* Tx Dropped needs to be maintained elsewhere */
3703
3704        /* Phy Stats */
3705        if (hw->media_type == e1000_media_type_copper) {
3706                if ((adapter->link_speed == SPEED_1000) &&
3707                   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3708                        phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3709                        adapter->phy_stats.idle_errors += phy_tmp;
3710                }
3711
3712                if ((hw->mac_type <= e1000_82546) &&
3713                   (hw->phy_type == e1000_phy_m88) &&
3714                   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3715                        adapter->phy_stats.receive_errors += phy_tmp;
3716        }
3717
3718        /* Management Stats */
3719        if (hw->has_smbus) {
3720                adapter->stats.mgptc += er32(MGTPTC);
3721                adapter->stats.mgprc += er32(MGTPRC);
3722                adapter->stats.mgpdc += er32(MGTPDC);
3723        }
3724
3725        spin_unlock_irqrestore(&adapter->stats_lock, flags);
3726}
3727
3728/**
3729 * e1000_intr - Interrupt Handler
3730 * @irq: interrupt number
3731 * @data: pointer to a network interface device structure
3732 **/
3733static irqreturn_t e1000_intr(int irq, void *data)
3734{
3735        struct net_device *netdev = data;
3736        struct e1000_adapter *adapter = netdev_priv(netdev);
3737        struct e1000_hw *hw = &adapter->hw;
3738        u32 icr = er32(ICR);
3739
3740        if (unlikely((!icr)))
3741                return IRQ_NONE;  /* Not our interrupt */
3742
3743        /* We might have caused the interrupt, but the above
3744         * read cleared it; just in case the driver is
3745         * down, there is nothing to do, so return handled.
3746         */
3747        if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3748                return IRQ_HANDLED;
3749
3750        if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3751                hw->get_link_status = 1;
3752                /* guard against interrupt when we're going down */
3753                if (!test_bit(__E1000_DOWN, &adapter->flags))
3754                        schedule_delayed_work(&adapter->watchdog_task, 1);
3755        }
3756
3757        /* disable interrupts, without the synchronize_irq bit */
3758        ew32(IMC, ~0);
3759        E1000_WRITE_FLUSH();
3760
3761        if (likely(napi_schedule_prep(&adapter->napi))) {
3762                adapter->total_tx_bytes = 0;
3763                adapter->total_tx_packets = 0;
3764                adapter->total_rx_bytes = 0;
3765                adapter->total_rx_packets = 0;
3766                __napi_schedule(&adapter->napi);
3767        } else {
3768                /* This really should not happen!  If it does, it is basically a
3769                 * bug, but not a hard error, so enable ints and continue.
3770                 */
3771                if (!test_bit(__E1000_DOWN, &adapter->flags))
3772                        e1000_irq_enable(adapter);
3773        }
3774
3775        return IRQ_HANDLED;
3776}
3777
3778/**
3779 * e1000_clean - NAPI Rx polling callback
3780 * @adapter: board private structure
3781 **/
3782static int e1000_clean(struct napi_struct *napi, int budget)
3783{
3784        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3785                                                     napi);
3786        int tx_clean_complete = 0, work_done = 0;
3787
3788        tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3789
3790        adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3791
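            /* If Tx cleanup did not finish, claim the full Rx budget so NAPI
             * keeps polling instead of completing and re-enabling interrupts.
             */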
3792        if (!tx_clean_complete)
3793                work_done = budget;
3794
3795        /* If budget not fully consumed, exit the polling mode */
3796        if (work_done < budget) {
3797                if (likely(adapter->itr_setting & 3))
3798                        e1000_set_itr(adapter);
3799                napi_complete(napi);
3800                if (!test_bit(__E1000_DOWN, &adapter->flags))
3801                        e1000_irq_enable(adapter);
3802        }
3803
3804        return work_done;
3805}
3806
3807/**
3808 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3809 * @adapter: board private structure
3810 **/
3811static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3812                               struct e1000_tx_ring *tx_ring)
3813{
3814        struct e1000_hw *hw = &adapter->hw;
3815        struct net_device *netdev = adapter->netdev;
3816        struct e1000_tx_desc *tx_desc, *eop_desc;
3817        struct e1000_buffer *buffer_info;
3818        unsigned int i, eop;
3819        unsigned int count = 0;
3820        unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3821        unsigned int bytes_compl = 0, pkts_compl = 0;
3822
3823        i = tx_ring->next_to_clean;
3824        eop = tx_ring->buffer_info[i].next_to_watch;
3825        eop_desc = E1000_TX_DESC(*tx_ring, eop);
3826
3827        while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3828               (count < tx_ring->count)) {
3829                bool cleaned = false;
3830                rmb();  /* read buffer_info after eop_desc */
3831                for ( ; !cleaned; count++) {
3832                        tx_desc = E1000_TX_DESC(*tx_ring, i);
3833                        buffer_info = &tx_ring->buffer_info[i];
3834                        cleaned = (i == eop);
3835
3836                        if (cleaned) {
3837                                total_tx_packets += buffer_info->segs;
3838                                total_tx_bytes += buffer_info->bytecount;
3839                                if (buffer_info->skb) {
3840                                        bytes_compl += buffer_info->skb->len;
3841                                        pkts_compl++;
3842                                }
3843
3844                        }
3845                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3846                        tx_desc->upper.data = 0;
3847
3848                        if (unlikely(++i == tx_ring->count)) i = 0;
3849                }
3850
3851                eop = tx_ring->buffer_info[i].next_to_watch;
3852                eop_desc = E1000_TX_DESC(*tx_ring, eop);
3853        }
3854
3855        tx_ring->next_to_clean = i;
3856
3857        netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3858
3859#define TX_WAKE_THRESHOLD 32
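            /* Only wake the queue once at least TX_WAKE_THRESHOLD descriptors
             * are free again, to avoid bouncing between stopped and running.
             */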
3860        if (unlikely(count && netif_carrier_ok(netdev) &&
3861                     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3862                /* Make sure that anybody stopping the queue after this
3863                 * sees the new next_to_clean.
3864                 */
3865                smp_mb();
3866
3867                if (netif_queue_stopped(netdev) &&
3868                    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3869                        netif_wake_queue(netdev);
3870                        ++adapter->restart_queue;
3871                }
3872        }
3873
3874        if (adapter->detect_tx_hung) {
3875                /* Detect a transmit hang in hardware; this serializes the
3876                 * check with the clearing of time_stamp and the movement of i.
3877                 */
3878                adapter->detect_tx_hung = false;
3879                if (tx_ring->buffer_info[eop].time_stamp &&
3880                    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3881                               (adapter->tx_timeout_factor * HZ)) &&
3882                    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3883
3884                        /* detected Tx unit hang */
3885                        e_err(drv, "Detected Tx Unit Hang\n"
3886                              "  Tx Queue             <%lu>\n"
3887                              "  TDH                  <%x>\n"
3888                              "  TDT                  <%x>\n"
3889                              "  next_to_use          <%x>\n"
3890                              "  next_to_clean        <%x>\n"
3891                              "buffer_info[next_to_clean]\n"
3892                              "  time_stamp           <%lx>\n"
3893                              "  next_to_watch        <%x>\n"
3894                              "  jiffies              <%lx>\n"
3895                              "  next_to_watch.status <%x>\n",
3896                                (unsigned long)(tx_ring - adapter->tx_ring),
3897                                readl(hw->hw_addr + tx_ring->tdh),
3898                                readl(hw->hw_addr + tx_ring->tdt),
3899                                tx_ring->next_to_use,
3900                                tx_ring->next_to_clean,
3901                                tx_ring->buffer_info[eop].time_stamp,
3902                                eop,
3903                                jiffies,
3904                                eop_desc->upper.fields.status);
3905                        e1000_dump(adapter);
3906                        netif_stop_queue(netdev);
3907                }
3908        }
3909        adapter->total_tx_bytes += total_tx_bytes;
3910        adapter->total_tx_packets += total_tx_packets;
3911        netdev->stats.tx_bytes += total_tx_bytes;
3912        netdev->stats.tx_packets += total_tx_packets;
3913        return count < tx_ring->count;
3914}
3915
3916/**
3917 * e1000_rx_checksum - Receive Checksum Offload for 82543
3918 * @adapter:     board private structure
3919 * @status_err:  receive descriptor status and error fields
3920 * @csum:        receive descriptor csum field
3921 * @skb:         socket buffer with received data
3922 **/
3923static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3924                              u32 csum, struct sk_buff *skb)
3925{
3926        struct e1000_hw *hw = &adapter->hw;
3927        u16 status = (u16)status_err;
3928        u8 errors = (u8)(status_err >> 24);
3929
3930        skb_checksum_none_assert(skb);
3931
3932        /* 82543 or newer only */
3933        if (unlikely(hw->mac_type < e1000_82543)) return;
3934        /* Ignore Checksum bit is set */
3935        if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3936        /* TCP/UDP checksum error bit is set */
3937        if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3938                /* let the stack verify checksum errors */
3939                adapter->hw_csum_err++;
3940                return;
3941        }
3942        /* TCP/UDP Checksum has not been calculated */
3943        if (!(status & E1000_RXD_STAT_TCPCS))
3944                return;
3945
3946        /* It must be a TCP or UDP packet with a valid checksum */
3947        if (likely(status & E1000_RXD_STAT_TCPCS)) {
3948                /* TCP checksum is good */
3949                skb->ip_summed = CHECKSUM_UNNECESSARY;
3950        }
3951        adapter->hw_csum_good++;
3952}
3953
3954/**
3955 * e1000_consume_page - account a consumed receive page to an skb
3956 **/
3957static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3958                               u16 length)
3959{
3960        bi->page = NULL;
3961        skb->len += length;
3962        skb->data_len += length;
3963        skb->truesize += PAGE_SIZE;
3964}
3965
3966/**
3967 * e1000_receive_skb - helper function to handle rx indications
3968 * @adapter: board private structure
3969 * @status: descriptor status field as written by hardware
3970 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3971 * @skb: pointer to sk_buff to be indicated to stack
3972 */
3973static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3974                              __le16 vlan, struct sk_buff *skb)
3975{
3976        skb->protocol = eth_type_trans(skb, adapter->netdev);
3977
3978        if (status & E1000_RXD_STAT_VP) {
3979                u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3980
3981                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
3982        }
3983        napi_gro_receive(&adapter->napi, skb);
3984}
3985
3986/**
3987 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
3988 * @adapter: board private structure
3989 * @rx_ring: ring to clean
3990 * @work_done: amount of napi work completed this call
3991 * @work_to_do: max amount of work allowed for this call to do
3992 *
3993 * the return value indicates whether actual cleaning was done; there
3994 * is no guarantee that everything was cleaned
3995 */
3996static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3997                                     struct e1000_rx_ring *rx_ring,
3998                                     int *work_done, int work_to_do)
3999{
4000        struct e1000_hw *hw = &adapter->hw;
4001        struct net_device *netdev = adapter->netdev;
4002        struct pci_dev *pdev = adapter->pdev;
4003        struct e1000_rx_desc *rx_desc, *next_rxd;
4004        struct e1000_buffer *buffer_info, *next_buffer;
4005        unsigned long irq_flags;
4006        u32 length;
4007        unsigned int i;
4008        int cleaned_count = 0;
4009        bool cleaned = false;
4010        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4011
4012        i = rx_ring->next_to_clean;
4013        rx_desc = E1000_RX_DESC(*rx_ring, i);
4014        buffer_info = &rx_ring->buffer_info[i];
4015
4016        while (rx_desc->status & E1000_RXD_STAT_DD) {
4017                struct sk_buff *skb;
4018                u8 status;
4019
4020                if (*work_done >= work_to_do)
4021                        break;
4022                (*work_done)++;
4023                rmb(); /* read descriptor and rx_buffer_info after status DD */
4024
4025                status = rx_desc->status;
4026                skb = buffer_info->skb;
4027                buffer_info->skb = NULL;
4028
4029                if (++i == rx_ring->count) i = 0;
4030                next_rxd = E1000_RX_DESC(*rx_ring, i);
4031                prefetch(next_rxd);
4032
4033                next_buffer = &rx_ring->buffer_info[i];
4034
4035                cleaned = true;
4036                cleaned_count++;
4037                dma_unmap_page(&pdev->dev, buffer_info->dma,
4038                               buffer_info->length, DMA_FROM_DEVICE);
4039                buffer_info->dma = 0;
4040
4041                length = le16_to_cpu(rx_desc->length);
4042
4043                /* errors is only valid for DD + EOP descriptors */
4044                if (unlikely((status & E1000_RXD_STAT_EOP) &&
4045                    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4046                        u8 *mapped;
4047                        u8 last_byte;
4048
4049                        mapped = page_address(buffer_info->page);
4050                        last_byte = *(mapped + length - 1);
4051                        if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4052                                       last_byte)) {
4053                                spin_lock_irqsave(&adapter->stats_lock,
4054                                                  irq_flags);
4055                                e1000_tbi_adjust_stats(hw, &adapter->stats,
4056                                                       length, mapped);
4057                                spin_unlock_irqrestore(&adapter->stats_lock,
4058                                                       irq_flags);
4059                                length--;
4060                        } else {
4061                                if (netdev->features & NETIF_F_RXALL)
4062                                        goto process_skb;
4063                                /* recycle both page and skb */
4064                                buffer_info->skb = skb;
4065                                /* an error means any chain goes out the window
4066                                 * too
4067                                 */
4068                                if (rx_ring->rx_skb_top)
4069                                        dev_kfree_skb(rx_ring->rx_skb_top);
4070                                rx_ring->rx_skb_top = NULL;
4071                                goto next_desc;
4072                        }
4073                }
4074
4075#define rxtop rx_ring->rx_skb_top
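    /* rxtop tracks the head skb of a jumbo frame that spans multiple receive
     * buffers; page fragments are appended to it until the EOP descriptor
     * is seen.
     */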
4076process_skb:
4077                if (!(status & E1000_RXD_STAT_EOP)) {
4078                        /* this descriptor is only the beginning (or middle) */
4079                        if (!rxtop) {
4080                                /* this is the beginning of a chain */
4081                                rxtop = skb;
4082                                skb_fill_page_desc(rxtop, 0, buffer_info->page,
4083                                                   0, length);
4084                        } else {
4085                                /* this is the middle of a chain */
4086                                skb_fill_page_desc(rxtop,
4087                                    skb_shinfo(rxtop)->nr_frags,
4088                                    buffer_info->page, 0, length);
4089                                /* re-use the skb, only consumed the page */
4090                                buffer_info->skb = skb;
4091                        }
4092                        e1000_consume_page(buffer_info, rxtop, length);
4093                        goto next_desc;
4094                } else {
4095                        if (rxtop) {
4096                                /* end of the chain */
4097                                skb_fill_page_desc(rxtop,
4098                                    skb_shinfo(rxtop)->nr_frags,
4099                                    buffer_info->page, 0, length);
4100                                /* re-use the current skb, we only consumed the
4101                                 * page
4102                                 */
4103                                buffer_info->skb = skb;
4104                                skb = rxtop;
4105                                rxtop = NULL;
4106                                e1000_consume_page(buffer_info, skb, length);
4107                        } else {
4108                                /* no chain and we got EOP, so this buf holds the whole
4109                                 * packet; copybreak to save the put_page/alloc_page
4110                                 */
4111                                if (length <= copybreak &&
4112                                    skb_tailroom(skb) >= length) {
4113                                        u8 *vaddr;
4114                                        vaddr = kmap_atomic(buffer_info->page);
4115                                        memcpy(skb_tail_pointer(skb), vaddr,
4116                                               length);
4117                                        kunmap_atomic(vaddr);
4118                                        /* re-use the page, so don't erase
4119                                         * buffer_info->page
4120                                         */
4121                                        skb_put(skb, length);
4122                                } else {
4123                                        skb_fill_page_desc(skb, 0,
4124                                                           buffer_info->page, 0,
4125                                                           length);
4126                                        e1000_consume_page(buffer_info, skb,
4127                                                           length);
4128                                }
4129                        }
4130                }
4131
4132                /* Receive Checksum Offload XXX recompute due to CRC strip? */
4133                e1000_rx_checksum(adapter,
4134                                  (u32)(status) |
4135                                  ((u32)(rx_desc->errors) << 24),
4136                                  le16_to_cpu(rx_desc->csum), skb);
4137
4138                total_rx_bytes += (skb->len - 4); /* don't count FCS */
4139                if (likely(!(netdev->features & NETIF_F_RXFCS)))
4140                        pskb_trim(skb, skb->len - 4);
4141                total_rx_packets++;
4142
4143                /* eth type trans needs skb->data to point to something */
4144                if (!pskb_may_pull(skb, ETH_HLEN)) {
4145                        e_err(drv, "pskb_may_pull failed.\n");
4146                        dev_kfree_skb(skb);
4147                        goto next_desc;
4148                }
4149
4150                e1000_receive_skb(adapter, status, rx_desc->special, skb);
4151
4152next_desc:
4153                rx_desc->status = 0;
4154
4155                /* return some buffers to hardware, one at a time is too slow */
4156                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4157                        adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4158                        cleaned_count = 0;
4159                }
4160
4161                /* use prefetched values */
4162                rx_desc = next_rxd;
4163                buffer_info = next_buffer;
4164        }
4165        rx_ring->next_to_clean = i;
4166
4167        cleaned_count = E1000_DESC_UNUSED(rx_ring);
4168        if (cleaned_count)
4169                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4170
4171        adapter->total_rx_packets += total_rx_packets;
4172        adapter->total_rx_bytes += total_rx_bytes;
4173        netdev->stats.rx_bytes += total_rx_bytes;
4174        netdev->stats.rx_packets += total_rx_packets;
4175        return cleaned;
4176}
4177
4178/* this should improve performance for small packets with large amounts
4179 * of reassembly being done in the stack
4180 */
4181static void e1000_check_copybreak(struct net_device *netdev,
4182                                 struct e1000_buffer *buffer_info,
4183                                 u32 length, struct sk_buff **skb)
4184{
4185        struct sk_buff *new_skb;
4186
4187        if (length > copybreak)
4188                return;
4189
4190        new_skb = netdev_alloc_skb_ip_align(netdev, length);
4191        if (!new_skb)
4192                return;
4193
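            /* copy starting NET_IP_ALIGN bytes before ->data so both the
             * source and destination of the memcpy begin on an aligned
             * boundary and the new skb keeps its IP header alignment
             */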
4194        skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4195                                       (*skb)->data - NET_IP_ALIGN,
4196                                       length + NET_IP_ALIGN);
4197        /* save the skb in buffer_info as good */
4198        buffer_info->skb = *skb;
4199        *skb = new_skb;
4200}
4201
4202/**
4203 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4204 * @adapter: board private structure
4205 * @rx_ring: ring to clean
4206 * @work_done: amount of napi work completed this call
4207 * @work_to_do: max amount of work allowed for this call to do
4208 */
4209static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4210                               struct e1000_rx_ring *rx_ring,
4211                               int *work_done, int work_to_do)
4212{
4213        struct e1000_hw *hw = &adapter->hw;
4214        struct net_device *netdev = adapter->netdev;
4215        struct pci_dev *pdev = adapter->pdev;
4216        struct e1000_rx_desc *rx_desc, *next_rxd;
4217        struct e1000_buffer *buffer_info, *next_buffer;
4218        unsigned long flags;
4219        u32 length;
4220        unsigned int i;
4221        int cleaned_count = 0;
4222        bool cleaned = false;
4223        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4224
4225        i = rx_ring->next_to_clean;
4226        rx_desc = E1000_RX_DESC(*rx_ring, i);
4227        buffer_info = &rx_ring->buffer_info[i];
4228
4229        while (rx_desc->status & E1000_RXD_STAT_DD) {
4230                struct sk_buff *skb;
4231                u8 status;
4232
4233                if (*work_done >= work_to_do)
4234                        break;
4235                (*work_done)++;
4236                rmb(); /* read descriptor and rx_buffer_info after status DD */
4237
4238                status = rx_desc->status;
4239                skb = buffer_info->skb;
4240                buffer_info->skb = NULL;
4241
4242                prefetch(skb->data - NET_IP_ALIGN);
4243
4244                if (++i == rx_ring->count)
                            i = 0;
4245                next_rxd = E1000_RX_DESC(*rx_ring, i);
4246                prefetch(next_rxd);
4247
4248                next_buffer = &rx_ring->buffer_info[i];
4249
4250                cleaned = true;
4251                cleaned_count++;
4252                dma_unmap_single(&pdev->dev, buffer_info->dma,
4253                                 buffer_info->length, DMA_FROM_DEVICE);
4254                buffer_info->dma = 0;
4255
4256                length = le16_to_cpu(rx_desc->length);
4257                /* !EOP means multiple descriptors were used to store a single
4258                 * packet; if that's the case we need to toss it.  In fact, we
4259                 * need to toss every packet with the EOP bit clear and the next
4260                 * frame that _does_ have the EOP bit set, as it is by
4261                 * definition only a frame fragment.
4262                 */
4263                if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4264                        adapter->discarding = true;
4265
4266                if (adapter->discarding) {
4267                        /* All receives must fit into a single buffer */
4268                        e_dbg("Receive packet consumed multiple buffers\n");
4269                        /* recycle */
4270                        buffer_info->skb = skb;
4271                        if (status & E1000_RXD_STAT_EOP)
4272                                adapter->discarding = false;
4273                        goto next_desc;
4274                }
4275
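                    /* TBI compatibility workaround: on older fiber (TBI) parts
                     * a frame that ends in a carrier-extend symbol may be
                     * reported with a false CRC error; TBI_ACCEPT() spots that
                     * case so the frame can be kept, with the stats adjusted
                     * and the stray trailing byte dropped below
                     */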
4276                if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4277                        u8 last_byte = *(skb->data + length - 1);
4278                        if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4279                                       last_byte)) {
4280                                spin_lock_irqsave(&adapter->stats_lock, flags);
4281                                e1000_tbi_adjust_stats(hw, &adapter->stats,
4282                                                       length, skb->data);
4283                                spin_unlock_irqrestore(&adapter->stats_lock,
4284                                                       flags);
4285                                length--;
4286                        } else {
4287                                if (netdev->features & NETIF_F_RXALL)
4288                                        goto process_skb;
4289                                /* recycle */
4290                                buffer_info->skb = skb;
4291                                goto next_desc;
4292                        }
4293                }
4294
4295process_skb:
4296                total_rx_bytes += (length - 4); /* don't count FCS */
4297                total_rx_packets++;
4298
4299                if (likely(!(netdev->features & NETIF_F_RXFCS)))
4300                        /* adjust length to remove Ethernet CRC, this must be
4301                         * done after the TBI_ACCEPT workaround above
4302                         */
4303                        length -= 4;
4304
4305                e1000_check_copybreak(netdev, buffer_info, length, &skb);
4306
4307                skb_put(skb, length);
4308
4309                /* Receive Checksum Offload */
4310                e1000_rx_checksum(adapter,
4311                                  (u32)(status) |
4312                                  ((u32)(rx_desc->errors) << 24),
4313                                  le16_to_cpu(rx_desc->csum), skb);
4314
4315                e1000_receive_skb(adapter, status, rx_desc->special, skb);
4316
4317next_desc:
4318                rx_desc->status = 0;
4319
4320                /* return some buffers to hardware, one at a time is too slow */
4321                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4322                        adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4323                        cleaned_count = 0;
4324                }
4325
4326                /* use prefetched values */
4327                rx_desc = next_rxd;
4328                buffer_info = next_buffer;
4329        }
4330        rx_ring->next_to_clean = i;
4331
4332        cleaned_count = E1000_DESC_UNUSED(rx_ring);
4333        if (cleaned_count)
4334                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4335
4336        adapter->total_rx_packets += total_rx_packets;
4337        adapter->total_rx_bytes += total_rx_bytes;
4338        netdev->stats.rx_bytes += total_rx_bytes;
4339        netdev->stats.rx_packets += total_rx_packets;
4340        return cleaned;
4341}
4342
4343/**
4344 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4345 * @adapter: address of board private structure
4346 * @rx_ring: pointer to receive ring structure
4347 * @cleaned_count: number of buffers to allocate this pass
4348 **/
4349static void
4350e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4351                             struct e1000_rx_ring *rx_ring, int cleaned_count)
4352{
4353        struct net_device *netdev = adapter->netdev;
4354        struct pci_dev *pdev = adapter->pdev;
4355        struct e1000_rx_desc *rx_desc;
4356        struct e1000_buffer *buffer_info;
4357        struct sk_buff *skb;
4358        unsigned int i;
4359        unsigned int bufsz = 256 - 16; /* for skb_reserve */
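            /* jumbo receives place packet data in DMA-mapped pages; the small
             * skb allocated below is only a shell that later carries the page
             * fragments and the pulled Ethernet header
             */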
4360
4361        i = rx_ring->next_to_use;
4362        buffer_info = &rx_ring->buffer_info[i];
4363
4364        while (cleaned_count--) {
4365                skb = buffer_info->skb;
4366                if (skb) {
4367                        skb_trim(skb, 0);
4368                        goto check_page;
4369                }
4370
4371                skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4372                if (unlikely(!skb)) {
4373                        /* Better luck next round */
4374                        adapter->alloc_rx_buff_failed++;
4375                        break;
4376                }
4377
4378                buffer_info->skb = skb;
4379                buffer_info->length = adapter->rx_buffer_len;
4380check_page:
4381                /* allocate a new page if necessary */
4382                if (!buffer_info->page) {
4383                        buffer_info->page = alloc_page(GFP_ATOMIC);
4384                        if (unlikely(!buffer_info->page)) {
4385                                adapter->alloc_rx_buff_failed++;
4386                                break;
4387                        }
4388                }
4389
4390                if (!buffer_info->dma) {
4391                        buffer_info->dma = dma_map_page(&pdev->dev,
4392                                                        buffer_info->page, 0,
4393                                                        buffer_info->length,
4394                                                        DMA_FROM_DEVICE);
4395                        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4396                                put_page(buffer_info->page);
4397                                dev_kfree_skb(skb);
4398                                buffer_info->page = NULL;
4399                                buffer_info->skb = NULL;
4400                                buffer_info->dma = 0;
4401                                adapter->alloc_rx_buff_failed++;
4402                                break; /* while !buffer_info->skb */
4403                        }
4404                }
4405
4406                rx_desc = E1000_RX_DESC(*rx_ring, i);
4407                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4408
4409                if (unlikely(++i == rx_ring->count))
4410                        i = 0;
4411                buffer_info = &rx_ring->buffer_info[i];
4412        }
4413
4414        if (likely(rx_ring->next_to_use != i)) {
4415                rx_ring->next_to_use = i;
4416                if (unlikely(i-- == 0))
4417                        i = (rx_ring->count - 1);
4418
4419                /* Force memory writes to complete before letting h/w
4420                 * know there are new descriptors to fetch.  (Only
4421                 * applicable for weak-ordered memory model archs,
4422                 * such as IA-64).
4423                 */
4424                wmb();
4425                writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4426        }
4427}
4428
4429/**
4430 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4431 * @adapter: address of board private structure
     * @rx_ring: pointer to receive ring structure
     * @cleaned_count: number of buffers to allocate this pass
4432 **/
4433static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4434                                   struct e1000_rx_ring *rx_ring,
4435                                   int cleaned_count)
4436{
4437        struct e1000_hw *hw = &adapter->hw;
4438        struct net_device *netdev = adapter->netdev;
4439        struct pci_dev *pdev = adapter->pdev;
4440        struct e1000_rx_desc *rx_desc;
4441        struct e1000_buffer *buffer_info;
4442        struct sk_buff *skb;
4443        unsigned int i;
4444        unsigned int bufsz = adapter->rx_buffer_len;
4445
4446        i = rx_ring->next_to_use;
4447        buffer_info = &rx_ring->buffer_info[i];
4448
4449        while (cleaned_count--) {
4450                skb = buffer_info->skb;
4451                if (skb) {
4452                        skb_trim(skb, 0);
4453                        goto map_skb;
4454                }
4455
4456                skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4457                if (unlikely(!skb)) {
4458                        /* Better luck next round */
4459                        adapter->alloc_rx_buff_failed++;
4460                        break;
4461                }
4462
4463                /* Fix for errata 23, can't cross 64kB boundary */
4464                if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4465                        struct sk_buff *oldskb = skb;
4466                        e_err(rx_err, "skb align check failed: %u bytes at %p\n",
4467                              bufsz, skb->data);
4468                        /* Try again, without freeing the previous */
4469                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4470                        /* Failed allocation, critical failure */
4471                        if (!skb) {
4472                                dev_kfree_skb(oldskb);
4473                                adapter->alloc_rx_buff_failed++;
4474                                break;
4475                        }
4476
4477                        if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4478                                /* give up */
4479                                dev_kfree_skb(skb);
4480                                dev_kfree_skb(oldskb);
4481                                adapter->alloc_rx_buff_failed++;
4482                                break; /* while !buffer_info->skb */
4483                        }
4484
4485                        /* Use new allocation */
4486                        dev_kfree_skb(oldskb);
4487                }
4488                buffer_info->skb = skb;
4489                buffer_info->length = adapter->rx_buffer_len;
4490map_skb:
4491                buffer_info->dma = dma_map_single(&pdev->dev,
4492                                                  skb->data,
4493                                                  buffer_info->length,
4494                                                  DMA_FROM_DEVICE);
4495                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4496                        dev_kfree_skb(skb);
4497                        buffer_info->skb = NULL;
4498                        buffer_info->dma = 0;
4499                        adapter->alloc_rx_buff_failed++;
4500                        break; /* while !buffer_info->skb */
4501                }
4502
4503                /* XXX if it was allocated cleanly it will never map to a
4504                 * boundary crossing
4505                 */
4506
4507                /* Fix for errata 23, can't cross 64kB boundary */
4508                if (!e1000_check_64k_bound(adapter,
4509                                        (void *)(unsigned long)buffer_info->dma,
4510                                        adapter->rx_buffer_len)) {
4511                        e_err(rx_err, "dma align check failed: %u bytes at %p\n",
4512                              adapter->rx_buffer_len,
4513                              (void *)(unsigned long)buffer_info->dma);
4514                        dev_kfree_skb(skb);
4515                        buffer_info->skb = NULL;
4516
4517                        dma_unmap_single(&pdev->dev, buffer_info->dma,
4518                                         adapter->rx_buffer_len,
4519                                         DMA_FROM_DEVICE);
4520                        buffer_info->dma = 0;
4521
4522                        adapter->alloc_rx_buff_failed++;
4523                        break; /* while !buffer_info->skb */
4524                }
4525                rx_desc = E1000_RX_DESC(*rx_ring, i);
4526                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4527
4528                if (unlikely(++i == rx_ring->count))
4529                        i = 0;
4530                buffer_info = &rx_ring->buffer_info[i];
4531        }
4532
4533        if (likely(rx_ring->next_to_use != i)) {
4534                rx_ring->next_to_use = i;
4535                if (unlikely(i-- == 0))
4536                        i = (rx_ring->count - 1);
4537
4538                /* Force memory writes to complete before letting h/w
4539                 * know there are new descriptors to fetch.  (Only
4540                 * applicable for weak-ordered memory model archs,
4541                 * such as IA-64).
4542                 */
4543                wmb();
4544                writel(i, hw->hw_addr + rx_ring->rdt);
4545        }
4546}
4547
4548/**
4549 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4550 * @adapter: board private structure
4551 **/
4552static void e1000_smartspeed(struct e1000_adapter *adapter)
4553{
4554        struct e1000_hw *hw = &adapter->hw;
4555        u16 phy_status;
4556        u16 phy_ctrl;
4557
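            /* SmartSpeed only matters for IGP PHYs autonegotiating 1000BASE-T;
             * the workaround below toggles the manual Master/Slave setting when
             * repeated configuration faults keep the link from coming up
             */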
4558        if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4559           !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4560                return;
4561
4562        if (adapter->smartspeed == 0) {
4563                /* If Master/Slave config fault is asserted twice,
4564                 * we assume a back-to-back connection
4565                 */
4566                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4567                if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
                            return;
4568                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4569                if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
                            return;
4570                e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4571                if (phy_ctrl & CR_1000T_MS_ENABLE) {
4572                        phy_ctrl &= ~CR_1000T_MS_ENABLE;
4573                        e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4574                                            phy_ctrl);
4575                        adapter->smartspeed++;
4576                        if (!e1000_phy_setup_autoneg(hw) &&
4577                           !e1000_read_phy_reg(hw, PHY_CTRL,
4578                                               &phy_ctrl)) {
4579                                phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4580                                             MII_CR_RESTART_AUTO_NEG);
4581                                e1000_write_phy_reg(hw, PHY_CTRL,
4582                                                    phy_ctrl);
4583                        }
4584                }
4585                return;
4586        } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4587                /* If still no link, perhaps using 2/3 pair cable */
4588                e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4589                phy_ctrl |= CR_1000T_MS_ENABLE;
4590                e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4591                if (!e1000_phy_setup_autoneg(hw) &&
4592                   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4593                        phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4594                                     MII_CR_RESTART_AUTO_NEG);
4595                        e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4596                }
4597        }
4598        /* Restart process after E1000_SMARTSPEED_MAX iterations */
4599        if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4600                adapter->smartspeed = 0;
4601}
4602
4603/**
4604 * e1000_ioctl - handle ioctl calls
4605 * @netdev: network interface device structure
4606 * @ifr: interface request structure
4607 * @cmd: ioctl command to execute
4608 **/
4609static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4610{
4611        switch (cmd) {
4612        case SIOCGMIIPHY:
4613        case SIOCGMIIREG:
4614        case SIOCSMIIREG:
4615                return e1000_mii_ioctl(netdev, ifr, cmd);
4616        default:
4617                return -EOPNOTSUPP;
4618        }
4619}
4620
4621/**
4622 * e1000_mii_ioctl - MII ioctl handler
4623 * @netdev: network interface device structure
4624 * @ifr: interface request structure holding the MII register data
4625 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4626 **/
4627static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4628                           int cmd)
4629{
4630        struct e1000_adapter *adapter = netdev_priv(netdev);
4631        struct e1000_hw *hw = &adapter->hw;
4632        struct mii_ioctl_data *data = if_mii(ifr);
4633        int retval;
4634        u16 mii_reg;
4635        unsigned long flags;
4636
4637        if (hw->media_type != e1000_media_type_copper)
4638                return -EOPNOTSUPP;
4639
4640        switch (cmd) {
4641        case SIOCGMIIPHY:
4642                data->phy_id = hw->phy_addr;
4643                break;
4644        case SIOCGMIIREG:
4645                spin_lock_irqsave(&adapter->stats_lock, flags);
4646                if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4647                                   &data->val_out)) {
4648                        spin_unlock_irqrestore(&adapter->stats_lock, flags);
4649                        return -EIO;
4650                }
4651                spin_unlock_irqrestore(&adapter->stats_lock, flags);
4652                break;
4653        case SIOCSMIIREG:
4654                if (data->reg_num & ~(0x1F))
4655                        return -EFAULT;
4656                mii_reg = data->val_in;
4657                spin_lock_irqsave(&adapter->stats_lock, flags);
4658                if (e1000_write_phy_reg(hw, data->reg_num,
4659                                        mii_reg)) {
4660                        spin_unlock_irqrestore(&adapter->stats_lock, flags);
4661                        return -EIO;
4662                }
4663                spin_unlock_irqrestore(&adapter->stats_lock, flags);
4664                if (hw->media_type == e1000_media_type_copper) {
4665                        switch (data->reg_num) {
4666                        case PHY_CTRL:
4667                                if (mii_reg & MII_CR_POWER_DOWN)
4668                                        break;
4669                                if (mii_reg & MII_CR_AUTO_NEG_EN) {
4670                                        hw->autoneg = 1;
4671                                        hw->autoneg_advertised = 0x2F;
4672                                } else {
4673                                        u32 speed;
4674                                        if (mii_reg & 0x40)
4675                                                speed = SPEED_1000;
4676                                        else if (mii_reg & 0x2000)
4677                                                speed = SPEED_100;
4678                                        else
4679                                                speed = SPEED_10;
4680                                        retval = e1000_set_spd_dplx(
4681                                                adapter, speed,
4682                                                ((mii_reg & 0x100)
4683                                                 ? DUPLEX_FULL :
4684                                                 DUPLEX_HALF));
4685                                        if (retval)
4686                                                return retval;
4687                                }
4688                                if (netif_running(adapter->netdev))
4689                                        e1000_reinit_locked(adapter);
4690                                else
4691                                        e1000_reset(adapter);
4692                                break;
4693                        case M88E1000_PHY_SPEC_CTRL:
4694                        case M88E1000_EXT_PHY_SPEC_CTRL:
4695                                if (e1000_phy_reset(hw))
4696                                        return -EIO;
4697                                break;
4698                        }
4699                } else {
4700                        switch (data->reg_num) {
4701                        case PHY_CTRL:
4702                                if (mii_reg & MII_CR_POWER_DOWN)
4703                                        break;
4704                                if (netif_running(adapter->netdev))
4705                                        e1000_reinit_locked(adapter);
4706                                else
4707                                        e1000_reset(adapter);
4708                                break;
4709                        }
4710                }
4711                break;
4712        default:
4713                return -EOPNOTSUPP;
4714        }
4715        return E1000_SUCCESS;
4716}
4717
4718void e1000_pci_set_mwi(struct e1000_hw *hw)
4719{
4720        struct e1000_adapter *adapter = hw->back;
4721        int ret_val = pci_set_mwi(adapter->pdev);
4722
4723        if (ret_val)
4724                e_err(probe, "Error in setting MWI\n");
4725}
4726
4727void e1000_pci_clear_mwi(struct e1000_hw *hw)
4728{
4729        struct e1000_adapter *adapter = hw->back;
4730
4731        pci_clear_mwi(adapter->pdev);
4732}
4733
4734int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4735{
4736        struct e1000_adapter *adapter = hw->back;
4737        return pcix_get_mmrbc(adapter->pdev);
4738}
4739
4740void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4741{
4742        struct e1000_adapter *adapter = hw->back;
4743        pcix_set_mmrbc(adapter->pdev, mmrbc);
4744}
4745
4746void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4747{
4748        outl(value, port);
4749}
4750
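    /* true if at least one VLAN ID is currently registered */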
4751static bool e1000_vlan_used(struct e1000_adapter *adapter)
4752{
4753        u16 vid;
4754
4755        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4756                return true;
4757        return false;
4758}
4759
4760static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4761                              netdev_features_t features)
4762{
4763        struct e1000_hw *hw = &adapter->hw;
4764        u32 ctrl;
4765
4766        ctrl = er32(CTRL);
4767        if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4768                /* enable VLAN tag insert/strip */
4769                ctrl |= E1000_CTRL_VME;
4770        } else {
4771                /* disable VLAN tag insert/strip */
4772                ctrl &= ~E1000_CTRL_VME;
4773        }
4774        ew32(CTRL, ctrl);
4775}
4776static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4777                                     bool filter_on)
4778{
4779        struct e1000_hw *hw = &adapter->hw;
4780        u32 rctl;
4781
4782        if (!test_bit(__E1000_DOWN, &adapter->flags))
4783                e1000_irq_disable(adapter);
4784
4785        __e1000_vlan_mode(adapter, adapter->netdev->features);
4786        if (filter_on) {
4787                /* enable VLAN receive filtering */
4788                rctl = er32(RCTL);
4789                rctl &= ~E1000_RCTL_CFIEN;
4790                if (!(adapter->netdev->flags & IFF_PROMISC))
4791                        rctl |= E1000_RCTL_VFE;
4792                ew32(RCTL, rctl);
4793                e1000_update_mng_vlan(adapter);
4794        } else {
4795                /* disable VLAN receive filtering */
4796                rctl = er32(RCTL);
4797                rctl &= ~E1000_RCTL_VFE;
4798                ew32(RCTL, rctl);
4799        }
4800
4801        if (!test_bit(__E1000_DOWN, &adapter->flags))
4802                e1000_irq_enable(adapter);
4803}
4804
4805static void e1000_vlan_mode(struct net_device *netdev,
4806                            netdev_features_t features)
4807{
4808        struct e1000_adapter *adapter = netdev_priv(netdev);
4809
4810        if (!test_bit(__E1000_DOWN, &adapter->flags))
4811                e1000_irq_disable(adapter);
4812
4813        __e1000_vlan_mode(adapter, features);
4814
4815        if (!test_bit(__E1000_DOWN, &adapter->flags))
4816                e1000_irq_enable(adapter);
4817}
4818
4819static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4820                                 __be16 proto, u16 vid)
4821{
4822        struct e1000_adapter *adapter = netdev_priv(netdev);
4823        struct e1000_hw *hw = &adapter->hw;
4824        u32 vfta, index;
4825
4826        if ((hw->mng_cookie.status &
4827             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4828            (vid == adapter->mng_vlan_id))
4829                return 0;
4830
4831        if (!e1000_vlan_used(adapter))
4832                e1000_vlan_filter_on_off(adapter, true);
4833
4834        /* add VID to filter table */
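            /* the 4096 possible VIDs map onto a 128-entry x 32-bit filter
             * table: bits 11:5 of the VID select the entry and bits 4:0 the
             * bit within it, e.g. VID 100 lands in entry 3, bit 4
             */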
4835        index = (vid >> 5) & 0x7F;
4836        vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4837        vfta |= (1 << (vid & 0x1F));
4838        e1000_write_vfta(hw, index, vfta);
4839
4840        set_bit(vid, adapter->active_vlans);
4841
4842        return 0;
4843}
4844
4845static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4846                                  __be16 proto, u16 vid)
4847{
4848        struct e1000_adapter *adapter = netdev_priv(netdev);
4849        struct e1000_hw *hw = &adapter->hw;
4850        u32 vfta, index;
4851
4852        if (!test_bit(__E1000_DOWN, &adapter->flags))
4853                e1000_irq_disable(adapter);
4854        if (!test_bit(__E1000_DOWN, &adapter->flags))
4855                e1000_irq_enable(adapter);
4856
4857        /* remove VID from filter table */
4858        index = (vid >> 5) & 0x7F;
4859        vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4860        vfta &= ~(1 << (vid & 0x1F));
4861        e1000_write_vfta(hw, index, vfta);
4862
4863        clear_bit(vid, adapter->active_vlans);
4864
4865        if (!e1000_vlan_used(adapter))
4866                e1000_vlan_filter_on_off(adapter, false);
4867
4868        return 0;
4869}
4870
4871static void e1000_restore_vlan(struct e1000_adapter *adapter)
4872{
4873        u16 vid;
4874
4875        if (!e1000_vlan_used(adapter))
4876                return;
4877
4878        e1000_vlan_filter_on_off(adapter, true);
4879        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4880                e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4881}
4882
4883int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4884{
4885        struct e1000_hw *hw = &adapter->hw;
4886
4887        hw->autoneg = 0;
4888
4889        /* Make sure dplx is at most 1 bit and lsb of speed is not set
4890         * for the switch() below to work
4891         */
4892        if ((spd & 1) || (dplx & ~1))
4893                goto err_inval;
4894
4895        /* Fiber NICs only allow 1 Gbps Full duplex */
4896        if ((hw->media_type == e1000_media_type_fiber) &&
4897            spd != SPEED_1000 &&
4898            dplx != DUPLEX_FULL)
4899                goto err_inval;
4900
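            /* speed values are even and dplx is 0 or 1 (checked above), so the
             * sum is unambiguous, e.g. SPEED_100 + DUPLEX_FULL == 101
             */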
4901        switch (spd + dplx) {
4902        case SPEED_10 + DUPLEX_HALF:
4903                hw->forced_speed_duplex = e1000_10_half;
4904                break;
4905        case SPEED_10 + DUPLEX_FULL:
4906                hw->forced_speed_duplex = e1000_10_full;
4907                break;
4908        case SPEED_100 + DUPLEX_HALF:
4909                hw->forced_speed_duplex = e1000_100_half;
4910                break;
4911        case SPEED_100 + DUPLEX_FULL:
4912                hw->forced_speed_duplex = e1000_100_full;
4913                break;
4914        case SPEED_1000 + DUPLEX_FULL:
4915                hw->autoneg = 1;
4916                hw->autoneg_advertised = ADVERTISE_1000_FULL;
4917                break;
4918        case SPEED_1000 + DUPLEX_HALF: /* not supported */
4919        default:
4920                goto err_inval;
4921        }
4922
4923        /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4924        hw->mdix = AUTO_ALL_MODES;
4925
4926        return 0;
4927
4928err_inval:
4929        e_err(probe, "Unsupported Speed/Duplex configuration\n");
4930        return -EINVAL;
4931}
4932
4933static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4934{
4935        struct net_device *netdev = pci_get_drvdata(pdev);
4936        struct e1000_adapter *adapter = netdev_priv(netdev);
4937        struct e1000_hw *hw = &adapter->hw;
4938        u32 ctrl, ctrl_ext, rctl, status;
4939        u32 wufc = adapter->wol;
4940#ifdef CONFIG_PM
4941        int retval = 0;
4942#endif
4943
4944        netif_device_detach(netdev);
4945
4946        if (netif_running(netdev)) {
4947                int count = E1000_CHECK_RESET_COUNT;
4948
4949                while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
4950                        usleep_range(10000, 20000);
4951
4952                WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4953                e1000_down(adapter);
4954        }
4955
4956#ifdef CONFIG_PM
4957        retval = pci_save_state(pdev);
4958        if (retval)
4959                return retval;
4960#endif
4961
4962        status = er32(STATUS);
4963        if (status & E1000_STATUS_LU)
4964                wufc &= ~E1000_WUFC_LNKC;
4965
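            /* wufc holds the Wake Up Filter Control bits; if any wake event is
             * still enabled, arm the wake-up unit, otherwise clear WUC and WUFC
             */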
4966        if (wufc) {
4967                e1000_setup_rctl(adapter);
4968                e1000_set_rx_mode(netdev);
4969
4970                rctl = er32(RCTL);
4971
4972                /* turn on all-multi mode if wake on multicast is enabled */
4973                if (wufc & E1000_WUFC_MC)
4974                        rctl |= E1000_RCTL_MPE;
4975
4976                /* enable receives in the hardware */
4977                ew32(RCTL, rctl | E1000_RCTL_EN);
4978
4979                if (hw->mac_type >= e1000_82540) {
4980                        ctrl = er32(CTRL);
4981                        /* advertise wake from D3Cold */
4982                        #define E1000_CTRL_ADVD3WUC 0x00100000
4983                        /* phy power management enable */
4984                        #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4985                        ctrl |= E1000_CTRL_ADVD3WUC |
4986                                E1000_CTRL_EN_PHY_PWR_MGMT;
4987                        ew32(CTRL, ctrl);
4988                }
4989
4990                if (hw->media_type == e1000_media_type_fiber ||
4991                    hw->media_type == e1000_media_type_internal_serdes) {
4992                        /* keep the laser running in D3 */
4993                        ctrl_ext = er32(CTRL_EXT);
4994                        ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
4995                        ew32(CTRL_EXT, ctrl_ext);
4996                }
4997
4998                ew32(WUC, E1000_WUC_PME_EN);
4999                ew32(WUFC, wufc);
5000        } else {
5001                ew32(WUC, 0);
5002                ew32(WUFC, 0);
5003        }
5004
5005        e1000_release_manageability(adapter);
5006
5007        *enable_wake = !!wufc;
5008
5009        /* make sure adapter isn't asleep if manageability is enabled */
5010        if (adapter->en_mng_pt)
5011                *enable_wake = true;
5012
5013        if (netif_running(netdev))
5014                e1000_free_irq(adapter);
5015
5016        pci_disable_device(pdev);
5017
5018        return 0;
5019}
5020
5021#ifdef CONFIG_PM
5022static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5023{
5024        int retval;
5025        bool wake;
5026
5027        retval = __e1000_shutdown(pdev, &wake);
5028        if (retval)
5029                return retval;
5030
5031        if (wake) {
5032                pci_prepare_to_sleep(pdev);
5033        } else {
5034                pci_wake_from_d3(pdev, false);
5035                pci_set_power_state(pdev, PCI_D3hot);
5036        }
5037
5038        return 0;
5039}
5040
5041static int e1000_resume(struct pci_dev *pdev)
5042{
5043        struct net_device *netdev = pci_get_drvdata(pdev);
5044        struct e1000_adapter *adapter = netdev_priv(netdev);
5045        struct e1000_hw *hw = &adapter->hw;
5046        u32 err;
5047
5048        pci_set_power_state(pdev, PCI_D0);
5049        pci_restore_state(pdev);
5050        pci_save_state(pdev);
5051
5052        if (adapter->need_ioport)
5053                err = pci_enable_device(pdev);
5054        else
5055                err = pci_enable_device_mem(pdev);
5056        if (err) {
5057                pr_err("Cannot enable PCI device from suspend\n");
5058                return err;
5059        }
5060        pci_set_master(pdev);
5061
5062        pci_enable_wake(pdev, PCI_D3hot, 0);
5063        pci_enable_wake(pdev, PCI_D3cold, 0);
5064
5065        if (netif_running(netdev)) {
5066                err = e1000_request_irq(adapter);
5067                if (err)
5068                        return err;
5069        }
5070
5071        e1000_power_up_phy(adapter);
5072        e1000_reset(adapter);
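            /* clear any wake-up status bits left over from the sleep state */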
5073        ew32(WUS, ~0);
5074
5075        e1000_init_manageability(adapter);
5076
5077        if (netif_running(netdev))
5078                e1000_up(adapter);
5079
5080        netif_device_attach(netdev);
5081
5082        return 0;
5083}
5084#endif
5085
5086static void e1000_shutdown(struct pci_dev *pdev)
5087{
5088        bool wake;
5089
5090        __e1000_shutdown(pdev, &wake);
5091
5092        if (system_state == SYSTEM_POWER_OFF) {
5093                pci_wake_from_d3(pdev, wake);
5094                pci_set_power_state(pdev, PCI_D3hot);
5095        }
5096}
5097
5098#ifdef CONFIG_NET_POLL_CONTROLLER
5099/* Polling 'interrupt' - used by things like netconsole to send skbs
5100 * without having to re-enable interrupts. It's not called while
5101 * the interrupt routine is executing.
5102 */
5103static void e1000_netpoll(struct net_device *netdev)
5104{
5105        struct e1000_adapter *adapter = netdev_priv(netdev);
5106
5107        disable_irq(adapter->pdev->irq);
5108        e1000_intr(adapter->pdev->irq, netdev);
5109        enable_irq(adapter->pdev->irq);
5110}
5111#endif
5112
5113/**
5114 * e1000_io_error_detected - called when PCI error is detected
5115 * @pdev: Pointer to PCI device
5116 * @state: The current pci connection state
5117 *
5118 * This function is called after a PCI bus error affecting
5119 * this device has been detected.
5120 */
5121static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5122                                                pci_channel_state_t state)
5123{
5124        struct net_device *netdev = pci_get_drvdata(pdev);
5125        struct e1000_adapter *adapter = netdev_priv(netdev);
5126
5127        netif_device_detach(netdev);
5128
5129        if (state == pci_channel_io_perm_failure)
5130                return PCI_ERS_RESULT_DISCONNECT;
5131
5132        if (netif_running(netdev))
5133                e1000_down(adapter);
5134        pci_disable_device(pdev);
5135
5136        /* Request a slot reset. */
5137        return PCI_ERS_RESULT_NEED_RESET;
5138}
5139
5140/**
5141 * e1000_io_slot_reset - called after the pci bus has been reset.
5142 * @pdev: Pointer to PCI device
5143 *
5144 * Restart the card from scratch, as if from a cold-boot. Implementation
5145 * resembles the first-half of the e1000_resume routine.
5146 */
5147static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5148{
5149        struct net_device *netdev = pci_get_drvdata(pdev);
5150        struct e1000_adapter *adapter = netdev_priv(netdev);
5151        struct e1000_hw *hw = &adapter->hw;
5152        int err;
5153
5154        if (adapter->need_ioport)
5155                err = pci_enable_device(pdev);
5156        else
5157                err = pci_enable_device_mem(pdev);
5158        if (err) {
5159                pr_err("Cannot re-enable PCI device after reset.\n");
5160                return PCI_ERS_RESULT_DISCONNECT;
5161        }
5162        pci_set_master(pdev);
5163
5164        pci_enable_wake(pdev, PCI_D3hot, 0);
5165        pci_enable_wake(pdev, PCI_D3cold, 0);
5166
5167        e1000_reset(adapter);
5168        ew32(WUS, ~0);
5169
5170        return PCI_ERS_RESULT_RECOVERED;
5171}
5172
5173/**
5174 * e1000_io_resume - called when traffic can start flowing again.
5175 * @pdev: Pointer to PCI device
5176 *
5177 * This callback is called when the error recovery driver tells us that
5178 * it's OK to resume normal operation. Implementation resembles the
5179 * second-half of the e1000_resume routine.
5180 */
5181static void e1000_io_resume(struct pci_dev *pdev)
5182{
5183        struct net_device *netdev = pci_get_drvdata(pdev);
5184        struct e1000_adapter *adapter = netdev_priv(netdev);
5185
5186        e1000_init_manageability(adapter);
5187
5188        if (netif_running(netdev)) {
5189                if (e1000_up(adapter)) {
5190                        pr_info("can't bring device back up after reset\n");
5191                        return;
5192                }
5193        }
5194
5195        netif_device_attach(netdev);
5196}
5197
5198/* e1000_main.c */
5199