linux/drivers/net/ethernet/intel/e1000/e1000_main.c
   1/*******************************************************************************
   2
   3  Intel PRO/1000 Linux driver
   4  Copyright(c) 1999 - 2006 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29#include "e1000.h"
  30#include <net/ip6_checksum.h>
  31#include <linux/io.h>
  32#include <linux/prefetch.h>
  33#include <linux/bitops.h>
  34#include <linux/if_vlan.h>
  35
  36char e1000_driver_name[] = "e1000";
  37static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
  38#define DRV_VERSION "7.3.21-k8-NAPI"
  39const char e1000_driver_version[] = DRV_VERSION;
  40static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  41
  42/* e1000_pci_tbl - PCI Device ID Table
  43 *
  44 * Last entry must be all 0s
  45 *
  46 * Macro expands to...
  47 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  48 */
  49static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
  50        INTEL_E1000_ETHERNET_DEVICE(0x1000),
  51        INTEL_E1000_ETHERNET_DEVICE(0x1001),
  52        INTEL_E1000_ETHERNET_DEVICE(0x1004),
  53        INTEL_E1000_ETHERNET_DEVICE(0x1008),
  54        INTEL_E1000_ETHERNET_DEVICE(0x1009),
  55        INTEL_E1000_ETHERNET_DEVICE(0x100C),
  56        INTEL_E1000_ETHERNET_DEVICE(0x100D),
  57        INTEL_E1000_ETHERNET_DEVICE(0x100E),
  58        INTEL_E1000_ETHERNET_DEVICE(0x100F),
  59        INTEL_E1000_ETHERNET_DEVICE(0x1010),
  60        INTEL_E1000_ETHERNET_DEVICE(0x1011),
  61        INTEL_E1000_ETHERNET_DEVICE(0x1012),
  62        INTEL_E1000_ETHERNET_DEVICE(0x1013),
  63        INTEL_E1000_ETHERNET_DEVICE(0x1014),
  64        INTEL_E1000_ETHERNET_DEVICE(0x1015),
  65        INTEL_E1000_ETHERNET_DEVICE(0x1016),
  66        INTEL_E1000_ETHERNET_DEVICE(0x1017),
  67        INTEL_E1000_ETHERNET_DEVICE(0x1018),
  68        INTEL_E1000_ETHERNET_DEVICE(0x1019),
  69        INTEL_E1000_ETHERNET_DEVICE(0x101A),
  70        INTEL_E1000_ETHERNET_DEVICE(0x101D),
  71        INTEL_E1000_ETHERNET_DEVICE(0x101E),
  72        INTEL_E1000_ETHERNET_DEVICE(0x1026),
  73        INTEL_E1000_ETHERNET_DEVICE(0x1027),
  74        INTEL_E1000_ETHERNET_DEVICE(0x1028),
  75        INTEL_E1000_ETHERNET_DEVICE(0x1075),
  76        INTEL_E1000_ETHERNET_DEVICE(0x1076),
  77        INTEL_E1000_ETHERNET_DEVICE(0x1077),
  78        INTEL_E1000_ETHERNET_DEVICE(0x1078),
  79        INTEL_E1000_ETHERNET_DEVICE(0x1079),
  80        INTEL_E1000_ETHERNET_DEVICE(0x107A),
  81        INTEL_E1000_ETHERNET_DEVICE(0x107B),
  82        INTEL_E1000_ETHERNET_DEVICE(0x107C),
  83        INTEL_E1000_ETHERNET_DEVICE(0x108A),
  84        INTEL_E1000_ETHERNET_DEVICE(0x1099),
  85        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
  86        INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
  87        /* required last entry */
  88        {0,}
  89};
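/* For illustration (the macro itself lives in e1000.h, not here): an entry
 * such as INTEL_E1000_ETHERNET_DEVICE(0x100E) expands to roughly
 *   { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x100E) }
 * i.e. vendor/device fixed, subvendor/subdevice left as PCI_ANY_ID.
 */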
  90
  91MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  92
  93int e1000_up(struct e1000_adapter *adapter);
  94void e1000_down(struct e1000_adapter *adapter);
  95void e1000_reinit_locked(struct e1000_adapter *adapter);
  96void e1000_reset(struct e1000_adapter *adapter);
  97int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
  98int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
  99void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
 100void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
 101static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 102                             struct e1000_tx_ring *txdr);
 103static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 104                             struct e1000_rx_ring *rxdr);
 105static void e1000_free_tx_resources(struct e1000_adapter *adapter,
 106                             struct e1000_tx_ring *tx_ring);
 107static void e1000_free_rx_resources(struct e1000_adapter *adapter,
 108                             struct e1000_rx_ring *rx_ring);
 109void e1000_update_stats(struct e1000_adapter *adapter);
 110
 111static int e1000_init_module(void);
 112static void e1000_exit_module(void);
 113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 114static void e1000_remove(struct pci_dev *pdev);
 115static int e1000_alloc_queues(struct e1000_adapter *adapter);
 116static int e1000_sw_init(struct e1000_adapter *adapter);
 117static int e1000_open(struct net_device *netdev);
 118static int e1000_close(struct net_device *netdev);
 119static void e1000_configure_tx(struct e1000_adapter *adapter);
 120static void e1000_configure_rx(struct e1000_adapter *adapter);
 121static void e1000_setup_rctl(struct e1000_adapter *adapter);
 122static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
 123static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
 124static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 125                                struct e1000_tx_ring *tx_ring);
 126static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 127                                struct e1000_rx_ring *rx_ring);
 128static void e1000_set_rx_mode(struct net_device *netdev);
 129static void e1000_update_phy_info_task(struct work_struct *work);
 130static void e1000_watchdog(struct work_struct *work);
 131static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 132static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 133                                    struct net_device *netdev);
 134static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 136static int e1000_set_mac(struct net_device *netdev, void *p);
 137static irqreturn_t e1000_intr(int irq, void *data);
 138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 139                               struct e1000_tx_ring *tx_ring);
 140static int e1000_clean(struct napi_struct *napi, int budget);
 141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 142                               struct e1000_rx_ring *rx_ring,
 143                               int *work_done, int work_to_do);
 144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 145                                     struct e1000_rx_ring *rx_ring,
 146                                     int *work_done, int work_to_do);
 147static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 148                                   struct e1000_rx_ring *rx_ring,
 149                                   int cleaned_count);
 150static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 151                                         struct e1000_rx_ring *rx_ring,
 152                                         int cleaned_count);
 153static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 154static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 155                           int cmd);
 156static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 157static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 158static void e1000_tx_timeout(struct net_device *dev);
 159static void e1000_reset_task(struct work_struct *work);
 160static void e1000_smartspeed(struct e1000_adapter *adapter);
 161static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 162                                       struct sk_buff *skb);
 163
 164static bool e1000_vlan_used(struct e1000_adapter *adapter);
 165static void e1000_vlan_mode(struct net_device *netdev,
 166                            netdev_features_t features);
 167static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
 168                                     bool filter_on);
 169static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
 170static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
 171static void e1000_restore_vlan(struct e1000_adapter *adapter);
 172
 173#ifdef CONFIG_PM
 174static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 175static int e1000_resume(struct pci_dev *pdev);
 176#endif
 177static void e1000_shutdown(struct pci_dev *pdev);
 178
 179#ifdef CONFIG_NET_POLL_CONTROLLER
 180/* for netdump / net console */
 181static void e1000_netpoll (struct net_device *netdev);
 182#endif
 183
 184#define COPYBREAK_DEFAULT 256
 185static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
 186module_param(copybreak, uint, 0644);
 187MODULE_PARM_DESC(copybreak,
 188        "Maximum size of packet that is copied to a new buffer on receive");
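/* Usage note (not part of the original source): with the 0644 permissions
 * above, copybreak is also writable at runtime via sysfs, e.g.
 *   echo 128 > /sys/module/e1000/parameters/copybreak
 * or can be set at load time with "modprobe e1000 copybreak=128".
 */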
 189
 190static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 191                     pci_channel_state_t state);
 192static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
 193static void e1000_io_resume(struct pci_dev *pdev);
 194
 195static const struct pci_error_handlers e1000_err_handler = {
 196        .error_detected = e1000_io_error_detected,
 197        .slot_reset = e1000_io_slot_reset,
 198        .resume = e1000_io_resume,
 199};
 200
 201static struct pci_driver e1000_driver = {
 202        .name     = e1000_driver_name,
 203        .id_table = e1000_pci_tbl,
 204        .probe    = e1000_probe,
 205        .remove   = e1000_remove,
 206#ifdef CONFIG_PM
 207        /* Power Management Hooks */
 208        .suspend  = e1000_suspend,
 209        .resume   = e1000_resume,
 210#endif
 211        .shutdown = e1000_shutdown,
 212        .err_handler = &e1000_err_handler
 213};
 214
 215MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 216MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 217MODULE_LICENSE("GPL");
 218MODULE_VERSION(DRV_VERSION);
 219
 220#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 221static int debug = -1;
 222module_param(debug, int, 0);
 223MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
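/* Note: the default of -1 is outside the valid 0..31 range, so
 * netif_msg_init(debug, DEFAULT_MSG_ENABLE) in e1000_probe() falls back to
 * DEFAULT_MSG_ENABLE (driver, probe and link messages).
 */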
 224
  225/**
  226 * e1000_get_hw_dev - return the net_device for this adapter
  227 * @hw: pointer to the HW structure; used by the hardware layer to
  228 *      print debugging information
  229 **/
 230struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
 231{
 232        struct e1000_adapter *adapter = hw->back;
 233        return adapter->netdev;
 234}
 235
 236/**
 237 * e1000_init_module - Driver Registration Routine
 238 *
 239 * e1000_init_module is the first routine called when the driver is
 240 * loaded. All it does is register with the PCI subsystem.
 241 **/
 242
 243static int __init e1000_init_module(void)
 244{
 245        int ret;
 246        pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
 247
 248        pr_info("%s\n", e1000_copyright);
 249
 250        ret = pci_register_driver(&e1000_driver);
 251        if (copybreak != COPYBREAK_DEFAULT) {
 252                if (copybreak == 0)
 253                        pr_info("copybreak disabled\n");
 254                else
 255                        pr_info("copybreak enabled for "
 256                                   "packets <= %u bytes\n", copybreak);
 257        }
 258        return ret;
 259}
 260
 261module_init(e1000_init_module);
 262
 263/**
 264 * e1000_exit_module - Driver Exit Cleanup Routine
 265 *
 266 * e1000_exit_module is called just before the driver is removed
 267 * from memory.
 268 **/
 269
 270static void __exit e1000_exit_module(void)
 271{
 272        pci_unregister_driver(&e1000_driver);
 273}
 274
 275module_exit(e1000_exit_module);
 276
 277static int e1000_request_irq(struct e1000_adapter *adapter)
 278{
 279        struct net_device *netdev = adapter->netdev;
 280        irq_handler_t handler = e1000_intr;
 281        int irq_flags = IRQF_SHARED;
 282        int err;
 283
 284        err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 285                          netdev);
 286        if (err) {
  287                e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
 288        }
 289
 290        return err;
 291}
 292
 293static void e1000_free_irq(struct e1000_adapter *adapter)
 294{
 295        struct net_device *netdev = adapter->netdev;
 296
 297        free_irq(adapter->pdev->irq, netdev);
 298}
 299
 300/**
 301 * e1000_irq_disable - Mask off interrupt generation on the NIC
 302 * @adapter: board private structure
 303 **/
 304
 305static void e1000_irq_disable(struct e1000_adapter *adapter)
 306{
 307        struct e1000_hw *hw = &adapter->hw;
 308
 309        ew32(IMC, ~0);
 310        E1000_WRITE_FLUSH();
 311        synchronize_irq(adapter->pdev->irq);
 312}
 313
 314/**
 315 * e1000_irq_enable - Enable default interrupt generation settings
 316 * @adapter: board private structure
 317 **/
 318
 319static void e1000_irq_enable(struct e1000_adapter *adapter)
 320{
 321        struct e1000_hw *hw = &adapter->hw;
 322
 323        ew32(IMS, IMS_ENABLE_MASK);
 324        E1000_WRITE_FLUSH();
 325}
 326
 327static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
 328{
 329        struct e1000_hw *hw = &adapter->hw;
 330        struct net_device *netdev = adapter->netdev;
 331        u16 vid = hw->mng_cookie.vlan_id;
 332        u16 old_vid = adapter->mng_vlan_id;
 333
 334        if (!e1000_vlan_used(adapter))
 335                return;
 336
 337        if (!test_bit(vid, adapter->active_vlans)) {
 338                if (hw->mng_cookie.status &
 339                    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 340                        e1000_vlan_rx_add_vid(netdev, vid);
 341                        adapter->mng_vlan_id = vid;
 342                } else {
 343                        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 344                }
 345                if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
 346                    (vid != old_vid) &&
 347                    !test_bit(old_vid, adapter->active_vlans))
 348                        e1000_vlan_rx_kill_vid(netdev, old_vid);
 349        } else {
 350                adapter->mng_vlan_id = vid;
 351        }
 352}
 353
 354static void e1000_init_manageability(struct e1000_adapter *adapter)
 355{
 356        struct e1000_hw *hw = &adapter->hw;
 357
 358        if (adapter->en_mng_pt) {
 359                u32 manc = er32(MANC);
 360
 361                /* disable hardware interception of ARP */
 362                manc &= ~(E1000_MANC_ARP_EN);
 363
 364                ew32(MANC, manc);
 365        }
 366}
 367
 368static void e1000_release_manageability(struct e1000_adapter *adapter)
 369{
 370        struct e1000_hw *hw = &adapter->hw;
 371
 372        if (adapter->en_mng_pt) {
 373                u32 manc = er32(MANC);
 374
 375                /* re-enable hardware interception of ARP */
 376                manc |= E1000_MANC_ARP_EN;
 377
 378                ew32(MANC, manc);
 379        }
 380}
 381
 382/**
 383 * e1000_configure - configure the hardware for RX and TX
  384 * @adapter: board private structure
 385 **/
 386static void e1000_configure(struct e1000_adapter *adapter)
 387{
 388        struct net_device *netdev = adapter->netdev;
 389        int i;
 390
 391        e1000_set_rx_mode(netdev);
 392
 393        e1000_restore_vlan(adapter);
 394        e1000_init_manageability(adapter);
 395
 396        e1000_configure_tx(adapter);
 397        e1000_setup_rctl(adapter);
 398        e1000_configure_rx(adapter);
 399        /* call E1000_DESC_UNUSED which always leaves
 400         * at least 1 descriptor unused to make sure
 401         * next_to_use != next_to_clean */
 402        for (i = 0; i < adapter->num_rx_queues; i++) {
 403                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
 404                adapter->alloc_rx_buf(adapter, ring,
 405                                      E1000_DESC_UNUSED(ring));
 406        }
 407}
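/* A minimal sketch of the descriptor accounting e1000_configure() relies on
 * (the real E1000_DESC_UNUSED() macro is defined in e1000.h):
 *
 *   unused = (next_to_clean > next_to_use ? 0 : ring->count)
 *            + next_to_clean - next_to_use - 1;
 *
 * The "- 1" keeps one slot permanently unused so next_to_use can never
 * wrap around onto next_to_clean.
 */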
 408
 409int e1000_up(struct e1000_adapter *adapter)
 410{
 411        struct e1000_hw *hw = &adapter->hw;
 412
 413        /* hardware has been reset, we need to reload some things */
 414        e1000_configure(adapter);
 415
 416        clear_bit(__E1000_DOWN, &adapter->flags);
 417
 418        napi_enable(&adapter->napi);
 419
 420        e1000_irq_enable(adapter);
 421
 422        netif_wake_queue(adapter->netdev);
 423
 424        /* fire a link change interrupt to start the watchdog */
 425        ew32(ICS, E1000_ICS_LSC);
 426        return 0;
 427}
 428
 429/**
 430 * e1000_power_up_phy - restore link in case the phy was powered down
 431 * @adapter: address of board private structure
 432 *
 433 * The phy may be powered down to save power and turn off link when the
 434 * driver is unloaded and wake on lan is not enabled (among others)
 435 * *** this routine MUST be followed by a call to e1000_reset ***
 436 *
 437 **/
 438
 439void e1000_power_up_phy(struct e1000_adapter *adapter)
 440{
 441        struct e1000_hw *hw = &adapter->hw;
 442        u16 mii_reg = 0;
 443
 444        /* Just clear the power down bit to wake the phy back up */
 445        if (hw->media_type == e1000_media_type_copper) {
 446                /* according to the manual, the phy will retain its
 447                 * settings across a power-down/up cycle */
 448                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 449                mii_reg &= ~MII_CR_POWER_DOWN;
 450                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 451        }
 452}
 453
 454static void e1000_power_down_phy(struct e1000_adapter *adapter)
 455{
 456        struct e1000_hw *hw = &adapter->hw;
 457
  458        /* Power down the PHY so no link is implied when the interface is down.
  459         * The PHY cannot be powered down if any of the following is true:
  460         * (a) WoL is enabled
  461         * (b) AMT is active
  462         * (c) SoL/IDER session is active */
 463        if (!adapter->wol && hw->mac_type >= e1000_82540 &&
 464           hw->media_type == e1000_media_type_copper) {
 465                u16 mii_reg = 0;
 466
 467                switch (hw->mac_type) {
 468                case e1000_82540:
 469                case e1000_82545:
 470                case e1000_82545_rev_3:
 471                case e1000_82546:
 472                case e1000_ce4100:
 473                case e1000_82546_rev_3:
 474                case e1000_82541:
 475                case e1000_82541_rev_2:
 476                case e1000_82547:
 477                case e1000_82547_rev_2:
 478                        if (er32(MANC) & E1000_MANC_SMBUS_EN)
 479                                goto out;
 480                        break;
 481                default:
 482                        goto out;
 483                }
 484                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 485                mii_reg |= MII_CR_POWER_DOWN;
 486                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 487                msleep(1);
 488        }
 489out:
 490        return;
 491}
 492
 493static void e1000_down_and_stop(struct e1000_adapter *adapter)
 494{
 495        set_bit(__E1000_DOWN, &adapter->flags);
 496
 497        /* Only kill reset task if adapter is not resetting */
 498        if (!test_bit(__E1000_RESETTING, &adapter->flags))
 499                cancel_work_sync(&adapter->reset_task);
 500
 501        cancel_delayed_work_sync(&adapter->watchdog_task);
 502        cancel_delayed_work_sync(&adapter->phy_info_task);
 503        cancel_delayed_work_sync(&adapter->fifo_stall_task);
 504}
 505
 506void e1000_down(struct e1000_adapter *adapter)
 507{
 508        struct e1000_hw *hw = &adapter->hw;
 509        struct net_device *netdev = adapter->netdev;
 510        u32 rctl, tctl;
 511
 512
 513        /* disable receives in the hardware */
 514        rctl = er32(RCTL);
 515        ew32(RCTL, rctl & ~E1000_RCTL_EN);
 516        /* flush and sleep below */
 517
 518        netif_tx_disable(netdev);
 519
 520        /* disable transmits in the hardware */
 521        tctl = er32(TCTL);
 522        tctl &= ~E1000_TCTL_EN;
 523        ew32(TCTL, tctl);
 524        /* flush both disables and wait for them to finish */
 525        E1000_WRITE_FLUSH();
 526        msleep(10);
 527
 528        napi_disable(&adapter->napi);
 529
 530        e1000_irq_disable(adapter);
 531
 532        /*
 533         * Setting DOWN must be after irq_disable to prevent
 534         * a screaming interrupt.  Setting DOWN also prevents
 535         * tasks from rescheduling.
 536         */
 537        e1000_down_and_stop(adapter);
 538
 539        adapter->link_speed = 0;
 540        adapter->link_duplex = 0;
 541        netif_carrier_off(netdev);
 542
 543        e1000_reset(adapter);
 544        e1000_clean_all_tx_rings(adapter);
 545        e1000_clean_all_rx_rings(adapter);
 546}
 547
 548static void e1000_reinit_safe(struct e1000_adapter *adapter)
 549{
 550        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 551                msleep(1);
 552        mutex_lock(&adapter->mutex);
 553        e1000_down(adapter);
 554        e1000_up(adapter);
 555        mutex_unlock(&adapter->mutex);
 556        clear_bit(__E1000_RESETTING, &adapter->flags);
 557}
 558
 559void e1000_reinit_locked(struct e1000_adapter *adapter)
 560{
 561        /* if rtnl_lock is not held the call path is bogus */
 562        ASSERT_RTNL();
 563        WARN_ON(in_interrupt());
 564        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 565                msleep(1);
 566        e1000_down(adapter);
 567        e1000_up(adapter);
 568        clear_bit(__E1000_RESETTING, &adapter->flags);
 569}
 570
 571void e1000_reset(struct e1000_adapter *adapter)
 572{
 573        struct e1000_hw *hw = &adapter->hw;
 574        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 575        bool legacy_pba_adjust = false;
 576        u16 hwm;
 577
  578        /* Repartition the PBA for MTUs greater than 9K.
  579         * CTRL.RST is required for the change to take effect.
  580         */
 581
 582        switch (hw->mac_type) {
 583        case e1000_82542_rev2_0:
 584        case e1000_82542_rev2_1:
 585        case e1000_82543:
 586        case e1000_82544:
 587        case e1000_82540:
 588        case e1000_82541:
 589        case e1000_82541_rev_2:
 590                legacy_pba_adjust = true;
 591                pba = E1000_PBA_48K;
 592                break;
 593        case e1000_82545:
 594        case e1000_82545_rev_3:
 595        case e1000_82546:
 596        case e1000_ce4100:
 597        case e1000_82546_rev_3:
 598                pba = E1000_PBA_48K;
 599                break;
 600        case e1000_82547:
 601        case e1000_82547_rev_2:
 602                legacy_pba_adjust = true;
 603                pba = E1000_PBA_30K;
 604                break;
 605        case e1000_undefined:
 606        case e1000_num_macs:
 607                break;
 608        }
 609
 610        if (legacy_pba_adjust) {
 611                if (hw->max_frame_size > E1000_RXBUFFER_8192)
 612                        pba -= 8; /* allocate more FIFO for Tx */
 613
 614                if (hw->mac_type == e1000_82547) {
 615                        adapter->tx_fifo_head = 0;
 616                        adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 617                        adapter->tx_fifo_size =
 618                                (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
 619                        atomic_set(&adapter->tx_fifo_stall, 0);
 620                }
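                /* Illustrative numbers, assuming the non-jumbo case where pba
                 * stays at E1000_PBA_30K (30) and E1000_PBA_40K is 40 with
                 * E1000_PBA_BYTES_SHIFT = 10: tx_fifo_size becomes
                 * (40 - 30) << 10 = 10240 bytes for the stall workaround.
                 */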
  621        } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
 622                /* adjust PBA for jumbo frames */
 623                ew32(PBA, pba);
 624
 625                /* To maintain wire speed transmits, the Tx FIFO should be
 626                 * large enough to accommodate two full transmit packets,
 627                 * rounded up to the next 1KB and expressed in KB.  Likewise,
 628                 * the Rx FIFO should be large enough to accommodate at least
 629                 * one full receive packet and is similarly rounded up and
 630                 * expressed in KB. */
 631                pba = er32(PBA);
 632                /* upper 16 bits has Tx packet buffer allocation size in KB */
 633                tx_space = pba >> 16;
 634                /* lower 16 bits has Rx packet buffer allocation size in KB */
 635                pba &= 0xffff;
  636                /*
  637                 * the Tx FIFO also stores 16 bytes of descriptor information per
  638                 * packet; don't count the Ethernet FCS because hardware appends it
  639                 */
 640                min_tx_space = (hw->max_frame_size +
 641                                sizeof(struct e1000_tx_desc) -
 642                                ETH_FCS_LEN) * 2;
 643                min_tx_space = ALIGN(min_tx_space, 1024);
 644                min_tx_space >>= 10;
 645                /* software strips receive CRC, so leave room for it */
 646                min_rx_space = hw->max_frame_size;
 647                min_rx_space = ALIGN(min_rx_space, 1024);
 648                min_rx_space >>= 10;
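                /* Worked example (illustrative): with a 9018-byte max frame
                 * and 16-byte Tx descriptors, min_tx_space = (9018 + 16 - 4)
                 * * 2 = 18060, aligned up to 18432, i.e. 18 KB; min_rx_space
                 * = 9018, aligned up to 9216, i.e. 9 KB.
                 */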
 649
 650                /* If current Tx allocation is less than the min Tx FIFO size,
 651                 * and the min Tx FIFO size is less than the current Rx FIFO
 652                 * allocation, take space away from current Rx allocation */
 653                if (tx_space < min_tx_space &&
 654                    ((min_tx_space - tx_space) < pba)) {
 655                        pba = pba - (min_tx_space - tx_space);
 656
 657                        /* PCI/PCIx hardware has PBA alignment constraints */
 658                        switch (hw->mac_type) {
 659                        case e1000_82545 ... e1000_82546_rev_3:
 660                                pba &= ~(E1000_PBA_8K - 1);
 661                                break;
 662                        default:
 663                                break;
 664                        }
 665
 666                        /* if short on rx space, rx wins and must trump tx
 667                         * adjustment or use Early Receive if available */
 668                        if (pba < min_rx_space)
 669                                pba = min_rx_space;
 670                }
 671        }
 672
 673        ew32(PBA, pba);
 674
  675        /*
  676         * flow control settings:
  677         * The high water mark must be low enough to fit one full frame
  678         * (or the size used for early receive) above it in the Rx FIFO.
  679         * Set it to the lower of:
  680         * - 90% of the Rx FIFO size, or
  681         * - the full Rx FIFO size minus the early receive size (for parts
  682         *   with ERT support, assuming ERT is set to E1000_ERT_2048), or
  683         * - the full Rx FIFO size minus one full frame
  684         */
 685        hwm = min(((pba << 10) * 9 / 10),
 686                  ((pba << 10) - hw->max_frame_size));
 687
 688        hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
 689        hw->fc_low_water = hw->fc_high_water - 8;
 690        hw->fc_pause_time = E1000_FC_PAUSE_TIME;
 691        hw->fc_send_xon = 1;
 692        hw->fc = hw->original_fc;
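        /* Worked example (illustrative, assuming a 48 KB Rx PBA and a
         * 1522-byte max frame): pba << 10 = 49152; 90% of that is 44236 and
         * 49152 - 1522 = 47630, so hwm = 44236; fc_high_water is then rounded
         * down to 44232 (8-byte granularity) and fc_low_water becomes 44224.
         */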
 693
 694        /* Allow time for pending master requests to run */
 695        e1000_reset_hw(hw);
 696        if (hw->mac_type >= e1000_82544)
 697                ew32(WUC, 0);
 698
 699        if (e1000_init_hw(hw))
 700                e_dev_err("Hardware Error\n");
 701        e1000_update_mng_vlan(adapter);
 702
 703        /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
 704        if (hw->mac_type >= e1000_82544 &&
 705            hw->autoneg == 1 &&
 706            hw->autoneg_advertised == ADVERTISE_1000_FULL) {
 707                u32 ctrl = er32(CTRL);
 708                /* clear phy power management bit if we are in gig only mode,
 709                 * which if enabled will attempt negotiation to 100Mb, which
 710                 * can cause a loss of link at power off or driver unload */
 711                ctrl &= ~E1000_CTRL_SWDPIN3;
 712                ew32(CTRL, ctrl);
 713        }
 714
 715        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 716        ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
 717
 718        e1000_reset_adaptive(hw);
 719        e1000_phy_get_info(hw, &adapter->phy_info);
 720
 721        e1000_release_manageability(adapter);
 722}
 723
 724/* Dump the eeprom for users having checksum issues */
 725static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 726{
 727        struct net_device *netdev = adapter->netdev;
 728        struct ethtool_eeprom eeprom;
 729        const struct ethtool_ops *ops = netdev->ethtool_ops;
 730        u8 *data;
 731        int i;
 732        u16 csum_old, csum_new = 0;
 733
 734        eeprom.len = ops->get_eeprom_len(netdev);
 735        eeprom.offset = 0;
 736
 737        data = kmalloc(eeprom.len, GFP_KERNEL);
 738        if (!data)
 739                return;
 740
 741        ops->get_eeprom(netdev, &eeprom, data);
 742
 743        csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
 744                   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
 745        for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
 746                csum_new += data[i] + (data[i + 1] << 8);
 747        csum_new = EEPROM_SUM - csum_new;
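        /* Background: EEPROM_SUM is 0xBABA and EEPROM_CHECKSUM_REG is word
         * 0x3F; the stored checksum word is chosen so that the 16-bit sum of
         * words 0x00-0x3F equals 0xBABA, so the value computed above is what
         * the checksum word ought to contain.
         */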
 748
 749        pr_err("/*********************/\n");
 750        pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
 751        pr_err("Calculated              : 0x%04x\n", csum_new);
 752
 753        pr_err("Offset    Values\n");
 754        pr_err("========  ======\n");
 755        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 756
 757        pr_err("Include this output when contacting your support provider.\n");
 758        pr_err("This is not a software error! Something bad happened to\n");
 759        pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
 760        pr_err("result in further problems, possibly loss of data,\n");
 761        pr_err("corruption or system hangs!\n");
 762        pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
 763        pr_err("which is invalid and requires you to set the proper MAC\n");
 764        pr_err("address manually before continuing to enable this network\n");
 765        pr_err("device. Please inspect the EEPROM dump and report the\n");
 766        pr_err("issue to your hardware vendor or Intel Customer Support.\n");
 767        pr_err("/*********************/\n");
 768
 769        kfree(data);
 770}
 771
 772/**
 773 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 774 * @pdev: PCI device information struct
 775 *
 776 * Return true if an adapter needs ioport resources
 777 **/
 778static int e1000_is_need_ioport(struct pci_dev *pdev)
 779{
 780        switch (pdev->device) {
 781        case E1000_DEV_ID_82540EM:
 782        case E1000_DEV_ID_82540EM_LOM:
 783        case E1000_DEV_ID_82540EP:
 784        case E1000_DEV_ID_82540EP_LOM:
 785        case E1000_DEV_ID_82540EP_LP:
 786        case E1000_DEV_ID_82541EI:
 787        case E1000_DEV_ID_82541EI_MOBILE:
 788        case E1000_DEV_ID_82541ER:
 789        case E1000_DEV_ID_82541ER_LOM:
 790        case E1000_DEV_ID_82541GI:
 791        case E1000_DEV_ID_82541GI_LF:
 792        case E1000_DEV_ID_82541GI_MOBILE:
 793        case E1000_DEV_ID_82544EI_COPPER:
 794        case E1000_DEV_ID_82544EI_FIBER:
 795        case E1000_DEV_ID_82544GC_COPPER:
 796        case E1000_DEV_ID_82544GC_LOM:
 797        case E1000_DEV_ID_82545EM_COPPER:
 798        case E1000_DEV_ID_82545EM_FIBER:
 799        case E1000_DEV_ID_82546EB_COPPER:
 800        case E1000_DEV_ID_82546EB_FIBER:
 801        case E1000_DEV_ID_82546EB_QUAD_COPPER:
 802                return true;
 803        default:
 804                return false;
 805        }
 806}
 807
 808static netdev_features_t e1000_fix_features(struct net_device *netdev,
 809        netdev_features_t features)
 810{
 811        /*
 812         * Since there is no support for separate rx/tx vlan accel
 813         * enable/disable make sure tx flag is always in same state as rx.
 814         */
 815        if (features & NETIF_F_HW_VLAN_RX)
 816                features |= NETIF_F_HW_VLAN_TX;
 817        else
 818                features &= ~NETIF_F_HW_VLAN_TX;
 819
 820        return features;
 821}
 822
 823static int e1000_set_features(struct net_device *netdev,
 824        netdev_features_t features)
 825{
 826        struct e1000_adapter *adapter = netdev_priv(netdev);
 827        netdev_features_t changed = features ^ netdev->features;
 828
 829        if (changed & NETIF_F_HW_VLAN_RX)
 830                e1000_vlan_mode(netdev, features);
 831
 832        if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 833                return 0;
 834
 835        netdev->features = features;
 836        adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 837
 838        if (netif_running(netdev))
 839                e1000_reinit_locked(adapter);
 840        else
 841                e1000_reset(adapter);
 842
 843        return 0;
 844}
 845
 846static const struct net_device_ops e1000_netdev_ops = {
 847        .ndo_open               = e1000_open,
 848        .ndo_stop               = e1000_close,
 849        .ndo_start_xmit         = e1000_xmit_frame,
 850        .ndo_get_stats          = e1000_get_stats,
 851        .ndo_set_rx_mode        = e1000_set_rx_mode,
 852        .ndo_set_mac_address    = e1000_set_mac,
 853        .ndo_tx_timeout         = e1000_tx_timeout,
 854        .ndo_change_mtu         = e1000_change_mtu,
 855        .ndo_do_ioctl           = e1000_ioctl,
 856        .ndo_validate_addr      = eth_validate_addr,
 857        .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
 858        .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
 859#ifdef CONFIG_NET_POLL_CONTROLLER
 860        .ndo_poll_controller    = e1000_netpoll,
 861#endif
 862        .ndo_fix_features       = e1000_fix_features,
 863        .ndo_set_features       = e1000_set_features,
 864};
 865
 866/**
 867 * e1000_init_hw_struct - initialize members of hw struct
 868 * @adapter: board private struct
 869 * @hw: structure used by e1000_hw.c
 870 *
 871 * Factors out initialization of the e1000_hw struct to its own function
 872 * that can be called very early at init (just after struct allocation).
 873 * Fields are initialized based on PCI device information and
 874 * OS network device settings (MTU size).
 875 * Returns negative error codes if MAC type setup fails.
 876 */
 877static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 878                                struct e1000_hw *hw)
 879{
 880        struct pci_dev *pdev = adapter->pdev;
 881
 882        /* PCI config space info */
 883        hw->vendor_id = pdev->vendor;
 884        hw->device_id = pdev->device;
 885        hw->subsystem_vendor_id = pdev->subsystem_vendor;
 886        hw->subsystem_id = pdev->subsystem_device;
 887        hw->revision_id = pdev->revision;
 888
 889        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 890
 891        hw->max_frame_size = adapter->netdev->mtu +
 892                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
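        /* e.g. with the default 1500-byte MTU, max_frame_size works out to
         * 1500 + 14 (Ethernet header) + 4 (FCS) = 1518 bytes
         */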
 893        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
 894
 895        /* identify the MAC */
 896        if (e1000_set_mac_type(hw)) {
 897                e_err(probe, "Unknown MAC Type\n");
 898                return -EIO;
 899        }
 900
 901        switch (hw->mac_type) {
 902        default:
 903                break;
 904        case e1000_82541:
 905        case e1000_82547:
 906        case e1000_82541_rev_2:
 907        case e1000_82547_rev_2:
 908                hw->phy_init_script = 1;
 909                break;
 910        }
 911
 912        e1000_set_media_type(hw);
 913        e1000_get_bus_info(hw);
 914
 915        hw->wait_autoneg_complete = false;
 916        hw->tbi_compatibility_en = true;
 917        hw->adaptive_ifs = true;
 918
 919        /* Copper options */
 920
 921        if (hw->media_type == e1000_media_type_copper) {
 922                hw->mdix = AUTO_ALL_MODES;
 923                hw->disable_polarity_correction = false;
 924                hw->master_slave = E1000_MASTER_SLAVE;
 925        }
 926
 927        return 0;
 928}
 929
 930/**
 931 * e1000_probe - Device Initialization Routine
 932 * @pdev: PCI device information struct
 933 * @ent: entry in e1000_pci_tbl
 934 *
 935 * Returns 0 on success, negative on failure
 936 *
 937 * e1000_probe initializes an adapter identified by a pci_dev structure.
 938 * The OS initialization, configuring of the adapter private structure,
 939 * and a hardware reset occur.
 940 **/
 941static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 942{
 943        struct net_device *netdev;
 944        struct e1000_adapter *adapter;
 945        struct e1000_hw *hw;
 946
 947        static int cards_found = 0;
 948        static int global_quad_port_a = 0; /* global ksp3 port a indication */
 949        int i, err, pci_using_dac;
 950        u16 eeprom_data = 0;
 951        u16 tmp = 0;
 952        u16 eeprom_apme_mask = E1000_EEPROM_APME;
 953        int bars, need_ioport;
 954
 955        /* do not allocate ioport bars when not needed */
 956        need_ioport = e1000_is_need_ioport(pdev);
 957        if (need_ioport) {
 958                bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 959                err = pci_enable_device(pdev);
 960        } else {
 961                bars = pci_select_bars(pdev, IORESOURCE_MEM);
 962                err = pci_enable_device_mem(pdev);
 963        }
 964        if (err)
 965                return err;
 966
 967        err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
 968        if (err)
 969                goto err_pci_reg;
 970
 971        pci_set_master(pdev);
 972        err = pci_save_state(pdev);
 973        if (err)
 974                goto err_alloc_etherdev;
 975
 976        err = -ENOMEM;
 977        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
 978        if (!netdev)
 979                goto err_alloc_etherdev;
 980
 981        SET_NETDEV_DEV(netdev, &pdev->dev);
 982
 983        pci_set_drvdata(pdev, netdev);
 984        adapter = netdev_priv(netdev);
 985        adapter->netdev = netdev;
 986        adapter->pdev = pdev;
 987        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 988        adapter->bars = bars;
 989        adapter->need_ioport = need_ioport;
 990
 991        hw = &adapter->hw;
 992        hw->back = adapter;
 993
 994        err = -EIO;
 995        hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
 996        if (!hw->hw_addr)
 997                goto err_ioremap;
 998
 999        if (adapter->need_ioport) {
1000                for (i = BAR_1; i <= BAR_5; i++) {
1001                        if (pci_resource_len(pdev, i) == 0)
1002                                continue;
1003                        if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1004                                hw->io_base = pci_resource_start(pdev, i);
1005                                break;
1006                        }
1007                }
1008        }
1009
1010        /* make ready for any if (hw->...) below */
1011        err = e1000_init_hw_struct(adapter, hw);
1012        if (err)
1013                goto err_sw_init;
1014
 1015        /*
 1016         * there is a workaround being applied below that limits
 1017         * 64-bit DMA addresses to 64-bit hardware.  Some 32-bit
 1018         * adapters hang on Tx when given 64-bit DMA addresses
 1019         */
1020        pci_using_dac = 0;
1021        if ((hw->bus_type == e1000_bus_type_pcix) &&
1022            !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1023                /*
1024                 * according to DMA-API-HOWTO, coherent calls will always
1025                 * succeed if the set call did
1026                 */
1027                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1028                pci_using_dac = 1;
1029        } else {
1030                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1031                if (err) {
1032                        pr_err("No usable DMA config, aborting\n");
1033                        goto err_dma;
1034                }
1035                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1036        }
1037
1038        netdev->netdev_ops = &e1000_netdev_ops;
1039        e1000_set_ethtool_ops(netdev);
1040        netdev->watchdog_timeo = 5 * HZ;
1041        netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1042
1043        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1044
1045        adapter->bd_number = cards_found;
1046
1047        /* setup the private structure */
1048
1049        err = e1000_sw_init(adapter);
1050        if (err)
1051                goto err_sw_init;
1052
1053        err = -EIO;
1054        if (hw->mac_type == e1000_ce4100) {
1055                hw->ce4100_gbe_mdio_base_virt =
1056                                        ioremap(pci_resource_start(pdev, BAR_1),
1057                                                pci_resource_len(pdev, BAR_1));
1058
1059                if (!hw->ce4100_gbe_mdio_base_virt)
1060                        goto err_mdio_ioremap;
1061        }
1062
1063        if (hw->mac_type >= e1000_82543) {
1064                netdev->hw_features = NETIF_F_SG |
1065                                   NETIF_F_HW_CSUM |
1066                                   NETIF_F_HW_VLAN_RX;
1067                netdev->features = NETIF_F_HW_VLAN_TX |
1068                                   NETIF_F_HW_VLAN_FILTER;
1069        }
1070
1071        if ((hw->mac_type >= e1000_82544) &&
1072           (hw->mac_type != e1000_82547))
1073                netdev->hw_features |= NETIF_F_TSO;
1074
1075        netdev->priv_flags |= IFF_SUPP_NOFCS;
1076
1077        netdev->features |= netdev->hw_features;
1078        netdev->hw_features |= (NETIF_F_RXCSUM |
1079                                NETIF_F_RXALL |
1080                                NETIF_F_RXFCS);
1081
1082        if (pci_using_dac) {
1083                netdev->features |= NETIF_F_HIGHDMA;
1084                netdev->vlan_features |= NETIF_F_HIGHDMA;
1085        }
1086
1087        netdev->vlan_features |= (NETIF_F_TSO |
1088                                  NETIF_F_HW_CSUM |
1089                                  NETIF_F_SG);
1090
1091        netdev->priv_flags |= IFF_UNICAST_FLT;
1092
1093        adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1094
1095        /* initialize eeprom parameters */
1096        if (e1000_init_eeprom_params(hw)) {
1097                e_err(probe, "EEPROM initialization failed\n");
1098                goto err_eeprom;
1099        }
1100
1101        /* before reading the EEPROM, reset the controller to
1102         * put the device in a known good starting state */
1103
1104        e1000_reset_hw(hw);
1105
1106        /* make sure the EEPROM is good */
1107        if (e1000_validate_eeprom_checksum(hw) < 0) {
1108                e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1109                e1000_dump_eeprom(adapter);
 1110                /*
 1111                 * set the MAC address to all zeroes to invalidate and
 1112                 * temporarily disable this device for the user. This blocks
 1113                 * regular traffic while still permitting ethtool ioctls from
 1114                 * reaching the hardware as well as allowing the user to run
 1115                 * the interface after manually setting a hw addr using
 1116                 * `ip link set address`
 1117                 */
1118                memset(hw->mac_addr, 0, netdev->addr_len);
1119        } else {
1120                /* copy the MAC address out of the EEPROM */
1121                if (e1000_read_mac_addr(hw))
1122                        e_err(probe, "EEPROM Read Error\n");
1123        }
 1124        /* don't block initialization here due to bad MAC address */
1125        memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1126        memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
1127
1128        if (!is_valid_ether_addr(netdev->perm_addr))
1129                e_err(probe, "Invalid MAC Address\n");
1130
1131
1132        INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1133        INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1134                          e1000_82547_tx_fifo_stall_task);
1135        INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1136        INIT_WORK(&adapter->reset_task, e1000_reset_task);
1137
1138        e1000_check_options(adapter);
1139
1140        /* Initial Wake on LAN setting
1141         * If APM wake is enabled in the EEPROM,
1142         * enable the ACPI Magic Packet filter
1143         */
1144
1145        switch (hw->mac_type) {
1146        case e1000_82542_rev2_0:
1147        case e1000_82542_rev2_1:
1148        case e1000_82543:
1149                break;
1150        case e1000_82544:
1151                e1000_read_eeprom(hw,
1152                        EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1153                eeprom_apme_mask = E1000_EEPROM_82544_APM;
1154                break;
1155        case e1000_82546:
1156        case e1000_82546_rev_3:
 1157                if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1158                        e1000_read_eeprom(hw,
1159                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1160                        break;
1161                }
1162                /* Fall Through */
1163        default:
1164                e1000_read_eeprom(hw,
1165                        EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1166                break;
1167        }
1168        if (eeprom_data & eeprom_apme_mask)
1169                adapter->eeprom_wol |= E1000_WUFC_MAG;
1170
1171        /* now that we have the eeprom settings, apply the special cases
1172         * where the eeprom may be wrong or the board simply won't support
1173         * wake on lan on a particular port */
1174        switch (pdev->device) {
1175        case E1000_DEV_ID_82546GB_PCIE:
1176                adapter->eeprom_wol = 0;
1177                break;
1178        case E1000_DEV_ID_82546EB_FIBER:
1179        case E1000_DEV_ID_82546GB_FIBER:
1180                /* Wake events only supported on port A for dual fiber
1181                 * regardless of eeprom setting */
1182                if (er32(STATUS) & E1000_STATUS_FUNC_1)
1183                        adapter->eeprom_wol = 0;
1184                break;
1185        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1186                /* if quad port adapter, disable WoL on all but port A */
1187                if (global_quad_port_a != 0)
1188                        adapter->eeprom_wol = 0;
1189                else
1190                        adapter->quad_port_a = true;
1191                /* Reset for multiple quad port adapters */
1192                if (++global_quad_port_a == 4)
1193                        global_quad_port_a = 0;
1194                break;
1195        }
1196
1197        /* initialize the wol settings based on the eeprom settings */
1198        adapter->wol = adapter->eeprom_wol;
1199        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1200
1201        /* Auto detect PHY address */
1202        if (hw->mac_type == e1000_ce4100) {
1203                for (i = 0; i < 32; i++) {
1204                        hw->phy_addr = i;
1205                        e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1206                        if (tmp == 0 || tmp == 0xFF) {
1207                                if (i == 31)
1208                                        goto err_eeprom;
1209                                continue;
1210                        } else
1211                                break;
1212                }
1213        }
1214
1215        /* reset the hardware with the new settings */
1216        e1000_reset(adapter);
1217
1218        strcpy(netdev->name, "eth%d");
1219        err = register_netdev(netdev);
1220        if (err)
1221                goto err_register;
1222
1223        e1000_vlan_filter_on_off(adapter, false);
1224
1225        /* print bus type/speed/width info */
1226        e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1227               ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1228               ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1229                (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1230                (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1231                (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1232               ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1233               netdev->dev_addr);
1234
1235        /* carrier off reporting is important to ethtool even BEFORE open */
1236        netif_carrier_off(netdev);
1237
1238        e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1239
1240        cards_found++;
1241        return 0;
1242
1243err_register:
1244err_eeprom:
1245        e1000_phy_hw_reset(hw);
1246
1247        if (hw->flash_address)
1248                iounmap(hw->flash_address);
1249        kfree(adapter->tx_ring);
1250        kfree(adapter->rx_ring);
1251err_dma:
1252err_sw_init:
1253err_mdio_ioremap:
1254        iounmap(hw->ce4100_gbe_mdio_base_virt);
1255        iounmap(hw->hw_addr);
1256err_ioremap:
1257        free_netdev(netdev);
1258err_alloc_etherdev:
1259        pci_release_selected_regions(pdev, bars);
1260err_pci_reg:
1261        pci_disable_device(pdev);
1262        return err;
1263}
1264
1265/**
1266 * e1000_remove - Device Removal Routine
1267 * @pdev: PCI device information struct
1268 *
1269 * e1000_remove is called by the PCI subsystem to alert the driver
 1270 * that it should release a PCI device.  This could be caused by a
1271 * Hot-Plug event, or because the driver is going to be removed from
1272 * memory.
1273 **/
1274
1275static void e1000_remove(struct pci_dev *pdev)
1276{
1277        struct net_device *netdev = pci_get_drvdata(pdev);
1278        struct e1000_adapter *adapter = netdev_priv(netdev);
1279        struct e1000_hw *hw = &adapter->hw;
1280
1281        e1000_down_and_stop(adapter);
1282        e1000_release_manageability(adapter);
1283
1284        unregister_netdev(netdev);
1285
1286        e1000_phy_hw_reset(hw);
1287
1288        kfree(adapter->tx_ring);
1289        kfree(adapter->rx_ring);
1290
1291        if (hw->mac_type == e1000_ce4100)
1292                iounmap(hw->ce4100_gbe_mdio_base_virt);
1293        iounmap(hw->hw_addr);
1294        if (hw->flash_address)
1295                iounmap(hw->flash_address);
1296        pci_release_selected_regions(pdev, adapter->bars);
1297
1298        free_netdev(netdev);
1299
1300        pci_disable_device(pdev);
1301}
1302
1303/**
1304 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1305 * @adapter: board private structure to initialize
1306 *
1307 * e1000_sw_init initializes the Adapter private data structure.
1308 * e1000_init_hw_struct MUST be called before this function
1309 **/
1310
1311static int e1000_sw_init(struct e1000_adapter *adapter)
1312{
1313        adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1314
1315        adapter->num_tx_queues = 1;
1316        adapter->num_rx_queues = 1;
1317
1318        if (e1000_alloc_queues(adapter)) {
1319                e_err(probe, "Unable to allocate memory for queues\n");
1320                return -ENOMEM;
1321        }
1322
1323        /* Explicitly disable IRQ since the NIC can be in any state. */
1324        e1000_irq_disable(adapter);
1325
1326        spin_lock_init(&adapter->stats_lock);
1327        mutex_init(&adapter->mutex);
1328
1329        set_bit(__E1000_DOWN, &adapter->flags);
1330
1331        return 0;
1332}
1333
1334/**
1335 * e1000_alloc_queues - Allocate memory for all rings
1336 * @adapter: board private structure to initialize
1337 *
1338 * We allocate one ring per queue at run-time since we don't know the
1339 * number of queues at compile-time.
1340 **/
1341
1342static int e1000_alloc_queues(struct e1000_adapter *adapter)
1343{
1344        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1345                                   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1346        if (!adapter->tx_ring)
1347                return -ENOMEM;
1348
1349        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1350                                   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1351        if (!adapter->rx_ring) {
1352                kfree(adapter->tx_ring);
1353                return -ENOMEM;
1354        }
1355
1356        return E1000_SUCCESS;
1357}
1358
1359/**
1360 * e1000_open - Called when a network interface is made active
1361 * @netdev: network interface device structure
1362 *
1363 * Returns 0 on success, negative value on failure
1364 *
1365 * The open entry point is called when a network interface is made
1366 * active by the system (IFF_UP).  At this point all resources needed
1367 * for transmit and receive operations are allocated, the interrupt
1368 * handler is registered with the OS, the watchdog task is started,
1369 * and the stack is notified that the interface is ready.
1370 **/
1371
1372static int e1000_open(struct net_device *netdev)
1373{
1374        struct e1000_adapter *adapter = netdev_priv(netdev);
1375        struct e1000_hw *hw = &adapter->hw;
1376        int err;
1377
1378        /* disallow open during test */
1379        if (test_bit(__E1000_TESTING, &adapter->flags))
1380                return -EBUSY;
1381
1382        netif_carrier_off(netdev);
1383
1384        /* allocate transmit descriptors */
1385        err = e1000_setup_all_tx_resources(adapter);
1386        if (err)
1387                goto err_setup_tx;
1388
1389        /* allocate receive descriptors */
1390        err = e1000_setup_all_rx_resources(adapter);
1391        if (err)
1392                goto err_setup_rx;
1393
1394        e1000_power_up_phy(adapter);
1395
1396        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1397        if ((hw->mng_cookie.status &
1398                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1399                e1000_update_mng_vlan(adapter);
1400        }
1401
 1402        /* before we allocate an interrupt, we must be ready to handle it.
 1403         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 1404         * as soon as we call request_irq, so we have to setup our
 1405         * clean_rx handler before we do so.  */
1406        e1000_configure(adapter);
1407
1408        err = e1000_request_irq(adapter);
1409        if (err)
1410                goto err_req_irq;
1411
1412        /* From here on the code is the same as e1000_up() */
1413        clear_bit(__E1000_DOWN, &adapter->flags);
1414
1415        napi_enable(&adapter->napi);
1416
1417        e1000_irq_enable(adapter);
1418
1419        netif_start_queue(netdev);
1420
1421        /* fire a link status change interrupt to start the watchdog */
1422        ew32(ICS, E1000_ICS_LSC);
1423
1424        return E1000_SUCCESS;
1425
1426err_req_irq:
1427        e1000_power_down_phy(adapter);
1428        e1000_free_all_rx_resources(adapter);
1429err_setup_rx:
1430        e1000_free_all_tx_resources(adapter);
1431err_setup_tx:
1432        e1000_reset(adapter);
1433
1434        return err;
1435}
1436
1437/**
1438 * e1000_close - Disables a network interface
1439 * @netdev: network interface device structure
1440 *
1441 * Returns 0, this is not allowed to fail
1442 *
1443 * The close entry point is called when an interface is de-activated
1444 * by the OS.  The hardware is still under the drivers control, but
1445 * needs to be disabled.  A global MAC reset is issued to stop the
1446 * hardware, and all transmit and receive resources are freed.
1447 **/
1448
1449static int e1000_close(struct net_device *netdev)
1450{
1451        struct e1000_adapter *adapter = netdev_priv(netdev);
1452        struct e1000_hw *hw = &adapter->hw;
1453
1454        WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1455        e1000_down(adapter);
1456        e1000_power_down_phy(adapter);
1457        e1000_free_irq(adapter);
1458
1459        e1000_free_all_tx_resources(adapter);
1460        e1000_free_all_rx_resources(adapter);
1461
1462        /* kill manageability vlan ID if supported, but not if a vlan with
1463         * the same ID is registered on the host OS (let 8021q kill it) */
1464        if ((hw->mng_cookie.status &
1465                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1466             !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1467                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1468        }
1469
1470        return 0;
1471}
1472
1473/**
1474 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1475 * @adapter: address of board private structure
1476 * @start: address of beginning of memory
1477 * @len: length of memory
1478 **/
1479static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1480                                  unsigned long len)
1481{
1482        struct e1000_hw *hw = &adapter->hw;
1483        unsigned long begin = (unsigned long)start;
1484        unsigned long end = begin + len;
1485
1486        /* First-rev 82545 and 82546 parts (errata 23), as well as the
1487         * CE4100, must not let any memory write cross a 64 KB boundary */
1488        if (hw->mac_type == e1000_82545 ||
1489            hw->mac_type == e1000_ce4100 ||
1490            hw->mac_type == e1000_82546) {
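                    /* begin and end - 1 share the same bits above bit 15 only
                     * when the whole buffer fits inside one 64 KB region, so a
                     * non-zero XOR in those bits means a boundary is crossed */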
1491                return ((begin ^ (end - 1)) >> 16) == 0;
1492        }
1493
1494        return true;
1495}
1496
1497/**
1498 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1499 * @adapter: board private structure
1500 * @txdr:    tx descriptor ring (for a specific queue) to setup
1501 *
1502 * Return 0 on success, negative on failure
1503 **/
1504
1505static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1506                                    struct e1000_tx_ring *txdr)
1507{
1508        struct pci_dev *pdev = adapter->pdev;
1509        int size;
1510
1511        size = sizeof(struct e1000_buffer) * txdr->count;
1512        txdr->buffer_info = vzalloc(size);
1513        if (!txdr->buffer_info) {
1514                e_err(probe, "Unable to allocate memory for the Tx descriptor "
1515                      "ring\n");
1516                return -ENOMEM;
1517        }
1518
1519        /* round up to nearest 4K */
1520
1521        txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1522        txdr->size = ALIGN(txdr->size, 4096);
1523
1524        txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1525                                        GFP_KERNEL);
1526        if (!txdr->desc) {
1527setup_tx_desc_die:
1528                vfree(txdr->buffer_info);
1529                e_err(probe, "Unable to allocate memory for the Tx descriptor "
1530                      "ring\n");
1531                return -ENOMEM;
1532        }
1533
1534        /* Fix for errata 23, can't cross 64kB boundary */
1535        if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1536                void *olddesc = txdr->desc;
1537                dma_addr_t olddma = txdr->dma;
1538                e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1539                      txdr->size, txdr->desc);
1540                /* Try again, without freeing the previous */
1541                txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1542                                                &txdr->dma, GFP_KERNEL);
1543                /* Failed allocation, critical failure */
1544                if (!txdr->desc) {
1545                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1546                                          olddma);
1547                        goto setup_tx_desc_die;
1548                }
1549
1550                if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1551                        /* give up */
1552                        dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1553                                          txdr->dma);
1554                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1555                                          olddma);
1556                        e_err(probe, "Unable to allocate aligned memory "
1557                              "for the transmit descriptor ring\n");
1558                        vfree(txdr->buffer_info);
1559                        return -ENOMEM;
1560                } else {
1561                        /* Free old allocation, new allocation was successful */
1562                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1563                                          olddma);
1564                }
1565        }
1566        memset(txdr->desc, 0, txdr->size);
1567
1568        txdr->next_to_use = 0;
1569        txdr->next_to_clean = 0;
1570
1571        return 0;
1572}
1573
1574/**
1575 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1576 *                                (Descriptors) for all queues
1577 * @adapter: board private structure
1578 *
1579 * Return 0 on success, negative on failure
1580 **/
1581
1582int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1583{
1584        int i, err = 0;
1585
1586        for (i = 0; i < adapter->num_tx_queues; i++) {
1587                err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1588                if (err) {
1589                        e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1590                        for (i--; i >= 0; i--)
1591                                e1000_free_tx_resources(adapter,
1592                                                        &adapter->tx_ring[i]);
1593                        break;
1594                }
1595        }
1596
1597        return err;
1598}
1599
1600/**
1601 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1602 * @adapter: board private structure
1603 *
1604 * Configure the Tx unit of the MAC after a reset.
1605 **/
1606
1607static void e1000_configure_tx(struct e1000_adapter *adapter)
1608{
1609        u64 tdba;
1610        struct e1000_hw *hw = &adapter->hw;
1611        u32 tdlen, tctl, tipg;
1612        u32 ipgr1, ipgr2;
1613
1614        /* Setup the HW Tx Head and Tail descriptor pointers */
1615
1616        switch (adapter->num_tx_queues) {
1617        case 1:
1618        default:
1619                tdba = adapter->tx_ring[0].dma;
1620                tdlen = adapter->tx_ring[0].count *
1621                        sizeof(struct e1000_tx_desc);
1622                ew32(TDLEN, tdlen);
1623                ew32(TDBAH, (tdba >> 32));
1624                ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1625                ew32(TDT, 0);
1626                ew32(TDH, 0);
1627                adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1628                adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
1629                break;
1630        }
1631
1632        /* Set the default values for the Tx Inter Packet Gap timer */
1633        if ((hw->media_type == e1000_media_type_fiber ||
1634             hw->media_type == e1000_media_type_internal_serdes))
1635                tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1636        else
1637                tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1638
1639        switch (hw->mac_type) {
1640        case e1000_82542_rev2_0:
1641        case e1000_82542_rev2_1:
1642                tipg = DEFAULT_82542_TIPG_IPGT;
1643                ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1644                ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1645                break;
1646        default:
1647                ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1648                ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1649                break;
1650        }
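            /* tipg already holds the IPGT value in its low bits; shift the two
             * receive-side gap values (IPGR1/IPGR2) into their fields
             * alongside it */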
1651        tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1652        tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1653        ew32(TIPG, tipg);
1654
1655        /* Set the Tx Interrupt Delay register */
1656
1657        ew32(TIDV, adapter->tx_int_delay);
1658        if (hw->mac_type >= e1000_82540)
1659                ew32(TADV, adapter->tx_abs_int_delay);
1660
1661        /* Program the Transmit Control Register */
1662
1663        tctl = er32(TCTL);
1664        tctl &= ~E1000_TCTL_CT;
1665        tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1666                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1667
1668        e1000_config_collision_dist(hw);
1669
1670        /* Setup Transmit Descriptor Settings for eop descriptor */
1671        adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1672
1673        /* only set IDE if we are delaying interrupts using the timers */
1674        if (adapter->tx_int_delay)
1675                adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1676
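            /* pre-82543 MACs signal completion with the Report Packet Sent
             * bit; later MACs use Report Status instead */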
1677        if (hw->mac_type < e1000_82543)
1678                adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1679        else
1680                adapter->txd_cmd |= E1000_TXD_CMD_RS;
1681
1682        /* Cache if we're 82544 running in PCI-X because we'll
1683         * need this to apply a workaround later in the send path. */
1684        if (hw->mac_type == e1000_82544 &&
1685            hw->bus_type == e1000_bus_type_pcix)
1686                adapter->pcix_82544 = true;
1687
1688        ew32(TCTL, tctl);
1689
1690}
1691
1692/**
1693 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1694 * @adapter: board private structure
1695 * @rxdr:    rx descriptor ring (for a specific queue) to setup
1696 *
1697 * Returns 0 on success, negative on failure
1698 **/
1699
1700static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1701                                    struct e1000_rx_ring *rxdr)
1702{
1703        struct pci_dev *pdev = adapter->pdev;
1704        int size, desc_len;
1705
1706        size = sizeof(struct e1000_buffer) * rxdr->count;
1707        rxdr->buffer_info = vzalloc(size);
1708        if (!rxdr->buffer_info) {
1709                e_err(probe, "Unable to allocate memory for the Rx descriptor "
1710                      "ring\n");
1711                return -ENOMEM;
1712        }
1713
1714        desc_len = sizeof(struct e1000_rx_desc);
1715
1716        /* Round up to nearest 4K */
1717
1718        rxdr->size = rxdr->count * desc_len;
1719        rxdr->size = ALIGN(rxdr->size, 4096);
1720
1721        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1722                                        GFP_KERNEL);
1723
1724        if (!rxdr->desc) {
1725                e_err(probe, "Unable to allocate memory for the Rx descriptor "
1726                      "ring\n");
1727setup_rx_desc_die:
1728                vfree(rxdr->buffer_info);
1729                return -ENOMEM;
1730        }
1731
1732        /* Fix for errata 23, can't cross 64kB boundary */
1733        if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1734                void *olddesc = rxdr->desc;
1735                dma_addr_t olddma = rxdr->dma;
1736                e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1737                      rxdr->size, rxdr->desc);
1738                /* Try again, without freeing the previous */
1739                rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1740                                                &rxdr->dma, GFP_KERNEL);
1741                /* Failed allocation, critical failure */
1742                if (!rxdr->desc) {
1743                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1744                                          olddma);
1745                        e_err(probe, "Unable to allocate memory for the Rx "
1746                              "descriptor ring\n");
1747                        goto setup_rx_desc_die;
1748                }
1749
1750                if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1751                        /* give up */
1752                        dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1753                                          rxdr->dma);
1754                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1755                                          olddma);
1756                        e_err(probe, "Unable to allocate aligned memory for "
1757                              "the Rx descriptor ring\n");
1758                        goto setup_rx_desc_die;
1759                } else {
1760                        /* Free old allocation, new allocation was successful */
1761                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1762                                          olddma);
1763                }
1764        }
1765        memset(rxdr->desc, 0, rxdr->size);
1766
1767        rxdr->next_to_clean = 0;
1768        rxdr->next_to_use = 0;
1769        rxdr->rx_skb_top = NULL;
1770
1771        return 0;
1772}
1773
1774/**
1775 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1776 *                                (Descriptors) for all queues
1777 * @adapter: board private structure
1778 *
1779 * Return 0 on success, negative on failure
1780 **/
1781
1782int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1783{
1784        int i, err = 0;
1785
1786        for (i = 0; i < adapter->num_rx_queues; i++) {
1787                err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1788                if (err) {
1789                        e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1790                        for (i--; i >= 0; i--)
1791                                e1000_free_rx_resources(adapter,
1792                                                        &adapter->rx_ring[i]);
1793                        break;
1794                }
1795        }
1796
1797        return err;
1798}
1799
1800/**
1801 * e1000_setup_rctl - configure the receive control registers
1802 * @adapter: Board private structure
1803 **/
1804static void e1000_setup_rctl(struct e1000_adapter *adapter)
1805{
1806        struct e1000_hw *hw = &adapter->hw;
1807        u32 rctl;
1808
1809        rctl = er32(RCTL);
1810
1811        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1812
1813        rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1814                E1000_RCTL_RDMTS_HALF |
1815                (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1816
1817        if (hw->tbi_compatibility_on == 1)
1818                rctl |= E1000_RCTL_SBP;
1819        else
1820                rctl &= ~E1000_RCTL_SBP;
1821
1822        if (adapter->netdev->mtu <= ETH_DATA_LEN)
1823                rctl &= ~E1000_RCTL_LPE;
1824        else
1825                rctl |= E1000_RCTL_LPE;
1826
1827        /* Setup buffer sizes */
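            /* BSEX scales the RCTL size encoding by 16, which is how the
             * 4096/8192/16384 buffer sizes are expressed; it is cleared again
             * below for the standard 2048-byte case */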
1828        rctl &= ~E1000_RCTL_SZ_4096;
1829        rctl |= E1000_RCTL_BSEX;
1830        switch (adapter->rx_buffer_len) {
1831        case E1000_RXBUFFER_2048:
1832        default:
1833                rctl |= E1000_RCTL_SZ_2048;
1834                rctl &= ~E1000_RCTL_BSEX;
1835                break;
1836        case E1000_RXBUFFER_4096:
1837                rctl |= E1000_RCTL_SZ_4096;
1838                break;
1839        case E1000_RXBUFFER_8192:
1840                rctl |= E1000_RCTL_SZ_8192;
1841                break;
1842        case E1000_RXBUFFER_16384:
1843                rctl |= E1000_RCTL_SZ_16384;
1844                break;
1845        }
1846
1847        /* This is useful for sniffing bad packets. */
1848        if (adapter->netdev->features & NETIF_F_RXALL) {
1849                /* UPE and MPE will be handled by normal PROMISC logic
1850                 * in e1000_set_rx_mode */
1851                rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1852                         E1000_RCTL_BAM | /* RX All Bcast Pkts */
1853                         E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1854
1855                rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1856                          E1000_RCTL_DPF | /* Allow filtered pause */
1857                          E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1858                /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1859                 * and that breaks VLANs.
1860                 */
1861        }
1862
1863        ew32(RCTL, rctl);
1864}
1865
1866/**
1867 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1868 * @adapter: board private structure
1869 *
1870 * Configure the Rx unit of the MAC after a reset.
1871 **/
1872
1873static void e1000_configure_rx(struct e1000_adapter *adapter)
1874{
1875        u64 rdba;
1876        struct e1000_hw *hw = &adapter->hw;
1877        u32 rdlen, rctl, rxcsum;
1878
1879        if (adapter->netdev->mtu > ETH_DATA_LEN) {
1880                rdlen = adapter->rx_ring[0].count *
1881                        sizeof(struct e1000_rx_desc);
1882                adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1883                adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1884        } else {
1885                rdlen = adapter->rx_ring[0].count *
1886                        sizeof(struct e1000_rx_desc);
1887                adapter->clean_rx = e1000_clean_rx_irq;
1888                adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1889        }
1890
1891        /* disable receives while setting up the descriptors */
1892        rctl = er32(RCTL);
1893        ew32(RCTL, rctl & ~E1000_RCTL_EN);
1894
1895        /* set the Receive Delay Timer Register */
1896        ew32(RDTR, adapter->rx_int_delay);
1897
1898        if (hw->mac_type >= e1000_82540) {
1899                ew32(RADV, adapter->rx_abs_int_delay);
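                    /* the ITR register counts in 256 ns units, so a target of
                     * adapter->itr interrupts/sec becomes 10^9 / (itr * 256) */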
1900                if (adapter->itr_setting != 0)
1901                        ew32(ITR, 1000000000 / (adapter->itr * 256));
1902        }
1903
1904        /* Setup the HW Rx Head and Tail Descriptor Pointers and
1905         * the Base and Length of the Rx Descriptor Ring */
1906        switch (adapter->num_rx_queues) {
1907        case 1:
1908        default:
1909                rdba = adapter->rx_ring[0].dma;
1910                ew32(RDLEN, rdlen);
1911                ew32(RDBAH, (rdba >> 32));
1912                ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1913                ew32(RDT, 0);
1914                ew32(RDH, 0);
1915                adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
1916                adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
1917                break;
1918        }
1919
1920        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1921        if (hw->mac_type >= e1000_82543) {
1922                rxcsum = er32(RXCSUM);
1923                if (adapter->rx_csum)
1924                        rxcsum |= E1000_RXCSUM_TUOFL;
1925                else
1926                        /* don't need to clear IPPCSE as it defaults to 0 */
1927                        rxcsum &= ~E1000_RXCSUM_TUOFL;
1928                ew32(RXCSUM, rxcsum);
1929        }
1930
1931        /* Enable Receives */
1932        ew32(RCTL, rctl | E1000_RCTL_EN);
1933}
1934
1935/**
1936 * e1000_free_tx_resources - Free Tx Resources per Queue
1937 * @adapter: board private structure
1938 * @tx_ring: Tx descriptor ring for a specific queue
1939 *
1940 * Free all transmit software resources
1941 **/
1942
1943static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1944                                    struct e1000_tx_ring *tx_ring)
1945{
1946        struct pci_dev *pdev = adapter->pdev;
1947
1948        e1000_clean_tx_ring(adapter, tx_ring);
1949
1950        vfree(tx_ring->buffer_info);
1951        tx_ring->buffer_info = NULL;
1952
1953        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1954                          tx_ring->dma);
1955
1956        tx_ring->desc = NULL;
1957}
1958
1959/**
1960 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1961 * @adapter: board private structure
1962 *
1963 * Free all transmit software resources
1964 **/
1965
1966void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1967{
1968        int i;
1969
1970        for (i = 0; i < adapter->num_tx_queues; i++)
1971                e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1972}
1973
1974static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1975                                             struct e1000_buffer *buffer_info)
1976{
1977        if (buffer_info->dma) {
1978                if (buffer_info->mapped_as_page)
1979                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1980                                       buffer_info->length, DMA_TO_DEVICE);
1981                else
1982                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1983                                         buffer_info->length,
1984                                         DMA_TO_DEVICE);
1985                buffer_info->dma = 0;
1986        }
1987        if (buffer_info->skb) {
1988                dev_kfree_skb_any(buffer_info->skb);
1989                buffer_info->skb = NULL;
1990        }
1991        buffer_info->time_stamp = 0;
1992        /* buffer_info must be completely set up in the transmit path */
1993}
1994
1995/**
1996 * e1000_clean_tx_ring - Free Tx Buffers
1997 * @adapter: board private structure
1998 * @tx_ring: ring to be cleaned
1999 **/
2000
2001static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2002                                struct e1000_tx_ring *tx_ring)
2003{
2004        struct e1000_hw *hw = &adapter->hw;
2005        struct e1000_buffer *buffer_info;
2006        unsigned long size;
2007        unsigned int i;
2008
2009        /* Free all the Tx ring sk_buffs */
2010
2011        for (i = 0; i < tx_ring->count; i++) {
2012                buffer_info = &tx_ring->buffer_info[i];
2013                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2014        }
2015
2016        netdev_reset_queue(adapter->netdev);
2017        size = sizeof(struct e1000_buffer) * tx_ring->count;
2018        memset(tx_ring->buffer_info, 0, size);
2019
2020        /* Zero out the descriptor ring */
2021
2022        memset(tx_ring->desc, 0, tx_ring->size);
2023
2024        tx_ring->next_to_use = 0;
2025        tx_ring->next_to_clean = 0;
2026        tx_ring->last_tx_tso = false;
2027
2028        writel(0, hw->hw_addr + tx_ring->tdh);
2029        writel(0, hw->hw_addr + tx_ring->tdt);
2030}
2031
2032/**
2033 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2034 * @adapter: board private structure
2035 **/
2036
2037static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2038{
2039        int i;
2040
2041        for (i = 0; i < adapter->num_tx_queues; i++)
2042                e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2043}
2044
2045/**
2046 * e1000_free_rx_resources - Free Rx Resources
2047 * @adapter: board private structure
2048 * @rx_ring: ring to clean the resources from
2049 *
2050 * Free all receive software resources
2051 **/
2052
2053static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2054                                    struct e1000_rx_ring *rx_ring)
2055{
2056        struct pci_dev *pdev = adapter->pdev;
2057
2058        e1000_clean_rx_ring(adapter, rx_ring);
2059
2060        vfree(rx_ring->buffer_info);
2061        rx_ring->buffer_info = NULL;
2062
2063        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2064                          rx_ring->dma);
2065
2066        rx_ring->desc = NULL;
2067}
2068
2069/**
2070 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2071 * @adapter: board private structure
2072 *
2073 * Free all receive software resources
2074 **/
2075
2076void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2077{
2078        int i;
2079
2080        for (i = 0; i < adapter->num_rx_queues; i++)
2081                e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2082}
2083
2084/**
2085 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2086 * @adapter: board private structure
2087 * @rx_ring: ring to free buffers from
2088 **/
2089
2090static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2091                                struct e1000_rx_ring *rx_ring)
2092{
2093        struct e1000_hw *hw = &adapter->hw;
2094        struct e1000_buffer *buffer_info;
2095        struct pci_dev *pdev = adapter->pdev;
2096        unsigned long size;
2097        unsigned int i;
2098
2099        /* Free all the Rx ring sk_buffs */
2100        for (i = 0; i < rx_ring->count; i++) {
2101                buffer_info = &rx_ring->buffer_info[i];
2102                if (buffer_info->dma &&
2103                    adapter->clean_rx == e1000_clean_rx_irq) {
2104                        dma_unmap_single(&pdev->dev, buffer_info->dma,
2105                                         buffer_info->length,
2106                                         DMA_FROM_DEVICE);
2107                } else if (buffer_info->dma &&
2108                           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2109                        dma_unmap_page(&pdev->dev, buffer_info->dma,
2110                                       buffer_info->length,
2111                                       DMA_FROM_DEVICE);
2112                }
2113
2114                buffer_info->dma = 0;
2115                if (buffer_info->page) {
2116                        put_page(buffer_info->page);
2117                        buffer_info->page = NULL;
2118                }
2119                if (buffer_info->skb) {
2120                        dev_kfree_skb(buffer_info->skb);
2121                        buffer_info->skb = NULL;
2122                }
2123        }
2124
2125        /* there may also be some cached data from a chained receive */
2126        if (rx_ring->rx_skb_top) {
2127                dev_kfree_skb(rx_ring->rx_skb_top);
2128                rx_ring->rx_skb_top = NULL;
2129        }
2130
2131        size = sizeof(struct e1000_buffer) * rx_ring->count;
2132        memset(rx_ring->buffer_info, 0, size);
2133
2134        /* Zero out the descriptor ring */
2135        memset(rx_ring->desc, 0, rx_ring->size);
2136
2137        rx_ring->next_to_clean = 0;
2138        rx_ring->next_to_use = 0;
2139
2140        writel(0, hw->hw_addr + rx_ring->rdh);
2141        writel(0, hw->hw_addr + rx_ring->rdt);
2142}
2143
2144/**
2145 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2146 * @adapter: board private structure
2147 **/
2148
2149static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2150{
2151        int i;
2152
2153        for (i = 0; i < adapter->num_rx_queues; i++)
2154                e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2155}
2156
2157/* The 82542 2.0 (revision 2) needs the receive unit held in reset and
2158 * Memory Write Invalidate (MWI) disabled for certain operations
2159 */
2160static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2161{
2162        struct e1000_hw *hw = &adapter->hw;
2163        struct net_device *netdev = adapter->netdev;
2164        u32 rctl;
2165
2166        e1000_pci_clear_mwi(hw);
2167
2168        rctl = er32(RCTL);
2169        rctl |= E1000_RCTL_RST;
2170        ew32(RCTL, rctl);
2171        E1000_WRITE_FLUSH();
2172        mdelay(5);
2173
2174        if (netif_running(netdev))
2175                e1000_clean_all_rx_rings(adapter);
2176}
2177
2178static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2179{
2180        struct e1000_hw *hw = &adapter->hw;
2181        struct net_device *netdev = adapter->netdev;
2182        u32 rctl;
2183
2184        rctl = er32(RCTL);
2185        rctl &= ~E1000_RCTL_RST;
2186        ew32(RCTL, rctl);
2187        E1000_WRITE_FLUSH();
2188        mdelay(5);
2189
2190        if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2191                e1000_pci_set_mwi(hw);
2192
2193        if (netif_running(netdev)) {
2194                /* No need to loop, because 82542 supports only 1 queue */
2195                struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2196                e1000_configure_rx(adapter);
2197                adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2198        }
2199}
2200
2201/**
2202 * e1000_set_mac - Change the Ethernet Address of the NIC
2203 * @netdev: network interface device structure
2204 * @p: pointer to an address structure
2205 *
2206 * Returns 0 on success, negative on failure
2207 **/
2208
2209static int e1000_set_mac(struct net_device *netdev, void *p)
2210{
2211        struct e1000_adapter *adapter = netdev_priv(netdev);
2212        struct e1000_hw *hw = &adapter->hw;
2213        struct sockaddr *addr = p;
2214
2215        if (!is_valid_ether_addr(addr->sa_data))
2216                return -EADDRNOTAVAIL;
2217
2218        /* 82542 2.0 needs to be in reset to write receive address registers */
2219
2220        if (hw->mac_type == e1000_82542_rev2_0)
2221                e1000_enter_82542_rst(adapter);
2222
2223        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2224        memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2225
2226        e1000_rar_set(hw, hw->mac_addr, 0);
2227
2228        if (hw->mac_type == e1000_82542_rev2_0)
2229                e1000_leave_82542_rst(adapter);
2230
2231        return 0;
2232}
2233
2234/**
2235 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2236 * @netdev: network interface device structure
2237 *
2238 * The set_rx_mode entry point is called whenever the unicast or multicast
2239 * address lists or the network interface flags are updated. This routine is
2240 * responsible for configuring the hardware for proper unicast, multicast,
2241 * promiscuous mode, and all-multi behavior.
2242 **/
2243
2244static void e1000_set_rx_mode(struct net_device *netdev)
2245{
2246        struct e1000_adapter *adapter = netdev_priv(netdev);
2247        struct e1000_hw *hw = &adapter->hw;
2248        struct netdev_hw_addr *ha;
2249        bool use_uc = false;
2250        u32 rctl;
2251        u32 hash_value;
2252        int i, rar_entries = E1000_RAR_ENTRIES;
2253        int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2254        u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2255
2256        if (!mcarray) {
2257                e_err(probe, "memory allocation failed\n");
2258                return;
2259        }
2260
2261        /* Check for Promiscuous and All Multicast modes */
2262
2263        rctl = er32(RCTL);
2264
2265        if (netdev->flags & IFF_PROMISC) {
2266                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2267                rctl &= ~E1000_RCTL_VFE;
2268        } else {
2269                if (netdev->flags & IFF_ALLMULTI)
2270                        rctl |= E1000_RCTL_MPE;
2271                else
2272                        rctl &= ~E1000_RCTL_MPE;
2273                /* Enable VLAN filter if there is a VLAN */
2274                if (e1000_vlan_used(adapter))
2275                        rctl |= E1000_RCTL_VFE;
2276        }
2277
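            /* if there are more secondary unicast addresses than exact-match
             * RAR slots (entry 0 is the station address), fall back to unicast
             * promiscuous mode; otherwise they are loaded into the RARs below */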
2278        if (netdev_uc_count(netdev) > rar_entries - 1) {
2279                rctl |= E1000_RCTL_UPE;
2280        } else if (!(netdev->flags & IFF_PROMISC)) {
2281                rctl &= ~E1000_RCTL_UPE;
2282                use_uc = true;
2283        }
2284
2285        ew32(RCTL, rctl);
2286
2287        /* 82542 2.0 needs to be in reset to write receive address registers */
2288
2289        if (hw->mac_type == e1000_82542_rev2_0)
2290                e1000_enter_82542_rst(adapter);
2291
2292        /* load the first 14 addresses into the exact filters 1-14. Unicast
2293         * addresses take precedence over multicast to avoid disabling
2294         * unicast filtering when possible.
2295         *
2296         * RAR 0 is used for the station MAC address; if there are fewer
2297         * than 14 addresses, the remaining filters are cleared below
2298         */
2299        i = 1;
2300        if (use_uc)
2301                netdev_for_each_uc_addr(ha, netdev) {
2302                        if (i == rar_entries)
2303                                break;
2304                        e1000_rar_set(hw, ha->addr, i++);
2305                }
2306
2307        netdev_for_each_mc_addr(ha, netdev) {
2308                if (i == rar_entries) {
2309                        /* load any remaining addresses into the hash table */
2310                        u32 hash_reg, hash_bit, mta;
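                            /* the upper bits of the hash pick one of the 128
                             * 32-bit MTA registers; the low 5 bits pick the
                             * bit within that register */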
2311                        hash_value = e1000_hash_mc_addr(hw, ha->addr);
2312                        hash_reg = (hash_value >> 5) & 0x7F;
2313                        hash_bit = hash_value & 0x1F;
2314                        mta = (1 << hash_bit);
2315                        mcarray[hash_reg] |= mta;
2316                } else {
2317                        e1000_rar_set(hw, ha->addr, i++);
2318                }
2319        }
2320
2321        for (; i < rar_entries; i++) {
2322                E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2323                E1000_WRITE_FLUSH();
2324                E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2325                E1000_WRITE_FLUSH();
2326        }
2327
2328        /* write the whole hash table from the last register down, to avoid
2329         * write-combining chipset issues without flushing every write */
2330        for (i = mta_reg_count - 1; i >= 0 ; i--) {
2331                /*
2332                 * The 82544 has an erratum where writing an odd MTA offset
2333                 * clobbers the preceding even offset; writing the range
2334                 * backwards works around it by always writing the odd
2335                 * offset first
2336                 */
2337                E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2338        }
2339        E1000_WRITE_FLUSH();
2340
2341        if (hw->mac_type == e1000_82542_rev2_0)
2342                e1000_leave_82542_rst(adapter);
2343
2344        kfree(mcarray);
2345}
2346
2347/**
2348 * e1000_update_phy_info_task - get phy info
2349 * @work: work struct contained inside adapter struct
2350 *
2351 * Need to wait a few seconds after link up to get diagnostic information from
2352 * the phy
2353 */
2354static void e1000_update_phy_info_task(struct work_struct *work)
2355{
2356        struct e1000_adapter *adapter = container_of(work,
2357                                                     struct e1000_adapter,
2358                                                     phy_info_task.work);
2359        if (test_bit(__E1000_DOWN, &adapter->flags))
2360                return;
2361        mutex_lock(&adapter->mutex);
2362        e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2363        mutex_unlock(&adapter->mutex);
2364}
2365
2366/**
2367 * e1000_82547_tx_fifo_stall_task - reset the 82547 Tx FIFO once it has drained
2368 * @work: work struct contained inside adapter struct
2369 **/
2370static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2371{
2372        struct e1000_adapter *adapter = container_of(work,
2373                                                     struct e1000_adapter,
2374                                                     fifo_stall_task.work);
2375        struct e1000_hw *hw = &adapter->hw;
2376        struct net_device *netdev = adapter->netdev;
2377        u32 tctl;
2378
2379        if (test_bit(__E1000_DOWN, &adapter->flags))
2380                return;
2381        mutex_lock(&adapter->mutex);
2382        if (atomic_read(&adapter->tx_fifo_stall)) {
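                    /* the Tx FIFO can only be safely reset once both the
                     * descriptor ring and the on-chip FIFO have fully drained
                     * (head == tail for each); then transmits are briefly
                     * disabled while the FIFO pointers are rewound */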
2383                if ((er32(TDT) == er32(TDH)) &&
2384                   (er32(TDFT) == er32(TDFH)) &&
2385                   (er32(TDFTS) == er32(TDFHS))) {
2386                        tctl = er32(TCTL);
2387                        ew32(TCTL, tctl & ~E1000_TCTL_EN);
2388                        ew32(TDFT, adapter->tx_head_addr);
2389                        ew32(TDFH, adapter->tx_head_addr);
2390                        ew32(TDFTS, adapter->tx_head_addr);
2391                        ew32(TDFHS, adapter->tx_head_addr);
2392                        ew32(TCTL, tctl);
2393                        E1000_WRITE_FLUSH();
2394
2395                        adapter->tx_fifo_head = 0;
2396                        atomic_set(&adapter->tx_fifo_stall, 0);
2397                        netif_wake_queue(netdev);
2398                } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2399                        schedule_delayed_work(&adapter->fifo_stall_task, 1);
2400                }
2401        }
2402        mutex_unlock(&adapter->mutex);
2403}
2404
2405bool e1000_has_link(struct e1000_adapter *adapter)
2406{
2407        struct e1000_hw *hw = &adapter->hw;
2408        bool link_active = false;
2409
2410        /* get_link_status is set on LSC (link status) interrupt or rx
2411         * sequence error interrupt (except on intel ce4100).
2412         * get_link_status will stay true until
2413         * e1000_check_for_link establishes link, and that applies to
2414         * copper adapters ONLY
2415         */
2416        switch (hw->media_type) {
2417        case e1000_media_type_copper:
2418                if (hw->mac_type == e1000_ce4100)
2419                        hw->get_link_status = 1;
2420                if (hw->get_link_status) {
2421                        e1000_check_for_link(hw);
2422                        link_active = !hw->get_link_status;
2423                } else {
2424                        link_active = true;
2425                }
2426                break;
2427        case e1000_media_type_fiber:
2428                e1000_check_for_link(hw);
2429                link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2430                break;
2431        case e1000_media_type_internal_serdes:
2432                e1000_check_for_link(hw);
2433                link_active = hw->serdes_has_link;
2434                break;
2435        default:
2436                break;
2437        }
2438
2439        return link_active;
2440}
2441
2442/**
2443 * e1000_watchdog - periodic work function to monitor link, stats and Tx health
2444 * @work: work struct contained inside adapter struct
2445 **/
2446static void e1000_watchdog(struct work_struct *work)
2447{
2448        struct e1000_adapter *adapter = container_of(work,
2449                                                     struct e1000_adapter,
2450                                                     watchdog_task.work);
2451        struct e1000_hw *hw = &adapter->hw;
2452        struct net_device *netdev = adapter->netdev;
2453        struct e1000_tx_ring *txdr = adapter->tx_ring;
2454        u32 link, tctl;
2455
2456        if (test_bit(__E1000_DOWN, &adapter->flags))
2457                return;
2458
2459        mutex_lock(&adapter->mutex);
2460        link = e1000_has_link(adapter);
2461        if (netif_carrier_ok(netdev) && link)
2462                goto link_up;
2463
2464        if (link) {
2465                if (!netif_carrier_ok(netdev)) {
2466                        u32 ctrl;
2467                        bool txb2b = true;
2468                        /* update snapshot of PHY registers on LSC */
2469                        e1000_get_speed_and_duplex(hw,
2470                                                   &adapter->link_speed,
2471                                                   &adapter->link_duplex);
2472
2473                        ctrl = er32(CTRL);
2474                        pr_info("%s NIC Link is Up %d Mbps %s, "
2475                                "Flow Control: %s\n",
2476                                netdev->name,
2477                                adapter->link_speed,
2478                                adapter->link_duplex == FULL_DUPLEX ?
2479                                "Full Duplex" : "Half Duplex",
2480                                ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2481                                E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2482                                E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2483                                E1000_CTRL_TFCE) ? "TX" : "None")));
2484
2485                        /* adjust timeout factor according to speed/duplex */
2486                        adapter->tx_timeout_factor = 1;
2487                        switch (adapter->link_speed) {
2488                        case SPEED_10:
2489                                txb2b = false;
2490                                adapter->tx_timeout_factor = 16;
2491                                break;
2492                        case SPEED_100:
2493                                txb2b = false;
2494                                /* maybe add some timeout factor ? */
2495                                break;
2496                        }
2497
2498                        /* enable transmits in the hardware */
2499                        tctl = er32(TCTL);
2500                        tctl |= E1000_TCTL_EN;
2501                        ew32(TCTL, tctl);
2502
2503                        netif_carrier_on(netdev);
2504                        if (!test_bit(__E1000_DOWN, &adapter->flags))
2505                                schedule_delayed_work(&adapter->phy_info_task,
2506                                                      2 * HZ);
2507                        adapter->smartspeed = 0;
2508                }
2509        } else {
2510                if (netif_carrier_ok(netdev)) {
2511                        adapter->link_speed = 0;
2512                        adapter->link_duplex = 0;
2513                        pr_info("%s NIC Link is Down\n",
2514                                netdev->name);
2515                        netif_carrier_off(netdev);
2516
2517                        if (!test_bit(__E1000_DOWN, &adapter->flags))
2518                                schedule_delayed_work(&adapter->phy_info_task,
2519                                                      2 * HZ);
2520                }
2521
2522                e1000_smartspeed(adapter);
2523        }
2524
2525link_up:
2526        e1000_update_stats(adapter);
2527
2528        hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2529        adapter->tpt_old = adapter->stats.tpt;
2530        hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2531        adapter->colc_old = adapter->stats.colc;
2532
2533        adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2534        adapter->gorcl_old = adapter->stats.gorcl;
2535        adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2536        adapter->gotcl_old = adapter->stats.gotcl;
2537
2538        e1000_update_adaptive(hw);
2539
2540        if (!netif_carrier_ok(netdev)) {
2541                if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2542                        /* We've lost link, so the controller stops DMA,
2543                         * but we've got queued Tx work that's never going
2544                         * to get done, so reset controller to flush Tx.
2545                         * (Do the reset outside of interrupt context). */
2546                        adapter->tx_timeout_count++;
2547                        schedule_work(&adapter->reset_task);
2548                        /* exit immediately since reset is imminent */
2549                        goto unlock;
2550                }
2551        }
2552
2553        /* Simple mode for Interrupt Throttle Rate (ITR) */
2554        if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2555                /*
2556                 * Symmetric Tx/Rx gets a reduced ITR=2000;
2557                 * Total asymmetrical Tx or Rx gets ITR=8000;
2558                 * everyone else is between 2000-8000.
2559                 */
2560                u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2561                u32 dif = (adapter->gotcl > adapter->gorcl ?
2562                            adapter->gotcl - adapter->gorcl :
2563                            adapter->gorcl - adapter->gotcl) / 10000;
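                    /* dif/goc measures how lopsided the traffic is (0 when
                     * symmetric, 1 when entirely one direction), so itr
                     * interpolates linearly between 2000 and 8000 ints/sec */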
2564                u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2565
2566                ew32(ITR, 1000000000 / (itr * 256));
2567        }
2568
2569        /* Cause software interrupt to ensure rx ring is cleaned */
2570        ew32(ICS, E1000_ICS_RXDMT0);
2571
2572        /* Force detection of hung controller every watchdog period */
2573        adapter->detect_tx_hung = true;
2574
2575        /* Reschedule the task */
2576        if (!test_bit(__E1000_DOWN, &adapter->flags))
2577                schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2578
2579unlock:
2580        mutex_unlock(&adapter->mutex);
2581}
2582
2583enum latency_range {
2584        lowest_latency = 0,
2585        low_latency = 1,
2586        bulk_latency = 2,
2587        latency_invalid = 255
2588};
2589
2590/**
2591 * e1000_update_itr - update the dynamic ITR value based on statistics
2592 * @adapter: pointer to adapter
2593 * @itr_setting: current adapter->itr
2594 * @packets: the number of packets during this measurement interval
2595 * @bytes: the number of bytes during this measurement interval
2596 *
2597 *      Returns a new ITR value based on packet and byte
2598 *      counts during the last interrupt.  The advantage of per interrupt
2599 *      computation is faster updates and more accurate ITR for the current
2600 *      traffic pattern.  Constants in this function were computed
2601 *      based on theoretical maximum wire speed and thresholds were set based
2602 *      on testing data as well as attempting to minimize response time
2603 *      while increasing bulk throughput.
2604 *      this functionality is controlled by the InterruptThrottleRate module
2605 *      parameter (see e1000_param.c)
2606 **/
2607static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2608                                     u16 itr_setting, int packets, int bytes)
2609{
2610        unsigned int retval = itr_setting;
2611        struct e1000_hw *hw = &adapter->hw;
2612
2613        if (unlikely(hw->mac_type < e1000_82540))
2614                goto update_itr_done;
2615
2616        if (packets == 0)
2617                goto update_itr_done;
2618
2619        switch (itr_setting) {
2620        case lowest_latency:
2621                /* jumbo frames get bulk treatment */
2622                if (bytes/packets > 8000)
2623                        retval = bulk_latency;
2624                else if ((packets < 5) && (bytes > 512))
2625                        retval = low_latency;
2626                break;
2627        case low_latency:  /* 50 usec aka 20000 ints/s */
2628                if (bytes > 10000) {
2629                        /* jumbo frames need bulk latency setting */
2630                        if (bytes/packets > 8000)
2631                                retval = bulk_latency;
2632                        else if ((packets < 10) || ((bytes/packets) > 1200))
2633                                retval = bulk_latency;
2634                        else if (packets > 35)
2635                                retval = lowest_latency;
2636                } else if (bytes/packets > 2000)
2637                        retval = bulk_latency;
2638                else if (packets <= 2 && bytes < 512)
2639                        retval = lowest_latency;
2640                break;
2641        case bulk_latency: /* 250 usec aka 4000 ints/s */
2642                if (bytes > 25000) {
2643                        if (packets > 35)
2644                                retval = low_latency;
2645                } else if (bytes < 6000) {
2646                        retval = low_latency;
2647                }
2648                break;
2649        }
2650
2651update_itr_done:
2652        return retval;
2653}
2654
2655static void e1000_set_itr(struct e1000_adapter *adapter)
2656{
2657        struct e1000_hw *hw = &adapter->hw;
2658        u16 current_itr;
2659        u32 new_itr = adapter->itr;
2660
2661        if (unlikely(hw->mac_type < e1000_82540))
2662                return;
2663
2664        /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2665        if (unlikely(adapter->link_speed != SPEED_1000)) {
2666                current_itr = 0;
2667                new_itr = 4000;
2668                goto set_itr_now;
2669        }
2670
2671        adapter->tx_itr = e1000_update_itr(adapter,
2672                                    adapter->tx_itr,
2673                                    adapter->total_tx_packets,
2674                                    adapter->total_tx_bytes);
2675        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2676        if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2677                adapter->tx_itr = low_latency;
2678
2679        adapter->rx_itr = e1000_update_itr(adapter,
2680                                    adapter->rx_itr,
2681                                    adapter->total_rx_packets,
2682                                    adapter->total_rx_bytes);
2683        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2684        if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2685                adapter->rx_itr = low_latency;
2686
2687        current_itr = max(adapter->rx_itr, adapter->tx_itr);
2688
2689        switch (current_itr) {
2690        /* counts and packets in update_itr are dependent on these numbers */
2691        case lowest_latency:
2692                new_itr = 70000;
2693                break;
2694        case low_latency:
2695                new_itr = 20000; /* aka hwitr = ~200 */
2696                break;
2697        case bulk_latency:
2698                new_itr = 4000;
2699                break;
2700        default:
2701                break;
2702        }
2703
2704set_itr_now:
2705        if (new_itr != adapter->itr) {
2706                /* this attempts to bias the interrupt rate towards Bulk
2707                 * by adding intermediate steps when interrupt rate is
2708                 * increasing */
2709                new_itr = new_itr > adapter->itr ?
2710                             min(adapter->itr + (new_itr >> 2), new_itr) :
2711                             new_itr;
2712                adapter->itr = new_itr;
2713                ew32(ITR, 1000000000 / (new_itr * 256));
2714        }
2715}
2716
2717#define E1000_TX_FLAGS_CSUM             0x00000001
2718#define E1000_TX_FLAGS_VLAN             0x00000002
2719#define E1000_TX_FLAGS_TSO              0x00000004
2720#define E1000_TX_FLAGS_IPV4             0x00000008
2721#define E1000_TX_FLAGS_NO_FCS           0x00000010
2722#define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2723#define E1000_TX_FLAGS_VLAN_SHIFT       16
2724
2725static int e1000_tso(struct e1000_adapter *adapter,
2726                     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2727{
2728        struct e1000_context_desc *context_desc;
2729        struct e1000_buffer *buffer_info;
2730        unsigned int i;
2731        u32 cmd_length = 0;
2732        u16 ipcse = 0, tucse, mss;
2733        u8 ipcss, ipcso, tucss, tucso, hdr_len;
2734        int err;
2735
2736        if (skb_is_gso(skb)) {
2737                if (skb_header_cloned(skb)) {
2738                        err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2739                        if (err)
2740                                return err;
2741                }
2742
2743                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2744                mss = skb_shinfo(skb)->gso_size;
2745                if (skb->protocol == htons(ETH_P_IP)) {
2746                        struct iphdr *iph = ip_hdr(skb);
2747                        iph->tot_len = 0;
2748                        iph->check = 0;
2749                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2750                                                                 iph->daddr, 0,
2751                                                                 IPPROTO_TCP,
2752                                                                 0);
2753                        cmd_length = E1000_TXD_CMD_IP;
2754                        ipcse = skb_transport_offset(skb) - 1;
2755                } else if (skb->protocol == htons(ETH_P_IPV6)) {
2756                        ipv6_hdr(skb)->payload_len = 0;
2757                        tcp_hdr(skb)->check =
2758                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2759                                                 &ipv6_hdr(skb)->daddr,
2760                                                 0, IPPROTO_TCP, 0);
2761                        ipcse = 0;
2762                }
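                    /* the context descriptor tells the hardware where the IP
                     * and TCP headers begin and end (ipcss/ipcse, tucss/tucse)
                     * and where to insert each recomputed checksum
                     * (ipcso, tucso) */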
2763                ipcss = skb_network_offset(skb);
2764                ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2765                tucss = skb_transport_offset(skb);
2766                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2767                tucse = 0;
2768
2769                cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2770                               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2771
2772                i = tx_ring->next_to_use;
2773                context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2774                buffer_info = &tx_ring->buffer_info[i];
2775
2776                context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2777                context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2778                context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2779                context_desc->upper_setup.tcp_fields.tucss = tucss;
2780                context_desc->upper_setup.tcp_fields.tucso = tucso;
2781                context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2782                context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2783                context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2784                context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2785
2786                buffer_info->time_stamp = jiffies;
2787                buffer_info->next_to_watch = i;
2788
2789                if (++i == tx_ring->count) i = 0;
2790                tx_ring->next_to_use = i;
2791
2792                return true;
2793        }
2794        return false;
2795}
2796
2797static bool e1000_tx_csum(struct e1000_adapter *adapter,
2798                          struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2799{
2800        struct e1000_context_desc *context_desc;
2801        struct e1000_buffer *buffer_info;
2802        unsigned int i;
2803        u8 css;
2804        u32 cmd_len = E1000_TXD_CMD_DEXT;
2805
2806        if (skb->ip_summed != CHECKSUM_PARTIAL)
2807                return false;
2808
2809        switch (skb->protocol) {
2810        case cpu_to_be16(ETH_P_IP):
2811                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2812                        cmd_len |= E1000_TXD_CMD_TCP;
2813                break;
2814        case cpu_to_be16(ETH_P_IPV6):
2815                /* XXX not handling all IPV6 headers */
2816                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2817                        cmd_len |= E1000_TXD_CMD_TCP;
2818                break;
2819        default:
2820                if (unlikely(net_ratelimit()))
2821                        e_warn(drv, "checksum_partial proto=%x!\n",
2822                               skb->protocol);
2823                break;
2824        }
2825
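            /* program a checksum context descriptor: tucss is where
             * checksumming starts and tucso (start + csum_offset) is where
             * the hardware inserts the result */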
2826        css = skb_checksum_start_offset(skb);
2827
2828        i = tx_ring->next_to_use;
2829        buffer_info = &tx_ring->buffer_info[i];
2830        context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2831
2832        context_desc->lower_setup.ip_config = 0;
2833        context_desc->upper_setup.tcp_fields.tucss = css;
2834        context_desc->upper_setup.tcp_fields.tucso =
2835                css + skb->csum_offset;
2836        context_desc->upper_setup.tcp_fields.tucse = 0;
2837        context_desc->tcp_seg_setup.data = 0;
2838        context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2839
2840        buffer_info->time_stamp = jiffies;
2841        buffer_info->next_to_watch = i;
2842
2843        if (unlikely(++i == tx_ring->count)) i = 0;
2844        tx_ring->next_to_use = i;
2845
2846        return true;
2847}
2848
2849#define E1000_MAX_TXD_PWR       12
2850#define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2851
2852static int e1000_tx_map(struct e1000_adapter *adapter,
2853                        struct e1000_tx_ring *tx_ring,
2854                        struct sk_buff *skb, unsigned int first,
2855                        unsigned int max_per_txd, unsigned int nr_frags,
2856                        unsigned int mss)
2857{
2858        struct e1000_hw *hw = &adapter->hw;
2859        struct pci_dev *pdev = adapter->pdev;
2860        struct e1000_buffer *buffer_info;
2861        unsigned int len = skb_headlen(skb);
2862        unsigned int offset = 0, size, count = 0, i;
2863        unsigned int f, bytecount, segs;
2864
2865        i = tx_ring->next_to_use;
2866
2867        while (len) {
2868                buffer_info = &tx_ring->buffer_info[i];
2869                size = min(len, max_per_txd);
2870                /* Workaround for Controller erratum --
2871                 * descriptor for non-tso packet in a linear SKB that follows a
2872                 * tso gets written back prematurely before the data is fully
2873                 * DMA'd to the controller */
2874                if (!skb->data_len && tx_ring->last_tx_tso &&
2875                    !skb_is_gso(skb)) {
2876                        tx_ring->last_tx_tso = false;
2877                        size -= 4;
2878                }
2879
2880                /* Workaround for premature desc write-backs
2881                 * in TSO mode.  Append 4-byte sentinel desc */
2882                if (unlikely(mss && !nr_frags && size == len && size > 8))
2883                        size -= 4;
2884                /* work-around for errata 10 and it applies
2885                 * to all controllers in PCI-X mode
2886                 * The fix is to make sure that the first descriptor of a
2887                 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2888                 */
2889                if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2890                                (size > 2015) && count == 0))
2891                        size = 2015;
2892
2893                /* Workaround for potential 82544 hang in PCI-X.  Avoid
2894                 * terminating buffers within evenly-aligned dwords. */
2895                if (unlikely(adapter->pcix_82544 &&
2896                   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2897                   size > 4))
2898                        size -= 4;
2899
2900                buffer_info->length = size;
2901                /* set time_stamp *before* dma to help avoid a possible race */
2902                buffer_info->time_stamp = jiffies;
2903                buffer_info->mapped_as_page = false;
2904                buffer_info->dma = dma_map_single(&pdev->dev,
2905                                                  skb->data + offset,
2906                                                  size, DMA_TO_DEVICE);
2907                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2908                        goto dma_error;
2909                buffer_info->next_to_watch = i;
2910
2911                len -= size;
2912                offset += size;
2913                count++;
2914                if (len) {
2915                        i++;
2916                        if (unlikely(i == tx_ring->count))
2917                                i = 0;
2918                }
2919        }
2920
2921        for (f = 0; f < nr_frags; f++) {
2922                const struct skb_frag_struct *frag;
2923
2924                frag = &skb_shinfo(skb)->frags[f];
2925                len = skb_frag_size(frag);
2926                offset = 0;
2927
2928                while (len) {
2929                        unsigned long bufend;
2930                        i++;
2931                        if (unlikely(i == tx_ring->count))
2932                                i = 0;
2933
2934                        buffer_info = &tx_ring->buffer_info[i];
2935                        size = min(len, max_per_txd);
2936                        /* Workaround for premature desc write-backs
2937                         * in TSO mode.  Append 4-byte sentinel desc */
2938                        if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2939                                size -= 4;
2940                        /* Workaround for potential 82544 hang in PCI-X.
2941                         * Avoid terminating buffers within evenly-aligned
2942                         * dwords. */
2943                        bufend = (unsigned long)
2944                                page_to_phys(skb_frag_page(frag));
2945                        bufend += offset + size - 1;
2946                        if (unlikely(adapter->pcix_82544 &&
2947                                     !(bufend & 4) &&
2948                                     size > 4))
2949                                size -= 4;
2950
2951                        buffer_info->length = size;
2952                        buffer_info->time_stamp = jiffies;
2953                        buffer_info->mapped_as_page = true;
2954                        buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2955                                                offset, size, DMA_TO_DEVICE);
2956                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2957                                goto dma_error;
2958                        buffer_info->next_to_watch = i;
2959
2960                        len -= size;
2961                        offset += size;
2962                        count++;
2963                }
2964        }
2965
2966        segs = skb_shinfo(skb)->gso_segs ?: 1;
2967        /* multiply data chunks by size of headers */
2968        bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2969
2970        tx_ring->buffer_info[i].skb = skb;
2971        tx_ring->buffer_info[i].segs = segs;
2972        tx_ring->buffer_info[i].bytecount = bytecount;
2973        tx_ring->buffer_info[first].next_to_watch = i;
2974
2975        return count;
2976
2977dma_error:
2978        dev_err(&pdev->dev, "TX DMA map failed\n");
2979        buffer_info->dma = 0;
2980        if (count)
2981                count--;
2982
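        /* unwind: step backwards through the ring and release the DMA
         * mappings that were created before the failure
         */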
2983        while (count--) {
2984                if (i == 0)
2985                        i += tx_ring->count;
2986                i--;
2987                buffer_info = &tx_ring->buffer_info[i];
2988                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2989        }
2990
2991        return 0;
2992}
2993
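/**
 * e1000_tx_queue - write descriptors for the mapped buffers and bump the tail
 * @adapter: board private structure
 * @tx_ring: ring to place the descriptors on
 * @tx_flags: E1000_TX_FLAGS_* bits describing the offloads in use
 * @count: number of buffers mapped by e1000_tx_map()
 *
 * Builds one data descriptor per mapped buffer, marks the last one with the
 * adapter's end-of-packet command bits and then writes the new tail so the
 * hardware starts fetching.
 **/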
2994static void e1000_tx_queue(struct e1000_adapter *adapter,
2995                           struct e1000_tx_ring *tx_ring, int tx_flags,
2996                           int count)
2997{
2998        struct e1000_hw *hw = &adapter->hw;
2999        struct e1000_tx_desc *tx_desc = NULL;
3000        struct e1000_buffer *buffer_info;
3001        u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3002        unsigned int i;
3003
3004        if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3005                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3006                             E1000_TXD_CMD_TSE;
3007                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3008
3009                if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3010                        txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3011        }
3012
3013        if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3014                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3015                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3016        }
3017
3018        if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3019                txd_lower |= E1000_TXD_CMD_VLE;
3020                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3021        }
3022
3023        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3024                txd_lower &= ~(E1000_TXD_CMD_IFCS);
3025
3026        i = tx_ring->next_to_use;
3027
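        /* build one (extended or legacy) data descriptor per mapped buffer */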
3028        while (count--) {
3029                buffer_info = &tx_ring->buffer_info[i];
3030                tx_desc = E1000_TX_DESC(*tx_ring, i);
3031                tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3032                tx_desc->lower.data =
3033                        cpu_to_le32(txd_lower | buffer_info->length);
3034                tx_desc->upper.data = cpu_to_le32(txd_upper);
3035                if (unlikely(++i == tx_ring->count))
                        i = 0;
3036        }
3037
3038        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3039
3040        /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3041        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3042                tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3043
3044        /* Force memory writes to complete before letting h/w
3045         * know there are new descriptors to fetch.  (Only
3046         * applicable for weak-ordered memory model archs,
3047         * such as IA-64). */
3048        wmb();
3049
3050        tx_ring->next_to_use = i;
3051        writel(i, hw->hw_addr + tx_ring->tdt);
3052        /* we need this if more than one processor can write to our tail
3053         * at a time; it synchronizes IO on IA64/Altix systems */
3054        mmiowb();
3055}
3056
3057/* 82547 workaround to avoid controller hang in half-duplex environment.
3058 * The workaround is to avoid queuing a large packet that would span
3059 * the internal Tx FIFO ring boundary by notifying the stack to resend
3060 * the packet at a later time.  This gives the Tx FIFO an opportunity to
3061 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3062 * to the beginning of the Tx FIFO.
3063 */
3064
3065#define E1000_FIFO_HDR                  0x10
3066#define E1000_82547_PAD_LEN             0x3E0
3067
3068static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3069                                       struct sk_buff *skb)
3070{
3071        u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3072        u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3073
3074        skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3075
3076        if (adapter->link_duplex != HALF_DUPLEX)
3077                goto no_fifo_stall_required;
3078
3079        if (atomic_read(&adapter->tx_fifo_stall))
3080                return 1;
3081
3082        if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3083                atomic_set(&adapter->tx_fifo_stall, 1);
3084                return 1;
3085        }
3086
3087no_fifo_stall_required:
3088        adapter->tx_fifo_head += skb_fifo_len;
3089        if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3090                adapter->tx_fifo_head -= adapter->tx_fifo_size;
3091        return 0;
3092}
3093
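/**
 * __e1000_maybe_stop_tx - stop the Tx queue until descriptors free up
 * @netdev: network interface device structure
 * @size: number of descriptors needed for the pending packet
 *
 * Stops the queue, then re-checks the ring in case another CPU freed
 * descriptors in the meantime; if so the queue is restarted.  Returns
 * -EBUSY if the queue remains stopped, 0 otherwise.
 **/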
3094static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3095{
3096        struct e1000_adapter *adapter = netdev_priv(netdev);
3097        struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3098
3099        netif_stop_queue(netdev);
3100        /* Herbert's original patch had:
3101         *  smp_mb__after_netif_stop_queue();
3102         * but since that doesn't exist yet, just open code it. */
3103        smp_mb();
3104
3105        /* We need to check again in case another CPU has just
3106         * made room available. */
3107        if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3108                return -EBUSY;
3109
3110        /* A reprieve! */
3111        netif_start_queue(netdev);
3112        ++adapter->restart_queue;
3113        return 0;
3114}
3115
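/**
 * e1000_maybe_stop_tx - fast path check for Tx ring space
 * @netdev: network interface device structure
 * @tx_ring: ring being checked
 * @size: number of descriptors needed
 *
 * Returns 0 when enough descriptors are already free, otherwise falls back
 * to __e1000_maybe_stop_tx().
 **/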
3116static int e1000_maybe_stop_tx(struct net_device *netdev,
3117                               struct e1000_tx_ring *tx_ring, int size)
3118{
3119        if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3120                return 0;
3121        return __e1000_maybe_stop_tx(netdev, size);
3122}
3123
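/* conservative count of descriptors needed for a buffer of S bytes when
 * each descriptor carries at most 2^X bytes, e.g. S = 4095 with
 * X = E1000_MAX_TXD_PWR (12): (4095 >> 12) + 1 = 1 descriptor
 */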
3124#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
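/**
 * e1000_xmit_frame - transmit entry point called by the network stack
 * @skb: packet to transmit
 * @netdev: network interface device structure
 *
 * Applies the controller-specific padding and alignment workarounds,
 * reserves enough descriptors (stopping the queue if the ring is too full),
 * sets up the TSO or checksum offload context, maps the buffers and hands
 * the descriptors to the hardware.
 **/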
3125static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3126                                    struct net_device *netdev)
3127{
3128        struct e1000_adapter *adapter = netdev_priv(netdev);
3129        struct e1000_hw *hw = &adapter->hw;
3130        struct e1000_tx_ring *tx_ring;
3131        unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3132        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3133        unsigned int tx_flags = 0;
3134        unsigned int len = skb_headlen(skb);
3135        unsigned int nr_frags;
3136        unsigned int mss;
3137        int count = 0;
3138        int tso;
3139        unsigned int f;
3140
3141        /* This goes back to the question of how to logically map a tx queue
3142         * to a flow.  Right now, performance is impacted slightly negatively
3143         * if using multiple tx queues.  If the stack breaks away from a
3144         * single qdisc implementation, we can look at this again. */
3145        tx_ring = adapter->tx_ring;
3146
3147        if (unlikely(skb->len <= 0)) {
3148                dev_kfree_skb_any(skb);
3149                return NETDEV_TX_OK;
3150        }
3151
3152        /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3153         * packets may get corrupted during padding by HW.
3154         * To work around this issue, pad all small packets manually.
3155         */
3156        if (skb->len < ETH_ZLEN) {
3157                if (skb_pad(skb, ETH_ZLEN - skb->len))
3158                        return NETDEV_TX_OK;
3159                skb->len = ETH_ZLEN;
3160                skb_set_tail_pointer(skb, ETH_ZLEN);
3161        }
3162
3163        mss = skb_shinfo(skb)->gso_size;
3164        /* The controller does a simple calculation to
3165         * make sure there is enough room in the FIFO before
3166         * initiating the DMA for each buffer.  The calc is:
3167         * 4 = ceil(buffer len/mss).  To make sure we don't
3168         * overrun the FIFO, adjust the max buffer len if mss
3169         * drops. */
3170        if (mss) {
3171                u8 hdr_len;
3172                max_per_txd = min(mss << 2, max_per_txd);
3173                max_txd_pwr = fls(max_per_txd) - 1;
3174
3175                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3176                if (skb->data_len && hdr_len == len) {
3177                        switch (hw->mac_type) {
3178                                unsigned int pull_size;
3179                        case e1000_82544:
3180                                /* Make sure we have room to chop off 4 bytes,
3181                                 * and that the end alignment will work out to
3182                                 * this hardware's requirements
3183                                 * NOTE: this is a TSO only workaround
3184                                 * if end byte alignment not correct move us
3185                                 * into the next dword */
3186                                if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3187                                        break;
3188                                /* pull some paged data into the linear area */
3189                                pull_size = min((unsigned int)4, skb->data_len);
3190                                if (!__pskb_pull_tail(skb, pull_size)) {
3191                                        e_err(drv, "__pskb_pull_tail "
3192                                              "failed.\n");
3193                                        dev_kfree_skb_any(skb);
3194                                        return NETDEV_TX_OK;
3195                                }
3196                                len = skb_headlen(skb);
3197                                break;
3198                        default:
3199                                /* do nothing */
3200                                break;
3201                        }
3202                }
3203        }
3204
3205        /* reserve a descriptor for the offload context */
3206        if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3207                count++;
3208        count++;
3209
3210        /* Controller Erratum workaround */
3211        if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3212                count++;
3213
3214        count += TXD_USE_COUNT(len, max_txd_pwr);
3215
3216        if (adapter->pcix_82544)
3217                count++;
3218
3219        /* work-around for errata 10 and it applies to all controllers
3220         * in PCI-X mode, so add one more descriptor to the count
3221         */
3222        if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3223                        (len > 2015)))
3224                count++;
3225
3226        nr_frags = skb_shinfo(skb)->nr_frags;
3227        for (f = 0; f < nr_frags; f++)
3228                count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3229                                       max_txd_pwr);
3230        if (adapter->pcix_82544)
3231                count += nr_frags;
3232
3233        /* need: count + 2 desc gap to keep tail from touching
3234         * head, otherwise try next time */
3235        if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3236                return NETDEV_TX_BUSY;
3237
3238        if (unlikely((hw->mac_type == e1000_82547) &&
3239                     (e1000_82547_fifo_workaround(adapter, skb)))) {
3240                netif_stop_queue(netdev);
3241                if (!test_bit(__E1000_DOWN, &adapter->flags))
3242                        schedule_delayed_work(&adapter->fifo_stall_task, 1);
3243                return NETDEV_TX_BUSY;
3244        }
3245
3246        if (vlan_tx_tag_present(skb)) {
3247                tx_flags |= E1000_TX_FLAGS_VLAN;
3248                tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3249        }
3250
3251        first = tx_ring->next_to_use;
3252
3253        tso = e1000_tso(adapter, tx_ring, skb);
3254        if (tso < 0) {
3255                dev_kfree_skb_any(skb);
3256                return NETDEV_TX_OK;
3257        }
3258
3259        if (likely(tso)) {
3260                if (likely(hw->mac_type != e1000_82544))
3261                        tx_ring->last_tx_tso = true;
3262                tx_flags |= E1000_TX_FLAGS_TSO;
3263        } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3264                tx_flags |= E1000_TX_FLAGS_CSUM;
3265
3266        if (likely(skb->protocol == htons(ETH_P_IP)))
3267                tx_flags |= E1000_TX_FLAGS_IPV4;
3268
3269        if (unlikely(skb->no_fcs))
3270                tx_flags |= E1000_TX_FLAGS_NO_FCS;
3271
3272        count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3273                             nr_frags, mss);
3274
3275        if (count) {
3276                netdev_sent_queue(netdev, skb->len);
3277                skb_tx_timestamp(skb);
3278
3279                e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3280                /* Make sure there is space in the ring for the next send. */
3281                e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3282
3283        } else {
3284                dev_kfree_skb_any(skb);
3285                tx_ring->buffer_info[first].time_stamp = 0;
3286                tx_ring->next_to_use = first;
3287        }
3288
3289        return NETDEV_TX_OK;
3290}
3291
3292#define NUM_REGS 38 /* 1 based count */
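/**
 * e1000_regdump - dump a fixed set of MAC registers to the kernel log
 * @adapter: board private structure
 **/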
3293static void e1000_regdump(struct e1000_adapter *adapter)
3294{
3295        struct e1000_hw *hw = &adapter->hw;
3296        u32 regs[NUM_REGS];
3297        u32 *regs_buff = regs;
3298        int i = 0;
3299
3300        static const char * const reg_name[] = {
3301                "CTRL",  "STATUS",
3302                "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3303                "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3304                "TIDV", "TXDCTL", "TADV", "TARC0",
3305                "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3306                "TXDCTL1", "TARC1",
3307                "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3308                "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3309                "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3310        };
3311
3312        regs_buff[0]  = er32(CTRL);
3313        regs_buff[1]  = er32(STATUS);
3314
3315        regs_buff[2]  = er32(RCTL);
3316        regs_buff[3]  = er32(RDLEN);
3317        regs_buff[4]  = er32(RDH);
3318        regs_buff[5]  = er32(RDT);
3319        regs_buff[6]  = er32(RDTR);
3320
3321        regs_buff[7]  = er32(TCTL);
3322        regs_buff[8]  = er32(TDBAL);
3323        regs_buff[9]  = er32(TDBAH);
3324        regs_buff[10] = er32(TDLEN);
3325        regs_buff[11] = er32(TDH);
3326        regs_buff[12] = er32(TDT);
3327        regs_buff[13] = er32(TIDV);
3328        regs_buff[14] = er32(TXDCTL);
3329        regs_buff[15] = er32(TADV);
3330        regs_buff[16] = er32(TARC0);
3331
3332        regs_buff[17] = er32(TDBAL1);
3333        regs_buff[18] = er32(TDBAH1);
3334        regs_buff[19] = er32(TDLEN1);
3335        regs_buff[20] = er32(TDH1);
3336        regs_buff[21] = er32(TDT1);
3337        regs_buff[22] = er32(TXDCTL1);
3338        regs_buff[23] = er32(TARC1);
3339        regs_buff[24] = er32(CTRL_EXT);
3340        regs_buff[25] = er32(ERT);
3341        regs_buff[26] = er32(RDBAL0);
3342        regs_buff[27] = er32(RDBAH0);
3343        regs_buff[28] = er32(TDFH);
3344        regs_buff[29] = er32(TDFT);
3345        regs_buff[30] = er32(TDFHS);
3346        regs_buff[31] = er32(TDFTS);
3347        regs_buff[32] = er32(TDFPC);
3348        regs_buff[33] = er32(RDFH);
3349        regs_buff[34] = er32(RDFT);
3350        regs_buff[35] = er32(RDFHS);
3351        regs_buff[36] = er32(RDFTS);
3352        regs_buff[37] = er32(RDFPC);
3353
3354        pr_info("Register dump\n");
3355        for (i = 0; i < NUM_REGS; i++)
3356                pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3357}
3358
3359/**
3360 * e1000_dump - Print registers, tx ring and rx ring
3361 **/
3362static void e1000_dump(struct e1000_adapter *adapter)
3363{
3364        /* this code doesn't handle multiple rings */
3365        struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3366        struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3367        int i;
3368
3369        if (!netif_msg_hw(adapter))
3370                return;
3371
3372        /* Print Registers */
3373        e1000_regdump(adapter);
3374
3375        /*
3376         * transmit dump
3377         */
3378        pr_info("TX Desc ring0 dump\n");
3379
3380        /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3381         *
3382         * Legacy Transmit Descriptor
3383         *   +--------------------------------------------------------------+
3384         * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3385         *   +--------------------------------------------------------------+
3386         * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3387         *   +--------------------------------------------------------------+
3388         *   63       48 47        36 35    32 31     24 23    16 15        0
3389         *
3390         * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3391         *   63      48 47    40 39       32 31             16 15    8 7      0
3392         *   +----------------------------------------------------------------+
3393         * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3394         *   +----------------------------------------------------------------+
3395         * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3396         *   +----------------------------------------------------------------+
3397         *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3398         *
3399         * Extended Data Descriptor (DTYP=0x1)
3400         *   +----------------------------------------------------------------+
3401         * 0 |                     Buffer Address [63:0]                      |
3402         *   +----------------------------------------------------------------+
3403         * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3404         *   +----------------------------------------------------------------+
3405         *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3406         */
3407        pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3408        pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3409
3410        if (!netif_msg_tx_done(adapter))
3411                goto rx_ring_summary;
3412
3413        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3414                struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3415                struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3416                struct my_u { __le64 a; __le64 b; };
3417                struct my_u *u = (struct my_u *)tx_desc;
3418                const char *type;
3419
3420                if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3421                        type = "NTC/U";
3422                else if (i == tx_ring->next_to_use)
3423                        type = "NTU";
3424                else if (i == tx_ring->next_to_clean)
3425                        type = "NTC";
3426                else
3427                        type = "";
3428
3429                pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3430                        ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3431                        le64_to_cpu(u->a), le64_to_cpu(u->b),
3432                        (u64)buffer_info->dma, buffer_info->length,
3433                        buffer_info->next_to_watch,
3434                        (u64)buffer_info->time_stamp, buffer_info->skb, type);
3435        }
3436
3437rx_ring_summary:
3438        /*
3439         * receive dump
3440         */
3441        pr_info("\nRX Desc ring dump\n");
3442
3443        /* Legacy Receive Descriptor Format
3444         *
3445         * +-----------------------------------------------------+
3446         * |                Buffer Address [63:0]                |
3447         * +-----------------------------------------------------+
3448         * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3449         * +-----------------------------------------------------+
3450         * 63       48 47    40 39      32 31         16 15      0
3451         */
3452        pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3453
3454        if (!netif_msg_rx_status(adapter))
3455                goto exit;
3456
3457        for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3458                struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3459                struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3460                struct my_u { __le64 a; __le64 b; };
3461                struct my_u *u = (struct my_u *)rx_desc;
3462                const char *type;
3463
3464                if (i == rx_ring->next_to_use)
3465                        type = "NTU";
3466                else if (i == rx_ring->next_to_clean)
3467                        type = "NTC";
3468                else
3469                        type = "";
3470
3471                pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3472                        i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3473                        (u64)buffer_info->dma, buffer_info->skb, type);
3474        } /* for */
3475
3476        /* dump the descriptor caches */
3477        /* rx */
3478        pr_info("Rx descriptor cache in 64bit format\n");
3479        for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3480                pr_info("R%04X: %08X|%08X %08X|%08X\n",
3481                        i,
3482                        readl(adapter->hw.hw_addr + i+4),
3483                        readl(adapter->hw.hw_addr + i),
3484                        readl(adapter->hw.hw_addr + i+12),
3485                        readl(adapter->hw.hw_addr + i+8));
3486        }
3487        /* tx */
3488        pr_info("Tx descriptor cache in 64bit format\n");
3489        for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3490                pr_info("T%04X: %08X|%08X %08X|%08X\n",
3491                        i,
3492                        readl(adapter->hw.hw_addr + i+4),
3493                        readl(adapter->hw.hw_addr + i),
3494                        readl(adapter->hw.hw_addr + i+12),
3495                        readl(adapter->hw.hw_addr + i+8));
3496        }
3497exit:
3498        return;
3499}
3500
3501/**
3502 * e1000_tx_timeout - Respond to a Tx Hang
3503 * @netdev: network interface device structure
3504 **/
3505
3506static void e1000_tx_timeout(struct net_device *netdev)
3507{
3508        struct e1000_adapter *adapter = netdev_priv(netdev);
3509
3510        /* Do the reset outside of interrupt context */
3511        adapter->tx_timeout_count++;
3512        schedule_work(&adapter->reset_task);
3513}
3514
3515static void e1000_reset_task(struct work_struct *work)
3516{
3517        struct e1000_adapter *adapter =
3518                container_of(work, struct e1000_adapter, reset_task);
3519
3520        if (test_bit(__E1000_DOWN, &adapter->flags))
3521                return;
3522        e_err(drv, "Reset adapter\n");
3523        e1000_reinit_safe(adapter);
3524}
3525
3526/**
3527 * e1000_get_stats - Get System Network Statistics
3528 * @netdev: network interface device structure
3529 *
3530 * Returns the address of the device statistics structure.
3531 * The statistics are actually updated from the watchdog.
3532 **/
3533
3534static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3535{
3536        /* only return the current stats */
3537        return &netdev->stats;
3538}
3539
3540/**
3541 * e1000_change_mtu - Change the Maximum Transfer Unit
3542 * @netdev: network interface device structure
3543 * @new_mtu: new value for maximum frame size
3544 *
3545 * Returns 0 on success, negative on failure
3546 **/
3547
3548static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3549{
3550        struct e1000_adapter *adapter = netdev_priv(netdev);
3551        struct e1000_hw *hw = &adapter->hw;
3552        int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3553
3554        if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3555            (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3556                e_err(probe, "Invalid MTU setting\n");
3557                return -EINVAL;
3558        }
3559
3560        /* Adapter-specific max frame size limits. */
3561        switch (hw->mac_type) {
3562        case e1000_undefined ... e1000_82542_rev2_1:
3563                if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3564                        e_err(probe, "Jumbo Frames not supported.\n");
3565                        return -EINVAL;
3566                }
3567                break;
3568        default:
3569                /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3570                break;
3571        }
3572
3573        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3574                msleep(1);
3575        /* e1000_down has a dependency on max_frame_size */
3576        hw->max_frame_size = max_frame;
3577        if (netif_running(netdev))
3578                e1000_down(adapter);
3579
3580        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3581         * means we reserve 2 more, this pushes us to allocate from the next
3582         * larger slab size.
3583         * i.e. RXBUFFER_2048 --> size-4096 slab
3584         *  however with the new *_jumbo_rx* routines, jumbo receives will use
3585         *  fragmented skbs */
3586
3587        if (max_frame <= E1000_RXBUFFER_2048)
3588                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3589        else
3590#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3591                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3592#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3593                adapter->rx_buffer_len = PAGE_SIZE;
3594#endif
3595
3596        /* adjust allocation if LPE protects us, and we aren't using SBP */
3597        if (!hw->tbi_compatibility_on &&
3598            ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3599             (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3600                adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3601
3602        pr_info("%s changing MTU from %d to %d\n",
3603                netdev->name, netdev->mtu, new_mtu);
3604        netdev->mtu = new_mtu;
3605
3606        if (netif_running(netdev))
3607                e1000_up(adapter);
3608        else
3609                e1000_reset(adapter);
3610
3611        clear_bit(__E1000_RESETTING, &adapter->flags);
3612
3613        return 0;
3614}
3615
3616/**
3617 * e1000_update_stats - Update the board statistics counters
3618 * @adapter: board private structure
3619 **/
3620
3621void e1000_update_stats(struct e1000_adapter *adapter)
3622{
3623        struct net_device *netdev = adapter->netdev;
3624        struct e1000_hw *hw = &adapter->hw;
3625        struct pci_dev *pdev = adapter->pdev;
3626        unsigned long flags;
3627        u16 phy_tmp;
3628
3629#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3630
3631        /*
3632         * Prevent stats update while adapter is being reset, or if the pci
3633         * connection is down.
3634         */
3635        if (adapter->link_speed == 0)
3636                return;
3637        if (pci_channel_offline(pdev))
3638                return;
3639
3640        spin_lock_irqsave(&adapter->stats_lock, flags);
3641
3642        /* these counters are modified from e1000_tbi_adjust_stats,
3643         * called from the interrupt context, so they must only
3644         * be written while holding adapter->stats_lock
3645         */
3646
3647        adapter->stats.crcerrs += er32(CRCERRS);
3648        adapter->stats.gprc += er32(GPRC);
3649        adapter->stats.gorcl += er32(GORCL);
3650        adapter->stats.gorch += er32(GORCH);
3651        adapter->stats.bprc += er32(BPRC);
3652        adapter->stats.mprc += er32(MPRC);
3653        adapter->stats.roc += er32(ROC);
3654
3655        adapter->stats.prc64 += er32(PRC64);
3656        adapter->stats.prc127 += er32(PRC127);
3657        adapter->stats.prc255 += er32(PRC255);
3658        adapter->stats.prc511 += er32(PRC511);
3659        adapter->stats.prc1023 += er32(PRC1023);
3660        adapter->stats.prc1522 += er32(PRC1522);
3661
3662        adapter->stats.symerrs += er32(SYMERRS);
3663        adapter->stats.mpc += er32(MPC);
3664        adapter->stats.scc += er32(SCC);
3665        adapter->stats.ecol += er32(ECOL);
3666        adapter->stats.mcc += er32(MCC);
3667        adapter->stats.latecol += er32(LATECOL);
3668        adapter->stats.dc += er32(DC);
3669        adapter->stats.sec += er32(SEC);
3670        adapter->stats.rlec += er32(RLEC);
3671        adapter->stats.xonrxc += er32(XONRXC);
3672        adapter->stats.xontxc += er32(XONTXC);
3673        adapter->stats.xoffrxc += er32(XOFFRXC);
3674        adapter->stats.xofftxc += er32(XOFFTXC);
3675        adapter->stats.fcruc += er32(FCRUC);
3676        adapter->stats.gptc += er32(GPTC);
3677        adapter->stats.gotcl += er32(GOTCL);
3678        adapter->stats.gotch += er32(GOTCH);
3679        adapter->stats.rnbc += er32(RNBC);
3680        adapter->stats.ruc += er32(RUC);
3681        adapter->stats.rfc += er32(RFC);
3682        adapter->stats.rjc += er32(RJC);
3683        adapter->stats.torl += er32(TORL);
3684        adapter->stats.torh += er32(TORH);
3685        adapter->stats.totl += er32(TOTL);
3686        adapter->stats.toth += er32(TOTH);
3687        adapter->stats.tpr += er32(TPR);
3688
3689        adapter->stats.ptc64 += er32(PTC64);
3690        adapter->stats.ptc127 += er32(PTC127);
3691        adapter->stats.ptc255 += er32(PTC255);
3692        adapter->stats.ptc511 += er32(PTC511);
3693        adapter->stats.ptc1023 += er32(PTC1023);
3694        adapter->stats.ptc1522 += er32(PTC1522);
3695
3696        adapter->stats.mptc += er32(MPTC);
3697        adapter->stats.bptc += er32(BPTC);
3698
3699        /* used for adaptive IFS */
3700
3701        hw->tx_packet_delta = er32(TPT);
3702        adapter->stats.tpt += hw->tx_packet_delta;
3703        hw->collision_delta = er32(COLC);
3704        adapter->stats.colc += hw->collision_delta;
3705
3706        if (hw->mac_type >= e1000_82543) {
3707                adapter->stats.algnerrc += er32(ALGNERRC);
3708                adapter->stats.rxerrc += er32(RXERRC);
3709                adapter->stats.tncrs += er32(TNCRS);
3710                adapter->stats.cexterr += er32(CEXTERR);
3711                adapter->stats.tsctc += er32(TSCTC);
3712                adapter->stats.tsctfc += er32(TSCTFC);
3713        }
3714
3715        /* Fill out the OS statistics structure */
3716        netdev->stats.multicast = adapter->stats.mprc;
3717        netdev->stats.collisions = adapter->stats.colc;
3718
3719        /* Rx Errors */
3720
3721        /* RLEC on some newer hardware can be incorrect so build
3722         * our own version based on RUC and ROC */
3723        netdev->stats.rx_errors = adapter->stats.rxerrc +
3724                adapter->stats.crcerrs + adapter->stats.algnerrc +
3725                adapter->stats.ruc + adapter->stats.roc +
3726                adapter->stats.cexterr;
3727        adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3728        netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3729        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3730        netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3731        netdev->stats.rx_missed_errors = adapter->stats.mpc;
3732
3733        /* Tx Errors */
3734        adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3735        netdev->stats.tx_errors = adapter->stats.txerrc;
3736        netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3737        netdev->stats.tx_window_errors = adapter->stats.latecol;
3738        netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3739        if (hw->bad_tx_carr_stats_fd &&
3740            adapter->link_duplex == FULL_DUPLEX) {
3741                netdev->stats.tx_carrier_errors = 0;
3742                adapter->stats.tncrs = 0;
3743        }
3744
3745        /* Tx Dropped needs to be maintained elsewhere */
3746
3747        /* Phy Stats */
3748        if (hw->media_type == e1000_media_type_copper) {
3749                if ((adapter->link_speed == SPEED_1000) &&
3750                   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3751                        phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3752                        adapter->phy_stats.idle_errors += phy_tmp;
3753                }
3754
3755                if ((hw->mac_type <= e1000_82546) &&
3756                   (hw->phy_type == e1000_phy_m88) &&
3757                   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3758                        adapter->phy_stats.receive_errors += phy_tmp;
3759        }
3760
3761        /* Management Stats */
3762        if (hw->has_smbus) {
3763                adapter->stats.mgptc += er32(MGTPTC);
3764                adapter->stats.mgprc += er32(MGTPRC);
3765                adapter->stats.mgpdc += er32(MGTPDC);
3766        }
3767
3768        spin_unlock_irqrestore(&adapter->stats_lock, flags);
3769}
3770
3771/**
3772 * e1000_intr - Interrupt Handler
3773 * @irq: interrupt number
3774 * @data: pointer to a network interface device structure
3775 **/
3776
3777static irqreturn_t e1000_intr(int irq, void *data)
3778{
3779        struct net_device *netdev = data;
3780        struct e1000_adapter *adapter = netdev_priv(netdev);
3781        struct e1000_hw *hw = &adapter->hw;
3782        u32 icr = er32(ICR);
3783
3784        if (unlikely((!icr)))
3785                return IRQ_NONE;  /* Not our interrupt */
3786
3787        /*
3788         * we might have caused the interrupt, but the above
3789         * read cleared it, and just in case the driver is
3790         * down there is nothing to do so return handled
3791         */
3792        if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3793                return IRQ_HANDLED;
3794
3795        if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3796                hw->get_link_status = 1;
3797                /* guard against interrupt when we're going down */
3798                if (!test_bit(__E1000_DOWN, &adapter->flags))
3799                        schedule_delayed_work(&adapter->watchdog_task, 1);
3800        }
3801
3802        /* disable interrupts, without the synchronize_irq bit */
3803        ew32(IMC, ~0);
3804        E1000_WRITE_FLUSH();
3805
3806        if (likely(napi_schedule_prep(&adapter->napi))) {
3807                adapter->total_tx_bytes = 0;
3808                adapter->total_tx_packets = 0;
3809                adapter->total_rx_bytes = 0;
3810                adapter->total_rx_packets = 0;
3811                __napi_schedule(&adapter->napi);
3812        } else {
3813                /* this really should not happen! if it does it is basically a
3814                 * bug, but not a hard error, so enable ints and continue */
3815                if (!test_bit(__E1000_DOWN, &adapter->flags))
3816                        e1000_irq_enable(adapter);
3817        }
3818
3819        return IRQ_HANDLED;
3820}
3821
3822/**
3823 * e1000_clean - NAPI Rx polling callback
3824 * @napi: napi struct containing our adapter
 * @budget: maximum amount of Rx work this poll may do
3825 **/
3826static int e1000_clean(struct napi_struct *napi, int budget)
3827{
3828        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3829        int tx_clean_complete = 0, work_done = 0;
3830
3831        tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3832
3833        adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3834
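        /* if Tx cleanup did not complete, claim the full budget so NAPI
         * keeps polling and we get another chance at the Tx ring
         */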
3835        if (!tx_clean_complete)
3836                work_done = budget;
3837
3838        /* If budget not fully consumed, exit the polling mode */
3839        if (work_done < budget) {
3840                if (likely(adapter->itr_setting & 3))
3841                        e1000_set_itr(adapter);
3842                napi_complete(napi);
3843                if (!test_bit(__E1000_DOWN, &adapter->flags))
3844                        e1000_irq_enable(adapter);
3845        }
3846
3847        return work_done;
3848}
3849
3850/**
3851 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3852 * @adapter: board private structure
 * @tx_ring: ring to clean
3853 **/
3854static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3855                               struct e1000_tx_ring *tx_ring)
3856{
3857        struct e1000_hw *hw = &adapter->hw;
3858        struct net_device *netdev = adapter->netdev;
3859        struct e1000_tx_desc *tx_desc, *eop_desc;
3860        struct e1000_buffer *buffer_info;
3861        unsigned int i, eop;
3862        unsigned int count = 0;
3863        unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3864        unsigned int bytes_compl = 0, pkts_compl = 0;
3865
3866        i = tx_ring->next_to_clean;
3867        eop = tx_ring->buffer_info[i].next_to_watch;
3868        eop_desc = E1000_TX_DESC(*tx_ring, eop);
3869
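        /* walk completed packets: next_to_watch marks the last descriptor of
         * each packet, so clean until we hit one whose DD bit is not yet set
         */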
3870        while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3871               (count < tx_ring->count)) {
3872                bool cleaned = false;
3873                rmb();  /* read buffer_info after eop_desc */
3874                for ( ; !cleaned; count++) {
3875                        tx_desc = E1000_TX_DESC(*tx_ring, i);
3876                        buffer_info = &tx_ring->buffer_info[i];
3877                        cleaned = (i == eop);
3878
3879                        if (cleaned) {
3880                                total_tx_packets += buffer_info->segs;
3881                                total_tx_bytes += buffer_info->bytecount;
3882                                if (buffer_info->skb) {
3883                                        bytes_compl += buffer_info->skb->len;
3884                                        pkts_compl++;
3885                                }
3886
3887                        }
3888                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3889                        tx_desc->upper.data = 0;
3890
3891                        if (unlikely(++i == tx_ring->count))
                                i = 0;
3892                }
3893
3894                eop = tx_ring->buffer_info[i].next_to_watch;
3895                eop_desc = E1000_TX_DESC(*tx_ring, eop);
3896        }
3897
3898        tx_ring->next_to_clean = i;
3899
3900        netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3901
3902#define TX_WAKE_THRESHOLD 32
3903        if (unlikely(count && netif_carrier_ok(netdev) &&
3904                     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3905                /* Make sure that anybody stopping the queue after this
3906                 * sees the new next_to_clean.
3907                 */
3908                smp_mb();
3909
3910                if (netif_queue_stopped(netdev) &&
3911                    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3912                        netif_wake_queue(netdev);
3913                        ++adapter->restart_queue;
3914                }
3915        }
3916
3917        if (adapter->detect_tx_hung) {
3918                /* Detect a transmit hang in hardware; this serializes the
3919                 * check with the clearing of time_stamp and movement of i */
3920                adapter->detect_tx_hung = false;
3921                if (tx_ring->buffer_info[eop].time_stamp &&
3922                    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3923                               (adapter->tx_timeout_factor * HZ)) &&
3924                    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3925
3926                        /* detected Tx unit hang */
3927                        e_err(drv, "Detected Tx Unit Hang\n"
3928                              "  Tx Queue             <%lu>\n"
3929                              "  TDH                  <%x>\n"
3930                              "  TDT                  <%x>\n"
3931                              "  next_to_use          <%x>\n"
3932                              "  next_to_clean        <%x>\n"
3933                              "buffer_info[next_to_clean]\n"
3934                              "  time_stamp           <%lx>\n"
3935                              "  next_to_watch        <%x>\n"
3936                              "  jiffies              <%lx>\n"
3937                              "  next_to_watch.status <%x>\n",
3938                                (unsigned long)(tx_ring - adapter->tx_ring),
3940                                readl(hw->hw_addr + tx_ring->tdh),
3941                                readl(hw->hw_addr + tx_ring->tdt),
3942                                tx_ring->next_to_use,
3943                                tx_ring->next_to_clean,
3944                                tx_ring->buffer_info[eop].time_stamp,
3945                                eop,
3946                                jiffies,
3947                                eop_desc->upper.fields.status);
3948                        e1000_dump(adapter);
3949                        netif_stop_queue(netdev);
3950                }
3951        }
3952        adapter->total_tx_bytes += total_tx_bytes;
3953        adapter->total_tx_packets += total_tx_packets;
3954        netdev->stats.tx_bytes += total_tx_bytes;
3955        netdev->stats.tx_packets += total_tx_packets;
3956        return count < tx_ring->count;
3957}
3958
3959/**
3960 * e1000_rx_checksum - Receive Checksum Offload for 82543
3961 * @adapter:     board private structure
3962 * @status_err:  receive descriptor status and error fields
3963 * @csum:        receive descriptor csum field
3964 * @skb:         socket buffer with received data
3965 **/
3966
3967static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3968                              u32 csum, struct sk_buff *skb)
3969{
3970        struct e1000_hw *hw = &adapter->hw;
3971        u16 status = (u16)status_err;
3972        u8 errors = (u8)(status_err >> 24);
3973
3974        skb_checksum_none_assert(skb);
3975
3976        /* 82543 or newer only */
3977        if (unlikely(hw->mac_type < e1000_82543))
                return;
3978        /* Ignore Checksum bit is set */
3979        if (unlikely(status & E1000_RXD_STAT_IXSM))
                return;
3980        /* TCP/UDP checksum error bit is set */
3981        if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3982                /* let the stack verify checksum errors */
3983                adapter->hw_csum_err++;
3984                return;
3985        }
3986        /* TCP/UDP Checksum has not been calculated */
3987        if (!(status & E1000_RXD_STAT_TCPCS))
3988                return;
3989
3990        /* It must be a TCP or UDP packet with a valid checksum */
3991        if (likely(status & E1000_RXD_STAT_TCPCS)) {
3992                /* TCP checksum is good */
3993                skb->ip_summed = CHECKSUM_UNNECESSARY;
3994        }
3995        adapter->hw_csum_good++;
3996}
3997
3998/**
3999 * e1000_consume_page - account a receive page that was attached to an skb
4000 **/
4001static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
4002                               u16 length)
4003{
4004        bi->page = NULL;
4005        skb->len += length;
4006        skb->data_len += length;
4007        skb->truesize += PAGE_SIZE;
4008}
4009
4010/**
4011 * e1000_receive_skb - helper function to handle rx indications
4012 * @adapter: board private structure
4013 * @status: descriptor status field as written by hardware
4014 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4015 * @skb: pointer to sk_buff to be indicated to stack
4016 */
4017static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4018                              __le16 vlan, struct sk_buff *skb)
4019{
4020        skb->protocol = eth_type_trans(skb, adapter->netdev);
4021
4022        if (status & E1000_RXD_STAT_VP) {
4023                u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4024
4025                __vlan_hwaccel_put_tag(skb, vid);
4026        }
4027        napi_gro_receive(&adapter->napi, skb);
4028}
4029
4030/**
4031 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4032 * @adapter: board private structure
4033 * @rx_ring: ring to clean
4034 * @work_done: amount of napi work completed this call
4035 * @work_to_do: max amount of work allowed for this call to do
4036 *
4037 * the return value indicates whether actual cleaning was done, there
4038 * is no guarantee that everything was cleaned
4039 */
4040static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4041                                     struct e1000_rx_ring *rx_ring,
4042                                     int *work_done, int work_to_do)
4043{
4044        struct e1000_hw *hw = &adapter->hw;
4045        struct net_device *netdev = adapter->netdev;
4046        struct pci_dev *pdev = adapter->pdev;
4047        struct e1000_rx_desc *rx_desc, *next_rxd;
4048        struct e1000_buffer *buffer_info, *next_buffer;
4049        unsigned long irq_flags;
4050        u32 length;
4051        unsigned int i;
4052        int cleaned_count = 0;
4053        bool cleaned = false;
4054        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4055
4056        i = rx_ring->next_to_clean;
4057        rx_desc = E1000_RX_DESC(*rx_ring, i);
4058        buffer_info = &rx_ring->buffer_info[i];
4059
4060        while (rx_desc->status & E1000_RXD_STAT_DD) {
4061                struct sk_buff *skb;
4062                u8 status;
4063
4064                if (*work_done >= work_to_do)
4065                        break;
4066                (*work_done)++;
4067                rmb(); /* read descriptor and rx_buffer_info after status DD */
4068
4069                status = rx_desc->status;
4070                skb = buffer_info->skb;
4071                buffer_info->skb = NULL;
4072
4073                if (++i == rx_ring->count)
                        i = 0;
4074                next_rxd = E1000_RX_DESC(*rx_ring, i);
4075                prefetch(next_rxd);
4076
4077                next_buffer = &rx_ring->buffer_info[i];
4078
4079                cleaned = true;
4080                cleaned_count++;
4081                dma_unmap_page(&pdev->dev, buffer_info->dma,
4082                               buffer_info->length, DMA_FROM_DEVICE);
4083                buffer_info->dma = 0;
4084
4085                length = le16_to_cpu(rx_desc->length);
4086
4087                /* errors is only valid for DD + EOP descriptors */
4088                if (unlikely((status & E1000_RXD_STAT_EOP) &&
4089                    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4090                        u8 *mapped;
4091                        u8 last_byte;
4092
4093                        mapped = page_address(buffer_info->page);
4094                        last_byte = *(mapped + length - 1);
4095                        if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4096                                       last_byte)) {
4097                                spin_lock_irqsave(&adapter->stats_lock,
4098                                                  irq_flags);
4099                                e1000_tbi_adjust_stats(hw, &adapter->stats,
4100                                                       length, mapped);
4101                                spin_unlock_irqrestore(&adapter->stats_lock,
4102                                                       irq_flags);
4103                                length--;
4104                        } else {
4105                                if (netdev->features & NETIF_F_RXALL)
4106                                        goto process_skb;
4107                                /* recycle both page and skb */
4108                                buffer_info->skb = skb;
4109                                /* an error means any chain goes out the window
4110                                 * too */
4111                                if (rx_ring->rx_skb_top)
4112                                        dev_kfree_skb(rx_ring->rx_skb_top);
4113                                rx_ring->rx_skb_top = NULL;
4114                                goto next_desc;
4115                        }
4116                }
4117
4118#define rxtop rx_ring->rx_skb_top
4119process_skb:
4120                if (!(status & E1000_RXD_STAT_EOP)) {
4121                        /* this descriptor is only the beginning (or middle) */
4122                        if (!rxtop) {
4123                                /* this is the beginning of a chain */
4124                                rxtop = skb;
4125                                skb_fill_page_desc(rxtop, 0, buffer_info->page,
4126                                                   0, length);
4127                        } else {
4128                                /* this is the middle of a chain */
4129                                skb_fill_page_desc(rxtop,
4130                                    skb_shinfo(rxtop)->nr_frags,
4131                                    buffer_info->page, 0, length);
4132                                /* re-use the skb, only consumed the page */
4133                                buffer_info->skb = skb;
4134                        }
4135                        e1000_consume_page(buffer_info, rxtop, length);
4136                        goto next_desc;
4137                } else {
4138                        if (rxtop) {
4139                                /* end of the chain */
4140                                skb_fill_page_desc(rxtop,
4141                                    skb_shinfo(rxtop)->nr_frags,
4142                                    buffer_info->page, 0, length);
4143                                /* re-use the current skb, we only consumed the
4144                                 * page */
4145                                buffer_info->skb = skb;
4146                                skb = rxtop;
4147                                rxtop = NULL;
4148                                e1000_consume_page(buffer_info, skb, length);
4149                        } else {
4150                                /* no chain, got EOP, this buf is the packet
4151                                 * copybreak to save the put_page/alloc_page */
4152                                if (length <= copybreak &&
4153                                    skb_tailroom(skb) >= length) {
4154                                        u8 *vaddr;
4155                                        vaddr = kmap_atomic(buffer_info->page);
4156                                        memcpy(skb_tail_pointer(skb), vaddr, length);
4157                                        kunmap_atomic(vaddr);
4158                                        /* re-use the page, so don't erase
4159                                         * buffer_info->page */
4160                                        skb_put(skb, length);
4161                                } else {
4162                                        skb_fill_page_desc(skb, 0,
4163                                                           buffer_info->page, 0,
4164                                                           length);
4165                                        e1000_consume_page(buffer_info, skb,
4166                                                           length);
4167                                }
4168                        }
4169                }
4170
4171                /* Receive Checksum Offload XXX recompute due to CRC strip? */
4172                e1000_rx_checksum(adapter,
4173                                  (u32)(status) |
4174                                  ((u32)(rx_desc->errors) << 24),
4175                                  le16_to_cpu(rx_desc->csum), skb);
4176
4177                total_rx_bytes += (skb->len - 4); /* don't count FCS */
4178                if (likely(!(netdev->features & NETIF_F_RXFCS)))
4179                        pskb_trim(skb, skb->len - 4);
4180                total_rx_packets++;
4181
4182                /* eth type trans needs skb->data to point to something */
4183                if (!pskb_may_pull(skb, ETH_HLEN)) {
4184                        e_err(drv, "pskb_may_pull failed.\n");
4185                        dev_kfree_skb(skb);
4186                        goto next_desc;
4187                }
4188
4189                e1000_receive_skb(adapter, status, rx_desc->special, skb);
4190
4191next_desc:
4192                rx_desc->status = 0;
4193
4194                /* return some buffers to hardware, one at a time is too slow */
4195                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4196                        adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4197                        cleaned_count = 0;
4198                }
4199
4200                /* use prefetched values */
4201                rx_desc = next_rxd;
4202                buffer_info = next_buffer;
4203        }
4204        rx_ring->next_to_clean = i;
4205
4206        cleaned_count = E1000_DESC_UNUSED(rx_ring);
4207        if (cleaned_count)
4208                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4209
4210        adapter->total_rx_packets += total_rx_packets;
4211        adapter->total_rx_bytes += total_rx_bytes;
4212        netdev->stats.rx_bytes += total_rx_bytes;
4213        netdev->stats.rx_packets += total_rx_packets;
4214        return cleaned;
4215}
4216
4217/*
4218 * this should improve performance for small packets with large amounts
4219 * of reassembly being done in the stack
4220 */
4221static void e1000_check_copybreak(struct net_device *netdev,
4222                                 struct e1000_buffer *buffer_info,
4223                                 u32 length, struct sk_buff **skb)
4224{
4225        struct sk_buff *new_skb;
4226
4227        if (length > copybreak)
4228                return;
4229
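        /* small enough to copy: hand the stack a fresh skb and save the
         * original one in buffer_info so its buffer can be reused
         */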
4230        new_skb = netdev_alloc_skb_ip_align(netdev, length);
4231        if (!new_skb)
4232                return;
4233
4234        skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4235                                       (*skb)->data - NET_IP_ALIGN,
4236                                       length + NET_IP_ALIGN);
4237        /* save the skb in buffer_info as good */
4238        buffer_info->skb = *skb;
4239        *skb = new_skb;
4240}
4241
4242/**
4243 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4244 * @adapter: board private structure
4245 * @rx_ring: ring to clean
4246 * @work_done: amount of napi work completed this call
4247 * @work_to_do: max amount of work allowed for this call to do
4248 */
4249static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4250                               struct e1000_rx_ring *rx_ring,
4251                               int *work_done, int work_to_do)
4252{
4253        struct e1000_hw *hw = &adapter->hw;
4254        struct net_device *netdev = adapter->netdev;
4255        struct pci_dev *pdev = adapter->pdev;
4256        struct e1000_rx_desc *rx_desc, *next_rxd;
4257        struct e1000_buffer *buffer_info, *next_buffer;
4258        unsigned long flags;
4259        u32 length;
4260        unsigned int i;
4261        int cleaned_count = 0;
4262        bool cleaned = false;
4263        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4264
4265        i = rx_ring->next_to_clean;
4266        rx_desc = E1000_RX_DESC(*rx_ring, i);
4267        buffer_info = &rx_ring->buffer_info[i];
4268
4269        while (rx_desc->status & E1000_RXD_STAT_DD) {
4270                struct sk_buff *skb;
4271                u8 status;
4272
4273                if (*work_done >= work_to_do)
4274                        break;
4275                (*work_done)++;
4276                rmb(); /* read descriptor and rx_buffer_info after status DD */
4277
4278                status = rx_desc->status;
4279                skb = buffer_info->skb;
4280                buffer_info->skb = NULL;
4281
4282                prefetch(skb->data - NET_IP_ALIGN);
4283
4284                if (++i == rx_ring->count)
                        i = 0;
4285                next_rxd = E1000_RX_DESC(*rx_ring, i);
4286                prefetch(next_rxd);
4287
4288                next_buffer = &rx_ring->buffer_info[i];
4289
4290                cleaned = true;
4291                cleaned_count++;
4292                dma_unmap_single(&pdev->dev, buffer_info->dma,
4293                                 buffer_info->length, DMA_FROM_DEVICE);
4294                buffer_info->dma = 0;
4295
4296                length = le16_to_cpu(rx_desc->length);
4297                /* !EOP means multiple descriptors were used to store a single
4298                 * packet, if that's the case we need to toss it.  In fact, we
4299                 * need to toss every packet with the EOP bit clear and the next
4300                 * frame that _does_ have the EOP bit set, as it is by
4301                 * definition only a frame fragment
4302                 */
4303                if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4304                        adapter->discarding = true;
4305
4306                if (adapter->discarding) {
4307                        /* All receives must fit into a single buffer */
4308                        e_dbg("Receive packet consumed multiple buffers\n");
4309                        /* recycle */
4310                        buffer_info->skb = skb;
4311                        if (status & E1000_RXD_STAT_EOP)
4312                                adapter->discarding = false;
4313                        goto next_desc;
4314                }
4315
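                /* an error-flagged frame may still be usable: TBI_ACCEPT()
                 * spots the false error report that TBI-mode hardware can
                 * generate, in which case the stats are adjusted and only
                 * the spurious last byte is dropped instead of the frame
                 */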
4316                if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4317                        u8 last_byte = *(skb->data + length - 1);
4318                        if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4319                                       last_byte)) {
4320                                spin_lock_irqsave(&adapter->stats_lock, flags);
4321                                e1000_tbi_adjust_stats(hw, &adapter->stats,
4322                                                       length, skb->data);
4323                                spin_unlock_irqrestore(&adapter->stats_lock,
4324                                                       flags);
4325                                length--;
4326                        } else {
4327                                if (netdev->features & NETIF_F_RXALL)
4328                                        goto process_skb;
4329                                /* recycle */
4330                                buffer_info->skb = skb;
4331                                goto next_desc;
4332                        }
4333                }
4334
4335process_skb:
4336                total_rx_bytes += (length - 4); /* don't count FCS */
4337                total_rx_packets++;
4338
4339                if (likely(!(netdev->features & NETIF_F_RXFCS)))
4340                        /* adjust length to remove Ethernet CRC, this must be
4341                         * done after the TBI_ACCEPT workaround above
4342                         */
4343                        length -= 4;
4344
4345                e1000_check_copybreak(netdev, buffer_info, length, &skb);
4346
4347                skb_put(skb, length);
4348
4349                /* Receive Checksum Offload */
4350                e1000_rx_checksum(adapter,
4351                                  (u32)(status) |
4352                                  ((u32)(rx_desc->errors) << 24),
4353                                  le16_to_cpu(rx_desc->csum), skb);
4354
4355                e1000_receive_skb(adapter, status, rx_desc->special, skb);
4356
4357next_desc:
4358                rx_desc->status = 0;
4359
4360                /* return some buffers to hardware, one at a time is too slow */
4361                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4362                        adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4363                        cleaned_count = 0;
4364                }
4365
4366                /* use prefetched values */
4367                rx_desc = next_rxd;
4368                buffer_info = next_buffer;
4369        }
4370        rx_ring->next_to_clean = i;
4371
4372        cleaned_count = E1000_DESC_UNUSED(rx_ring);
4373        if (cleaned_count)
4374                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4375
4376        adapter->total_rx_packets += total_rx_packets;
4377        adapter->total_rx_bytes += total_rx_bytes;
4378        netdev->stats.rx_bytes += total_rx_bytes;
4379        netdev->stats.rx_packets += total_rx_packets;
4380        return cleaned;
4381}
4382
4383/**
4384 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4385 * @adapter: address of board private structure
4386 * @rx_ring: pointer to receive ring structure
4387 * @cleaned_count: number of buffers to allocate this pass
4388 **/
4389
4390static void
4391e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4392                             struct e1000_rx_ring *rx_ring, int cleaned_count)
4393{
4394        struct net_device *netdev = adapter->netdev;
4395        struct pci_dev *pdev = adapter->pdev;
4396        struct e1000_rx_desc *rx_desc;
4397        struct e1000_buffer *buffer_info;
4398        struct sk_buff *skb;
4399        unsigned int i;
4400        unsigned int bufsz = 256 - 16; /* for skb_reserve */
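        /* for jumbo receives the packet data lives in attached pages, so
         * the skb itself only needs this small allocation
         */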
4401
4402        i = rx_ring->next_to_use;
4403        buffer_info = &rx_ring->buffer_info[i];
4404
4405        while (cleaned_count--) {
4406                skb = buffer_info->skb;
4407                if (skb) {
4408                        skb_trim(skb, 0);
4409                        goto check_page;
4410                }
4411
4412                skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4413                if (unlikely(!skb)) {
4414                        /* Better luck next round */
4415                        adapter->alloc_rx_buff_failed++;
4416                        break;
4417                }
4418
4419                buffer_info->skb = skb;
4420                buffer_info->length = adapter->rx_buffer_len;
4421check_page:
4422                /* allocate a new page if necessary */
4423                if (!buffer_info->page) {
4424                        buffer_info->page = alloc_page(GFP_ATOMIC);
4425                        if (unlikely(!buffer_info->page)) {
4426                                adapter->alloc_rx_buff_failed++;
4427                                break;
4428                        }
4429                }
4430
4431                if (!buffer_info->dma) {
4432                        buffer_info->dma = dma_map_page(&pdev->dev,
4433                                                        buffer_info->page, 0,
4434                                                        buffer_info->length,
4435                                                        DMA_FROM_DEVICE);
4436                        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4437                                put_page(buffer_info->page);
4438                                dev_kfree_skb(skb);
4439                                buffer_info->page = NULL;
4440                                buffer_info->skb = NULL;
4441                                buffer_info->dma = 0;
4442                                adapter->alloc_rx_buff_failed++;
4443                                break; /* while !buffer_info->skb */
4444                        }
4445                }
4446
4447                rx_desc = E1000_RX_DESC(*rx_ring, i);
4448                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4449
4450                if (unlikely(++i == rx_ring->count))
4451                        i = 0;
4452                buffer_info = &rx_ring->buffer_info[i];
4453        }
4454
4455        if (likely(rx_ring->next_to_use != i)) {
4456                rx_ring->next_to_use = i;
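                /* RDT is written with the index of the most recently
                 * filled descriptor, i.e. one behind next_to_use
                 * (wrapping at the start of the ring)
                 */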
4457                if (unlikely(i-- == 0))
4458                        i = (rx_ring->count - 1);
4459
4460                /* Force memory writes to complete before letting h/w
4461                 * know there are new descriptors to fetch.  (Only
4462                 * applicable for weak-ordered memory model archs,
4463                 * such as IA-64). */
4464                wmb();
4465                writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4466        }
4467}
4468
4469/**
4470 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4471 * @adapter: address of board private structure
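 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass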
4472 **/
4473
4474static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4475                                   struct e1000_rx_ring *rx_ring,
4476                                   int cleaned_count)
4477{
4478        struct e1000_hw *hw = &adapter->hw;
4479        struct net_device *netdev = adapter->netdev;
4480        struct pci_dev *pdev = adapter->pdev;
4481        struct e1000_rx_desc *rx_desc;
4482        struct e1000_buffer *buffer_info;
4483        struct sk_buff *skb;
4484        unsigned int i;
4485        unsigned int bufsz = adapter->rx_buffer_len;
4486
4487        i = rx_ring->next_to_use;
4488        buffer_info = &rx_ring->buffer_info[i];
4489
4490        while (cleaned_count--) {
4491                skb = buffer_info->skb;
4492                if (skb) {
4493                        skb_trim(skb, 0);
4494                        goto map_skb;
4495                }
4496
4497                skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4498                if (unlikely(!skb)) {
4499                        /* Better luck next round */
4500                        adapter->alloc_rx_buff_failed++;
4501                        break;
4502                }
4503
4504                /* Fix for errata 23, can't cross 64kB boundary */
4505                if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4506                        struct sk_buff *oldskb = skb;
4507                        e_err(rx_err, "skb align check failed: %u bytes at "
4508                              "%p\n", bufsz, skb->data);
4509                        /* Try again, without freeing the previous */
4510                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4511                        /* Failed allocation, critical failure */
4512                        if (!skb) {
4513                                dev_kfree_skb(oldskb);
4514                                adapter->alloc_rx_buff_failed++;
4515                                break;
4516                        }
4517
4518                        if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4519                                /* give up */
4520                                dev_kfree_skb(skb);
4521                                dev_kfree_skb(oldskb);
4522                                adapter->alloc_rx_buff_failed++;
4523                                break; /* while !buffer_info->skb */
4524                        }
4525
4526                        /* Use new allocation */
4527                        dev_kfree_skb(oldskb);
4528                }
4529                buffer_info->skb = skb;
4530                buffer_info->length = adapter->rx_buffer_len;
4531map_skb:
4532                buffer_info->dma = dma_map_single(&pdev->dev,
4533                                                  skb->data,
4534                                                  buffer_info->length,
4535                                                  DMA_FROM_DEVICE);
4536                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4537                        dev_kfree_skb(skb);
4538                        buffer_info->skb = NULL;
4539                        buffer_info->dma = 0;
4540                        adapter->alloc_rx_buff_failed++;
4541                        break; /* while !buffer_info->skb */
4542                }
4543
4544                /*
4545                 * XXX if it was allocated cleanly it will never map to a
4546                 * boundary crossing
4547                 */
4548
4549                /* Fix for errata 23, can't cross 64kB boundary */
4550                if (!e1000_check_64k_bound(adapter,
4551                                        (void *)(unsigned long)buffer_info->dma,
4552                                        adapter->rx_buffer_len)) {
4553                        e_err(rx_err, "dma align check failed: %u bytes at "
4554                              "%p\n", adapter->rx_buffer_len,
4555                              (void *)(unsigned long)buffer_info->dma);
4556                        dev_kfree_skb(skb);
4557                        buffer_info->skb = NULL;
4558
4559                        dma_unmap_single(&pdev->dev, buffer_info->dma,
4560                                         adapter->rx_buffer_len,
4561                                         DMA_FROM_DEVICE);
4562                        buffer_info->dma = 0;
4563
4564                        adapter->alloc_rx_buff_failed++;
4565                        break; /* while !buffer_info->skb */
4566                }
4567                rx_desc = E1000_RX_DESC(*rx_ring, i);
4568                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4569
4570                if (unlikely(++i == rx_ring->count))
4571                        i = 0;
4572                buffer_info = &rx_ring->buffer_info[i];
4573        }
4574
4575        if (likely(rx_ring->next_to_use != i)) {
4576                rx_ring->next_to_use = i;
4577                if (unlikely(i-- == 0))
4578                        i = (rx_ring->count - 1);
4579
4580                /* Force memory writes to complete before letting h/w
4581                 * know there are new descriptors to fetch.  (Only
4582                 * applicable for weak-ordered memory model archs,
4583                 * such as IA-64). */
4584                wmb();
4585                writel(i, hw->hw_addr + rx_ring->rdt);
4586        }
4587}
4588
4589/**
4590 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4591 * @adapter: board private structure
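 *
 * Works around failing 1000BASE-T master/slave auto-negotiation by
 * toggling the PHY's manual master/slave setting and restarting
 * auto-negotiation.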
4592 **/
4593
4594static void e1000_smartspeed(struct e1000_adapter *adapter)
4595{
4596        struct e1000_hw *hw = &adapter->hw;
4597        u16 phy_status;
4598        u16 phy_ctrl;
4599
4600        if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4601           !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4602                return;
4603
4604        if (adapter->smartspeed == 0) {
4605                /* If Master/Slave config fault is asserted twice,
4606                 * we assume back-to-back */
4607                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4608                if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
                        return;
4609                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4610                if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
                        return;
4611                e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4612                if (phy_ctrl & CR_1000T_MS_ENABLE) {
4613                        phy_ctrl &= ~CR_1000T_MS_ENABLE;
4614                        e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4615                                            phy_ctrl);
4616                        adapter->smartspeed++;
4617                        if (!e1000_phy_setup_autoneg(hw) &&
4618                           !e1000_read_phy_reg(hw, PHY_CTRL,
4619                                               &phy_ctrl)) {
4620                                phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4621                                             MII_CR_RESTART_AUTO_NEG);
4622                                e1000_write_phy_reg(hw, PHY_CTRL,
4623                                                    phy_ctrl);
4624                        }
4625                }
4626                return;
4627        } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4628                /* If still no link, perhaps using 2/3 pair cable */
4629                e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4630                phy_ctrl |= CR_1000T_MS_ENABLE;
4631                e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4632                if (!e1000_phy_setup_autoneg(hw) &&
4633                   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4634                        phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4635                                     MII_CR_RESTART_AUTO_NEG);
4636                        e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4637                }
4638        }
4639        /* Restart process after E1000_SMARTSPEED_MAX iterations */
4640        if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4641                adapter->smartspeed = 0;
4642}
4643
4644/**
4645 * e1000_ioctl - handle ioctl calls
4646 * @netdev: network interface device structure
4647 * @ifr: interface request data
4648 * @cmd: ioctl command number
4649 **/
4650
4651static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4652{
4653        switch (cmd) {
4654        case SIOCGMIIPHY:
4655        case SIOCGMIIREG:
4656        case SIOCSMIIREG:
4657                return e1000_mii_ioctl(netdev, ifr, cmd);
4658        default:
4659                return -EOPNOTSUPP;
4660        }
4661}
4662
4663/**
4664 * e1000_mii_ioctl - read or write MII PHY registers
4665 * @netdev: network interface device structure
4666 * @ifr: interface request data carrying the MII register and value
4667 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4668 **/
4669
4670static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4671                           int cmd)
4672{
4673        struct e1000_adapter *adapter = netdev_priv(netdev);
4674        struct e1000_hw *hw = &adapter->hw;
4675        struct mii_ioctl_data *data = if_mii(ifr);
4676        int retval;
4677        u16 mii_reg;
4678        unsigned long flags;
4679
4680        if (hw->media_type != e1000_media_type_copper)
4681                return -EOPNOTSUPP;
4682
4683        switch (cmd) {
4684        case SIOCGMIIPHY:
4685                data->phy_id = hw->phy_addr;
4686                break;
4687        case SIOCGMIIREG:
4688                spin_lock_irqsave(&adapter->stats_lock, flags);
4689                if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4690                                   &data->val_out)) {
4691                        spin_unlock_irqrestore(&adapter->stats_lock, flags);
4692                        return -EIO;
4693                }
4694                spin_unlock_irqrestore(&adapter->stats_lock, flags);
4695                break;
4696        case SIOCSMIIREG:
4697                if (data->reg_num & ~(0x1F))
4698                        return -EFAULT;
4699                mii_reg = data->val_in;
4700                spin_lock_irqsave(&adapter->stats_lock, flags);
4701                if (e1000_write_phy_reg(hw, data->reg_num,
4702                                        mii_reg)) {
4703                        spin_unlock_irqrestore(&adapter->stats_lock, flags);
4704                        return -EIO;
4705                }
4706                spin_unlock_irqrestore(&adapter->stats_lock, flags);
4707                if (hw->media_type == e1000_media_type_copper) {
4708                        switch (data->reg_num) {
4709                        case PHY_CTRL:
4710                                if (mii_reg & MII_CR_POWER_DOWN)
4711                                        break;
4712                                if (mii_reg & MII_CR_AUTO_NEG_EN) {
4713                                        hw->autoneg = 1;
4714                                        hw->autoneg_advertised = 0x2F;
4715                                } else {
4716                                        u32 speed;
4717                                        if (mii_reg & 0x40)
4718                                                speed = SPEED_1000;
4719                                        else if (mii_reg & 0x2000)
4720                                                speed = SPEED_100;
4721                                        else
4722                                                speed = SPEED_10;
4723                                        retval = e1000_set_spd_dplx(
4724                                                adapter, speed,
4725                                                ((mii_reg & 0x100)
4726                                                 ? DUPLEX_FULL :
4727                                                 DUPLEX_HALF));
4728                                        if (retval)
4729                                                return retval;
4730                                }
4731                                if (netif_running(adapter->netdev))
4732                                        e1000_reinit_locked(adapter);
4733                                else
4734                                        e1000_reset(adapter);
4735                                break;
4736                        case M88E1000_PHY_SPEC_CTRL:
4737                        case M88E1000_EXT_PHY_SPEC_CTRL:
4738                                if (e1000_phy_reset(hw))
4739                                        return -EIO;
4740                                break;
4741                        }
4742                } else {
4743                        switch (data->reg_num) {
4744                        case PHY_CTRL:
4745                                if (mii_reg & MII_CR_POWER_DOWN)
4746                                        break;
4747                                if (netif_running(adapter->netdev))
4748                                        e1000_reinit_locked(adapter);
4749                                else
4750                                        e1000_reset(adapter);
4751                                break;
4752                        }
4753                }
4754                break;
4755        default:
4756                return -EOPNOTSUPP;
4757        }
4758        return E1000_SUCCESS;
4759}
4760
4761void e1000_pci_set_mwi(struct e1000_hw *hw)
4762{
4763        struct e1000_adapter *adapter = hw->back;
4764        int ret_val = pci_set_mwi(adapter->pdev);
4765
4766        if (ret_val)
4767                e_err(probe, "Error in setting MWI\n");
4768}
4769
4770void e1000_pci_clear_mwi(struct e1000_hw *hw)
4771{
4772        struct e1000_adapter *adapter = hw->back;
4773
4774        pci_clear_mwi(adapter->pdev);
4775}
4776
4777int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4778{
4779        struct e1000_adapter *adapter = hw->back;
4780        return pcix_get_mmrbc(adapter->pdev);
4781}
4782
4783void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4784{
4785        struct e1000_adapter *adapter = hw->back;
4786        pcix_set_mmrbc(adapter->pdev, mmrbc);
4787}
4788
4789void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4790{
4791        outl(value, port);
4792}
4793
4794static bool e1000_vlan_used(struct e1000_adapter *adapter)
4795{
4796        u16 vid;
4797
4798        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4799                return true;
4800        return false;
4801}
4802
4803static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4804                              netdev_features_t features)
4805{
4806        struct e1000_hw *hw = &adapter->hw;
4807        u32 ctrl;
4808
4809        ctrl = er32(CTRL);
4810        if (features & NETIF_F_HW_VLAN_RX) {
4811                /* enable VLAN tag insert/strip */
4812                ctrl |= E1000_CTRL_VME;
4813        } else {
4814                /* disable VLAN tag insert/strip */
4815                ctrl &= ~E1000_CTRL_VME;
4816        }
4817        ew32(CTRL, ctrl);
4818}
4819static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4820                                     bool filter_on)
4821{
4822        struct e1000_hw *hw = &adapter->hw;
4823        u32 rctl;
4824
4825        if (!test_bit(__E1000_DOWN, &adapter->flags))
4826                e1000_irq_disable(adapter);
4827
4828        __e1000_vlan_mode(adapter, adapter->netdev->features);
4829        if (filter_on) {
4830                /* enable VLAN receive filtering */
4831                rctl = er32(RCTL);
4832                rctl &= ~E1000_RCTL_CFIEN;
4833                if (!(adapter->netdev->flags & IFF_PROMISC))
4834                        rctl |= E1000_RCTL_VFE;
4835                ew32(RCTL, rctl);
4836                e1000_update_mng_vlan(adapter);
4837        } else {
4838                /* disable VLAN receive filtering */
4839                rctl = er32(RCTL);
4840                rctl &= ~E1000_RCTL_VFE;
4841                ew32(RCTL, rctl);
4842        }
4843
4844        if (!test_bit(__E1000_DOWN, &adapter->flags))
4845                e1000_irq_enable(adapter);
4846}
4847
4848static void e1000_vlan_mode(struct net_device *netdev,
4849                            netdev_features_t features)
4850{
4851        struct e1000_adapter *adapter = netdev_priv(netdev);
4852
4853        if (!test_bit(__E1000_DOWN, &adapter->flags))
4854                e1000_irq_disable(adapter);
4855
4856        __e1000_vlan_mode(adapter, features);
4857
4858        if (!test_bit(__E1000_DOWN, &adapter->flags))
4859                e1000_irq_enable(adapter);
4860}
4861
4862static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4863{
4864        struct e1000_adapter *adapter = netdev_priv(netdev);
4865        struct e1000_hw *hw = &adapter->hw;
4866        u32 vfta, index;
4867
4868        if ((hw->mng_cookie.status &
4869             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4870            (vid == adapter->mng_vlan_id))
4871                return 0;
4872
4873        if (!e1000_vlan_used(adapter))
4874                e1000_vlan_filter_on_off(adapter, true);
4875
4876        /* add VID to filter table */
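        /* the VLAN filter table is 128 32-bit entries; bit (vid & 0x1F)
         * of entry (vid >> 5) corresponds to this VLAN ID
         */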
4877        index = (vid >> 5) & 0x7F;
4878        vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4879        vfta |= (1 << (vid & 0x1F));
4880        e1000_write_vfta(hw, index, vfta);
4881
4882        set_bit(vid, adapter->active_vlans);
4883
4884        return 0;
4885}
4886
4887static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4888{
4889        struct e1000_adapter *adapter = netdev_priv(netdev);
4890        struct e1000_hw *hw = &adapter->hw;
4891        u32 vfta, index;
4892
4893        if (!test_bit(__E1000_DOWN, &adapter->flags))
4894                e1000_irq_disable(adapter);
4895        if (!test_bit(__E1000_DOWN, &adapter->flags))
4896                e1000_irq_enable(adapter);
4897
4898        /* remove VID from filter table */
4899        index = (vid >> 5) & 0x7F;
4900        vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4901        vfta &= ~(1 << (vid & 0x1F));
4902        e1000_write_vfta(hw, index, vfta);
4903
4904        clear_bit(vid, adapter->active_vlans);
4905
4906        if (!e1000_vlan_used(adapter))
4907                e1000_vlan_filter_on_off(adapter, false);
4908
4909        return 0;
4910}
4911
4912static void e1000_restore_vlan(struct e1000_adapter *adapter)
4913{
4914        u16 vid;
4915
4916        if (!e1000_vlan_used(adapter))
4917                return;
4918
4919        e1000_vlan_filter_on_off(adapter, true);
4920        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4921                e1000_vlan_rx_add_vid(adapter->netdev, vid);
4922}
4923
4924int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4925{
4926        struct e1000_hw *hw = &adapter->hw;
4927
4928        hw->autoneg = 0;
4929
4930        /* Make sure dplx is at most 1 bit and lsb of speed is not set
4931         * for the switch() below to work */
4932        if ((spd & 1) || (dplx & ~1))
4933                goto err_inval;
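        /* with that guaranteed, spd + dplx identifies each valid pair
         * uniquely, e.g. SPEED_100 + DUPLEX_FULL == 101
         */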
4934
4935        /* Fiber NICs only allow 1000 Mbps Full duplex */
4936        if ((hw->media_type == e1000_media_type_fiber) &&
4937            spd != SPEED_1000 &&
4938            dplx != DUPLEX_FULL)
4939                goto err_inval;
4940
4941        switch (spd + dplx) {
4942        case SPEED_10 + DUPLEX_HALF:
4943                hw->forced_speed_duplex = e1000_10_half;
4944                break;
4945        case SPEED_10 + DUPLEX_FULL:
4946                hw->forced_speed_duplex = e1000_10_full;
4947                break;
4948        case SPEED_100 + DUPLEX_HALF:
4949                hw->forced_speed_duplex = e1000_100_half;
4950                break;
4951        case SPEED_100 + DUPLEX_FULL:
4952                hw->forced_speed_duplex = e1000_100_full;
4953                break;
4954        case SPEED_1000 + DUPLEX_FULL:
4955                hw->autoneg = 1;
4956                hw->autoneg_advertised = ADVERTISE_1000_FULL;
4957                break;
4958        case SPEED_1000 + DUPLEX_HALF: /* not supported */
4959        default:
4960                goto err_inval;
4961        }
4962
4963        /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4964        hw->mdix = AUTO_ALL_MODES;
4965
4966        return 0;
4967
4968err_inval:
4969        e_err(probe, "Unsupported Speed/Duplex configuration\n");
4970        return -EINVAL;
4971}
4972
4973static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4974{
4975        struct net_device *netdev = pci_get_drvdata(pdev);
4976        struct e1000_adapter *adapter = netdev_priv(netdev);
4977        struct e1000_hw *hw = &adapter->hw;
4978        u32 ctrl, ctrl_ext, rctl, status;
4979        u32 wufc = adapter->wol;
4980#ifdef CONFIG_PM
4981        int retval = 0;
4982#endif
4983
4984        netif_device_detach(netdev);
4985
4986        if (netif_running(netdev)) {
4987                WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4988                e1000_down(adapter);
4989        }
4990
4991#ifdef CONFIG_PM
4992        retval = pci_save_state(pdev);
4993        if (retval)
4994                return retval;
4995#endif
4996
4997        status = er32(STATUS);
4998        if (status & E1000_STATUS_LU)
4999                wufc &= ~E1000_WUFC_LNKC;
5000
5001        if (wufc) {
5002                e1000_setup_rctl(adapter);
5003                e1000_set_rx_mode(netdev);
5004
5005                rctl = er32(RCTL);
5006
5007                /* turn on all-multi mode if wake on multicast is enabled */
5008                if (wufc & E1000_WUFC_MC)
5009                        rctl |= E1000_RCTL_MPE;
5010
5011                /* enable receives in the hardware */
5012                ew32(RCTL, rctl | E1000_RCTL_EN);
5013
5014                if (hw->mac_type >= e1000_82540) {
5015                        ctrl = er32(CTRL);
5016                        /* advertise wake from D3Cold */
5017                        #define E1000_CTRL_ADVD3WUC 0x00100000
5018                        /* phy power management enable */
5019                        #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5020                        ctrl |= E1000_CTRL_ADVD3WUC |
5021                                E1000_CTRL_EN_PHY_PWR_MGMT;
5022                        ew32(CTRL, ctrl);
5023                }
5024
5025                if (hw->media_type == e1000_media_type_fiber ||
5026                    hw->media_type == e1000_media_type_internal_serdes) {
5027                        /* keep the laser running in D3 */
5028                        ctrl_ext = er32(CTRL_EXT);
5029                        ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5030                        ew32(CTRL_EXT, ctrl_ext);
5031                }
5032
5033                ew32(WUC, E1000_WUC_PME_EN);
5034                ew32(WUFC, wufc);
5035        } else {
5036                ew32(WUC, 0);
5037                ew32(WUFC, 0);
5038        }
5039
5040        e1000_release_manageability(adapter);
5041
5042        *enable_wake = !!wufc;
5043
5044        /* make sure adapter isn't asleep if manageability is enabled */
5045        if (adapter->en_mng_pt)
5046                *enable_wake = true;
5047
5048        if (netif_running(netdev))
5049                e1000_free_irq(adapter);
5050
5051        pci_disable_device(pdev);
5052
5053        return 0;
5054}
5055
5056#ifdef CONFIG_PM
5057static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5058{
5059        int retval;
5060        bool wake;
5061
5062        retval = __e1000_shutdown(pdev, &wake);
5063        if (retval)
5064                return retval;
5065
5066        if (wake) {
5067                pci_prepare_to_sleep(pdev);
5068        } else {
5069                pci_wake_from_d3(pdev, false);
5070                pci_set_power_state(pdev, PCI_D3hot);
5071        }
5072
5073        return 0;
5074}
5075
5076static int e1000_resume(struct pci_dev *pdev)
5077{
5078        struct net_device *netdev = pci_get_drvdata(pdev);
5079        struct e1000_adapter *adapter = netdev_priv(netdev);
5080        struct e1000_hw *hw = &adapter->hw;
5081        u32 err;
5082
5083        pci_set_power_state(pdev, PCI_D0);
5084        pci_restore_state(pdev);
5085        pci_save_state(pdev);
5086
5087        if (adapter->need_ioport)
5088                err = pci_enable_device(pdev);
5089        else
5090                err = pci_enable_device_mem(pdev);
5091        if (err) {
5092                pr_err("Cannot enable PCI device from suspend\n");
5093                return err;
5094        }
5095        pci_set_master(pdev);
5096
5097        pci_enable_wake(pdev, PCI_D3hot, 0);
5098        pci_enable_wake(pdev, PCI_D3cold, 0);
5099
5100        if (netif_running(netdev)) {
5101                err = e1000_request_irq(adapter);
5102                if (err)
5103                        return err;
5104        }
5105
5106        e1000_power_up_phy(adapter);
5107        e1000_reset(adapter);
5108        ew32(WUS, ~0);
5109
5110        e1000_init_manageability(adapter);
5111
5112        if (netif_running(netdev))
5113                e1000_up(adapter);
5114
5115        netif_device_attach(netdev);
5116
5117        return 0;
5118}
5119#endif
5120
5121static void e1000_shutdown(struct pci_dev *pdev)
5122{
5123        bool wake;
5124
5125        __e1000_shutdown(pdev, &wake);
5126
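        /* configure wake-up and drop to D3hot only when the system is
         * actually powering off
         */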
5127        if (system_state == SYSTEM_POWER_OFF) {
5128                pci_wake_from_d3(pdev, wake);
5129                pci_set_power_state(pdev, PCI_D3hot);
5130        }
5131}
5132
5133#ifdef CONFIG_NET_POLL_CONTROLLER
5134/*
5135 * Polling 'interrupt' - used by things like netconsole to send skbs
5136 * without having to re-enable interrupts. It's not called while
5137 * the interrupt routine is executing.
5138 */
5139static void e1000_netpoll(struct net_device *netdev)
5140{
5141        struct e1000_adapter *adapter = netdev_priv(netdev);
5142
5143        disable_irq(adapter->pdev->irq);
5144        e1000_intr(adapter->pdev->irq, netdev);
5145        enable_irq(adapter->pdev->irq);
5146}
5147#endif
5148
5149/**
5150 * e1000_io_error_detected - called when PCI error is detected
5151 * @pdev: Pointer to PCI device
5152 * @state: The current pci connection state
5153 *
5154 * This function is called after a PCI bus error affecting
5155 * this device has been detected.
5156 */
5157static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5158                                                pci_channel_state_t state)
5159{
5160        struct net_device *netdev = pci_get_drvdata(pdev);
5161        struct e1000_adapter *adapter = netdev_priv(netdev);
5162
5163        netif_device_detach(netdev);
5164
5165        if (state == pci_channel_io_perm_failure)
5166                return PCI_ERS_RESULT_DISCONNECT;
5167
5168        if (netif_running(netdev))
5169                e1000_down(adapter);
5170        pci_disable_device(pdev);
5171
5172        /* Request a slot reset. */
5173        return PCI_ERS_RESULT_NEED_RESET;
5174}
5175
5176/**
5177 * e1000_io_slot_reset - called after the pci bus has been reset.
5178 * @pdev: Pointer to PCI device
5179 *
5180 * Restart the card from scratch, as if from a cold boot. Implementation
5181 * resembles the first half of the e1000_resume routine.
5182 */
5183static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5184{
5185        struct net_device *netdev = pci_get_drvdata(pdev);
5186        struct e1000_adapter *adapter = netdev_priv(netdev);
5187        struct e1000_hw *hw = &adapter->hw;
5188        int err;
5189
5190        if (adapter->need_ioport)
5191                err = pci_enable_device(pdev);
5192        else
5193                err = pci_enable_device_mem(pdev);
5194        if (err) {
5195                pr_err("Cannot re-enable PCI device after reset.\n");
5196                return PCI_ERS_RESULT_DISCONNECT;
5197        }
5198        pci_set_master(pdev);
5199
5200        pci_enable_wake(pdev, PCI_D3hot, 0);
5201        pci_enable_wake(pdev, PCI_D3cold, 0);
5202
5203        e1000_reset(adapter);
5204        ew32(WUS, ~0);
5205
5206        return PCI_ERS_RESULT_RECOVERED;
5207}
5208
5209/**
5210 * e1000_io_resume - called when traffic can start flowing again.
5211 * @pdev: Pointer to PCI device
5212 *
5213 * This callback is called when the error recovery driver tells us that
5214 * it's OK to resume normal operation. Implementation resembles the
5215 * second half of the e1000_resume routine.
5216 */
5217static void e1000_io_resume(struct pci_dev *pdev)
5218{
5219        struct net_device *netdev = pci_get_drvdata(pdev);
5220        struct e1000_adapter *adapter = netdev_priv(netdev);
5221
5222        e1000_init_manageability(adapter);
5223
5224        if (netif_running(netdev)) {
5225                if (e1000_up(adapter)) {
5226                        pr_info("can't bring device back up after reset\n");
5227                        return;
5228                }
5229        }
5230
5231        netif_device_attach(netdev);
5232}
5233
5234/* e1000_main.c */
5235