linux/drivers/net/ethernet/intel/e1000/e1000_main.c
   1/*******************************************************************************
   2
   3  Intel PRO/1000 Linux driver
   4  Copyright(c) 1999 - 2006 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29#include "e1000.h"
  30#include <net/ip6_checksum.h>
  31#include <linux/io.h>
  32#include <linux/prefetch.h>
  33#include <linux/bitops.h>
  34#include <linux/if_vlan.h>
  35
  36char e1000_driver_name[] = "e1000";
  37static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
  38#define DRV_VERSION "7.3.21-k8-NAPI"
  39const char e1000_driver_version[] = DRV_VERSION;
  40static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  41
  42/* e1000_pci_tbl - PCI Device ID Table
  43 *
  44 * Last entry must be all 0s
  45 *
  46 * Macro expands to...
  47 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  48 */
  49static const struct pci_device_id e1000_pci_tbl[] = {
  50        INTEL_E1000_ETHERNET_DEVICE(0x1000),
  51        INTEL_E1000_ETHERNET_DEVICE(0x1001),
  52        INTEL_E1000_ETHERNET_DEVICE(0x1004),
  53        INTEL_E1000_ETHERNET_DEVICE(0x1008),
  54        INTEL_E1000_ETHERNET_DEVICE(0x1009),
  55        INTEL_E1000_ETHERNET_DEVICE(0x100C),
  56        INTEL_E1000_ETHERNET_DEVICE(0x100D),
  57        INTEL_E1000_ETHERNET_DEVICE(0x100E),
  58        INTEL_E1000_ETHERNET_DEVICE(0x100F),
  59        INTEL_E1000_ETHERNET_DEVICE(0x1010),
  60        INTEL_E1000_ETHERNET_DEVICE(0x1011),
  61        INTEL_E1000_ETHERNET_DEVICE(0x1012),
  62        INTEL_E1000_ETHERNET_DEVICE(0x1013),
  63        INTEL_E1000_ETHERNET_DEVICE(0x1014),
  64        INTEL_E1000_ETHERNET_DEVICE(0x1015),
  65        INTEL_E1000_ETHERNET_DEVICE(0x1016),
  66        INTEL_E1000_ETHERNET_DEVICE(0x1017),
  67        INTEL_E1000_ETHERNET_DEVICE(0x1018),
  68        INTEL_E1000_ETHERNET_DEVICE(0x1019),
  69        INTEL_E1000_ETHERNET_DEVICE(0x101A),
  70        INTEL_E1000_ETHERNET_DEVICE(0x101D),
  71        INTEL_E1000_ETHERNET_DEVICE(0x101E),
  72        INTEL_E1000_ETHERNET_DEVICE(0x1026),
  73        INTEL_E1000_ETHERNET_DEVICE(0x1027),
  74        INTEL_E1000_ETHERNET_DEVICE(0x1028),
  75        INTEL_E1000_ETHERNET_DEVICE(0x1075),
  76        INTEL_E1000_ETHERNET_DEVICE(0x1076),
  77        INTEL_E1000_ETHERNET_DEVICE(0x1077),
  78        INTEL_E1000_ETHERNET_DEVICE(0x1078),
  79        INTEL_E1000_ETHERNET_DEVICE(0x1079),
  80        INTEL_E1000_ETHERNET_DEVICE(0x107A),
  81        INTEL_E1000_ETHERNET_DEVICE(0x107B),
  82        INTEL_E1000_ETHERNET_DEVICE(0x107C),
  83        INTEL_E1000_ETHERNET_DEVICE(0x108A),
  84        INTEL_E1000_ETHERNET_DEVICE(0x1099),
  85        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
  86        INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
  87        /* required last entry */
  88        {0,}
  89};
  90
  91MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  92
  93int e1000_up(struct e1000_adapter *adapter);
  94void e1000_down(struct e1000_adapter *adapter);
  95void e1000_reinit_locked(struct e1000_adapter *adapter);
  96void e1000_reset(struct e1000_adapter *adapter);
  97int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
  98int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
  99void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
 100void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
 101static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 102                                    struct e1000_tx_ring *txdr);
 103static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 104                                    struct e1000_rx_ring *rxdr);
 105static void e1000_free_tx_resources(struct e1000_adapter *adapter,
 106                                    struct e1000_tx_ring *tx_ring);
 107static void e1000_free_rx_resources(struct e1000_adapter *adapter,
 108                                    struct e1000_rx_ring *rx_ring);
 109void e1000_update_stats(struct e1000_adapter *adapter);
 110
 111static int e1000_init_module(void);
 112static void e1000_exit_module(void);
 113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 114static void e1000_remove(struct pci_dev *pdev);
 115static int e1000_alloc_queues(struct e1000_adapter *adapter);
 116static int e1000_sw_init(struct e1000_adapter *adapter);
 117int e1000_open(struct net_device *netdev);
 118int e1000_close(struct net_device *netdev);
 119static void e1000_configure_tx(struct e1000_adapter *adapter);
 120static void e1000_configure_rx(struct e1000_adapter *adapter);
 121static void e1000_setup_rctl(struct e1000_adapter *adapter);
 122static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
 123static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
 124static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 125                                struct e1000_tx_ring *tx_ring);
 126static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 127                                struct e1000_rx_ring *rx_ring);
 128static void e1000_set_rx_mode(struct net_device *netdev);
 129static void e1000_update_phy_info_task(struct work_struct *work);
 130static void e1000_watchdog(struct work_struct *work);
 131static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 132static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 133                                    struct net_device *netdev);
 134static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
 135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 136static int e1000_set_mac(struct net_device *netdev, void *p);
 137static irqreturn_t e1000_intr(int irq, void *data);
 138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 139                               struct e1000_tx_ring *tx_ring);
 140static int e1000_clean(struct napi_struct *napi, int budget);
 141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 142                               struct e1000_rx_ring *rx_ring,
 143                               int *work_done, int work_to_do);
 144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 145                                     struct e1000_rx_ring *rx_ring,
 146                                     int *work_done, int work_to_do);
 147static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
 148                                         struct e1000_rx_ring *rx_ring,
 149                                         int cleaned_count)
 150{
 151}
 152static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 153                                   struct e1000_rx_ring *rx_ring,
 154                                   int cleaned_count);
 155static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 156                                         struct e1000_rx_ring *rx_ring,
 157                                         int cleaned_count);
 158static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 159static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 160                           int cmd);
 161static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 162static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 163static void e1000_tx_timeout(struct net_device *dev);
 164static void e1000_reset_task(struct work_struct *work);
 165static void e1000_smartspeed(struct e1000_adapter *adapter);
 166static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 167                                       struct sk_buff *skb);
 168
 169static bool e1000_vlan_used(struct e1000_adapter *adapter);
 170static void e1000_vlan_mode(struct net_device *netdev,
 171                            netdev_features_t features);
 172static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
 173                                     bool filter_on);
 174static int e1000_vlan_rx_add_vid(struct net_device *netdev,
 175                                 __be16 proto, u16 vid);
 176static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
 177                                  __be16 proto, u16 vid);
 178static void e1000_restore_vlan(struct e1000_adapter *adapter);
 179
 180#ifdef CONFIG_PM
 181static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 182static int e1000_resume(struct pci_dev *pdev);
 183#endif
 184static void e1000_shutdown(struct pci_dev *pdev);
 185
 186#ifdef CONFIG_NET_POLL_CONTROLLER
 187/* for netdump / net console */
 188static void e1000_netpoll (struct net_device *netdev);
 189#endif
 190
 191#define COPYBREAK_DEFAULT 256
 192static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
 193module_param(copybreak, uint, 0644);
 194MODULE_PARM_DESC(copybreak,
 195        "Maximum size of packet that is copied to a new buffer on receive");
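/* Note on usage: in this driver's Rx clean-up path, frames no larger than
 * copybreak are memcpy()'d into a small freshly allocated skb so the
 * original full-sized Rx buffer can be reused in place, trading one copy
 * for better buffer and cache utilization.  Because the parameter is
 * registered with mode 0644 it can also be changed at runtime, e.g.:
 *
 *   echo 128 > /sys/module/e1000/parameters/copybreak   # copy frames <= 128 bytes
 *   echo 0   > /sys/module/e1000/parameters/copybreak   # disable copybreak
 */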
 196
 197static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 198                                                pci_channel_state_t state);
 199static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
 200static void e1000_io_resume(struct pci_dev *pdev);
 201
 202static const struct pci_error_handlers e1000_err_handler = {
 203        .error_detected = e1000_io_error_detected,
 204        .slot_reset = e1000_io_slot_reset,
 205        .resume = e1000_io_resume,
 206};
 207
 208static struct pci_driver e1000_driver = {
 209        .name     = e1000_driver_name,
 210        .id_table = e1000_pci_tbl,
 211        .probe    = e1000_probe,
 212        .remove   = e1000_remove,
 213#ifdef CONFIG_PM
 214        /* Power Management Hooks */
 215        .suspend  = e1000_suspend,
 216        .resume   = e1000_resume,
 217#endif
 218        .shutdown = e1000_shutdown,
 219        .err_handler = &e1000_err_handler
 220};
 221
 222MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 223MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 224MODULE_LICENSE("GPL");
 225MODULE_VERSION(DRV_VERSION);
 226
 227#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 228static int debug = -1;
 229module_param(debug, int, 0);
 230MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 231
  232/**
  233 * e1000_get_hw_dev - return device used by hardware layer to print debugging info
  234 * @hw: pointer to the hardware structure
  235 *
  236 **/
 237struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
 238{
 239        struct e1000_adapter *adapter = hw->back;
 240        return adapter->netdev;
 241}
 242
 243/**
 244 * e1000_init_module - Driver Registration Routine
 245 *
 246 * e1000_init_module is the first routine called when the driver is
 247 * loaded. All it does is register with the PCI subsystem.
 248 **/
 249static int __init e1000_init_module(void)
 250{
 251        int ret;
 252        pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
 253
 254        pr_info("%s\n", e1000_copyright);
 255
 256        ret = pci_register_driver(&e1000_driver);
 257        if (copybreak != COPYBREAK_DEFAULT) {
 258                if (copybreak == 0)
 259                        pr_info("copybreak disabled\n");
 260                else
 261                        pr_info("copybreak enabled for "
 262                                   "packets <= %u bytes\n", copybreak);
 263        }
 264        return ret;
 265}
 266
 267module_init(e1000_init_module);
 268
 269/**
 270 * e1000_exit_module - Driver Exit Cleanup Routine
 271 *
 272 * e1000_exit_module is called just before the driver is removed
 273 * from memory.
 274 **/
 275static void __exit e1000_exit_module(void)
 276{
 277        pci_unregister_driver(&e1000_driver);
 278}
 279
 280module_exit(e1000_exit_module);
 281
 282static int e1000_request_irq(struct e1000_adapter *adapter)
 283{
 284        struct net_device *netdev = adapter->netdev;
 285        irq_handler_t handler = e1000_intr;
 286        int irq_flags = IRQF_SHARED;
 287        int err;
 288
 289        err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 290                          netdev);
 291        if (err) {
  292                e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
 293        }
 294
 295        return err;
 296}
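/* The legacy INTx line may be shared with other devices, hence IRQF_SHARED;
 * the netdev pointer passed as the last argument is the dev_id cookie that
 * e1000_intr() receives and that free_irq() later uses to identify this
 * handler on the shared line.
 */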
 297
 298static void e1000_free_irq(struct e1000_adapter *adapter)
 299{
 300        struct net_device *netdev = adapter->netdev;
 301
 302        free_irq(adapter->pdev->irq, netdev);
 303}
 304
 305/**
 306 * e1000_irq_disable - Mask off interrupt generation on the NIC
 307 * @adapter: board private structure
 308 **/
 309static void e1000_irq_disable(struct e1000_adapter *adapter)
 310{
 311        struct e1000_hw *hw = &adapter->hw;
 312
 313        ew32(IMC, ~0);
 314        E1000_WRITE_FLUSH();
 315        synchronize_irq(adapter->pdev->irq);
 316}
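/* Writing all-ones to the Interrupt Mask Clear (IMC) register masks every
 * interrupt cause; the register flush pushes the posted write out to the
 * device, and synchronize_irq() then waits for any handler already in
 * flight on this vector to finish before the caller proceeds.
 */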
 317
 318/**
 319 * e1000_irq_enable - Enable default interrupt generation settings
 320 * @adapter: board private structure
 321 **/
 322static void e1000_irq_enable(struct e1000_adapter *adapter)
 323{
 324        struct e1000_hw *hw = &adapter->hw;
 325
 326        ew32(IMS, IMS_ENABLE_MASK);
 327        E1000_WRITE_FLUSH();
 328}
 329
 330static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
 331{
 332        struct e1000_hw *hw = &adapter->hw;
 333        struct net_device *netdev = adapter->netdev;
 334        u16 vid = hw->mng_cookie.vlan_id;
 335        u16 old_vid = adapter->mng_vlan_id;
 336
 337        if (!e1000_vlan_used(adapter))
 338                return;
 339
 340        if (!test_bit(vid, adapter->active_vlans)) {
 341                if (hw->mng_cookie.status &
 342                    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 343                        e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
 344                        adapter->mng_vlan_id = vid;
 345                } else {
 346                        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 347                }
 348                if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
 349                    (vid != old_vid) &&
 350                    !test_bit(old_vid, adapter->active_vlans))
 351                        e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
 352                                               old_vid);
 353        } else {
 354                adapter->mng_vlan_id = vid;
 355        }
 356}
 357
 358static void e1000_init_manageability(struct e1000_adapter *adapter)
 359{
 360        struct e1000_hw *hw = &adapter->hw;
 361
 362        if (adapter->en_mng_pt) {
 363                u32 manc = er32(MANC);
 364
 365                /* disable hardware interception of ARP */
 366                manc &= ~(E1000_MANC_ARP_EN);
 367
 368                ew32(MANC, manc);
 369        }
 370}
 371
 372static void e1000_release_manageability(struct e1000_adapter *adapter)
 373{
 374        struct e1000_hw *hw = &adapter->hw;
 375
 376        if (adapter->en_mng_pt) {
 377                u32 manc = er32(MANC);
 378
 379                /* re-enable hardware interception of ARP */
 380                manc |= E1000_MANC_ARP_EN;
 381
 382                ew32(MANC, manc);
 383        }
 384}
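/* While the driver owns the interface it clears E1000_MANC_ARP_EN so the
 * management firmware does not intercept ARP traffic away from the host;
 * on release the bit is set again, presumably so manageability keeps
 * answering ARP once the driver no longer services the port.
 */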
 385
 386/**
 387 * e1000_configure - configure the hardware for RX and TX
  388 * @adapter: board private structure
 389 **/
 390static void e1000_configure(struct e1000_adapter *adapter)
 391{
 392        struct net_device *netdev = adapter->netdev;
 393        int i;
 394
 395        e1000_set_rx_mode(netdev);
 396
 397        e1000_restore_vlan(adapter);
 398        e1000_init_manageability(adapter);
 399
 400        e1000_configure_tx(adapter);
 401        e1000_setup_rctl(adapter);
 402        e1000_configure_rx(adapter);
 403        /* call E1000_DESC_UNUSED which always leaves
 404         * at least 1 descriptor unused to make sure
 405         * next_to_use != next_to_clean
 406         */
 407        for (i = 0; i < adapter->num_rx_queues; i++) {
 408                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
 409                adapter->alloc_rx_buf(adapter, ring,
 410                                      E1000_DESC_UNUSED(ring));
 411        }
 412}
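/* Keeping one descriptor permanently unused resolves the usual ring
 * ambiguity: if next_to_use were allowed to catch up with next_to_clean,
 * "empty" and "full" would be indistinguishable.  E1000_DESC_UNUSED() is
 * expected to compute roughly
 *
 *   unused = (clean > use ? 0 : count) + clean - use - 1
 *
 * i.e. the number of free slots minus the one that is always left empty.
 */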
 413
 414int e1000_up(struct e1000_adapter *adapter)
 415{
 416        struct e1000_hw *hw = &adapter->hw;
 417
 418        /* hardware has been reset, we need to reload some things */
 419        e1000_configure(adapter);
 420
 421        clear_bit(__E1000_DOWN, &adapter->flags);
 422
 423        napi_enable(&adapter->napi);
 424
 425        e1000_irq_enable(adapter);
 426
 427        netif_wake_queue(adapter->netdev);
 428
 429        /* fire a link change interrupt to start the watchdog */
 430        ew32(ICS, E1000_ICS_LSC);
 431        return 0;
 432}
 433
 434/**
 435 * e1000_power_up_phy - restore link in case the phy was powered down
 436 * @adapter: address of board private structure
 437 *
 438 * The phy may be powered down to save power and turn off link when the
 439 * driver is unloaded and wake on lan is not enabled (among others)
 440 * *** this routine MUST be followed by a call to e1000_reset ***
 441 **/
 442void e1000_power_up_phy(struct e1000_adapter *adapter)
 443{
 444        struct e1000_hw *hw = &adapter->hw;
 445        u16 mii_reg = 0;
 446
 447        /* Just clear the power down bit to wake the phy back up */
 448        if (hw->media_type == e1000_media_type_copper) {
 449                /* according to the manual, the phy will retain its
 450                 * settings across a power-down/up cycle
 451                 */
 452                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 453                mii_reg &= ~MII_CR_POWER_DOWN;
 454                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 455        }
 456}
 457
 458static void e1000_power_down_phy(struct e1000_adapter *adapter)
 459{
 460        struct e1000_hw *hw = &adapter->hw;
 461
  462        /* Power down the PHY so no link is implied when interface is down.
  463         * The PHY cannot be powered down if any of the following is true:
  464         * (a) WoL is enabled
  465         * (b) AMT is active
  466         * (c) SoL/IDER session is active
  467         */
 468        if (!adapter->wol && hw->mac_type >= e1000_82540 &&
 469           hw->media_type == e1000_media_type_copper) {
 470                u16 mii_reg = 0;
 471
 472                switch (hw->mac_type) {
 473                case e1000_82540:
 474                case e1000_82545:
 475                case e1000_82545_rev_3:
 476                case e1000_82546:
 477                case e1000_ce4100:
 478                case e1000_82546_rev_3:
 479                case e1000_82541:
 480                case e1000_82541_rev_2:
 481                case e1000_82547:
 482                case e1000_82547_rev_2:
 483                        if (er32(MANC) & E1000_MANC_SMBUS_EN)
 484                                goto out;
 485                        break;
 486                default:
 487                        goto out;
 488                }
 489                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 490                mii_reg |= MII_CR_POWER_DOWN;
 491                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 492                msleep(1);
 493        }
 494out:
 495        return;
 496}
 497
 498static void e1000_down_and_stop(struct e1000_adapter *adapter)
 499{
 500        set_bit(__E1000_DOWN, &adapter->flags);
 501
 502        cancel_delayed_work_sync(&adapter->watchdog_task);
 503
 504        /*
 505         * Since the watchdog task can reschedule other tasks, we should cancel
 506         * it first, otherwise we can run into the situation when a work is
 507         * still running after the adapter has been turned down.
 508         */
 509
 510        cancel_delayed_work_sync(&adapter->phy_info_task);
 511        cancel_delayed_work_sync(&adapter->fifo_stall_task);
 512
 513        /* Only kill reset task if adapter is not resetting */
 514        if (!test_bit(__E1000_RESETTING, &adapter->flags))
 515                cancel_work_sync(&adapter->reset_task);
 516}
 517
 518void e1000_down(struct e1000_adapter *adapter)
 519{
 520        struct e1000_hw *hw = &adapter->hw;
 521        struct net_device *netdev = adapter->netdev;
 522        u32 rctl, tctl;
 523
 524        netif_carrier_off(netdev);
 525
 526        /* disable receives in the hardware */
 527        rctl = er32(RCTL);
 528        ew32(RCTL, rctl & ~E1000_RCTL_EN);
 529        /* flush and sleep below */
 530
 531        netif_tx_disable(netdev);
 532
 533        /* disable transmits in the hardware */
 534        tctl = er32(TCTL);
 535        tctl &= ~E1000_TCTL_EN;
 536        ew32(TCTL, tctl);
 537        /* flush both disables and wait for them to finish */
 538        E1000_WRITE_FLUSH();
 539        msleep(10);
 540
 541        napi_disable(&adapter->napi);
 542
 543        e1000_irq_disable(adapter);
 544
 545        /* Setting DOWN must be after irq_disable to prevent
 546         * a screaming interrupt.  Setting DOWN also prevents
 547         * tasks from rescheduling.
 548         */
 549        e1000_down_and_stop(adapter);
 550
 551        adapter->link_speed = 0;
 552        adapter->link_duplex = 0;
 553
 554        e1000_reset(adapter);
 555        e1000_clean_all_tx_rings(adapter);
 556        e1000_clean_all_rx_rings(adapter);
 557}
 558
 559void e1000_reinit_locked(struct e1000_adapter *adapter)
 560{
 561        WARN_ON(in_interrupt());
 562        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 563                msleep(1);
 564        e1000_down(adapter);
 565        e1000_up(adapter);
 566        clear_bit(__E1000_RESETTING, &adapter->flags);
 567}
 568
 569void e1000_reset(struct e1000_adapter *adapter)
 570{
 571        struct e1000_hw *hw = &adapter->hw;
 572        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 573        bool legacy_pba_adjust = false;
 574        u16 hwm;
 575
 576        /* Repartition Pba for greater than 9k mtu
 577         * To take effect CTRL.RST is required.
 578         */
 579
 580        switch (hw->mac_type) {
 581        case e1000_82542_rev2_0:
 582        case e1000_82542_rev2_1:
 583        case e1000_82543:
 584        case e1000_82544:
 585        case e1000_82540:
 586        case e1000_82541:
 587        case e1000_82541_rev_2:
 588                legacy_pba_adjust = true;
 589                pba = E1000_PBA_48K;
 590                break;
 591        case e1000_82545:
 592        case e1000_82545_rev_3:
 593        case e1000_82546:
 594        case e1000_ce4100:
 595        case e1000_82546_rev_3:
 596                pba = E1000_PBA_48K;
 597                break;
 598        case e1000_82547:
 599        case e1000_82547_rev_2:
 600                legacy_pba_adjust = true;
 601                pba = E1000_PBA_30K;
 602                break;
 603        case e1000_undefined:
 604        case e1000_num_macs:
 605                break;
 606        }
 607
 608        if (legacy_pba_adjust) {
 609                if (hw->max_frame_size > E1000_RXBUFFER_8192)
 610                        pba -= 8; /* allocate more FIFO for Tx */
 611
 612                if (hw->mac_type == e1000_82547) {
 613                        adapter->tx_fifo_head = 0;
 614                        adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 615                        adapter->tx_fifo_size =
 616                                (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
 617                        atomic_set(&adapter->tx_fifo_stall, 0);
 618                }
  619        } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
 620                /* adjust PBA for jumbo frames */
 621                ew32(PBA, pba);
 622
 623                /* To maintain wire speed transmits, the Tx FIFO should be
 624                 * large enough to accommodate two full transmit packets,
 625                 * rounded up to the next 1KB and expressed in KB.  Likewise,
 626                 * the Rx FIFO should be large enough to accommodate at least
 627                 * one full receive packet and is similarly rounded up and
 628                 * expressed in KB.
 629                 */
 630                pba = er32(PBA);
 631                /* upper 16 bits has Tx packet buffer allocation size in KB */
 632                tx_space = pba >> 16;
 633                /* lower 16 bits has Rx packet buffer allocation size in KB */
 634                pba &= 0xffff;
 635                /* the Tx fifo also stores 16 bytes of information about the Tx
 636                 * but don't include ethernet FCS because hardware appends it
 637                 */
 638                min_tx_space = (hw->max_frame_size +
 639                                sizeof(struct e1000_tx_desc) -
 640                                ETH_FCS_LEN) * 2;
 641                min_tx_space = ALIGN(min_tx_space, 1024);
 642                min_tx_space >>= 10;
 643                /* software strips receive CRC, so leave room for it */
 644                min_rx_space = hw->max_frame_size;
 645                min_rx_space = ALIGN(min_rx_space, 1024);
 646                min_rx_space >>= 10;
 647
 648                /* If current Tx allocation is less than the min Tx FIFO size,
 649                 * and the min Tx FIFO size is less than the current Rx FIFO
 650                 * allocation, take space away from current Rx allocation
 651                 */
 652                if (tx_space < min_tx_space &&
 653                    ((min_tx_space - tx_space) < pba)) {
 654                        pba = pba - (min_tx_space - tx_space);
 655
 656                        /* PCI/PCIx hardware has PBA alignment constraints */
 657                        switch (hw->mac_type) {
 658                        case e1000_82545 ... e1000_82546_rev_3:
 659                                pba &= ~(E1000_PBA_8K - 1);
 660                                break;
 661                        default:
 662                                break;
 663                        }
 664
 665                        /* if short on Rx space, Rx wins and must trump Tx
 666                         * adjustment or use Early Receive if available
 667                         */
 668                        if (pba < min_rx_space)
 669                                pba = min_rx_space;
 670                }
 671        }
 672
 673        ew32(PBA, pba);
 674
 675        /* flow control settings:
 676         * The high water mark must be low enough to fit one full frame
 677         * (or the size used for early receive) above it in the Rx FIFO.
 678         * Set it to the lower of:
 679         * - 90% of the Rx FIFO size, and
 680         * - the full Rx FIFO size minus the early receive size (for parts
 681         *   with ERT support assuming ERT set to E1000_ERT_2048), or
 682         * - the full Rx FIFO size minus one full frame
 683         */
 684        hwm = min(((pba << 10) * 9 / 10),
 685                  ((pba << 10) - hw->max_frame_size));
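        /* Worked example (assuming a part that keeps the 48KB PBA and the
         * default 1500-byte MTU, i.e. max_frame_size = 1518): pba << 10 is
         * 49152, so hwm = min(44236, 47634) = 44236; the 8-byte masking
         * below then yields fc_high_water = 44232 and fc_low_water = 44224.
         */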
 686
 687        hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
 688        hw->fc_low_water = hw->fc_high_water - 8;
 689        hw->fc_pause_time = E1000_FC_PAUSE_TIME;
 690        hw->fc_send_xon = 1;
 691        hw->fc = hw->original_fc;
 692
 693        /* Allow time for pending master requests to run */
 694        e1000_reset_hw(hw);
 695        if (hw->mac_type >= e1000_82544)
 696                ew32(WUC, 0);
 697
 698        if (e1000_init_hw(hw))
 699                e_dev_err("Hardware Error\n");
 700        e1000_update_mng_vlan(adapter);
 701
 702        /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
 703        if (hw->mac_type >= e1000_82544 &&
 704            hw->autoneg == 1 &&
 705            hw->autoneg_advertised == ADVERTISE_1000_FULL) {
 706                u32 ctrl = er32(CTRL);
 707                /* clear phy power management bit if we are in gig only mode,
 708                 * which if enabled will attempt negotiation to 100Mb, which
 709                 * can cause a loss of link at power off or driver unload
 710                 */
 711                ctrl &= ~E1000_CTRL_SWDPIN3;
 712                ew32(CTRL, ctrl);
 713        }
 714
 715        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 716        ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
 717
 718        e1000_reset_adaptive(hw);
 719        e1000_phy_get_info(hw, &adapter->phy_info);
 720
 721        e1000_release_manageability(adapter);
 722}
 723
 724/* Dump the eeprom for users having checksum issues */
 725static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 726{
 727        struct net_device *netdev = adapter->netdev;
 728        struct ethtool_eeprom eeprom;
 729        const struct ethtool_ops *ops = netdev->ethtool_ops;
 730        u8 *data;
 731        int i;
 732        u16 csum_old, csum_new = 0;
 733
 734        eeprom.len = ops->get_eeprom_len(netdev);
 735        eeprom.offset = 0;
 736
 737        data = kmalloc(eeprom.len, GFP_KERNEL);
 738        if (!data)
 739                return;
 740
 741        ops->get_eeprom(netdev, &eeprom, data);
 742
 743        csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
 744                   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
 745        for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
 746                csum_new += data[i] + (data[i + 1] << 8);
 747        csum_new = EEPROM_SUM - csum_new;
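        /* The EEPROM is considered valid when the little-endian sum of all
         * words up to and including the checksum word equals EEPROM_SUM
         * (0xBABA on these parts), so the expected checksum word is simply
         * EEPROM_SUM minus the sum of the preceding words, computed above.
         */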
 748
 749        pr_err("/*********************/\n");
 750        pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
 751        pr_err("Calculated              : 0x%04x\n", csum_new);
 752
 753        pr_err("Offset    Values\n");
 754        pr_err("========  ======\n");
 755        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 756
 757        pr_err("Include this output when contacting your support provider.\n");
 758        pr_err("This is not a software error! Something bad happened to\n");
 759        pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
 760        pr_err("result in further problems, possibly loss of data,\n");
 761        pr_err("corruption or system hangs!\n");
 762        pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
 763        pr_err("which is invalid and requires you to set the proper MAC\n");
 764        pr_err("address manually before continuing to enable this network\n");
 765        pr_err("device. Please inspect the EEPROM dump and report the\n");
 766        pr_err("issue to your hardware vendor or Intel Customer Support.\n");
 767        pr_err("/*********************/\n");
 768
 769        kfree(data);
 770}
 771
 772/**
 773 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 774 * @pdev: PCI device information struct
 775 *
 776 * Return true if an adapter needs ioport resources
 777 **/
 778static int e1000_is_need_ioport(struct pci_dev *pdev)
 779{
 780        switch (pdev->device) {
 781        case E1000_DEV_ID_82540EM:
 782        case E1000_DEV_ID_82540EM_LOM:
 783        case E1000_DEV_ID_82540EP:
 784        case E1000_DEV_ID_82540EP_LOM:
 785        case E1000_DEV_ID_82540EP_LP:
 786        case E1000_DEV_ID_82541EI:
 787        case E1000_DEV_ID_82541EI_MOBILE:
 788        case E1000_DEV_ID_82541ER:
 789        case E1000_DEV_ID_82541ER_LOM:
 790        case E1000_DEV_ID_82541GI:
 791        case E1000_DEV_ID_82541GI_LF:
 792        case E1000_DEV_ID_82541GI_MOBILE:
 793        case E1000_DEV_ID_82544EI_COPPER:
 794        case E1000_DEV_ID_82544EI_FIBER:
 795        case E1000_DEV_ID_82544GC_COPPER:
 796        case E1000_DEV_ID_82544GC_LOM:
 797        case E1000_DEV_ID_82545EM_COPPER:
 798        case E1000_DEV_ID_82545EM_FIBER:
 799        case E1000_DEV_ID_82546EB_COPPER:
 800        case E1000_DEV_ID_82546EB_FIBER:
 801        case E1000_DEV_ID_82546EB_QUAD_COPPER:
 802                return true;
 803        default:
 804                return false;
 805        }
 806}
 807
 808static netdev_features_t e1000_fix_features(struct net_device *netdev,
 809        netdev_features_t features)
 810{
 811        /* Since there is no support for separate Rx/Tx vlan accel
 812         * enable/disable make sure Tx flag is always in same state as Rx.
 813         */
 814        if (features & NETIF_F_HW_VLAN_CTAG_RX)
 815                features |= NETIF_F_HW_VLAN_CTAG_TX;
 816        else
 817                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 818
 819        return features;
 820}
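/* As a result, toggling either side of VLAN acceleration from user space
 * flips both, e.g. something like:
 *
 *   ethtool -K eth0 rxvlan off     # tx-vlan-offload is forced off as well
 *
 * since ndo_fix_features runs before the new feature set is committed.
 */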
 821
 822static int e1000_set_features(struct net_device *netdev,
 823        netdev_features_t features)
 824{
 825        struct e1000_adapter *adapter = netdev_priv(netdev);
 826        netdev_features_t changed = features ^ netdev->features;
 827
 828        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 829                e1000_vlan_mode(netdev, features);
 830
 831        if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 832                return 0;
 833
 834        netdev->features = features;
 835        adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 836
 837        if (netif_running(netdev))
 838                e1000_reinit_locked(adapter);
 839        else
 840                e1000_reset(adapter);
 841
 842        return 0;
 843}
 844
 845static const struct net_device_ops e1000_netdev_ops = {
 846        .ndo_open               = e1000_open,
 847        .ndo_stop               = e1000_close,
 848        .ndo_start_xmit         = e1000_xmit_frame,
 849        .ndo_get_stats          = e1000_get_stats,
 850        .ndo_set_rx_mode        = e1000_set_rx_mode,
 851        .ndo_set_mac_address    = e1000_set_mac,
 852        .ndo_tx_timeout         = e1000_tx_timeout,
 853        .ndo_change_mtu         = e1000_change_mtu,
 854        .ndo_do_ioctl           = e1000_ioctl,
 855        .ndo_validate_addr      = eth_validate_addr,
 856        .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
 857        .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
 858#ifdef CONFIG_NET_POLL_CONTROLLER
 859        .ndo_poll_controller    = e1000_netpoll,
 860#endif
 861        .ndo_fix_features       = e1000_fix_features,
 862        .ndo_set_features       = e1000_set_features,
 863};
 864
 865/**
 866 * e1000_init_hw_struct - initialize members of hw struct
 867 * @adapter: board private struct
 868 * @hw: structure used by e1000_hw.c
 869 *
 870 * Factors out initialization of the e1000_hw struct to its own function
 871 * that can be called very early at init (just after struct allocation).
 872 * Fields are initialized based on PCI device information and
 873 * OS network device settings (MTU size).
 874 * Returns negative error codes if MAC type setup fails.
 875 */
 876static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 877                                struct e1000_hw *hw)
 878{
 879        struct pci_dev *pdev = adapter->pdev;
 880
 881        /* PCI config space info */
 882        hw->vendor_id = pdev->vendor;
 883        hw->device_id = pdev->device;
 884        hw->subsystem_vendor_id = pdev->subsystem_vendor;
 885        hw->subsystem_id = pdev->subsystem_device;
 886        hw->revision_id = pdev->revision;
 887
 888        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 889
 890        hw->max_frame_size = adapter->netdev->mtu +
 891                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 892        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
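        /* e.g. with the default 1500-byte MTU this works out to
         * 1500 + 14 (Ethernet header) + 4 (FCS) = 1518 bytes, assuming the
         * usual values of ENET_HEADER_SIZE and ETHERNET_FCS_SIZE.
         */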
 893
 894        /* identify the MAC */
 895        if (e1000_set_mac_type(hw)) {
 896                e_err(probe, "Unknown MAC Type\n");
 897                return -EIO;
 898        }
 899
 900        switch (hw->mac_type) {
 901        default:
 902                break;
 903        case e1000_82541:
 904        case e1000_82547:
 905        case e1000_82541_rev_2:
 906        case e1000_82547_rev_2:
 907                hw->phy_init_script = 1;
 908                break;
 909        }
 910
 911        e1000_set_media_type(hw);
 912        e1000_get_bus_info(hw);
 913
 914        hw->wait_autoneg_complete = false;
 915        hw->tbi_compatibility_en = true;
 916        hw->adaptive_ifs = true;
 917
 918        /* Copper options */
 919
 920        if (hw->media_type == e1000_media_type_copper) {
 921                hw->mdix = AUTO_ALL_MODES;
 922                hw->disable_polarity_correction = false;
 923                hw->master_slave = E1000_MASTER_SLAVE;
 924        }
 925
 926        return 0;
 927}
 928
 929/**
 930 * e1000_probe - Device Initialization Routine
 931 * @pdev: PCI device information struct
 932 * @ent: entry in e1000_pci_tbl
 933 *
 934 * Returns 0 on success, negative on failure
 935 *
 936 * e1000_probe initializes an adapter identified by a pci_dev structure.
 937 * The OS initialization, configuring of the adapter private structure,
 938 * and a hardware reset occur.
 939 **/
 940static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 941{
 942        struct net_device *netdev;
 943        struct e1000_adapter *adapter;
 944        struct e1000_hw *hw;
 945
 946        static int cards_found;
 947        static int global_quad_port_a; /* global ksp3 port a indication */
 948        int i, err, pci_using_dac;
 949        u16 eeprom_data = 0;
 950        u16 tmp = 0;
 951        u16 eeprom_apme_mask = E1000_EEPROM_APME;
 952        int bars, need_ioport;
 953
 954        /* do not allocate ioport bars when not needed */
 955        need_ioport = e1000_is_need_ioport(pdev);
 956        if (need_ioport) {
 957                bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 958                err = pci_enable_device(pdev);
 959        } else {
 960                bars = pci_select_bars(pdev, IORESOURCE_MEM);
 961                err = pci_enable_device_mem(pdev);
 962        }
 963        if (err)
 964                return err;
 965
 966        err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
 967        if (err)
 968                goto err_pci_reg;
 969
 970        pci_set_master(pdev);
 971        err = pci_save_state(pdev);
 972        if (err)
 973                goto err_alloc_etherdev;
 974
 975        err = -ENOMEM;
 976        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
 977        if (!netdev)
 978                goto err_alloc_etherdev;
 979
 980        SET_NETDEV_DEV(netdev, &pdev->dev);
 981
 982        pci_set_drvdata(pdev, netdev);
 983        adapter = netdev_priv(netdev);
 984        adapter->netdev = netdev;
 985        adapter->pdev = pdev;
 986        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 987        adapter->bars = bars;
 988        adapter->need_ioport = need_ioport;
 989
 990        hw = &adapter->hw;
 991        hw->back = adapter;
 992
 993        err = -EIO;
 994        hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
 995        if (!hw->hw_addr)
 996                goto err_ioremap;
 997
 998        if (adapter->need_ioport) {
 999                for (i = BAR_1; i <= BAR_5; i++) {
1000                        if (pci_resource_len(pdev, i) == 0)
1001                                continue;
1002                        if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1003                                hw->io_base = pci_resource_start(pdev, i);
1004                                break;
1005                        }
1006                }
1007        }
1008
 1009        /* initialize the hw struct so the if (hw->...) checks below see valid data */
1010        err = e1000_init_hw_struct(adapter, hw);
1011        if (err)
1012                goto err_sw_init;
1013
1014        /* there is a workaround being applied below that limits
1015         * 64-bit DMA addresses to 64-bit hardware.  There are some
1016         * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1017         */
1018        pci_using_dac = 0;
1019        if ((hw->bus_type == e1000_bus_type_pcix) &&
1020            !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1021                pci_using_dac = 1;
1022        } else {
1023                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1024                if (err) {
1025                        pr_err("No usable DMA config, aborting\n");
1026                        goto err_dma;
1027                }
1028        }
1029
1030        netdev->netdev_ops = &e1000_netdev_ops;
1031        e1000_set_ethtool_ops(netdev);
1032        netdev->watchdog_timeo = 5 * HZ;
1033        netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1034
1035        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1036
1037        adapter->bd_number = cards_found;
1038
1039        /* setup the private structure */
1040
1041        err = e1000_sw_init(adapter);
1042        if (err)
1043                goto err_sw_init;
1044
1045        err = -EIO;
1046        if (hw->mac_type == e1000_ce4100) {
1047                hw->ce4100_gbe_mdio_base_virt =
1048                                        ioremap(pci_resource_start(pdev, BAR_1),
1049                                                pci_resource_len(pdev, BAR_1));
1050
1051                if (!hw->ce4100_gbe_mdio_base_virt)
1052                        goto err_mdio_ioremap;
1053        }
1054
1055        if (hw->mac_type >= e1000_82543) {
1056                netdev->hw_features = NETIF_F_SG |
1057                                   NETIF_F_HW_CSUM |
1058                                   NETIF_F_HW_VLAN_CTAG_RX;
1059                netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1060                                   NETIF_F_HW_VLAN_CTAG_FILTER;
1061        }
1062
1063        if ((hw->mac_type >= e1000_82544) &&
1064           (hw->mac_type != e1000_82547))
1065                netdev->hw_features |= NETIF_F_TSO;
1066
1067        netdev->priv_flags |= IFF_SUPP_NOFCS;
1068
1069        netdev->features |= netdev->hw_features;
1070        netdev->hw_features |= (NETIF_F_RXCSUM |
1071                                NETIF_F_RXALL |
1072                                NETIF_F_RXFCS);
1073
1074        if (pci_using_dac) {
1075                netdev->features |= NETIF_F_HIGHDMA;
1076                netdev->vlan_features |= NETIF_F_HIGHDMA;
1077        }
1078
1079        netdev->vlan_features |= (NETIF_F_TSO |
1080                                  NETIF_F_HW_CSUM |
1081                                  NETIF_F_SG);
1082
1083        /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1084        if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1085            hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1086                netdev->priv_flags |= IFF_UNICAST_FLT;
1087
1088        /* MTU range: 46 - 16110 */
1089        netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1090        netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1091
1092        adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1093
1094        /* initialize eeprom parameters */
1095        if (e1000_init_eeprom_params(hw)) {
1096                e_err(probe, "EEPROM initialization failed\n");
1097                goto err_eeprom;
1098        }
1099
1100        /* before reading the EEPROM, reset the controller to
1101         * put the device in a known good starting state
1102         */
1103
1104        e1000_reset_hw(hw);
1105
1106        /* make sure the EEPROM is good */
1107        if (e1000_validate_eeprom_checksum(hw) < 0) {
1108                e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1109                e1000_dump_eeprom(adapter);
1110                /* set MAC address to all zeroes to invalidate and temporarily
1111                 * disable this device for the user. This blocks regular
1112                 * traffic while still permitting ethtool ioctls from reaching
1113                 * the hardware as well as allowing the user to run the
1114                 * interface after manually setting a hw addr using
1115                 * `ip link set address`
1116                 */
1117                memset(hw->mac_addr, 0, netdev->addr_len);
1118        } else {
1119                /* copy the MAC address out of the EEPROM */
1120                if (e1000_read_mac_addr(hw))
1121                        e_err(probe, "EEPROM Read Error\n");
1122        }
1123        /* don't block initialization here due to bad MAC address */
1124        memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1125
1126        if (!is_valid_ether_addr(netdev->dev_addr))
1127                e_err(probe, "Invalid MAC Address\n");
1128
1129
1130        INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1131        INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1132                          e1000_82547_tx_fifo_stall_task);
1133        INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1134        INIT_WORK(&adapter->reset_task, e1000_reset_task);
1135
1136        e1000_check_options(adapter);
1137
1138        /* Initial Wake on LAN setting
1139         * If APM wake is enabled in the EEPROM,
1140         * enable the ACPI Magic Packet filter
1141         */
1142
1143        switch (hw->mac_type) {
1144        case e1000_82542_rev2_0:
1145        case e1000_82542_rev2_1:
1146        case e1000_82543:
1147                break;
1148        case e1000_82544:
1149                e1000_read_eeprom(hw,
1150                        EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1151                eeprom_apme_mask = E1000_EEPROM_82544_APM;
1152                break;
1153        case e1000_82546:
1154        case e1000_82546_rev_3:
1155                if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1156                        e1000_read_eeprom(hw,
1157                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1158                        break;
1159                }
1160                /* Fall Through */
1161        default:
1162                e1000_read_eeprom(hw,
1163                        EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1164                break;
1165        }
1166        if (eeprom_data & eeprom_apme_mask)
1167                adapter->eeprom_wol |= E1000_WUFC_MAG;
1168
1169        /* now that we have the eeprom settings, apply the special cases
1170         * where the eeprom may be wrong or the board simply won't support
1171         * wake on lan on a particular port
1172         */
1173        switch (pdev->device) {
1174        case E1000_DEV_ID_82546GB_PCIE:
1175                adapter->eeprom_wol = 0;
1176                break;
1177        case E1000_DEV_ID_82546EB_FIBER:
1178        case E1000_DEV_ID_82546GB_FIBER:
1179                /* Wake events only supported on port A for dual fiber
1180                 * regardless of eeprom setting
1181                 */
1182                if (er32(STATUS) & E1000_STATUS_FUNC_1)
1183                        adapter->eeprom_wol = 0;
1184                break;
1185        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1186                /* if quad port adapter, disable WoL on all but port A */
1187                if (global_quad_port_a != 0)
1188                        adapter->eeprom_wol = 0;
1189                else
1190                        adapter->quad_port_a = true;
1191                /* Reset for multiple quad port adapters */
1192                if (++global_quad_port_a == 4)
1193                        global_quad_port_a = 0;
1194                break;
1195        }
1196
1197        /* initialize the wol settings based on the eeprom settings */
1198        adapter->wol = adapter->eeprom_wol;
1199        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1200
1201        /* Auto detect PHY address */
1202        if (hw->mac_type == e1000_ce4100) {
1203                for (i = 0; i < 32; i++) {
1204                        hw->phy_addr = i;
1205                        e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1206
1207                        if (tmp != 0 && tmp != 0xFF)
1208                                break;
1209                }
1210
1211                if (i >= 32)
1212                        goto err_eeprom;
1213        }
1214
1215        /* reset the hardware with the new settings */
1216        e1000_reset(adapter);
1217
1218        strcpy(netdev->name, "eth%d");
1219        err = register_netdev(netdev);
1220        if (err)
1221                goto err_register;
1222
1223        e1000_vlan_filter_on_off(adapter, false);
1224
1225        /* print bus type/speed/width info */
1226        e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1227               ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1228               ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1229                (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1230                (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1231                (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1232               ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1233               netdev->dev_addr);
1234
1235        /* carrier off reporting is important to ethtool even BEFORE open */
1236        netif_carrier_off(netdev);
1237
1238        e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1239
1240        cards_found++;
1241        return 0;
1242
1243err_register:
1244err_eeprom:
1245        e1000_phy_hw_reset(hw);
1246
1247        if (hw->flash_address)
1248                iounmap(hw->flash_address);
1249        kfree(adapter->tx_ring);
1250        kfree(adapter->rx_ring);
1251err_dma:
1252err_sw_init:
1253err_mdio_ioremap:
1254        iounmap(hw->ce4100_gbe_mdio_base_virt);
1255        iounmap(hw->hw_addr);
1256err_ioremap:
1257        free_netdev(netdev);
1258err_alloc_etherdev:
1259        pci_release_selected_regions(pdev, bars);
1260err_pci_reg:
1261        pci_disable_device(pdev);
1262        return err;
1263}
1264
1265/**
1266 * e1000_remove - Device Removal Routine
1267 * @pdev: PCI device information struct
1268 *
1269 * e1000_remove is called by the PCI subsystem to alert the driver
1270 * that it should release a PCI device. That could be caused by a
1271 * Hot-Plug event, or because the driver is going to be removed from
1272 * memory.
1273 **/
1274static void e1000_remove(struct pci_dev *pdev)
1275{
1276        struct net_device *netdev = pci_get_drvdata(pdev);
1277        struct e1000_adapter *adapter = netdev_priv(netdev);
1278        struct e1000_hw *hw = &adapter->hw;
1279
1280        e1000_down_and_stop(adapter);
1281        e1000_release_manageability(adapter);
1282
1283        unregister_netdev(netdev);
1284
1285        e1000_phy_hw_reset(hw);
1286
1287        kfree(adapter->tx_ring);
1288        kfree(adapter->rx_ring);
1289
1290        if (hw->mac_type == e1000_ce4100)
1291                iounmap(hw->ce4100_gbe_mdio_base_virt);
1292        iounmap(hw->hw_addr);
1293        if (hw->flash_address)
1294                iounmap(hw->flash_address);
1295        pci_release_selected_regions(pdev, adapter->bars);
1296
1297        free_netdev(netdev);
1298
1299        pci_disable_device(pdev);
1300}
1301
1302/**
1303 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1304 * @adapter: board private structure to initialize
1305 *
1306 * e1000_sw_init initializes the Adapter private data structure.
1307 * e1000_init_hw_struct MUST be called before this function
1308 **/
1309static int e1000_sw_init(struct e1000_adapter *adapter)
1310{
1311        adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1312
1313        adapter->num_tx_queues = 1;
1314        adapter->num_rx_queues = 1;
1315
1316        if (e1000_alloc_queues(adapter)) {
1317                e_err(probe, "Unable to allocate memory for queues\n");
1318                return -ENOMEM;
1319        }
1320
1321        /* Explicitly disable IRQ since the NIC can be in any state. */
1322        e1000_irq_disable(adapter);
1323
1324        spin_lock_init(&adapter->stats_lock);
1325
1326        set_bit(__E1000_DOWN, &adapter->flags);
1327
1328        return 0;
1329}
1330
1331/**
1332 * e1000_alloc_queues - Allocate memory for all rings
1333 * @adapter: board private structure to initialize
1334 *
1335 * We allocate one ring per queue at run-time since we don't know the
1336 * number of queues at compile-time.
1337 **/
1338static int e1000_alloc_queues(struct e1000_adapter *adapter)
1339{
1340        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1341                                   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1342        if (!adapter->tx_ring)
1343                return -ENOMEM;
1344
1345        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1346                                   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1347        if (!adapter->rx_ring) {
1348                kfree(adapter->tx_ring);
1349                return -ENOMEM;
1350        }
1351
1352        return E1000_SUCCESS;
1353}
1354
1355/**
1356 * e1000_open - Called when a network interface is made active
1357 * @netdev: network interface device structure
1358 *
1359 * Returns 0 on success, negative value on failure
1360 *
1361 * The open entry point is called when a network interface is made
1362 * active by the system (IFF_UP).  At this point all resources needed
1363 * for transmit and receive operations are allocated, the interrupt
1364 * handler is registered with the OS, the watchdog task is started,
1365 * and the stack is notified that the interface is ready.
1366 **/
1367int e1000_open(struct net_device *netdev)
1368{
1369        struct e1000_adapter *adapter = netdev_priv(netdev);
1370        struct e1000_hw *hw = &adapter->hw;
1371        int err;
1372
1373        /* disallow open during test */
1374        if (test_bit(__E1000_TESTING, &adapter->flags))
1375                return -EBUSY;
1376
1377        netif_carrier_off(netdev);
1378
1379        /* allocate transmit descriptors */
1380        err = e1000_setup_all_tx_resources(adapter);
1381        if (err)
1382                goto err_setup_tx;
1383
1384        /* allocate receive descriptors */
1385        err = e1000_setup_all_rx_resources(adapter);
1386        if (err)
1387                goto err_setup_rx;
1388
1389        e1000_power_up_phy(adapter);
1390
1391        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1392        if ((hw->mng_cookie.status &
1393                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1394                e1000_update_mng_vlan(adapter);
1395        }
1396
1397        /* before we allocate an interrupt, we must be ready to handle it.
1398         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1399         * as soon as we call pci_request_irq, so we have to setup our
1400         * clean_rx handler before we do so.
1401         */
1402        e1000_configure(adapter);
1403
1404        err = e1000_request_irq(adapter);
1405        if (err)
1406                goto err_req_irq;
1407
1408        /* From here on the code is the same as e1000_up() */
1409        clear_bit(__E1000_DOWN, &adapter->flags);
1410
1411        napi_enable(&adapter->napi);
1412
1413        e1000_irq_enable(adapter);
1414
1415        netif_start_queue(netdev);
1416
1417        /* fire a link status change interrupt to start the watchdog */
1418        ew32(ICS, E1000_ICS_LSC);
1419
1420        return E1000_SUCCESS;
1421
1422err_req_irq:
1423        e1000_power_down_phy(adapter);
1424        e1000_free_all_rx_resources(adapter);
1425err_setup_rx:
1426        e1000_free_all_tx_resources(adapter);
1427err_setup_tx:
1428        e1000_reset(adapter);
1429
1430        return err;
1431}
1432
1433/**
1434 * e1000_close - Disables a network interface
1435 * @netdev: network interface device structure
1436 *
1437 * Returns 0, this is not allowed to fail
1438 *
1439 * The close entry point is called when an interface is de-activated
1440 * by the OS.  The hardware is still under the driver's control, but
1441 * needs to be disabled.  A global MAC reset is issued to stop the
1442 * hardware, and all transmit and receive resources are freed.
1443 **/
1444int e1000_close(struct net_device *netdev)
1445{
1446        struct e1000_adapter *adapter = netdev_priv(netdev);
1447        struct e1000_hw *hw = &adapter->hw;
1448        int count = E1000_CHECK_RESET_COUNT;
1449
1450        while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1451                usleep_range(10000, 20000);
1452
1453        WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1454        e1000_down(adapter);
1455        e1000_power_down_phy(adapter);
1456        e1000_free_irq(adapter);
1457
1458        e1000_free_all_tx_resources(adapter);
1459        e1000_free_all_rx_resources(adapter);
1460
1461        /* kill manageability vlan ID if supported, but not if a vlan with
1462         * the same ID is registered on the host OS (let 8021q kill it)
1463         */
1464        if ((hw->mng_cookie.status &
1465             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1466            !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1467                e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1468                                       adapter->mng_vlan_id);
1469        }
1470
1471        return 0;
1472}
1473
1474/**
1475 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1476 * @adapter: address of board private structure
1477 * @start: address of beginning of memory
1478 * @len: length of memory
1479 **/
1480static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1481                                  unsigned long len)
1482{
1483        struct e1000_hw *hw = &adapter->hw;
1484        unsigned long begin = (unsigned long)start;
1485        unsigned long end = begin + len;
1486
1487        /* Due to errata 23, first-revision 82545 and 82546 parts must not
1488         * allow any memory write location to cross a 64 KB boundary
1489         */
1490        if (hw->mac_type == e1000_82545 ||
1491            hw->mac_type == e1000_ce4100 ||
1492            hw->mac_type == e1000_82546) {
1493                return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1494        }
1495
1496        return true;
1497}
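
/* Illustrative example (not part of the original driver): how the XOR/shift
 * test above detects a 64 KB crossing.  For a buffer starting at
 * begin = 0x2FFF0 with len = 0x40, end - 1 = 0x3002F, so
 * begin ^ (end - 1) = 0x1FFDF and (0x1FFDF >> 16) != 0; the buffer spans
 * two 64 KB pages and the function returns false.  A buffer of the same
 * length at 0x30000 stays within one page: 0x30000 ^ 0x3003F = 0x3F,
 * (0x3F >> 16) == 0, so the function returns true.
 */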
1498
1499/**
1500 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1501 * @adapter: board private structure
1502 * @txdr:    tx descriptor ring (for a specific queue) to setup
1503 *
1504 * Return 0 on success, negative on failure
1505 **/
1506static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1507                                    struct e1000_tx_ring *txdr)
1508{
1509        struct pci_dev *pdev = adapter->pdev;
1510        int size;
1511
1512        size = sizeof(struct e1000_tx_buffer) * txdr->count;
1513        txdr->buffer_info = vzalloc(size);
1514        if (!txdr->buffer_info)
1515                return -ENOMEM;
1516
1517        /* round up to nearest 4K */
1518
1519        txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1520        txdr->size = ALIGN(txdr->size, 4096);
1521
1522        txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1523                                        GFP_KERNEL);
1524        if (!txdr->desc) {
1525setup_tx_desc_die:
1526                vfree(txdr->buffer_info);
1527                return -ENOMEM;
1528        }
1529
1530        /* Fix for errata 23, can't cross 64kB boundary */
1531        if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1532                void *olddesc = txdr->desc;
1533                dma_addr_t olddma = txdr->dma;
1534                e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1535                      txdr->size, txdr->desc);
1536                /* Try again, without freeing the previous */
1537                txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1538                                                &txdr->dma, GFP_KERNEL);
1539                /* Failed allocation, critical failure */
1540                if (!txdr->desc) {
1541                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1542                                          olddma);
1543                        goto setup_tx_desc_die;
1544                }
1545
1546                if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1547                        /* give up */
1548                        dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1549                                          txdr->dma);
1550                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1551                                          olddma);
1552                        e_err(probe, "Unable to allocate aligned memory "
1553                              "for the transmit descriptor ring\n");
1554                        vfree(txdr->buffer_info);
1555                        return -ENOMEM;
1556                } else {
1557                        /* Free old allocation, new allocation was successful */
1558                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1559                                          olddma);
1560                }
1561        }
1562        memset(txdr->desc, 0, txdr->size);
1563
1564        txdr->next_to_use = 0;
1565        txdr->next_to_clean = 0;
1566
1567        return 0;
1568}
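
/* Illustrative sizing example (assumption: the legacy e1000_tx_desc is
 * 16 bytes, as defined in the hardware header).  A ring of 80 descriptors
 * needs 80 * 16 = 1280 bytes, which ALIGN(..., 4096) rounds up to one 4 KB
 * page; a 256-descriptor ring needs exactly 4096 bytes and is left
 * unchanged by the rounding.
 */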
1569
1570/**
1571 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1572 *                                (Descriptors) for all queues
1573 * @adapter: board private structure
1574 *
1575 * Return 0 on success, negative on failure
1576 **/
1577int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1578{
1579        int i, err = 0;
1580
1581        for (i = 0; i < adapter->num_tx_queues; i++) {
1582                err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1583                if (err) {
1584                        e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1585                        for (i-- ; i >= 0; i--)
1586                                e1000_free_tx_resources(adapter,
1587                                                        &adapter->tx_ring[i]);
1588                        break;
1589                }
1590        }
1591
1592        return err;
1593}
1594
1595/**
1596 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1597 * @adapter: board private structure
1598 *
1599 * Configure the Tx unit of the MAC after a reset.
1600 **/
1601static void e1000_configure_tx(struct e1000_adapter *adapter)
1602{
1603        u64 tdba;
1604        struct e1000_hw *hw = &adapter->hw;
1605        u32 tdlen, tctl, tipg;
1606        u32 ipgr1, ipgr2;
1607
1608        /* Setup the HW Tx Head and Tail descriptor pointers */
1609
1610        switch (adapter->num_tx_queues) {
1611        case 1:
1612        default:
1613                tdba = adapter->tx_ring[0].dma;
1614                tdlen = adapter->tx_ring[0].count *
1615                        sizeof(struct e1000_tx_desc);
1616                ew32(TDLEN, tdlen);
1617                ew32(TDBAH, (tdba >> 32));
1618                ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1619                ew32(TDT, 0);
1620                ew32(TDH, 0);
1621                adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1622                                           E1000_TDH : E1000_82542_TDH);
1623                adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1624                                           E1000_TDT : E1000_82542_TDT);
1625                break;
1626        }
1627
1628        /* Set the default values for the Tx Inter Packet Gap timer */
1629        if ((hw->media_type == e1000_media_type_fiber ||
1630             hw->media_type == e1000_media_type_internal_serdes))
1631                tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1632        else
1633                tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1634
1635        switch (hw->mac_type) {
1636        case e1000_82542_rev2_0:
1637        case e1000_82542_rev2_1:
1638                tipg = DEFAULT_82542_TIPG_IPGT;
1639                ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1640                ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1641                break;
1642        default:
1643                ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1644                ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1645                break;
1646        }
1647        tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1648        tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1649        ew32(TIPG, tipg);
1650
1651        /* Set the Tx Interrupt Delay register */
1652
1653        ew32(TIDV, adapter->tx_int_delay);
1654        if (hw->mac_type >= e1000_82540)
1655                ew32(TADV, adapter->tx_abs_int_delay);
1656
1657        /* Program the Transmit Control Register */
1658
1659        tctl = er32(TCTL);
1660        tctl &= ~E1000_TCTL_CT;
1661        tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1662                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1663
1664        e1000_config_collision_dist(hw);
1665
1666        /* Setup Transmit Descriptor Settings for eop descriptor */
1667        adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1668
1669        /* only set IDE if we are delaying interrupts using the timers */
1670        if (adapter->tx_int_delay)
1671                adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1672
1673        if (hw->mac_type < e1000_82543)
1674                adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1675        else
1676                adapter->txd_cmd |= E1000_TXD_CMD_RS;
1677
1678        /* Cache if we're 82544 running in PCI-X because we'll
1679         * need this to apply a workaround later in the send path.
1680         */
1681        if (hw->mac_type == e1000_82544 &&
1682            hw->bus_type == e1000_bus_type_pcix)
1683                adapter->pcix_82544 = true;
1684
1685        ew32(TCTL, tctl);
1686
1687}
1688
1689/**
1690 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1691 * @adapter: board private structure
1692 * @rxdr:    rx descriptor ring (for a specific queue) to setup
1693 *
1694 * Returns 0 on success, negative on failure
1695 **/
1696static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1697                                    struct e1000_rx_ring *rxdr)
1698{
1699        struct pci_dev *pdev = adapter->pdev;
1700        int size, desc_len;
1701
1702        size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1703        rxdr->buffer_info = vzalloc(size);
1704        if (!rxdr->buffer_info)
1705                return -ENOMEM;
1706
1707        desc_len = sizeof(struct e1000_rx_desc);
1708
1709        /* Round up to nearest 4K */
1710
1711        rxdr->size = rxdr->count * desc_len;
1712        rxdr->size = ALIGN(rxdr->size, 4096);
1713
1714        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1715                                        GFP_KERNEL);
1716        if (!rxdr->desc) {
1717setup_rx_desc_die:
1718                vfree(rxdr->buffer_info);
1719                return -ENOMEM;
1720        }
1721
1722        /* Fix for errata 23, can't cross 64kB boundary */
1723        if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1724                void *olddesc = rxdr->desc;
1725                dma_addr_t olddma = rxdr->dma;
1726                e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1727                      rxdr->size, rxdr->desc);
1728                /* Try again, without freeing the previous */
1729                rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1730                                                &rxdr->dma, GFP_KERNEL);
1731                /* Failed allocation, critical failure */
1732                if (!rxdr->desc) {
1733                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1734                                          olddma);
1735                        goto setup_rx_desc_die;
1736                }
1737
1738                if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1739                        /* give up */
1740                        dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1741                                          rxdr->dma);
1742                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1743                                          olddma);
1744                        e_err(probe, "Unable to allocate aligned memory for "
1745                              "the Rx descriptor ring\n");
1746                        goto setup_rx_desc_die;
1747                } else {
1748                        /* Free old allocation, new allocation was successful */
1749                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1750                                          olddma);
1751                }
1752        }
1753        memset(rxdr->desc, 0, rxdr->size);
1754
1755        rxdr->next_to_clean = 0;
1756        rxdr->next_to_use = 0;
1757        rxdr->rx_skb_top = NULL;
1758
1759        return 0;
1760}
1761
1762/**
1763 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1764 *                                (Descriptors) for all queues
1765 * @adapter: board private structure
1766 *
1767 * Return 0 on success, negative on failure
1768 **/
1769int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1770{
1771        int i, err = 0;
1772
1773        for (i = 0; i < adapter->num_rx_queues; i++) {
1774                err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1775                if (err) {
1776                        e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1777                        for (i-- ; i >= 0; i--)
1778                                e1000_free_rx_resources(adapter,
1779                                                        &adapter->rx_ring[i]);
1780                        break;
1781                }
1782        }
1783
1784        return err;
1785}
1786
1787/**
1788 * e1000_setup_rctl - configure the receive control registers
1789 * @adapter: Board private structure
1790 **/
1791static void e1000_setup_rctl(struct e1000_adapter *adapter)
1792{
1793        struct e1000_hw *hw = &adapter->hw;
1794        u32 rctl;
1795
1796        rctl = er32(RCTL);
1797
1798        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1799
1800        rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1801                E1000_RCTL_RDMTS_HALF |
1802                (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1803
1804        if (hw->tbi_compatibility_on == 1)
1805                rctl |= E1000_RCTL_SBP;
1806        else
1807                rctl &= ~E1000_RCTL_SBP;
1808
1809        if (adapter->netdev->mtu <= ETH_DATA_LEN)
1810                rctl &= ~E1000_RCTL_LPE;
1811        else
1812                rctl |= E1000_RCTL_LPE;
1813
1814        /* Setup buffer sizes */
1815        rctl &= ~E1000_RCTL_SZ_4096;
1816        rctl |= E1000_RCTL_BSEX;
1817        switch (adapter->rx_buffer_len) {
1818        case E1000_RXBUFFER_2048:
1819        default:
1820                rctl |= E1000_RCTL_SZ_2048;
1821                rctl &= ~E1000_RCTL_BSEX;
1822                break;
1823        case E1000_RXBUFFER_4096:
1824                rctl |= E1000_RCTL_SZ_4096;
1825                break;
1826        case E1000_RXBUFFER_8192:
1827                rctl |= E1000_RCTL_SZ_8192;
1828                break;
1829        case E1000_RXBUFFER_16384:
1830                rctl |= E1000_RCTL_SZ_16384;
1831                break;
1832        }
1833
1834        /* This is useful for sniffing bad packets. */
1835        if (adapter->netdev->features & NETIF_F_RXALL) {
1836                /* UPE and MPE will be handled by normal PROMISC logic
1837                 * in e1000_set_rx_mode
1838                 */
1839                rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1840                         E1000_RCTL_BAM | /* RX All Bcast Pkts */
1841                         E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1842
1843                rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1844                          E1000_RCTL_DPF | /* Allow filtered pause */
1845                          E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1846                /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1847                 * and that breaks VLANs.
1848                 */
1849        }
1850
1851        ew32(RCTL, rctl);
1852}
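
/* Illustrative mapping derived from the switch above: for the default
 * E1000_RXBUFFER_2048 case the code selects E1000_RCTL_SZ_2048 and clears
 * E1000_RCTL_BSEX, while the 4096/8192/16384 byte buffers leave BSEX set,
 * since those sizes are only encodable with the buffer size extension.
 */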
1853
1854/**
1855 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1856 * @adapter: board private structure
1857 *
1858 * Configure the Rx unit of the MAC after a reset.
1859 **/
1860static void e1000_configure_rx(struct e1000_adapter *adapter)
1861{
1862        u64 rdba;
1863        struct e1000_hw *hw = &adapter->hw;
1864        u32 rdlen, rctl, rxcsum;
1865
1866        if (adapter->netdev->mtu > ETH_DATA_LEN) {
1867                rdlen = adapter->rx_ring[0].count *
1868                        sizeof(struct e1000_rx_desc);
1869                adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1870                adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1871        } else {
1872                rdlen = adapter->rx_ring[0].count *
1873                        sizeof(struct e1000_rx_desc);
1874                adapter->clean_rx = e1000_clean_rx_irq;
1875                adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1876        }
1877
1878        /* disable receives while setting up the descriptors */
1879        rctl = er32(RCTL);
1880        ew32(RCTL, rctl & ~E1000_RCTL_EN);
1881
1882        /* set the Receive Delay Timer Register */
1883        ew32(RDTR, adapter->rx_int_delay);
1884
1885        if (hw->mac_type >= e1000_82540) {
1886                ew32(RADV, adapter->rx_abs_int_delay);
1887                if (adapter->itr_setting != 0)
1888                        ew32(ITR, 1000000000 / (adapter->itr * 256));
1889        }
1890
1891        /* Setup the HW Rx Head and Tail Descriptor Pointers and
1892         * the Base and Length of the Rx Descriptor Ring
1893         */
1894        switch (adapter->num_rx_queues) {
1895        case 1:
1896        default:
1897                rdba = adapter->rx_ring[0].dma;
1898                ew32(RDLEN, rdlen);
1899                ew32(RDBAH, (rdba >> 32));
1900                ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1901                ew32(RDT, 0);
1902                ew32(RDH, 0);
1903                adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1904                                           E1000_RDH : E1000_82542_RDH);
1905                adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1906                                           E1000_RDT : E1000_82542_RDT);
1907                break;
1908        }
1909
1910        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1911        if (hw->mac_type >= e1000_82543) {
1912                rxcsum = er32(RXCSUM);
1913                if (adapter->rx_csum)
1914                        rxcsum |= E1000_RXCSUM_TUOFL;
1915                else
1916                        /* don't need to clear IPPCSE as it defaults to 0 */
1917                        rxcsum &= ~E1000_RXCSUM_TUOFL;
1918                ew32(RXCSUM, rxcsum);
1919        }
1920
1921        /* Enable Receives */
1922        ew32(RCTL, rctl | E1000_RCTL_EN);
1923}
1924
1925/**
1926 * e1000_free_tx_resources - Free Tx Resources per Queue
1927 * @adapter: board private structure
1928 * @tx_ring: Tx descriptor ring for a specific queue
1929 *
1930 * Free all transmit software resources
1931 **/
1932static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1933                                    struct e1000_tx_ring *tx_ring)
1934{
1935        struct pci_dev *pdev = adapter->pdev;
1936
1937        e1000_clean_tx_ring(adapter, tx_ring);
1938
1939        vfree(tx_ring->buffer_info);
1940        tx_ring->buffer_info = NULL;
1941
1942        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1943                          tx_ring->dma);
1944
1945        tx_ring->desc = NULL;
1946}
1947
1948/**
1949 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1950 * @adapter: board private structure
1951 *
1952 * Free all transmit software resources
1953 **/
1954void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1955{
1956        int i;
1957
1958        for (i = 0; i < adapter->num_tx_queues; i++)
1959                e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1960}
1961
1962static void
1963e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1964                                 struct e1000_tx_buffer *buffer_info)
1965{
1966        if (buffer_info->dma) {
1967                if (buffer_info->mapped_as_page)
1968                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1969                                       buffer_info->length, DMA_TO_DEVICE);
1970                else
1971                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1972                                         buffer_info->length,
1973                                         DMA_TO_DEVICE);
1974                buffer_info->dma = 0;
1975        }
1976        if (buffer_info->skb) {
1977                dev_kfree_skb_any(buffer_info->skb);
1978                buffer_info->skb = NULL;
1979        }
1980        buffer_info->time_stamp = 0;
1981        /* buffer_info must be completely set up in the transmit path */
1982}
1983
1984/**
1985 * e1000_clean_tx_ring - Free Tx Buffers
1986 * @adapter: board private structure
1987 * @tx_ring: ring to be cleaned
1988 **/
1989static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1990                                struct e1000_tx_ring *tx_ring)
1991{
1992        struct e1000_hw *hw = &adapter->hw;
1993        struct e1000_tx_buffer *buffer_info;
1994        unsigned long size;
1995        unsigned int i;
1996
1997        /* Free all the Tx ring sk_buffs */
1998
1999        for (i = 0; i < tx_ring->count; i++) {
2000                buffer_info = &tx_ring->buffer_info[i];
2001                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2002        }
2003
2004        netdev_reset_queue(adapter->netdev);
2005        size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2006        memset(tx_ring->buffer_info, 0, size);
2007
2008        /* Zero out the descriptor ring */
2009
2010        memset(tx_ring->desc, 0, tx_ring->size);
2011
2012        tx_ring->next_to_use = 0;
2013        tx_ring->next_to_clean = 0;
2014        tx_ring->last_tx_tso = false;
2015
2016        writel(0, hw->hw_addr + tx_ring->tdh);
2017        writel(0, hw->hw_addr + tx_ring->tdt);
2018}
2019
2020/**
2021 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2022 * @adapter: board private structure
2023 **/
2024static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2025{
2026        int i;
2027
2028        for (i = 0; i < adapter->num_tx_queues; i++)
2029                e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2030}
2031
2032/**
2033 * e1000_free_rx_resources - Free Rx Resources
2034 * @adapter: board private structure
2035 * @rx_ring: ring to clean the resources from
2036 *
2037 * Free all receive software resources
2038 **/
2039static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2040                                    struct e1000_rx_ring *rx_ring)
2041{
2042        struct pci_dev *pdev = adapter->pdev;
2043
2044        e1000_clean_rx_ring(adapter, rx_ring);
2045
2046        vfree(rx_ring->buffer_info);
2047        rx_ring->buffer_info = NULL;
2048
2049        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2050                          rx_ring->dma);
2051
2052        rx_ring->desc = NULL;
2053}
2054
2055/**
2056 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2057 * @adapter: board private structure
2058 *
2059 * Free all receive software resources
2060 **/
2061void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2062{
2063        int i;
2064
2065        for (i = 0; i < adapter->num_rx_queues; i++)
2066                e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2067}
2068
2069#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2070static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2071{
2072        return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2073                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2074}
2075
2076static void *e1000_alloc_frag(const struct e1000_adapter *a)
2077{
2078        unsigned int len = e1000_frag_len(a);
2079        u8 *data = netdev_alloc_frag(len);
2080
2081        if (likely(data))
2082                data += E1000_HEADROOM;
2083        return data;
2084}
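
/* Illustrative calculation (assumption: NET_SKB_PAD is 64 and NET_IP_ALIGN
 * is 2 here; both are architecture-dependent).  With the default
 * rx_buffer_len of 2048, e1000_frag_len() evaluates to roughly
 * SKB_DATA_ALIGN(2048 + 66) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 * i.e. the aligned data area plus tail room for the struct skb_shared_info
 * that any skb built over this fragment will need.
 */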
2085
2086/**
2087 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2088 * @adapter: board private structure
2089 * @rx_ring: ring to free buffers from
2090 **/
2091static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2092                                struct e1000_rx_ring *rx_ring)
2093{
2094        struct e1000_hw *hw = &adapter->hw;
2095        struct e1000_rx_buffer *buffer_info;
2096        struct pci_dev *pdev = adapter->pdev;
2097        unsigned long size;
2098        unsigned int i;
2099
2100        /* Free all the Rx netfrags */
2101        for (i = 0; i < rx_ring->count; i++) {
2102                buffer_info = &rx_ring->buffer_info[i];
2103                if (adapter->clean_rx == e1000_clean_rx_irq) {
2104                        if (buffer_info->dma)
2105                                dma_unmap_single(&pdev->dev, buffer_info->dma,
2106                                                 adapter->rx_buffer_len,
2107                                                 DMA_FROM_DEVICE);
2108                        if (buffer_info->rxbuf.data) {
2109                                skb_free_frag(buffer_info->rxbuf.data);
2110                                buffer_info->rxbuf.data = NULL;
2111                        }
2112                } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2113                        if (buffer_info->dma)
2114                                dma_unmap_page(&pdev->dev, buffer_info->dma,
2115                                               adapter->rx_buffer_len,
2116                                               DMA_FROM_DEVICE);
2117                        if (buffer_info->rxbuf.page) {
2118                                put_page(buffer_info->rxbuf.page);
2119                                buffer_info->rxbuf.page = NULL;
2120                        }
2121                }
2122
2123                buffer_info->dma = 0;
2124        }
2125
2126        /* there may also be some cached data from a chained receive */
2127        napi_free_frags(&adapter->napi);
2128        rx_ring->rx_skb_top = NULL;
2129
2130        size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2131        memset(rx_ring->buffer_info, 0, size);
2132
2133        /* Zero out the descriptor ring */
2134        memset(rx_ring->desc, 0, rx_ring->size);
2135
2136        rx_ring->next_to_clean = 0;
2137        rx_ring->next_to_use = 0;
2138
2139        writel(0, hw->hw_addr + rx_ring->rdh);
2140        writel(0, hw->hw_addr + rx_ring->rdt);
2141}
2142
2143/**
2144 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2145 * @adapter: board private structure
2146 **/
2147static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2148{
2149        int i;
2150
2151        for (i = 0; i < adapter->num_rx_queues; i++)
2152                e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2153}
2154
2155/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2156 * and memory write and invalidate disabled for certain operations
2157 */
2158static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2159{
2160        struct e1000_hw *hw = &adapter->hw;
2161        struct net_device *netdev = adapter->netdev;
2162        u32 rctl;
2163
2164        e1000_pci_clear_mwi(hw);
2165
2166        rctl = er32(RCTL);
2167        rctl |= E1000_RCTL_RST;
2168        ew32(RCTL, rctl);
2169        E1000_WRITE_FLUSH();
2170        mdelay(5);
2171
2172        if (netif_running(netdev))
2173                e1000_clean_all_rx_rings(adapter);
2174}
2175
2176static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2177{
2178        struct e1000_hw *hw = &adapter->hw;
2179        struct net_device *netdev = adapter->netdev;
2180        u32 rctl;
2181
2182        rctl = er32(RCTL);
2183        rctl &= ~E1000_RCTL_RST;
2184        ew32(RCTL, rctl);
2185        E1000_WRITE_FLUSH();
2186        mdelay(5);
2187
2188        if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2189                e1000_pci_set_mwi(hw);
2190
2191        if (netif_running(netdev)) {
2192                /* No need to loop, because 82542 supports only 1 queue */
2193                struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2194                e1000_configure_rx(adapter);
2195                adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2196        }
2197}
2198
2199/**
2200 * e1000_set_mac - Change the Ethernet Address of the NIC
2201 * @netdev: network interface device structure
2202 * @p: pointer to an address structure
2203 *
2204 * Returns 0 on success, negative on failure
2205 **/
2206static int e1000_set_mac(struct net_device *netdev, void *p)
2207{
2208        struct e1000_adapter *adapter = netdev_priv(netdev);
2209        struct e1000_hw *hw = &adapter->hw;
2210        struct sockaddr *addr = p;
2211
2212        if (!is_valid_ether_addr(addr->sa_data))
2213                return -EADDRNOTAVAIL;
2214
2215        /* 82542 2.0 needs to be in reset to write receive address registers */
2216
2217        if (hw->mac_type == e1000_82542_rev2_0)
2218                e1000_enter_82542_rst(adapter);
2219
2220        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2221        memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2222
2223        e1000_rar_set(hw, hw->mac_addr, 0);
2224
2225        if (hw->mac_type == e1000_82542_rev2_0)
2226                e1000_leave_82542_rst(adapter);
2227
2228        return 0;
2229}
2230
2231/**
2232 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2233 * @netdev: network interface device structure
2234 *
2235 * The set_rx_mode entry point is called whenever the unicast or multicast
2236 * address lists or the network interface flags are updated. This routine is
2237 * responsible for configuring the hardware for proper unicast, multicast,
2238 * promiscuous mode, and all-multi behavior.
2239 **/
2240static void e1000_set_rx_mode(struct net_device *netdev)
2241{
2242        struct e1000_adapter *adapter = netdev_priv(netdev);
2243        struct e1000_hw *hw = &adapter->hw;
2244        struct netdev_hw_addr *ha;
2245        bool use_uc = false;
2246        u32 rctl;
2247        u32 hash_value;
2248        int i, rar_entries = E1000_RAR_ENTRIES;
2249        int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2250        u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2251
2252        if (!mcarray)
2253                return;
2254
2255        /* Check for Promiscuous and All Multicast modes */
2256
2257        rctl = er32(RCTL);
2258
2259        if (netdev->flags & IFF_PROMISC) {
2260                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2261                rctl &= ~E1000_RCTL_VFE;
2262        } else {
2263                if (netdev->flags & IFF_ALLMULTI)
2264                        rctl |= E1000_RCTL_MPE;
2265                else
2266                        rctl &= ~E1000_RCTL_MPE;
2267                /* Enable VLAN filter if there is a VLAN */
2268                if (e1000_vlan_used(adapter))
2269                        rctl |= E1000_RCTL_VFE;
2270        }
2271
2272        if (netdev_uc_count(netdev) > rar_entries - 1) {
2273                rctl |= E1000_RCTL_UPE;
2274        } else if (!(netdev->flags & IFF_PROMISC)) {
2275                rctl &= ~E1000_RCTL_UPE;
2276                use_uc = true;
2277        }
2278
2279        ew32(RCTL, rctl);
2280
2281        /* 82542 2.0 needs to be in reset to write receive address registers */
2282
2283        if (hw->mac_type == e1000_82542_rev2_0)
2284                e1000_enter_82542_rst(adapter);
2285
2286        /* load the first 14 addresses into the exact filters 1-14. Unicast
2287         * addresses take precedence to avoid disabling unicast filtering
2288         * when possible.
2289         *
2290         * RAR 0 is used for the station MAC address.
2291         * If there are not 14 addresses, go ahead and clear the remaining filters.
2292         */
2293        i = 1;
2294        if (use_uc)
2295                netdev_for_each_uc_addr(ha, netdev) {
2296                        if (i == rar_entries)
2297                                break;
2298                        e1000_rar_set(hw, ha->addr, i++);
2299                }
2300
2301        netdev_for_each_mc_addr(ha, netdev) {
2302                if (i == rar_entries) {
2303                        /* load any remaining addresses into the hash table */
2304                        u32 hash_reg, hash_bit, mta;
2305                        hash_value = e1000_hash_mc_addr(hw, ha->addr);
2306                        hash_reg = (hash_value >> 5) & 0x7F;
2307                        hash_bit = hash_value & 0x1F;
2308                        mta = (1 << hash_bit);
2309                        mcarray[hash_reg] |= mta;
2310                } else {
2311                        e1000_rar_set(hw, ha->addr, i++);
2312                }
2313        }
2314
2315        for (; i < rar_entries; i++) {
2316                E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2317                E1000_WRITE_FLUSH();
2318                E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2319                E1000_WRITE_FLUSH();
2320        }
2321
2322        /* Write the hash table completely, from the bottom up, to work around
2323         * broken write-combining chipsets without flushing each individual write
2324         */
2325        for (i = mta_reg_count - 1; i >= 0 ; i--) {
2326                /* The 82544 has an errata where writing an odd offset
2327                 * overwrites the previous even offset, but writing
2328                 * backwards over the range solves the issue by always
2329                 * writing the odd offset first
2330                 */
2331                E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2332        }
2333        E1000_WRITE_FLUSH();
2334
2335        if (hw->mac_type == e1000_82542_rev2_0)
2336                e1000_leave_82542_rst(adapter);
2337
2338        kfree(mcarray);
2339}
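
/* Illustrative hash example (values chosen for illustration only): if
 * e1000_hash_mc_addr() returned hash_value = 0x0D2F for some multicast
 * address, the code above would select MTA register
 * (0x0D2F >> 5) & 0x7F = 0x69 and bit 0x0D2F & 0x1F = 15, so
 * mcarray[0x69] gets bit 15 set before the table is written back to
 * the hardware.
 */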
2340
2341/**
2342 * e1000_update_phy_info_task - get phy info
2343 * @work: work struct contained inside adapter struct
2344 *
2345 * Need to wait a few seconds after link up to get diagnostic information from
2346 * the phy
2347 */
2348static void e1000_update_phy_info_task(struct work_struct *work)
2349{
2350        struct e1000_adapter *adapter = container_of(work,
2351                                                     struct e1000_adapter,
2352                                                     phy_info_task.work);
2353
2354        e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2355}
2356
2357/**
2358 * e1000_82547_tx_fifo_stall_task - task to complete work
2359 * @work: work struct contained inside adapter struct
2360 **/
2361static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2362{
2363        struct e1000_adapter *adapter = container_of(work,
2364                                                     struct e1000_adapter,
2365                                                     fifo_stall_task.work);
2366        struct e1000_hw *hw = &adapter->hw;
2367        struct net_device *netdev = adapter->netdev;
2368        u32 tctl;
2369
2370        if (atomic_read(&adapter->tx_fifo_stall)) {
2371                if ((er32(TDT) == er32(TDH)) &&
2372                   (er32(TDFT) == er32(TDFH)) &&
2373                   (er32(TDFTS) == er32(TDFHS))) {
2374                        tctl = er32(TCTL);
2375                        ew32(TCTL, tctl & ~E1000_TCTL_EN);
2376                        ew32(TDFT, adapter->tx_head_addr);
2377                        ew32(TDFH, adapter->tx_head_addr);
2378                        ew32(TDFTS, adapter->tx_head_addr);
2379                        ew32(TDFHS, adapter->tx_head_addr);
2380                        ew32(TCTL, tctl);
2381                        E1000_WRITE_FLUSH();
2382
2383                        adapter->tx_fifo_head = 0;
2384                        atomic_set(&adapter->tx_fifo_stall, 0);
2385                        netif_wake_queue(netdev);
2386                } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2387                        schedule_delayed_work(&adapter->fifo_stall_task, 1);
2388                }
2389        }
2390}
2391
2392bool e1000_has_link(struct e1000_adapter *adapter)
2393{
2394        struct e1000_hw *hw = &adapter->hw;
2395        bool link_active = false;
2396
2397        /* get_link_status is set on an LSC (link status change) interrupt
2398         * or an rx sequence error interrupt (except on the Intel ce4100).
2399         * It remains set until e1000_check_for_link
2400         * establishes link; note that this applies to copper adapters
2401         * ONLY
2402         */
2403        switch (hw->media_type) {
2404        case e1000_media_type_copper:
2405                if (hw->mac_type == e1000_ce4100)
2406                        hw->get_link_status = 1;
2407                if (hw->get_link_status) {
2408                        e1000_check_for_link(hw);
2409                        link_active = !hw->get_link_status;
2410                } else {
2411                        link_active = true;
2412                }
2413                break;
2414        case e1000_media_type_fiber:
2415                e1000_check_for_link(hw);
2416                link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2417                break;
2418        case e1000_media_type_internal_serdes:
2419                e1000_check_for_link(hw);
2420                link_active = hw->serdes_has_link;
2421                break;
2422        default:
2423                break;
2424        }
2425
2426        return link_active;
2427}
2428
2429/**
2430 * e1000_watchdog - work function
2431 * @work: work struct contained inside adapter struct
2432 **/
2433static void e1000_watchdog(struct work_struct *work)
2434{
2435        struct e1000_adapter *adapter = container_of(work,
2436                                                     struct e1000_adapter,
2437                                                     watchdog_task.work);
2438        struct e1000_hw *hw = &adapter->hw;
2439        struct net_device *netdev = adapter->netdev;
2440        struct e1000_tx_ring *txdr = adapter->tx_ring;
2441        u32 link, tctl;
2442
2443        link = e1000_has_link(adapter);
2444        if ((netif_carrier_ok(netdev)) && link)
2445                goto link_up;
2446
2447        if (link) {
2448                if (!netif_carrier_ok(netdev)) {
2449                        u32 ctrl;
2450                        bool txb2b = true;
2451                        /* update snapshot of PHY registers on LSC */
2452                        e1000_get_speed_and_duplex(hw,
2453                                                   &adapter->link_speed,
2454                                                   &adapter->link_duplex);
2455
2456                        ctrl = er32(CTRL);
2457                        pr_info("%s NIC Link is Up %d Mbps %s, "
2458                                "Flow Control: %s\n",
2459                                netdev->name,
2460                                adapter->link_speed,
2461                                adapter->link_duplex == FULL_DUPLEX ?
2462                                "Full Duplex" : "Half Duplex",
2463                                ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2464                                E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2465                                E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2466                                E1000_CTRL_TFCE) ? "TX" : "None")));
2467
2468                        /* adjust timeout factor according to speed/duplex */
2469                        adapter->tx_timeout_factor = 1;
2470                        switch (adapter->link_speed) {
2471                        case SPEED_10:
2472                                txb2b = false;
2473                                adapter->tx_timeout_factor = 16;
2474                                break;
2475                        case SPEED_100:
2476                                txb2b = false;
2477                                /* maybe add some timeout factor ? */
2478                                break;
2479                        }
2480
2481                        /* enable transmits in the hardware */
2482                        tctl = er32(TCTL);
2483                        tctl |= E1000_TCTL_EN;
2484                        ew32(TCTL, tctl);
2485
2486                        netif_carrier_on(netdev);
2487                        if (!test_bit(__E1000_DOWN, &adapter->flags))
2488                                schedule_delayed_work(&adapter->phy_info_task,
2489                                                      2 * HZ);
2490                        adapter->smartspeed = 0;
2491                }
2492        } else {
2493                if (netif_carrier_ok(netdev)) {
2494                        adapter->link_speed = 0;
2495                        adapter->link_duplex = 0;
2496                        pr_info("%s NIC Link is Down\n",
2497                                netdev->name);
2498                        netif_carrier_off(netdev);
2499
2500                        if (!test_bit(__E1000_DOWN, &adapter->flags))
2501                                schedule_delayed_work(&adapter->phy_info_task,
2502                                                      2 * HZ);
2503                }
2504
2505                e1000_smartspeed(adapter);
2506        }
2507
2508link_up:
2509        e1000_update_stats(adapter);
2510
2511        hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2512        adapter->tpt_old = adapter->stats.tpt;
2513        hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2514        adapter->colc_old = adapter->stats.colc;
2515
2516        adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2517        adapter->gorcl_old = adapter->stats.gorcl;
2518        adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2519        adapter->gotcl_old = adapter->stats.gotcl;
2520
2521        e1000_update_adaptive(hw);
2522
2523        if (!netif_carrier_ok(netdev)) {
2524                if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2525                        /* We've lost link, so the controller stops DMA,
2526                         * but we've got queued Tx work that's never going
2527                         * to get done, so reset controller to flush Tx.
2528                         * (Do the reset outside of interrupt context).
2529                         */
2530                        adapter->tx_timeout_count++;
2531                        schedule_work(&adapter->reset_task);
2532                        /* exit immediately since reset is imminent */
2533                        return;
2534                }
2535        }
2536
2537        /* Simple mode for Interrupt Throttle Rate (ITR) */
2538        if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2539                /* Symmetric Tx/Rx gets a reduced ITR=2000;
2540                 * Total asymmetrical Tx or Rx gets ITR=8000;
2541                 * everyone else is between 2000-8000.
2542                 */
2543                u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2544                u32 dif = (adapter->gotcl > adapter->gorcl ?
2545                            adapter->gotcl - adapter->gorcl :
2546                            adapter->gorcl - adapter->gotcl) / 10000;
2547                u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2548
2549                ew32(ITR, 1000000000 / (itr * 256));
2550        }
2551
2552        /* Cause software interrupt to ensure rx ring is cleaned */
2553        ew32(ICS, E1000_ICS_RXDMT0);
2554
2555        /* Force detection of hung controller every watchdog period */
2556        adapter->detect_tx_hung = true;
2557
2558        /* Reschedule the task */
2559        if (!test_bit(__E1000_DOWN, &adapter->flags))
2560                schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2561}
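
/* Illustrative numbers for the "simple mode" ITR block above (assumed
 * traffic sample, not measured data): with gotcl = 80,000,000 bytes and
 * gorcl = 20,000,000 bytes over the watchdog interval, goc = 10000 and
 * dif = 6000, so itr = 6000 * 6000 / 10000 + 2000 = 5600 interrupts/s,
 * and the ITR register is written with 1000000000 / (5600 * 256) ~= 697
 * (the register counts in 256 ns increments).
 */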
2562
2563enum latency_range {
2564        lowest_latency = 0,
2565        low_latency = 1,
2566        bulk_latency = 2,
2567        latency_invalid = 255
2568};
2569
2570/**
2571 * e1000_update_itr - update the dynamic ITR value based on statistics
2572 * @adapter: pointer to adapter
2573 * @itr_setting: current adapter->itr
2574 * @packets: the number of packets during this measurement interval
2575 * @bytes: the number of bytes during this measurement interval
2576 *
2577 *      Stores a new ITR value based on packets and byte
2578 *      counts during the last interrupt.  The advantage of per interrupt
2579 *      computation is faster updates and more accurate ITR for the current
2580 *      traffic pattern.  Constants in this function were computed
2581 *      based on theoretical maximum wire speed and thresholds were set based
2582 *      on testing data as well as attempting to minimize response time
2583 *      while increasing bulk throughput.
2584 *      This functionality is controlled by the InterruptThrottleRate module
2585 *      parameter (see e1000_param.c)
2586 **/
2587static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2588                                     u16 itr_setting, int packets, int bytes)
2589{
2590        unsigned int retval = itr_setting;
2591        struct e1000_hw *hw = &adapter->hw;
2592
2593        if (unlikely(hw->mac_type < e1000_82540))
2594                goto update_itr_done;
2595
2596        if (packets == 0)
2597                goto update_itr_done;
2598
2599        switch (itr_setting) {
2600        case lowest_latency:
2601                /* jumbo frames get bulk treatment */
2602                if (bytes/packets > 8000)
2603                        retval = bulk_latency;
2604                else if ((packets < 5) && (bytes > 512))
2605                        retval = low_latency;
2606                break;
2607        case low_latency:  /* 50 usec aka 20000 ints/s */
2608                if (bytes > 10000) {
2609                        /* jumbo frames need bulk latency setting */
2610                        if (bytes/packets > 8000)
2611                                retval = bulk_latency;
2612                        else if ((packets < 10) || ((bytes/packets) > 1200))
2613                                retval = bulk_latency;
2614                        else if ((packets > 35))
2615                                retval = lowest_latency;
2616                } else if (bytes/packets > 2000)
2617                        retval = bulk_latency;
2618                else if (packets <= 2 && bytes < 512)
2619                        retval = lowest_latency;
2620                break;
2621        case bulk_latency: /* 250 usec aka 4000 ints/s */
2622                if (bytes > 25000) {
2623                        if (packets > 35)
2624                                retval = low_latency;
2625                } else if (bytes < 6000) {
2626                        retval = low_latency;
2627                }
2628                break;
2629        }
2630
2631update_itr_done:
2632        return retval;
2633}
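
/* Worked classification example (hypothetical interval, for illustration):
 * starting from low_latency with packets = 40 and bytes = 50000,
 * bytes > 10000 and bytes/packets = 1250 > 1200, so the function moves the
 * queue to bulk_latency; a later interval of 30 small packets totalling
 * 5000 bytes would drop it back to low_latency via the bytes < 6000 branch.
 */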
2634
2635static void e1000_set_itr(struct e1000_adapter *adapter)
2636{
2637        struct e1000_hw *hw = &adapter->hw;
2638        u16 current_itr;
2639        u32 new_itr = adapter->itr;
2640
2641        if (unlikely(hw->mac_type < e1000_82540))
2642                return;
2643
2644        /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2645        if (unlikely(adapter->link_speed != SPEED_1000)) {
2646                current_itr = 0;
2647                new_itr = 4000;
2648                goto set_itr_now;
2649        }
2650
2651        adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2652                                           adapter->total_tx_packets,
2653                                           adapter->total_tx_bytes);
2654        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2655        if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2656                adapter->tx_itr = low_latency;
2657
2658        adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2659                                           adapter->total_rx_packets,
2660                                           adapter->total_rx_bytes);
2661        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2662        if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2663                adapter->rx_itr = low_latency;
2664
2665        current_itr = max(adapter->rx_itr, adapter->tx_itr);
2666
2667        switch (current_itr) {
2668        /* counts and packets in update_itr are dependent on these numbers */
2669        case lowest_latency:
2670                new_itr = 70000;
2671                break;
2672        case low_latency:
2673                new_itr = 20000; /* aka hwitr = ~200 */
2674                break;
2675        case bulk_latency:
2676                new_itr = 4000;
2677                break;
2678        default:
2679                break;
2680        }
2681
2682set_itr_now:
2683        if (new_itr != adapter->itr) {
2684                /* this attempts to bias the interrupt rate towards Bulk
2685                 * by adding intermediate steps when interrupt rate is
2686                 * increasing
2687                 */
2688                new_itr = new_itr > adapter->itr ?
2689                          min(adapter->itr + (new_itr >> 2), new_itr) :
2690                          new_itr;
2691                adapter->itr = new_itr;
2692                ew32(ITR, 1000000000 / (new_itr * 256));
2693        }
2694}
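
/* Illustrative example of the biasing step above (hypothetical values):
 * if adapter->itr is currently 4000 and the new target is 20000, the code
 * only moves to min(4000 + (20000 >> 2), 20000) = 9000 interrupts/s this
 * round, and ITR is programmed with 1000000000 / (9000 * 256) ~= 434,
 * so the rate ramps up gradually instead of jumping straight to 20000.
 */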
2695
2696#define E1000_TX_FLAGS_CSUM             0x00000001
2697#define E1000_TX_FLAGS_VLAN             0x00000002
2698#define E1000_TX_FLAGS_TSO              0x00000004
2699#define E1000_TX_FLAGS_IPV4             0x00000008
2700#define E1000_TX_FLAGS_NO_FCS           0x00000010
2701#define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2702#define E1000_TX_FLAGS_VLAN_SHIFT       16
2703
2704static int e1000_tso(struct e1000_adapter *adapter,
2705                     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2706                     __be16 protocol)
2707{
2708        struct e1000_context_desc *context_desc;
2709        struct e1000_tx_buffer *buffer_info;
2710        unsigned int i;
2711        u32 cmd_length = 0;
2712        u16 ipcse = 0, tucse, mss;
2713        u8 ipcss, ipcso, tucss, tucso, hdr_len;
2714
2715        if (skb_is_gso(skb)) {
2716                int err;
2717
2718                err = skb_cow_head(skb, 0);
2719                if (err < 0)
2720                        return err;
2721
2722                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2723                mss = skb_shinfo(skb)->gso_size;
2724                if (protocol == htons(ETH_P_IP)) {
2725                        struct iphdr *iph = ip_hdr(skb);
2726                        iph->tot_len = 0;
2727                        iph->check = 0;
2728                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2729                                                                 iph->daddr, 0,
2730                                                                 IPPROTO_TCP,
2731                                                                 0);
2732                        cmd_length = E1000_TXD_CMD_IP;
2733                        ipcse = skb_transport_offset(skb) - 1;
2734                } else if (skb_is_gso_v6(skb)) {
2735                        ipv6_hdr(skb)->payload_len = 0;
2736                        tcp_hdr(skb)->check =
2737                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2738                                                 &ipv6_hdr(skb)->daddr,
2739                                                 0, IPPROTO_TCP, 0);
2740                        ipcse = 0;
2741                }
2742                ipcss = skb_network_offset(skb);
2743                ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2744                tucss = skb_transport_offset(skb);
2745                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2746                tucse = 0;
2747
2748                cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2749                               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2750
2751                i = tx_ring->next_to_use;
2752                context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2753                buffer_info = &tx_ring->buffer_info[i];
2754
2755                context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2756                context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2757                context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2758                context_desc->upper_setup.tcp_fields.tucss = tucss;
2759                context_desc->upper_setup.tcp_fields.tucso = tucso;
2760                context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2761                context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2762                context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2763                context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2764
2765                buffer_info->time_stamp = jiffies;
2766                buffer_info->next_to_watch = i;
2767
2768                if (++i == tx_ring->count)
2769                        i = 0;
2770
2771                tx_ring->next_to_use = i;
2772
2773                return true;
2774        }
2775        return false;
2776}
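
/* Illustrative context-descriptor offsets (assuming an untagged Ethernet
 * frame carrying IPv4/TCP with no IP or TCP options): ipcss = 14 (start of
 * the IP header), ipcso = 24 (IP checksum field), ipcse = 33 (last byte of
 * the IP header), tucss = 34, tucso = 50 (TCP checksum field) and
 * hdr_len = 54, so cmd_and_length covers skb->len - 54 bytes of payload.
 */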
2777
2778static bool e1000_tx_csum(struct e1000_adapter *adapter,
2779                          struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2780                          __be16 protocol)
2781{
2782        struct e1000_context_desc *context_desc;
2783        struct e1000_tx_buffer *buffer_info;
2784        unsigned int i;
2785        u8 css;
2786        u32 cmd_len = E1000_TXD_CMD_DEXT;
2787
2788        if (skb->ip_summed != CHECKSUM_PARTIAL)
2789                return false;
2790
2791        switch (protocol) {
2792        case cpu_to_be16(ETH_P_IP):
2793                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2794                        cmd_len |= E1000_TXD_CMD_TCP;
2795                break;
2796        case cpu_to_be16(ETH_P_IPV6):
2797                /* XXX not handling all IPV6 headers */
2798                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2799                        cmd_len |= E1000_TXD_CMD_TCP;
2800                break;
2801        default:
2802                if (unlikely(net_ratelimit()))
2803                        e_warn(drv, "checksum_partial proto=%x!\n",
2804                               skb->protocol);
2805                break;
2806        }
2807
2808        css = skb_checksum_start_offset(skb);
2809
2810        i = tx_ring->next_to_use;
2811        buffer_info = &tx_ring->buffer_info[i];
2812        context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2813
2814        context_desc->lower_setup.ip_config = 0;
2815        context_desc->upper_setup.tcp_fields.tucss = css;
2816        context_desc->upper_setup.tcp_fields.tucso =
2817                css + skb->csum_offset;
2818        context_desc->upper_setup.tcp_fields.tucse = 0;
2819        context_desc->tcp_seg_setup.data = 0;
2820        context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2821
2822        buffer_info->time_stamp = jiffies;
2823        buffer_info->next_to_watch = i;
2824
2825        if (unlikely(++i == tx_ring->count))
2826                i = 0;
2827
2828        tx_ring->next_to_use = i;
2829
2830        return true;
2831}
2832
2833#define E1000_MAX_TXD_PWR       12
2834#define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2835
2836static int e1000_tx_map(struct e1000_adapter *adapter,
2837                        struct e1000_tx_ring *tx_ring,
2838                        struct sk_buff *skb, unsigned int first,
2839                        unsigned int max_per_txd, unsigned int nr_frags,
2840                        unsigned int mss)
2841{
2842        struct e1000_hw *hw = &adapter->hw;
2843        struct pci_dev *pdev = adapter->pdev;
2844        struct e1000_tx_buffer *buffer_info;
2845        unsigned int len = skb_headlen(skb);
2846        unsigned int offset = 0, size, count = 0, i;
2847        unsigned int f, bytecount, segs;
2848
2849        i = tx_ring->next_to_use;
2850
2851        while (len) {
2852                buffer_info = &tx_ring->buffer_info[i];
2853                size = min(len, max_per_txd);
2854                /* Workaround for Controller erratum --
2855                 * the descriptor for a non-TSO packet in a linear skb that
2856                 * follows a TSO packet gets written back prematurely, before
2857                 * the data is fully DMA'd to the controller.
2858                 */
2859                if (!skb->data_len && tx_ring->last_tx_tso &&
2860                    !skb_is_gso(skb)) {
2861                        tx_ring->last_tx_tso = false;
2862                        size -= 4;
2863                }
2864
2865                /* Workaround for premature desc write-backs
2866                 * in TSO mode.  Append 4-byte sentinel desc
2867                 */
2868                if (unlikely(mss && !nr_frags && size == len && size > 8))
2869                        size -= 4;
2870                /* Workaround for erratum 10, which applies to all
2871                 * controllers in PCI-X mode.
2872                 * The fix is to make sure that the first descriptor of a
2873                 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
2874                 */
2875                if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2876                             (size > 2015) && count == 0))
2877                        size = 2015;
2878
2879                /* Workaround for potential 82544 hang in PCI-X.  Avoid
2880                 * terminating buffers within evenly-aligned dwords.
2881                 */
2882                if (unlikely(adapter->pcix_82544 &&
2883                   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2884                   size > 4))
2885                        size -= 4;
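                /* any bytes trimmed here (or by the caps above) are carried
                 * over to the next descriptor via the len/offset bookkeeping
                 * below
                 */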
2886
2887                buffer_info->length = size;
2888                /* set time_stamp *before* dma to help avoid a possible race */
2889                buffer_info->time_stamp = jiffies;
2890                buffer_info->mapped_as_page = false;
2891                buffer_info->dma = dma_map_single(&pdev->dev,
2892                                                  skb->data + offset,
2893                                                  size, DMA_TO_DEVICE);
2894                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2895                        goto dma_error;
2896                buffer_info->next_to_watch = i;
2897
2898                len -= size;
2899                offset += size;
2900                count++;
2901                if (len) {
2902                        i++;
2903                        if (unlikely(i == tx_ring->count))
2904                                i = 0;
2905                }
2906        }
2907
2908        for (f = 0; f < nr_frags; f++) {
2909                const struct skb_frag_struct *frag;
2910
2911                frag = &skb_shinfo(skb)->frags[f];
2912                len = skb_frag_size(frag);
2913                offset = 0;
2914
2915                while (len) {
2916                        unsigned long bufend;
2917                        i++;
2918                        if (unlikely(i == tx_ring->count))
2919                                i = 0;
2920
2921                        buffer_info = &tx_ring->buffer_info[i];
2922                        size = min(len, max_per_txd);
2923                        /* Workaround for premature desc write-backs
2924                         * in TSO mode.  Append 4-byte sentinel desc
2925                         */
2926                        if (unlikely(mss && f == (nr_frags-1) &&
2927                            size == len && size > 8))
2928                                size -= 4;
2929                        /* Workaround for potential 82544 hang in PCI-X.
2930                         * Avoid terminating buffers within evenly-aligned
2931                         * dwords.
2932                         */
2933                        bufend = (unsigned long)
2934                                page_to_phys(skb_frag_page(frag));
2935                        bufend += offset + size - 1;
2936                        if (unlikely(adapter->pcix_82544 &&
2937                                     !(bufend & 4) &&
2938                                     size > 4))
2939                                size -= 4;
2940
2941                        buffer_info->length = size;
2942                        buffer_info->time_stamp = jiffies;
2943                        buffer_info->mapped_as_page = true;
2944                        buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2945                                                offset, size, DMA_TO_DEVICE);
2946                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2947                                goto dma_error;
2948                        buffer_info->next_to_watch = i;
2949
2950                        len -= size;
2951                        offset += size;
2952                        count++;
2953                }
2954        }
2955
2956        segs = skb_shinfo(skb)->gso_segs ?: 1;
2957        /* multiply data chunks by size of headers */
2958        bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
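        /* e.g. with gso_segs = 3 and a 54-byte linear header, this adds
         * 2 * 54 bytes on top of skb->len, approximating the headers that are
         * replicated in each additional segment on the wire
         */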
2959
2960        tx_ring->buffer_info[i].skb = skb;
2961        tx_ring->buffer_info[i].segs = segs;
2962        tx_ring->buffer_info[i].bytecount = bytecount;
2963        tx_ring->buffer_info[first].next_to_watch = i;
2964
2965        return count;
2966
2967dma_error:
2968        dev_err(&pdev->dev, "TX DMA map failed\n");
2969        buffer_info->dma = 0;
2970        if (count)
2971                count--;
2972
2973        while (count--) {
2974                if (i == 0)
2975                        i += tx_ring->count;
2976                i--;
2977                buffer_info = &tx_ring->buffer_info[i];
2978                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2979        }
2980
2981        return 0;
2982}
2983
2984static void e1000_tx_queue(struct e1000_adapter *adapter,
2985                           struct e1000_tx_ring *tx_ring, int tx_flags,
2986                           int count)
2987{
2988        struct e1000_tx_desc *tx_desc = NULL;
2989        struct e1000_tx_buffer *buffer_info;
2990        u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2991        unsigned int i;
2992
2993        if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2994                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2995                             E1000_TXD_CMD_TSE;
2996                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2997
2998                if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2999                        txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3000        }
3001
3002        if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3003                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3004                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3005        }
3006
3007        if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3008                txd_lower |= E1000_TXD_CMD_VLE;
3009                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3010        }
3011
3012        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3013                txd_lower &= ~(E1000_TXD_CMD_IFCS);
3014
3015        i = tx_ring->next_to_use;
3016
3017        while (count--) {
3018                buffer_info = &tx_ring->buffer_info[i];
3019                tx_desc = E1000_TX_DESC(*tx_ring, i);
3020                tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3021                tx_desc->lower.data =
3022                        cpu_to_le32(txd_lower | buffer_info->length);
3023                tx_desc->upper.data = cpu_to_le32(txd_upper);
3024                if (unlikely(++i == tx_ring->count))
3025                        i = 0;
3026        }
3027
3028        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3029
3030        /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3031        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3032                tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3033
3034        /* Force memory writes to complete before letting h/w
3035         * know there are new descriptors to fetch.  (Only
3036         * applicable for weak-ordered memory model archs,
3037         * such as IA-64).
3038         */
3039        wmb();
3040
3041        tx_ring->next_to_use = i;
3042}
3043
3044/* 82547 workaround to avoid controller hang in half-duplex environment.
3045 * The workaround is to avoid queuing a large packet that would span
3046 * the internal Tx FIFO ring boundary by notifying the stack to resend
3047 * the packet at a later time.  This gives the Tx FIFO an opportunity to
3048 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3049 * to the beginning of the Tx FIFO.
3050 */
3051
3052#define E1000_FIFO_HDR                  0x10
3053#define E1000_82547_PAD_LEN             0x3E0
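/* E1000_82547_PAD_LEN is 0x3E0 = 992 bytes.  e1000_82547_fifo_workaround()
 * below charges each frame its length plus E1000_FIFO_HDR (16) bytes, rounded
 * up to a 16-byte multiple; a 1514-byte frame is thus accounted as 1536 bytes.
 */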
3054
3055static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3056                                       struct sk_buff *skb)
3057{
3058        u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3059        u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3060
3061        skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3062
3063        if (adapter->link_duplex != HALF_DUPLEX)
3064                goto no_fifo_stall_required;
3065
3066        if (atomic_read(&adapter->tx_fifo_stall))
3067                return 1;
3068
3069        if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3070                atomic_set(&adapter->tx_fifo_stall, 1);
3071                return 1;
3072        }
3073
3074no_fifo_stall_required:
3075        adapter->tx_fifo_head += skb_fifo_len;
3076        if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3077                adapter->tx_fifo_head -= adapter->tx_fifo_size;
3078        return 0;
3079}
3080
3081static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3082{
3083        struct e1000_adapter *adapter = netdev_priv(netdev);
3084        struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3085
3086        netif_stop_queue(netdev);
3087        /* Herbert's original patch had:
3088         *  smp_mb__after_netif_stop_queue();
3089         * but since that doesn't exist yet, just open code it.
3090         */
3091        smp_mb();
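        /* This barrier pairs with the smp_mb() in e1000_clean_tx_irq(): either
         * this CPU observes the cleaner's updated next_to_clean below, or the
         * cleaning CPU observes the stopped queue and wakes it.
         */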
3092
3093        /* We need to check again in a case another CPU has just
3094         * made room available.
3095         */
3096        if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3097                return -EBUSY;
3098
3099        /* A reprieve! */
3100        netif_start_queue(netdev);
3101        ++adapter->restart_queue;
3102        return 0;
3103}
3104
3105static int e1000_maybe_stop_tx(struct net_device *netdev,
3106                               struct e1000_tx_ring *tx_ring, int size)
3107{
3108        if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3109                return 0;
3110        return __e1000_maybe_stop_tx(netdev, size);
3111}
3112
3113#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
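/* TXD_USE_COUNT(S, X) is ceil(S / 2^X): e.g. a 9018-byte chunk with
 * E1000_MAX_TXD_PWR (12) needs (9018 + 4095) >> 12 = 3 descriptors
 */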
3114static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3115                                    struct net_device *netdev)
3116{
3117        struct e1000_adapter *adapter = netdev_priv(netdev);
3118        struct e1000_hw *hw = &adapter->hw;
3119        struct e1000_tx_ring *tx_ring;
3120        unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3121        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3122        unsigned int tx_flags = 0;
3123        unsigned int len = skb_headlen(skb);
3124        unsigned int nr_frags;
3125        unsigned int mss;
3126        int count = 0;
3127        int tso;
3128        unsigned int f;
3129        __be16 protocol = vlan_get_protocol(skb);
3130
3131        /* This goes back to the question of how to logically map a Tx queue
3132         * to a flow.  Right now, performance is impacted slightly negatively
3133         * if using multiple Tx queues.  If the stack breaks away from a
3134         * single qdisc implementation, we can look at this again.
3135         */
3136        tx_ring = adapter->tx_ring;
3137
3138        /* On PCI/PCI-X HW, if the packet size is less than ETH_ZLEN,
3139         * packets may get corrupted during padding by HW.
3140         * To work around this issue, pad all small packets manually.
3141         */
3142        if (eth_skb_pad(skb))
3143                return NETDEV_TX_OK;
3144
3145        mss = skb_shinfo(skb)->gso_size;
3146        /* The controller does a simple calculation to
3147         * make sure there is enough room in the FIFO before
3148         * initiating the DMA for each buffer.  The calculation
3149         * assumes ceil(buffer len / mss) <= 4, so to make sure we
3150         * don't overrun the FIFO, cap the max buffer len when the
3151         * mss drops.
3152         */
3153        if (mss) {
3154                u8 hdr_len;
3155                max_per_txd = min(mss << 2, max_per_txd);
3156                max_txd_pwr = fls(max_per_txd) - 1;
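                /* e.g. an mss of 1460 leaves max_per_txd at 4096, while an mss
                 * of 500 drops it to 2000 and max_txd_pwr to 10
                 */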
3157
3158                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
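                /* e.g. Ethernet + IPv4 + TCP with no options: 14 + 20 + 20 = 54 */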
3159                if (skb->data_len && hdr_len == len) {
3160                        switch (hw->mac_type) {
3161                                unsigned int pull_size;
3162                        case e1000_82544:
3163                                /* Make sure we have room to chop off 4 bytes,
3164                                 * and that the end alignment will work out to
3165                                 * this hardware's requirements.
3166                                 * NOTE: this is a TSO-only workaround; if the
3167                                 * end byte alignment is not correct, move us
3168                                 * into the next dword.
3169                                 */
3170                                if ((unsigned long)(skb_tail_pointer(skb) - 1)
3171                                    & 4)
3172                                        break;
3173                                /* alignment OK; pull 4 bytes into the linear area */
3174                                pull_size = min((unsigned int)4, skb->data_len);
3175                                if (!__pskb_pull_tail(skb, pull_size)) {
3176                                        e_err(drv, "__pskb_pull_tail "
3177                                              "failed.\n");
3178                                        dev_kfree_skb_any(skb);
3179                                        return NETDEV_TX_OK;
3180                                }
3181                                len = skb_headlen(skb);
3182                                break;
3183                        default:
3184                                /* do nothing */
3185                                break;
3186                        }
3187                }
3188        }
3189
3190        /* reserve a descriptor for the offload context */
3191        if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3192                count++;
3193        count++;
3194
3195        /* Controller Erratum workaround */
3196        if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3197                count++;
3198
3199        count += TXD_USE_COUNT(len, max_txd_pwr);
3200
3201        if (adapter->pcix_82544)
3202                count++;
3203
3204        /* Workaround for erratum 10, which applies to all controllers
3205         * in PCI-X mode, so add one more descriptor to the count
3206         */
3207        if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3208                        (len > 2015)))
3209                count++;
3210
3211        nr_frags = skb_shinfo(skb)->nr_frags;
3212        for (f = 0; f < nr_frags; f++)
3213                count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3214                                       max_txd_pwr);
3215        if (adapter->pcix_82544)
3216                count += nr_frags;
3217
3218        /* need: count + 2 desc gap to keep tail from touching
3219         * head, otherwise try next time
3220         */
3221        if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3222                return NETDEV_TX_BUSY;
3223
3224        if (unlikely((hw->mac_type == e1000_82547) &&
3225                     (e1000_82547_fifo_workaround(adapter, skb)))) {
3226                netif_stop_queue(netdev);
3227                if (!test_bit(__E1000_DOWN, &adapter->flags))
3228                        schedule_delayed_work(&adapter->fifo_stall_task, 1);
3229                return NETDEV_TX_BUSY;
3230        }
3231
3232        if (skb_vlan_tag_present(skb)) {
3233                tx_flags |= E1000_TX_FLAGS_VLAN;
3234                tx_flags |= (skb_vlan_tag_get(skb) <<
3235                             E1000_TX_FLAGS_VLAN_SHIFT);
3236        }
3237
3238        first = tx_ring->next_to_use;
3239
3240        tso = e1000_tso(adapter, tx_ring, skb, protocol);
3241        if (tso < 0) {
3242                dev_kfree_skb_any(skb);
3243                return NETDEV_TX_OK;
3244        }
3245
3246        if (likely(tso)) {
3247                if (likely(hw->mac_type != e1000_82544))
3248                        tx_ring->last_tx_tso = true;
3249                tx_flags |= E1000_TX_FLAGS_TSO;
3250        } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3251                tx_flags |= E1000_TX_FLAGS_CSUM;
3252
3253        if (protocol == htons(ETH_P_IP))
3254                tx_flags |= E1000_TX_FLAGS_IPV4;
3255
3256        if (unlikely(skb->no_fcs))
3257                tx_flags |= E1000_TX_FLAGS_NO_FCS;
3258
3259        count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3260                             nr_frags, mss);
3261
3262        if (count) {
3263                /* The number of descriptors needed is higher than in other
3264                 * Intel drivers due to a number of workarounds.  The breakdown:
3265                 * Data descriptors: MAX_SKB_FRAGS + 1
3266                 * Context Descriptor: 1
3267                 * Keep head from touching tail: 2
3268                 * Workarounds: 3
3269                 */
3270                int desc_needed = MAX_SKB_FRAGS + 7;
3271
3272                netdev_sent_queue(netdev, skb->len);
3273                skb_tx_timestamp(skb);
3274
3275                e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3276
3277                /* 82544 potentially requires twice as many data descriptors
3278                 * in order to guarantee buffers don't end on evenly-aligned
3279                 * dwords
3280                 */
3281                if (adapter->pcix_82544)
3282                        desc_needed += MAX_SKB_FRAGS + 1;
3283
3284                /* Make sure there is space in the ring for the next send. */
3285                e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3286
3287                if (!skb->xmit_more ||
3288                    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3289                        writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3290                        /* we need this if more than one processor can write to
3291                         * our tail at a time, it synchronizes IO on IA64/Altix
3292                         * systems
3293                         */
3294                        mmiowb();
3295                }
3296        } else {
3297                dev_kfree_skb_any(skb);
3298                tx_ring->buffer_info[first].time_stamp = 0;
3299                tx_ring->next_to_use = first;
3300        }
3301
3302        return NETDEV_TX_OK;
3303}
3304
3305#define NUM_REGS 38 /* 1-based count */
3306static void e1000_regdump(struct e1000_adapter *adapter)
3307{
3308        struct e1000_hw *hw = &adapter->hw;
3309        u32 regs[NUM_REGS];
3310        u32 *regs_buff = regs;
3311        int i = 0;
3312
3313        static const char * const reg_name[] = {
3314                "CTRL",  "STATUS",
3315                "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3316                "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3317                "TIDV", "TXDCTL", "TADV", "TARC0",
3318                "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3319                "TXDCTL1", "TARC1",
3320                "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3321                "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3322                "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3323        };
3324
3325        regs_buff[0]  = er32(CTRL);
3326        regs_buff[1]  = er32(STATUS);
3327
3328        regs_buff[2]  = er32(RCTL);
3329        regs_buff[3]  = er32(RDLEN);
3330        regs_buff[4]  = er32(RDH);
3331        regs_buff[5]  = er32(RDT);
3332        regs_buff[6]  = er32(RDTR);
3333
3334        regs_buff[7]  = er32(TCTL);
3335        regs_buff[8]  = er32(TDBAL);
3336        regs_buff[9]  = er32(TDBAH);
3337        regs_buff[10] = er32(TDLEN);
3338        regs_buff[11] = er32(TDH);
3339        regs_buff[12] = er32(TDT);
3340        regs_buff[13] = er32(TIDV);
3341        regs_buff[14] = er32(TXDCTL);
3342        regs_buff[15] = er32(TADV);
3343        regs_buff[16] = er32(TARC0);
3344
3345        regs_buff[17] = er32(TDBAL1);
3346        regs_buff[18] = er32(TDBAH1);
3347        regs_buff[19] = er32(TDLEN1);
3348        regs_buff[20] = er32(TDH1);
3349        regs_buff[21] = er32(TDT1);
3350        regs_buff[22] = er32(TXDCTL1);
3351        regs_buff[23] = er32(TARC1);
3352        regs_buff[24] = er32(CTRL_EXT);
3353        regs_buff[25] = er32(ERT);
3354        regs_buff[26] = er32(RDBAL0);
3355        regs_buff[27] = er32(RDBAH0);
3356        regs_buff[28] = er32(TDFH);
3357        regs_buff[29] = er32(TDFT);
3358        regs_buff[30] = er32(TDFHS);
3359        regs_buff[31] = er32(TDFTS);
3360        regs_buff[32] = er32(TDFPC);
3361        regs_buff[33] = er32(RDFH);
3362        regs_buff[34] = er32(RDFT);
3363        regs_buff[35] = er32(RDFHS);
3364        regs_buff[36] = er32(RDFTS);
3365        regs_buff[37] = er32(RDFPC);
3366
3367        pr_info("Register dump\n");
3368        for (i = 0; i < NUM_REGS; i++)
3369                pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3370}
3371
3372/**
3373 * e1000_dump - Print registers, Tx ring and Rx ring
3374 **/
3375static void e1000_dump(struct e1000_adapter *adapter)
3376{
3377        /* this code doesn't handle multiple rings */
3378        struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3379        struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3380        int i;
3381
3382        if (!netif_msg_hw(adapter))
3383                return;
3384
3385        /* Print Registers */
3386        e1000_regdump(adapter);
3387
3388        /* transmit dump */
3389        pr_info("TX Desc ring0 dump\n");
3390
3391        /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3392         *
3393         * Legacy Transmit Descriptor
3394         *   +--------------------------------------------------------------+
3395         * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3396         *   +--------------------------------------------------------------+
3397         * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3398         *   +--------------------------------------------------------------+
3399         *   63       48 47        36 35    32 31     24 23    16 15        0
3400         *
3401         * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3402         *   63      48 47    40 39       32 31             16 15    8 7      0
3403         *   +----------------------------------------------------------------+
3404         * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3405         *   +----------------------------------------------------------------+
3406         * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3407         *   +----------------------------------------------------------------+
3408         *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3409         *
3410         * Extended Data Descriptor (DTYP=0x1)
3411         *   +----------------------------------------------------------------+
3412         * 0 |                     Buffer Address [63:0]                      |
3413         *   +----------------------------------------------------------------+
3414         * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3415         *   +----------------------------------------------------------------+
3416         *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3417         */
3418        pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3419        pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3420
3421        if (!netif_msg_tx_done(adapter))
3422                goto rx_ring_summary;
3423
3424        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3425                struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3426                struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3427                struct my_u { __le64 a; __le64 b; };
3428                struct my_u *u = (struct my_u *)tx_desc;
3429                const char *type;
3430
3431                if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3432                        type = "NTC/U";
3433                else if (i == tx_ring->next_to_use)
3434                        type = "NTU";
3435                else if (i == tx_ring->next_to_clean)
3436                        type = "NTC";
3437                else
3438                        type = "";
3439
3440                pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3441                        ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3442                        le64_to_cpu(u->a), le64_to_cpu(u->b),
3443                        (u64)buffer_info->dma, buffer_info->length,
3444                        buffer_info->next_to_watch,
3445                        (u64)buffer_info->time_stamp, buffer_info->skb, type);
3446        }
3447
3448rx_ring_summary:
3449        /* receive dump */
3450        pr_info("\nRX Desc ring dump\n");
3451
3452        /* Legacy Receive Descriptor Format
3453         *
3454         * +-----------------------------------------------------+
3455         * |                Buffer Address [63:0]                |
3456         * +-----------------------------------------------------+
3457         * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3458         * +-----------------------------------------------------+
3459         * 63       48 47    40 39      32 31         16 15      0
3460         */
3461        pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3462
3463        if (!netif_msg_rx_status(adapter))
3464                goto exit;
3465
3466        for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3467                struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3468                struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3469                struct my_u { __le64 a; __le64 b; };
3470                struct my_u *u = (struct my_u *)rx_desc;
3471                const char *type;
3472
3473                if (i == rx_ring->next_to_use)
3474                        type = "NTU";
3475                else if (i == rx_ring->next_to_clean)
3476                        type = "NTC";
3477                else
3478                        type = "";
3479
3480                pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3481                        i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3482                        (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3483        } /* for */
3484
3485        /* dump the descriptor caches */
3486        /* rx */
3487        pr_info("Rx descriptor cache in 64bit format\n");
3488        for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3489                pr_info("R%04X: %08X|%08X %08X|%08X\n",
3490                        i,
3491                        readl(adapter->hw.hw_addr + i+4),
3492                        readl(adapter->hw.hw_addr + i),
3493                        readl(adapter->hw.hw_addr + i+12),
3494                        readl(adapter->hw.hw_addr + i+8));
3495        }
3496        /* tx */
3497        pr_info("Tx descriptor cache in 64bit format\n");
3498        for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3499                pr_info("T%04X: %08X|%08X %08X|%08X\n",
3500                        i,
3501                        readl(adapter->hw.hw_addr + i+4),
3502                        readl(adapter->hw.hw_addr + i),
3503                        readl(adapter->hw.hw_addr + i+12),
3504                        readl(adapter->hw.hw_addr + i+8));
3505        }
3506exit:
3507        return;
3508}
3509
3510/**
3511 * e1000_tx_timeout - Respond to a Tx Hang
3512 * @netdev: network interface device structure
3513 **/
3514static void e1000_tx_timeout(struct net_device *netdev)
3515{
3516        struct e1000_adapter *adapter = netdev_priv(netdev);
3517
3518        /* Do the reset outside of interrupt context */
3519        adapter->tx_timeout_count++;
3520        schedule_work(&adapter->reset_task);
3521}
3522
3523static void e1000_reset_task(struct work_struct *work)
3524{
3525        struct e1000_adapter *adapter =
3526                container_of(work, struct e1000_adapter, reset_task);
3527
3528        e_err(drv, "Reset adapter\n");
3529        e1000_reinit_locked(adapter);
3530}
3531
3532/**
3533 * e1000_get_stats - Get System Network Statistics
3534 * @netdev: network interface device structure
3535 *
3536 * Returns the address of the device statistics structure.
3537 * The statistics are actually updated from the watchdog.
3538 **/
3539static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3540{
3541        /* only return the current stats */
3542        return &netdev->stats;
3543}
3544
3545/**
3546 * e1000_change_mtu - Change the Maximum Transfer Unit
3547 * @netdev: network interface device structure
3548 * @new_mtu: new value for maximum frame size
3549 *
3550 * Returns 0 on success, negative on failure
3551 **/
3552static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3553{
3554        struct e1000_adapter *adapter = netdev_priv(netdev);
3555        struct e1000_hw *hw = &adapter->hw;
3556        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
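        /* e.g. the default MTU of 1500 gives a max_frame of 1500 + 14 + 4 = 1518 */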
3557
3558        /* Adapter-specific max frame size limits. */
3559        switch (hw->mac_type) {
3560        case e1000_undefined ... e1000_82542_rev2_1:
3561                if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3562                        e_err(probe, "Jumbo Frames not supported.\n");
3563                        return -EINVAL;
3564                }
3565                break;
3566        default:
3567                /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3568                break;
3569        }
3570
3571        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3572                msleep(1);
3573        /* e1000_down has a dependency on max_frame_size */
3574        hw->max_frame_size = max_frame;
3575        if (netif_running(netdev)) {
3576                /* prevent buffers from being reallocated */
3577                adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3578                e1000_down(adapter);
3579        }
3580
3581        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3582         * means we reserve 2 more; this pushes us to allocate from the next
3583         * larger slab size,
3584         * i.e. RXBUFFER_2048 --> size-4096 slab.
3585         * However, with the new *_jumbo_rx* routines, jumbo receives will
3586         * use fragmented skbs.
3587         */
3588
3589        if (max_frame <= E1000_RXBUFFER_2048)
3590                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3591        else
3592#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3593                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3594#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3595                adapter->rx_buffer_len = PAGE_SIZE;
3596#endif
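        /* e.g. a 9000-byte MTU gives a max_frame of 9018, so rx_buffer_len
         * becomes E1000_RXBUFFER_16384 or PAGE_SIZE and the jumbo Rx routines
         * build the frame from page fragments
         */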
3597
3598        /* adjust allocation if LPE protects us, and we aren't using SBP */
3599        if (!hw->tbi_compatibility_on &&
3600            ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3601             (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3602                adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3603
3604        pr_info("%s changing MTU from %d to %d\n",
3605                netdev->name, netdev->mtu, new_mtu);
3606        netdev->mtu = new_mtu;
3607
3608        if (netif_running(netdev))
3609                e1000_up(adapter);
3610        else
3611                e1000_reset(adapter);
3612
3613        clear_bit(__E1000_RESETTING, &adapter->flags);
3614
3615        return 0;
3616}
3617
3618/**
3619 * e1000_update_stats - Update the board statistics counters
3620 * @adapter: board private structure
3621 **/
3622void e1000_update_stats(struct e1000_adapter *adapter)
3623{
3624        struct net_device *netdev = adapter->netdev;
3625        struct e1000_hw *hw = &adapter->hw;
3626        struct pci_dev *pdev = adapter->pdev;
3627        unsigned long flags;
3628        u16 phy_tmp;
3629
3630#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3631
3632        /* Prevent stats update while adapter is being reset, or if the pci
3633         * connection is down.
3634         */
3635        if (adapter->link_speed == 0)
3636                return;
3637        if (pci_channel_offline(pdev))
3638                return;
3639
3640        spin_lock_irqsave(&adapter->stats_lock, flags);
3641
3642        /* these counters are modified from e1000_tbi_adjust_stats,
3643         * called from the interrupt context, so they must only
3644         * be written while holding adapter->stats_lock
3645         */
3646
3647        adapter->stats.crcerrs += er32(CRCERRS);
3648        adapter->stats.gprc += er32(GPRC);
3649        adapter->stats.gorcl += er32(GORCL);
3650        adapter->stats.gorch += er32(GORCH);
3651        adapter->stats.bprc += er32(BPRC);
3652        adapter->stats.mprc += er32(MPRC);
3653        adapter->stats.roc += er32(ROC);
3654
3655        adapter->stats.prc64 += er32(PRC64);
3656        adapter->stats.prc127 += er32(PRC127);
3657        adapter->stats.prc255 += er32(PRC255);
3658        adapter->stats.prc511 += er32(PRC511);
3659        adapter->stats.prc1023 += er32(PRC1023);
3660        adapter->stats.prc1522 += er32(PRC1522);
3661
3662        adapter->stats.symerrs += er32(SYMERRS);
3663        adapter->stats.mpc += er32(MPC);
3664        adapter->stats.scc += er32(SCC);
3665        adapter->stats.ecol += er32(ECOL);
3666        adapter->stats.mcc += er32(MCC);
3667        adapter->stats.latecol += er32(LATECOL);
3668        adapter->stats.dc += er32(DC);
3669        adapter->stats.sec += er32(SEC);
3670        adapter->stats.rlec += er32(RLEC);
3671        adapter->stats.xonrxc += er32(XONRXC);
3672        adapter->stats.xontxc += er32(XONTXC);
3673        adapter->stats.xoffrxc += er32(XOFFRXC);
3674        adapter->stats.xofftxc += er32(XOFFTXC);
3675        adapter->stats.fcruc += er32(FCRUC);
3676        adapter->stats.gptc += er32(GPTC);
3677        adapter->stats.gotcl += er32(GOTCL);
3678        adapter->stats.gotch += er32(GOTCH);
3679        adapter->stats.rnbc += er32(RNBC);
3680        adapter->stats.ruc += er32(RUC);
3681        adapter->stats.rfc += er32(RFC);
3682        adapter->stats.rjc += er32(RJC);
3683        adapter->stats.torl += er32(TORL);
3684        adapter->stats.torh += er32(TORH);
3685        adapter->stats.totl += er32(TOTL);
3686        adapter->stats.toth += er32(TOTH);
3687        adapter->stats.tpr += er32(TPR);
3688
3689        adapter->stats.ptc64 += er32(PTC64);
3690        adapter->stats.ptc127 += er32(PTC127);
3691        adapter->stats.ptc255 += er32(PTC255);
3692        adapter->stats.ptc511 += er32(PTC511);
3693        adapter->stats.ptc1023 += er32(PTC1023);
3694        adapter->stats.ptc1522 += er32(PTC1522);
3695
3696        adapter->stats.mptc += er32(MPTC);
3697        adapter->stats.bptc += er32(BPTC);
3698
3699        /* used for adaptive IFS */
3700
3701        hw->tx_packet_delta = er32(TPT);
3702        adapter->stats.tpt += hw->tx_packet_delta;
3703        hw->collision_delta = er32(COLC);
3704        adapter->stats.colc += hw->collision_delta;
3705
3706        if (hw->mac_type >= e1000_82543) {
3707                adapter->stats.algnerrc += er32(ALGNERRC);
3708                adapter->stats.rxerrc += er32(RXERRC);
3709                adapter->stats.tncrs += er32(TNCRS);
3710                adapter->stats.cexterr += er32(CEXTERR);
3711                adapter->stats.tsctc += er32(TSCTC);
3712                adapter->stats.tsctfc += er32(TSCTFC);
3713        }
3714
3715        /* Fill out the OS statistics structure */
3716        netdev->stats.multicast = adapter->stats.mprc;
3717        netdev->stats.collisions = adapter->stats.colc;
3718
3719        /* Rx Errors */
3720
3721        /* RLEC on some newer hardware can be incorrect so build
3722         * our own version based on RUC and ROC
3723         */
3724        netdev->stats.rx_errors = adapter->stats.rxerrc +
3725                adapter->stats.crcerrs + adapter->stats.algnerrc +
3726                adapter->stats.ruc + adapter->stats.roc +
3727                adapter->stats.cexterr;
3728        adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3729        netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3730        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3731        netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3732        netdev->stats.rx_missed_errors = adapter->stats.mpc;
3733
3734        /* Tx Errors */
3735        adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3736        netdev->stats.tx_errors = adapter->stats.txerrc;
3737        netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3738        netdev->stats.tx_window_errors = adapter->stats.latecol;
3739        netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3740        if (hw->bad_tx_carr_stats_fd &&
3741            adapter->link_duplex == FULL_DUPLEX) {
3742                netdev->stats.tx_carrier_errors = 0;
3743                adapter->stats.tncrs = 0;
3744        }
3745
3746        /* Tx Dropped needs to be maintained elsewhere */
3747
3748        /* Phy Stats */
3749        if (hw->media_type == e1000_media_type_copper) {
3750                if ((adapter->link_speed == SPEED_1000) &&
3751                   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3752                        phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3753                        adapter->phy_stats.idle_errors += phy_tmp;
3754                }
3755
3756                if ((hw->mac_type <= e1000_82546) &&
3757                   (hw->phy_type == e1000_phy_m88) &&
3758                   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3759                        adapter->phy_stats.receive_errors += phy_tmp;
3760        }
3761
3762        /* Management Stats */
3763        if (hw->has_smbus) {
3764                adapter->stats.mgptc += er32(MGTPTC);
3765                adapter->stats.mgprc += er32(MGTPRC);
3766                adapter->stats.mgpdc += er32(MGTPDC);
3767        }
3768
3769        spin_unlock_irqrestore(&adapter->stats_lock, flags);
3770}
3771
3772/**
3773 * e1000_intr - Interrupt Handler
3774 * @irq: interrupt number
3775 * @data: pointer to a network interface device structure
3776 **/
3777static irqreturn_t e1000_intr(int irq, void *data)
3778{
3779        struct net_device *netdev = data;
3780        struct e1000_adapter *adapter = netdev_priv(netdev);
3781        struct e1000_hw *hw = &adapter->hw;
3782        u32 icr = er32(ICR);
3783
3784        if (unlikely(!icr))
3785                return IRQ_NONE;  /* Not our interrupt */
3786
3787        /* we might have caused the interrupt, but the above
3788         * read cleared it, and just in case the driver is
3789         * down there is nothing to do so return handled
3790         */
3791        if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3792                return IRQ_HANDLED;
3793
3794        if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3795                hw->get_link_status = 1;
3796                /* guard against interrupt when we're going down */
3797                if (!test_bit(__E1000_DOWN, &adapter->flags))
3798                        schedule_delayed_work(&adapter->watchdog_task, 1);
3799        }
3800
3801        /* disable interrupts, without the synchronize_irq bit */
3802        ew32(IMC, ~0);
3803        E1000_WRITE_FLUSH();
3804
3805        if (likely(napi_schedule_prep(&adapter->napi))) {
3806                adapter->total_tx_bytes = 0;
3807                adapter->total_tx_packets = 0;
3808                adapter->total_rx_bytes = 0;
3809                adapter->total_rx_packets = 0;
3810                __napi_schedule(&adapter->napi);
3811        } else {
3812                /* this really should not happen! if it does it is basically a
3813                 * bug, but not a hard error, so enable ints and continue
3814                 */
3815                if (!test_bit(__E1000_DOWN, &adapter->flags))
3816                        e1000_irq_enable(adapter);
3817        }
3818
3819        return IRQ_HANDLED;
3820}
3821
3822/**
3823 * e1000_clean - NAPI Rx polling callback
3824 * @napi: napi struct embedded in the board private structure
 * @budget: maximum amount of work this call is allowed to do
3825 **/
3826static int e1000_clean(struct napi_struct *napi, int budget)
3827{
3828        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3829                                                     napi);
3830        int tx_clean_complete = 0, work_done = 0;
3831
3832        tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3833
3834        adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3835
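        /* if Tx cleaning did not finish, report the full budget so NAPI keeps
         * polling instead of completing and re-enabling interrupts
         */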
3836        if (!tx_clean_complete)
3837                work_done = budget;
3838
3839        /* If budget not fully consumed, exit the polling mode */
3840        if (work_done < budget) {
3841                if (likely(adapter->itr_setting & 3))
3842                        e1000_set_itr(adapter);
3843                napi_complete_done(napi, work_done);
3844                if (!test_bit(__E1000_DOWN, &adapter->flags))
3845                        e1000_irq_enable(adapter);
3846        }
3847
3848        return work_done;
3849}
3850
3851/**
3852 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3853 * @adapter: board private structure
3854 **/
3855static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3856                               struct e1000_tx_ring *tx_ring)
3857{
3858        struct e1000_hw *hw = &adapter->hw;
3859        struct net_device *netdev = adapter->netdev;
3860        struct e1000_tx_desc *tx_desc, *eop_desc;
3861        struct e1000_tx_buffer *buffer_info;
3862        unsigned int i, eop;
3863        unsigned int count = 0;
3864        unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3865        unsigned int bytes_compl = 0, pkts_compl = 0;
3866
3867        i = tx_ring->next_to_clean;
3868        eop = tx_ring->buffer_info[i].next_to_watch;
3869        eop_desc = E1000_TX_DESC(*tx_ring, eop);
3870
3871        while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3872               (count < tx_ring->count)) {
3873                bool cleaned = false;
3874                dma_rmb();      /* read buffer_info after eop_desc */
3875                for ( ; !cleaned; count++) {
3876                        tx_desc = E1000_TX_DESC(*tx_ring, i);
3877                        buffer_info = &tx_ring->buffer_info[i];
3878                        cleaned = (i == eop);
3879
3880                        if (cleaned) {
3881                                total_tx_packets += buffer_info->segs;
3882                                total_tx_bytes += buffer_info->bytecount;
3883                                if (buffer_info->skb) {
3884                                        bytes_compl += buffer_info->skb->len;
3885                                        pkts_compl++;
3886                                }
3887
3888                        }
3889                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3890                        tx_desc->upper.data = 0;
3891
3892                        if (unlikely(++i == tx_ring->count))
3893                                i = 0;
3894                }
3895
3896                eop = tx_ring->buffer_info[i].next_to_watch;
3897                eop_desc = E1000_TX_DESC(*tx_ring, eop);
3898        }
3899
3900        /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3901         * which will reuse the cleaned buffers.
3902         */
3903        smp_store_release(&tx_ring->next_to_clean, i);
3904
3905        netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3906
3907#define TX_WAKE_THRESHOLD 32
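        /* only wake the queue once at least 32 descriptors are free again,
         * rather than as soon as a single descriptor is reclaimed
         */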
3908        if (unlikely(count && netif_carrier_ok(netdev) &&
3909                     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3910                /* Make sure that anybody stopping the queue after this
3911                 * sees the new next_to_clean.
3912                 */
3913                smp_mb();
3914
3915                if (netif_queue_stopped(netdev) &&
3916                    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3917                        netif_wake_queue(netdev);
3918                        ++adapter->restart_queue;
3919                }
3920        }
3921
3922        if (adapter->detect_tx_hung) {
3923                /* Detect a transmit hang in hardware; this serializes the
3924                 * check with the clearing of time_stamp and the movement of i
3925                 */
3926                adapter->detect_tx_hung = false;
3927                if (tx_ring->buffer_info[eop].time_stamp &&
3928                    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3929                               (adapter->tx_timeout_factor * HZ)) &&
3930                    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3931
3932                        /* detected Tx unit hang */
3933                        e_err(drv, "Detected Tx Unit Hang\n"
3934                              "  Tx Queue             <%lu>\n"
3935                              "  TDH                  <%x>\n"
3936                              "  TDT                  <%x>\n"
3937                              "  next_to_use          <%x>\n"
3938                              "  next_to_clean        <%x>\n"
3939                              "buffer_info[next_to_clean]\n"
3940                              "  time_stamp           <%lx>\n"
3941                              "  next_to_watch        <%x>\n"
3942                              "  jiffies              <%lx>\n"
3943                              "  next_to_watch.status <%x>\n",
3944                                (unsigned long)(tx_ring - adapter->tx_ring),
3945                                readl(hw->hw_addr + tx_ring->tdh),
3946                                readl(hw->hw_addr + tx_ring->tdt),
3947                                tx_ring->next_to_use,
3948                                tx_ring->next_to_clean,
3949                                tx_ring->buffer_info[eop].time_stamp,
3950                                eop,
3951                                jiffies,
3952                                eop_desc->upper.fields.status);
3953                        e1000_dump(adapter);
3954                        netif_stop_queue(netdev);
3955                }
3956        }
3957        adapter->total_tx_bytes += total_tx_bytes;
3958        adapter->total_tx_packets += total_tx_packets;
3959        netdev->stats.tx_bytes += total_tx_bytes;
3960        netdev->stats.tx_packets += total_tx_packets;
3961        return count < tx_ring->count;
3962}
3963
3964/**
3965 * e1000_rx_checksum - Receive Checksum Offload for 82543
3966 * @adapter:     board private structure
3967 * @status_err:  receive descriptor status and error fields
3968 * @csum:        receive descriptor csum field
3969 * @skb:         socket buffer with received data
3970 **/
3971static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3972                              u32 csum, struct sk_buff *skb)
3973{
3974        struct e1000_hw *hw = &adapter->hw;
3975        u16 status = (u16)status_err;
3976        u8 errors = (u8)(status_err >> 24);
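        /* the caller packs the descriptor's status field into the low bits of
         * status_err and its error field into bits 31:24
         */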
3977
3978        skb_checksum_none_assert(skb);
3979
3980        /* 82543 or newer only */
3981        if (unlikely(hw->mac_type < e1000_82543))
3982                return;
3983        /* Ignore Checksum bit is set */
3984        if (unlikely(status & E1000_RXD_STAT_IXSM))
3985                return;
3986        /* TCP/UDP checksum error bit is set */
3987        if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3988                /* let the stack verify checksum errors */
3989                adapter->hw_csum_err++;
3990                return;
3991        }
3992        /* TCP/UDP Checksum has not been calculated */
3993        if (!(status & E1000_RXD_STAT_TCPCS))
3994                return;
3995
3996        /* It must be a TCP or UDP packet with a valid checksum */
3997        skb->ip_summed = CHECKSUM_UNNECESSARY;
4001        adapter->hw_csum_good++;
4002}
4003
4004/**
4005 * e1000_consume_page - helper function for jumbo Rx path
4006 **/
4007static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
4008                               u16 length)
4009{
4010        bi->rxbuf.page = NULL;
4011        skb->len += length;
4012        skb->data_len += length;
4013        skb->truesize += PAGE_SIZE;
4014}
4015
4016/**
4017 * e1000_receive_skb - helper function to handle rx indications
4018 * @adapter: board private structure
4019 * @status: descriptor status field as written by hardware
4020 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4021 * @skb: pointer to sk_buff to be indicated to stack
4022 */
4023static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4024                              __le16 vlan, struct sk_buff *skb)
4025{
4026        skb->protocol = eth_type_trans(skb, adapter->netdev);
4027
4028        if (status & E1000_RXD_STAT_VP) {
4029                u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4030
4031                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4032        }
4033        napi_gro_receive(&adapter->napi, skb);
4034}
4035
4036/**
4037 * e1000_tbi_adjust_stats
4038 * @hw: Struct containing variables accessed by shared code
4039 * @frame_len: The length of the frame in question
4040 * @mac_addr: The Ethernet destination address of the frame in question
4041 *
4042 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4043 */
4044static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4045                                   struct e1000_hw_stats *stats,
4046                                   u32 frame_len, const u8 *mac_addr)
4047{
4048        u64 carry_bit;
4049
4050        /* First adjust the frame length. */
4051        frame_len--;
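        /* the accepted frame carries one stray byte that the Rx path trims off
         * (the length-- after e1000_tbi_should_accept()), so trim it here too
         */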
4052        /* We need to adjust the statistics counters, since the hardware
4053         * counters overcount this packet as a CRC error and undercount
4054         * the packet as a good packet
4055         */
4056        /* This packet should not be counted as a CRC error. */
4057        stats->crcerrs--;
4058        /* This packet does count as a Good Packet Received. */
4059        stats->gprc++;
4060
4061        /* Adjust the Good Octets received counters */
4062        carry_bit = 0x80000000 & stats->gorcl;
4063        stats->gorcl += frame_len;
4064        /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4065         * Received Count) was one before the addition,
4066         * AND it is zero after, then we lost the carry out,
4067         * need to add one to Gorch (Good Octets Received Count High).
4068         * This could be simplified if all environments supported
4069         * 64-bit integers.
4070         */
4071        if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4072                stats->gorch++;
4073        /* Is this a broadcast or multicast?  Check broadcast first,
4074         * since the test for a multicast frame will test positive on
4075         * a broadcast frame.
4076         */
4077        if (is_broadcast_ether_addr(mac_addr))
4078                stats->bprc++;
4079        else if (is_multicast_ether_addr(mac_addr))
4080                stats->mprc++;
4081
4082        if (frame_len == hw->max_frame_size) {
4083                /* In this case, the hardware has overcounted the number of
4084                 * oversize frames.
4085                 */
4086                if (stats->roc > 0)
4087                        stats->roc--;
4088        }
4089
4090        /* Adjust the bin counters when the extra byte put the frame in the
4091         * wrong bin. Remember that the frame_len was adjusted above.
4092         */
4093        if (frame_len == 64) {
4094                stats->prc64++;
4095                stats->prc127--;
4096        } else if (frame_len == 127) {
4097                stats->prc127++;
4098                stats->prc255--;
4099        } else if (frame_len == 255) {
4100                stats->prc255++;
4101                stats->prc511--;
4102        } else if (frame_len == 511) {
4103                stats->prc511++;
4104                stats->prc1023--;
4105        } else if (frame_len == 1023) {
4106                stats->prc1023++;
4107                stats->prc1522--;
4108        } else if (frame_len == 1522) {
4109                stats->prc1522++;
4110        }
4111}
4112
4113static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4114                                    u8 status, u8 errors,
4115                                    u32 length, const u8 *data)
4116{
4117        struct e1000_hw *hw = &adapter->hw;
4118        u8 last_byte = *(data + length - 1);
4119
4120        if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4121                unsigned long irq_flags;
4122
4123                spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4124                e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4125                spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4126
4127                return true;
4128        }
4129
4130        return false;
4131}
4132
4133static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4134                                          unsigned int bufsz)
4135{
4136        struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4137
4138        if (unlikely(!skb))
4139                adapter->alloc_rx_buff_failed++;
4140        return skb;
4141}
4142
4143/**
4144 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4145 * @adapter: board private structure
4146 * @rx_ring: ring to clean
4147 * @work_done: amount of napi work completed this call
4148 * @work_to_do: max amount of work allowed for this call to do
4149 *
4150 * the return value indicates whether actual cleaning was done, there
4151 * is no guarantee that everything was cleaned
4152 */
4153static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4154                                     struct e1000_rx_ring *rx_ring,
4155                                     int *work_done, int work_to_do)
4156{
4157        struct net_device *netdev = adapter->netdev;
4158        struct pci_dev *pdev = adapter->pdev;
4159        struct e1000_rx_desc *rx_desc, *next_rxd;
4160        struct e1000_rx_buffer *buffer_info, *next_buffer;
4161        u32 length;
4162        unsigned int i;
4163        int cleaned_count = 0;
4164        bool cleaned = false;
4165        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4166
4167        i = rx_ring->next_to_clean;
4168        rx_desc = E1000_RX_DESC(*rx_ring, i);
4169        buffer_info = &rx_ring->buffer_info[i];
4170
4171        while (rx_desc->status & E1000_RXD_STAT_DD) {
4172                struct sk_buff *skb;
4173                u8 status;
4174
4175                if (*work_done >= work_to_do)
4176                        break;
4177                (*work_done)++;
4178                dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4179
4180                status = rx_desc->status;
4181
4182                if (++i == rx_ring->count)
4183                        i = 0;
4184
4185                next_rxd = E1000_RX_DESC(*rx_ring, i);
4186                prefetch(next_rxd);
4187
4188                next_buffer = &rx_ring->buffer_info[i];
4189
4190                cleaned = true;
4191                cleaned_count++;
4192                dma_unmap_page(&pdev->dev, buffer_info->dma,
4193                               adapter->rx_buffer_len, DMA_FROM_DEVICE);
4194                buffer_info->dma = 0;
4195
4196                length = le16_to_cpu(rx_desc->length);
4197
4198                /* errors is only valid for DD + EOP descriptors */
4199                if (unlikely((status & E1000_RXD_STAT_EOP) &&
4200                    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4201                        u8 *mapped = page_address(buffer_info->rxbuf.page);
4202
4203                        if (e1000_tbi_should_accept(adapter, status,
4204                                                    rx_desc->errors,
4205                                                    length, mapped)) {
4206                                length--;
4207                        } else if (netdev->features & NETIF_F_RXALL) {
4208                                goto process_skb;
4209                        } else {
4210                                /* an error means any chain goes out the window
4211                                 * too
4212                                 */
4213                                if (rx_ring->rx_skb_top)
4214                                        dev_kfree_skb(rx_ring->rx_skb_top);
4215                                rx_ring->rx_skb_top = NULL;
4216                                goto next_desc;
4217                        }
4218                }
4219
4220#define rxtop rx_ring->rx_skb_top
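                    /* A jumbo frame can span several descriptors.  Page
                     * fragments are collected in rx_ring->rx_skb_top (aliased
                     * as rxtop above) until the descriptor with EOP set
                     * completes the chain and the skb is handed to GRO.
                     */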
4221process_skb:
4222                if (!(status & E1000_RXD_STAT_EOP)) {
4223                        /* this descriptor is only the beginning (or middle) */
4224                        if (!rxtop) {
4225                                /* this is the beginning of a chain */
4226                                rxtop = napi_get_frags(&adapter->napi);
4227                                if (!rxtop)
4228                                        break;
4229
4230                                skb_fill_page_desc(rxtop, 0,
4231                                                   buffer_info->rxbuf.page,
4232                                                   0, length);
4233                        } else {
4234                                /* this is the middle of a chain */
4235                                skb_fill_page_desc(rxtop,
4236                                    skb_shinfo(rxtop)->nr_frags,
4237                                    buffer_info->rxbuf.page, 0, length);
4238                        }
4239                        e1000_consume_page(buffer_info, rxtop, length);
4240                        goto next_desc;
4241                } else {
4242                        if (rxtop) {
4243                                /* end of the chain */
4244                                skb_fill_page_desc(rxtop,
4245                                    skb_shinfo(rxtop)->nr_frags,
4246                                    buffer_info->rxbuf.page, 0, length);
4247                                skb = rxtop;
4248                                rxtop = NULL;
4249                                e1000_consume_page(buffer_info, skb, length);
4250                        } else {
4251                                struct page *p;
4252                                /* no chain, got EOP: this buf is the whole
4253                                 * packet; copybreak to save put_page/alloc_page
4254                                 */
4255                                p = buffer_info->rxbuf.page;
4256                                if (length <= copybreak) {
4257                                        u8 *vaddr;
4258
4259                                        if (likely(!(netdev->features & NETIF_F_RXFCS)))
4260                                                length -= 4;
4261                                        skb = e1000_alloc_rx_skb(adapter,
4262                                                                 length);
4263                                        if (!skb)
4264                                                break;
4265
4266                                        vaddr = kmap_atomic(p);
4267                                        memcpy(skb_tail_pointer(skb), vaddr,
4268                                               length);
4269                                        kunmap_atomic(vaddr);
4270                                        /* re-use the page, so don't erase
4271                                         * buffer_info->rxbuf.page
4272                                         */
4273                                        skb_put(skb, length);
4274                                        e1000_rx_checksum(adapter,
4275                                                          status | rx_desc->errors << 24,
4276                                                          le16_to_cpu(rx_desc->csum), skb);
4277
4278                                        total_rx_bytes += skb->len;
4279                                        total_rx_packets++;
4280
4281                                        e1000_receive_skb(adapter, status,
4282                                                          rx_desc->special, skb);
4283                                        goto next_desc;
4284                                } else {
4285                                        skb = napi_get_frags(&adapter->napi);
4286                                        if (!skb) {
4287                                                adapter->alloc_rx_buff_failed++;
4288                                                break;
4289                                        }
4290                                        skb_fill_page_desc(skb, 0, p, 0,
4291                                                           length);
4292                                        e1000_consume_page(buffer_info, skb,
4293                                                           length);
4294                                }
4295                        }
4296                }
4297
4298                /* Receive Checksum Offload XXX recompute due to CRC strip? */
4299                e1000_rx_checksum(adapter,
4300                                  (u32)(status) |
4301                                  ((u32)(rx_desc->errors) << 24),
4302                                  le16_to_cpu(rx_desc->csum), skb);
4303
4304                total_rx_bytes += (skb->len - 4); /* don't count FCS */
4305                if (likely(!(netdev->features & NETIF_F_RXFCS)))
4306                        pskb_trim(skb, skb->len - 4);
4307                total_rx_packets++;
4308
4309                if (status & E1000_RXD_STAT_VP) {
4310                        __le16 vlan = rx_desc->special;
4311                        u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4312
4313                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4314                }
4315
4316                napi_gro_frags(&adapter->napi);
4317
4318next_desc:
4319                rx_desc->status = 0;
4320
4321                /* return some buffers to hardware, one at a time is too slow */
4322                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4323                        adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4324                        cleaned_count = 0;
4325                }
4326
4327                /* use prefetched values */
4328                rx_desc = next_rxd;
4329                buffer_info = next_buffer;
4330        }
4331        rx_ring->next_to_clean = i;
4332
4333        cleaned_count = E1000_DESC_UNUSED(rx_ring);
4334        if (cleaned_count)
4335                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4336
4337        adapter->total_rx_packets += total_rx_packets;
4338        adapter->total_rx_bytes += total_rx_bytes;
4339        netdev->stats.rx_bytes += total_rx_bytes;
4340        netdev->stats.rx_packets += total_rx_packets;
4341        return cleaned;
4342}
4343
4344/* this should improve performance for small packets with large amounts
4345 * of reassembly being done in the stack
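     * (for such small packets the payload is copied into a freshly allocated
     *  skb so the original receive buffer and its DMA mapping can be reused
     *  for a later packet)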
4346 */
4347static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4348                                       struct e1000_rx_buffer *buffer_info,
4349                                       u32 length, const void *data)
4350{
4351        struct sk_buff *skb;
4352
4353        if (length > copybreak)
4354                return NULL;
4355
4356        skb = e1000_alloc_rx_skb(adapter, length);
4357        if (!skb)
4358                return NULL;
4359
4360        dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4361                                length, DMA_FROM_DEVICE);
4362
4363        memcpy(skb_put(skb, length), data, length);
4364
4365        return skb;
4366}
4367
4368/**
4369 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4370 * @adapter: board private structure
4371 * @rx_ring: ring to clean
4372 * @work_done: amount of napi work completed this call
4373 * @work_to_do: max amount of work allowed for this call to do
4374 */
4375static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4376                               struct e1000_rx_ring *rx_ring,
4377                               int *work_done, int work_to_do)
4378{
4379        struct net_device *netdev = adapter->netdev;
4380        struct pci_dev *pdev = adapter->pdev;
4381        struct e1000_rx_desc *rx_desc, *next_rxd;
4382        struct e1000_rx_buffer *buffer_info, *next_buffer;
4383        u32 length;
4384        unsigned int i;
4385        int cleaned_count = 0;
4386        bool cleaned = false;
4387        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4388
4389        i = rx_ring->next_to_clean;
4390        rx_desc = E1000_RX_DESC(*rx_ring, i);
4391        buffer_info = &rx_ring->buffer_info[i];
4392
4393        while (rx_desc->status & E1000_RXD_STAT_DD) {
4394                struct sk_buff *skb;
4395                u8 *data;
4396                u8 status;
4397
4398                if (*work_done >= work_to_do)
4399                        break;
4400                (*work_done)++;
4401                dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4402
4403                status = rx_desc->status;
4404                length = le16_to_cpu(rx_desc->length);
4405
4406                data = buffer_info->rxbuf.data;
4407                prefetch(data);
4408                skb = e1000_copybreak(adapter, buffer_info, length, data);
4409                if (!skb) {
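                            /* not a copybreak candidate: wrap an skb directly
                             * around the existing receive buffer with
                             * build_skb() so the data reaches the stack
                             * without copying, and give up our reference to
                             * that buffer
                             */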
4410                        unsigned int frag_len = e1000_frag_len(adapter);
4411
4412                        skb = build_skb(data - E1000_HEADROOM, frag_len);
4413                        if (!skb) {
4414                                adapter->alloc_rx_buff_failed++;
4415                                break;
4416                        }
4417
4418                        skb_reserve(skb, E1000_HEADROOM);
4419                        dma_unmap_single(&pdev->dev, buffer_info->dma,
4420                                         adapter->rx_buffer_len,
4421                                         DMA_FROM_DEVICE);
4422                        buffer_info->dma = 0;
4423                        buffer_info->rxbuf.data = NULL;
4424                }
4425
4426                if (++i == rx_ring->count)
4427                        i = 0;
4428
4429                next_rxd = E1000_RX_DESC(*rx_ring, i);
4430                prefetch(next_rxd);
4431
4432                next_buffer = &rx_ring->buffer_info[i];
4433
4434                cleaned = true;
4435                cleaned_count++;
4436
4437                /* !EOP means multiple descriptors were used to store a single
4438                 * packet; if that's the case we need to toss it.  In fact, we
4439                 * need to toss every packet with the EOP bit clear and the next
4440                 * frame that _does_ have the EOP bit set, as it is by
4441                 * definition only a frame fragment
4442                 */
4443                if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4444                        adapter->discarding = true;
4445
4446                if (adapter->discarding) {
4447                        /* All receives must fit into a single buffer */
4448                        netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4449                        dev_kfree_skb(skb);
4450                        if (status & E1000_RXD_STAT_EOP)
4451                                adapter->discarding = false;
4452                        goto next_desc;
4453                }
4454
4455                if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4456                        if (e1000_tbi_should_accept(adapter, status,
4457                                                    rx_desc->errors,
4458                                                    length, data)) {
4459                                length--;
4460                        } else if (netdev->features & NETIF_F_RXALL) {
4461                                goto process_skb;
4462                        } else {
4463                                dev_kfree_skb(skb);
4464                                goto next_desc;
4465                        }
4466                }
4467
4468process_skb:
4469                total_rx_bytes += (length - 4); /* don't count FCS */
4470                total_rx_packets++;
4471
4472                if (likely(!(netdev->features & NETIF_F_RXFCS)))
4473                        /* adjust length to remove Ethernet CRC, this must be
4474                         * done after the TBI_ACCEPT workaround above
4475                         */
4476                        length -= 4;
4477
4478                if (buffer_info->rxbuf.data == NULL)
4479                        skb_put(skb, length);
4480                else /* copybreak skb */
4481                        skb_trim(skb, length);
4482
4483                /* Receive Checksum Offload */
4484                e1000_rx_checksum(adapter,
4485                                  (u32)(status) |
4486                                  ((u32)(rx_desc->errors) << 24),
4487                                  le16_to_cpu(rx_desc->csum), skb);
4488
4489                e1000_receive_skb(adapter, status, rx_desc->special, skb);
4490
4491next_desc:
4492                rx_desc->status = 0;
4493
4494                /* return some buffers to hardware, one at a time is too slow */
4495                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4496                        adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4497                        cleaned_count = 0;
4498                }
4499
4500                /* use prefetched values */
4501                rx_desc = next_rxd;
4502                buffer_info = next_buffer;
4503        }
4504        rx_ring->next_to_clean = i;
4505
4506        cleaned_count = E1000_DESC_UNUSED(rx_ring);
4507        if (cleaned_count)
4508                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4509
4510        adapter->total_rx_packets += total_rx_packets;
4511        adapter->total_rx_bytes += total_rx_bytes;
4512        netdev->stats.rx_bytes += total_rx_bytes;
4513        netdev->stats.rx_packets += total_rx_packets;
4514        return cleaned;
4515}
4516
4517/**
4518 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4519 * @adapter: address of board private structure
4520 * @rx_ring: pointer to receive ring structure
4521 * @cleaned_count: number of buffers to allocate this pass
4522 **/
4523static void
4524e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4525                             struct e1000_rx_ring *rx_ring, int cleaned_count)
4526{
4527        struct pci_dev *pdev = adapter->pdev;
4528        struct e1000_rx_desc *rx_desc;
4529        struct e1000_rx_buffer *buffer_info;
4530        unsigned int i;
4531
4532        i = rx_ring->next_to_use;
4533        buffer_info = &rx_ring->buffer_info[i];
4534
4535        while (cleaned_count--) {
4536                /* allocate a new page if necessary */
4537                if (!buffer_info->rxbuf.page) {
4538                        buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4539                        if (unlikely(!buffer_info->rxbuf.page)) {
4540                                adapter->alloc_rx_buff_failed++;
4541                                break;
4542                        }
4543                }
4544
4545                if (!buffer_info->dma) {
4546                        buffer_info->dma = dma_map_page(&pdev->dev,
4547                                                        buffer_info->rxbuf.page, 0,
4548                                                        adapter->rx_buffer_len,
4549                                                        DMA_FROM_DEVICE);
4550                        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4551                                put_page(buffer_info->rxbuf.page);
4552                                buffer_info->rxbuf.page = NULL;
4553                                buffer_info->dma = 0;
4554                                adapter->alloc_rx_buff_failed++;
4555                                break;
4556                        }
4557                }
4558
4559                rx_desc = E1000_RX_DESC(*rx_ring, i);
4560                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4561
4562                if (unlikely(++i == rx_ring->count))
4563                        i = 0;
4564                buffer_info = &rx_ring->buffer_info[i];
4565        }
4566
4567        if (likely(rx_ring->next_to_use != i)) {
4568                rx_ring->next_to_use = i;
4569                if (unlikely(i-- == 0))
4570                        i = (rx_ring->count - 1);
4571
4572                /* Force memory writes to complete before letting h/w
4573                 * know there are new descriptors to fetch.  (Only
4574                 * applicable for weak-ordered memory model archs,
4575                 * such as IA-64).
4576                 */
4577                wmb();
4578                writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4579        }
4580}
4581
4582/**
4583 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4584 * @adapter: address of board private structure
     * @rx_ring: pointer to receive ring structure
     * @cleaned_count: number of buffers to allocate this pass
4585 **/
4586static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4587                                   struct e1000_rx_ring *rx_ring,
4588                                   int cleaned_count)
4589{
4590        struct e1000_hw *hw = &adapter->hw;
4591        struct pci_dev *pdev = adapter->pdev;
4592        struct e1000_rx_desc *rx_desc;
4593        struct e1000_rx_buffer *buffer_info;
4594        unsigned int i;
4595        unsigned int bufsz = adapter->rx_buffer_len;
4596
4597        i = rx_ring->next_to_use;
4598        buffer_info = &rx_ring->buffer_info[i];
4599
4600        while (cleaned_count--) {
4601                void *data;
4602
4603                if (buffer_info->rxbuf.data)
4604                        goto skip;
4605
4606                data = e1000_alloc_frag(adapter);
4607                if (!data) {
4608                        /* Better luck next round */
4609                        adapter->alloc_rx_buff_failed++;
4610                        break;
4611                }
4612
4613                /* Fix for errata 23, can't cross 64kB boundary */
4614                if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4615                        void *olddata = data;
4616                        e_err(rx_err, "skb align check failed: %u bytes at "
4617                              "%p\n", bufsz, data);
4618                        /* Try again, without freeing the previous */
4619                        data = e1000_alloc_frag(adapter);
4620                        /* Failed allocation, critical failure */
4621                        if (!data) {
4622                                skb_free_frag(olddata);
4623                                adapter->alloc_rx_buff_failed++;
4624                                break;
4625                        }
4626
4627                        if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4628                                /* give up */
4629                                skb_free_frag(data);
4630                                skb_free_frag(olddata);
4631                                adapter->alloc_rx_buff_failed++;
4632                                break;
4633                        }
4634
4635                        /* Use new allocation */
4636                        skb_free_frag(olddata);
4637                }
4638                buffer_info->dma = dma_map_single(&pdev->dev,
4639                                                  data,
4640                                                  adapter->rx_buffer_len,
4641                                                  DMA_FROM_DEVICE);
4642                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4643                        skb_free_frag(data);
4644                        buffer_info->dma = 0;
4645                        adapter->alloc_rx_buff_failed++;
4646                        break;
4647                }
4648
4649                /* XXX if it was allocated cleanly it will never map to a
4650                 * boundary crossing
4651                 */
4652
4653                /* Fix for errata 23, can't cross 64kB boundary */
4654                if (!e1000_check_64k_bound(adapter,
4655                                        (void *)(unsigned long)buffer_info->dma,
4656                                        adapter->rx_buffer_len)) {
4657                        e_err(rx_err, "dma align check failed: %u bytes at "
4658                              "%p\n", adapter->rx_buffer_len,
4659                              (void *)(unsigned long)buffer_info->dma);
4660
4661                        dma_unmap_single(&pdev->dev, buffer_info->dma,
4662                                         adapter->rx_buffer_len,
4663                                         DMA_FROM_DEVICE);
4664
4665                        skb_free_frag(data);
4666                        buffer_info->rxbuf.data = NULL;
4667                        buffer_info->dma = 0;
4668
4669                        adapter->alloc_rx_buff_failed++;
4670                        break;
4671                }
4672                buffer_info->rxbuf.data = data;
4673 skip:
4674                rx_desc = E1000_RX_DESC(*rx_ring, i);
4675                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4676
4677                if (unlikely(++i == rx_ring->count))
4678                        i = 0;
4679                buffer_info = &rx_ring->buffer_info[i];
4680        }
4681
4682        if (likely(rx_ring->next_to_use != i)) {
4683                rx_ring->next_to_use = i;
4684                if (unlikely(i-- == 0))
4685                        i = (rx_ring->count - 1);
4686
4687                /* Force memory writes to complete before letting h/w
4688                 * know there are new descriptors to fetch.  (Only
4689                 * applicable for weak-ordered memory model archs,
4690                 * such as IA-64).
4691                 */
4692                wmb();
4693                writel(i, hw->hw_addr + rx_ring->rdt);
4694        }
4695}
4696
4697/**
4698 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4699 * @adapter: board private structure
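     *
     * If the 1000BASE-T master/slave negotiation keeps reporting a
     * configuration fault, drop the manually forced master/slave setting and
     * restart autonegotiation; if the link still fails to come up after
     * several attempts, turn the setting back on in case the link partner is
     * using a 2/3-pair cable.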
4700 **/
4701static void e1000_smartspeed(struct e1000_adapter *adapter)
4702{
4703        struct e1000_hw *hw = &adapter->hw;
4704        u16 phy_status;
4705        u16 phy_ctrl;
4706
4707        if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4708           !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4709                return;
4710
4711        if (adapter->smartspeed == 0) {
4712                /* If Master/Slave config fault is asserted twice,
4713                 * we assume back-to-back faults
4714                 */
4715                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4716                if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4717                        return;
4718                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4719                if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4720                        return;
4721                e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4722                if (phy_ctrl & CR_1000T_MS_ENABLE) {
4723                        phy_ctrl &= ~CR_1000T_MS_ENABLE;
4724                        e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4725                                            phy_ctrl);
4726                        adapter->smartspeed++;
4727                        if (!e1000_phy_setup_autoneg(hw) &&
4728                           !e1000_read_phy_reg(hw, PHY_CTRL,
4729                                               &phy_ctrl)) {
4730                                phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4731                                             MII_CR_RESTART_AUTO_NEG);
4732                                e1000_write_phy_reg(hw, PHY_CTRL,
4733                                                    phy_ctrl);
4734                        }
4735                }
4736                return;
4737        } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4738                /* If still no link, perhaps using 2/3 pair cable */
4739                e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4740                phy_ctrl |= CR_1000T_MS_ENABLE;
4741                e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4742                if (!e1000_phy_setup_autoneg(hw) &&
4743                   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4744                        phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4745                                     MII_CR_RESTART_AUTO_NEG);
4746                        e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4747                }
4748        }
4749        /* Restart process after E1000_SMARTSPEED_MAX iterations */
4750        if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4751                adapter->smartspeed = 0;
4752}
4753
4754/**
4755 * e1000_ioctl - handle device-specific ioctl requests
4756 * @netdev: pointer to the network interface device structure
4757 * @ifr: interface request structure
4758 * @cmd: ioctl command
4759 **/
4760static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4761{
4762        switch (cmd) {
4763        case SIOCGMIIPHY:
4764        case SIOCGMIIREG:
4765        case SIOCSMIIREG:
4766                return e1000_mii_ioctl(netdev, ifr, cmd);
4767        default:
4768                return -EOPNOTSUPP;
4769        }
4770}
4771
4772/**
4773 * e1000_mii_ioctl - read/write MII PHY registers on behalf of user space
4774 * @netdev: pointer to the network interface device structure
4775 * @ifr: interface request structure holding the MII register data
4776 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4777 **/
4778static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4779                           int cmd)
4780{
4781        struct e1000_adapter *adapter = netdev_priv(netdev);
4782        struct e1000_hw *hw = &adapter->hw;
4783        struct mii_ioctl_data *data = if_mii(ifr);
4784        int retval;
4785        u16 mii_reg;
4786        unsigned long flags;
4787
4788        if (hw->media_type != e1000_media_type_copper)
4789                return -EOPNOTSUPP;
4790
4791        switch (cmd) {
4792        case SIOCGMIIPHY:
4793                data->phy_id = hw->phy_addr;
4794                break;
4795        case SIOCGMIIREG:
4796                spin_lock_irqsave(&adapter->stats_lock, flags);
4797                if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4798                                   &data->val_out)) {
4799                        spin_unlock_irqrestore(&adapter->stats_lock, flags);
4800                        return -EIO;
4801                }
4802                spin_unlock_irqrestore(&adapter->stats_lock, flags);
4803                break;
4804        case SIOCSMIIREG:
4805                if (data->reg_num & ~(0x1F))
4806                        return -EFAULT;
4807                mii_reg = data->val_in;
4808                spin_lock_irqsave(&adapter->stats_lock, flags);
4809                if (e1000_write_phy_reg(hw, data->reg_num,
4810                                        mii_reg)) {
4811                        spin_unlock_irqrestore(&adapter->stats_lock, flags);
4812                        return -EIO;
4813                }
4814                spin_unlock_irqrestore(&adapter->stats_lock, flags);
4815                if (hw->media_type == e1000_media_type_copper) {
4816                        switch (data->reg_num) {
4817                        case PHY_CTRL:
4818                                if (mii_reg & MII_CR_POWER_DOWN)
4819                                        break;
4820                                if (mii_reg & MII_CR_AUTO_NEG_EN) {
4821                                        hw->autoneg = 1;
4822                                        hw->autoneg_advertised = 0x2F;
4823                                } else {
4824                                        u32 speed;
4825                                        if (mii_reg & 0x40)
4826                                                speed = SPEED_1000;
4827                                        else if (mii_reg & 0x2000)
4828                                                speed = SPEED_100;
4829                                        else
4830                                                speed = SPEED_10;
4831                                        retval = e1000_set_spd_dplx(
4832                                                adapter, speed,
4833                                                ((mii_reg & 0x100)
4834                                                 ? DUPLEX_FULL :
4835                                                 DUPLEX_HALF));
4836                                        if (retval)
4837                                                return retval;
4838                                }
4839                                if (netif_running(adapter->netdev))
4840                                        e1000_reinit_locked(adapter);
4841                                else
4842                                        e1000_reset(adapter);
4843                                break;
4844                        case M88E1000_PHY_SPEC_CTRL:
4845                        case M88E1000_EXT_PHY_SPEC_CTRL:
4846                                if (e1000_phy_reset(hw))
4847                                        return -EIO;
4848                                break;
4849                        }
4850                } else {
4851                        switch (data->reg_num) {
4852                        case PHY_CTRL:
4853                                if (mii_reg & MII_CR_POWER_DOWN)
4854                                        break;
4855                                if (netif_running(adapter->netdev))
4856                                        e1000_reinit_locked(adapter);
4857                                else
4858                                        e1000_reset(adapter);
4859                                break;
4860                        }
4861                }
4862                break;
4863        default:
4864                return -EOPNOTSUPP;
4865        }
4866        return E1000_SUCCESS;
4867}
4868
4869void e1000_pci_set_mwi(struct e1000_hw *hw)
4870{
4871        struct e1000_adapter *adapter = hw->back;
4872        int ret_val = pci_set_mwi(adapter->pdev);
4873
4874        if (ret_val)
4875                e_err(probe, "Error in setting MWI\n");
4876}
4877
4878void e1000_pci_clear_mwi(struct e1000_hw *hw)
4879{
4880        struct e1000_adapter *adapter = hw->back;
4881
4882        pci_clear_mwi(adapter->pdev);
4883}
4884
4885int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4886{
4887        struct e1000_adapter *adapter = hw->back;
4888        return pcix_get_mmrbc(adapter->pdev);
4889}
4890
4891void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4892{
4893        struct e1000_adapter *adapter = hw->back;
4894        pcix_set_mmrbc(adapter->pdev, mmrbc);
4895}
4896
4897void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4898{
4899        outl(value, port);
4900}
4901
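    /* true if at least one VLAN ID is currently registered on the device */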
4902static bool e1000_vlan_used(struct e1000_adapter *adapter)
4903{
4904        u16 vid;
4905
4906        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4907                return true;
4908        return false;
4909}
4910
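    /* Program the CTRL.VME bit so hardware VLAN tag stripping follows the
     * state of the NETIF_F_HW_VLAN_CTAG_RX feature flag.
     */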
4911static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4912                              netdev_features_t features)
4913{
4914        struct e1000_hw *hw = &adapter->hw;
4915        u32 ctrl;
4916
4917        ctrl = er32(CTRL);
4918        if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4919                /* enable VLAN tag insert/strip */
4920                ctrl |= E1000_CTRL_VME;
4921        } else {
4922                /* disable VLAN tag insert/strip */
4923                ctrl &= ~E1000_CTRL_VME;
4924        }
4925        ew32(CTRL, ctrl);
4926}

4927static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4928                                     bool filter_on)
4929{
4930        struct e1000_hw *hw = &adapter->hw;
4931        u32 rctl;
4932
4933        if (!test_bit(__E1000_DOWN, &adapter->flags))
4934                e1000_irq_disable(adapter);
4935
4936        __e1000_vlan_mode(adapter, adapter->netdev->features);
4937        if (filter_on) {
4938                /* enable VLAN receive filtering */
4939                rctl = er32(RCTL);
4940                rctl &= ~E1000_RCTL_CFIEN;
4941                if (!(adapter->netdev->flags & IFF_PROMISC))
4942                        rctl |= E1000_RCTL_VFE;
4943                ew32(RCTL, rctl);
4944                e1000_update_mng_vlan(adapter);
4945        } else {
4946                /* disable VLAN receive filtering */
4947                rctl = er32(RCTL);
4948                rctl &= ~E1000_RCTL_VFE;
4949                ew32(RCTL, rctl);
4950        }
4951
4952        if (!test_bit(__E1000_DOWN, &adapter->flags))
4953                e1000_irq_enable(adapter);
4954}
4955
4956static void e1000_vlan_mode(struct net_device *netdev,
4957                            netdev_features_t features)
4958{
4959        struct e1000_adapter *adapter = netdev_priv(netdev);
4960
4961        if (!test_bit(__E1000_DOWN, &adapter->flags))
4962                e1000_irq_disable(adapter);
4963
4964        __e1000_vlan_mode(adapter, features);
4965
4966        if (!test_bit(__E1000_DOWN, &adapter->flags))
4967                e1000_irq_enable(adapter);
4968}
4969
4970static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4971                                 __be16 proto, u16 vid)
4972{
4973        struct e1000_adapter *adapter = netdev_priv(netdev);
4974        struct e1000_hw *hw = &adapter->hw;
4975        u32 vfta, index;
4976
4977        if ((hw->mng_cookie.status &
4978             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4979            (vid == adapter->mng_vlan_id))
4980                return 0;
4981
4982        if (!e1000_vlan_used(adapter))
4983                e1000_vlan_filter_on_off(adapter, true);
4984
4985        /* add VID to filter table */
4986        index = (vid >> 5) & 0x7F;
4987        vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4988        vfta |= (1 << (vid & 0x1F));
4989        e1000_write_vfta(hw, index, vfta);
4990
4991        set_bit(vid, adapter->active_vlans);
4992
4993        return 0;
4994}
4995
4996static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4997                                  __be16 proto, u16 vid)
4998{
4999        struct e1000_adapter *adapter = netdev_priv(netdev);
5000        struct e1000_hw *hw = &adapter->hw;
5001        u32 vfta, index;
5002
5003        if (!test_bit(__E1000_DOWN, &adapter->flags))
5004                e1000_irq_disable(adapter);
5005        if (!test_bit(__E1000_DOWN, &adapter->flags))
5006                e1000_irq_enable(adapter);
5007
5008        /* remove VID from filter table */
5009        index = (vid >> 5) & 0x7F;
5010        vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
5011        vfta &= ~(1 << (vid & 0x1F));
5012        e1000_write_vfta(hw, index, vfta);
5013
5014        clear_bit(vid, adapter->active_vlans);
5015
5016        if (!e1000_vlan_used(adapter))
5017                e1000_vlan_filter_on_off(adapter, false);
5018
5019        return 0;
5020}
5021
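    /* Called after a reset: re-enable VLAN filtering if any VLANs are in use
     * and re-program every active VLAN ID into the VFTA filter table.
     */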
5022static void e1000_restore_vlan(struct e1000_adapter *adapter)
5023{
5024        u16 vid;
5025
5026        if (!e1000_vlan_used(adapter))
5027                return;
5028
5029        e1000_vlan_filter_on_off(adapter, true);
5030        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5031                e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5032}
5033
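    /**
     * e1000_set_spd_dplx - force a specific speed/duplex combination
     * @adapter: board private structure
     * @spd: requested speed (SPEED_10, SPEED_100 or SPEED_1000)
     * @dplx: requested duplex (DUPLEX_HALF or DUPLEX_FULL)
     *
     * 1000/full is only reachable through autonegotiation, so that
     * combination re-enables autoneg advertising only 1000BASE-T full
     * duplex; the other supported combinations force speed and duplex
     * directly.
     */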
5034int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5035{
5036        struct e1000_hw *hw = &adapter->hw;
5037
5038        hw->autoneg = 0;
5039
5040        /* Make sure dplx is at most 1 bit and lsb of speed is not set
5041         * for the switch() below to work
5042         */
5043        if ((spd & 1) || (dplx & ~1))
5044                goto err_inval;
5045
5046        /* Fiber NICs only allow 1000 Mbps full duplex */
5047        if ((hw->media_type == e1000_media_type_fiber) &&
5048            spd != SPEED_1000 &&
5049            dplx != DUPLEX_FULL)
5050                goto err_inval;
5051
5052        switch (spd + dplx) {
5053        case SPEED_10 + DUPLEX_HALF:
5054                hw->forced_speed_duplex = e1000_10_half;
5055                break;
5056        case SPEED_10 + DUPLEX_FULL:
5057                hw->forced_speed_duplex = e1000_10_full;
5058                break;
5059        case SPEED_100 + DUPLEX_HALF:
5060                hw->forced_speed_duplex = e1000_100_half;
5061                break;
5062        case SPEED_100 + DUPLEX_FULL:
5063                hw->forced_speed_duplex = e1000_100_full;
5064                break;
5065        case SPEED_1000 + DUPLEX_FULL:
5066                hw->autoneg = 1;
5067                hw->autoneg_advertised = ADVERTISE_1000_FULL;
5068                break;
5069        case SPEED_1000 + DUPLEX_HALF: /* not supported */
5070        default:
5071                goto err_inval;
5072        }
5073
5074        /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5075        hw->mdix = AUTO_ALL_MODES;
5076
5077        return 0;
5078
5079err_inval:
5080        e_err(probe, "Unsupported Speed/Duplex configuration\n");
5081        return -EINVAL;
5082}
5083
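    /* Common suspend/shutdown path: stop the interface, program the wake-up
     * filter registers (WUC/WUFC) when Wake-on-LAN is configured, keep the
     * PHY or the SerDes laser powered as required, and report through
     * *enable_wake whether PME should be armed for this device.
     */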
5084static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5085{
5086        struct net_device *netdev = pci_get_drvdata(pdev);
5087        struct e1000_adapter *adapter = netdev_priv(netdev);
5088        struct e1000_hw *hw = &adapter->hw;
5089        u32 ctrl, ctrl_ext, rctl, status;
5090        u32 wufc = adapter->wol;
5091#ifdef CONFIG_PM
5092        int retval = 0;
5093#endif
5094
5095        netif_device_detach(netdev);
5096
5097        if (netif_running(netdev)) {
5098                int count = E1000_CHECK_RESET_COUNT;
5099
5100                while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5101                        usleep_range(10000, 20000);
5102
5103                WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5104                e1000_down(adapter);
5105        }
5106
5107#ifdef CONFIG_PM
5108        retval = pci_save_state(pdev);
5109        if (retval)
5110                return retval;
5111#endif
5112
5113        status = er32(STATUS);
5114        if (status & E1000_STATUS_LU)
5115                wufc &= ~E1000_WUFC_LNKC;
5116
5117        if (wufc) {
5118                e1000_setup_rctl(adapter);
5119                e1000_set_rx_mode(netdev);
5120
5121                rctl = er32(RCTL);
5122
5123                /* turn on all-multi mode if wake on multicast is enabled */
5124                if (wufc & E1000_WUFC_MC)
5125                        rctl |= E1000_RCTL_MPE;
5126
5127                /* enable receives in the hardware */
5128                ew32(RCTL, rctl | E1000_RCTL_EN);
5129
5130                if (hw->mac_type >= e1000_82540) {
5131                        ctrl = er32(CTRL);
5132                        /* advertise wake from D3Cold */
5133                        #define E1000_CTRL_ADVD3WUC 0x00100000
5134                        /* phy power management enable */
5135                        #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5136                        ctrl |= E1000_CTRL_ADVD3WUC |
5137                                E1000_CTRL_EN_PHY_PWR_MGMT;
5138                        ew32(CTRL, ctrl);
5139                }
5140
5141                if (hw->media_type == e1000_media_type_fiber ||
5142                    hw->media_type == e1000_media_type_internal_serdes) {
5143                        /* keep the laser running in D3 */
5144                        ctrl_ext = er32(CTRL_EXT);
5145                        ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5146                        ew32(CTRL_EXT, ctrl_ext);
5147                }
5148
5149                ew32(WUC, E1000_WUC_PME_EN);
5150                ew32(WUFC, wufc);
5151        } else {
5152                ew32(WUC, 0);
5153                ew32(WUFC, 0);
5154        }
5155
5156        e1000_release_manageability(adapter);
5157
5158        *enable_wake = !!wufc;
5159
5160        /* make sure adapter isn't asleep if manageability is enabled */
5161        if (adapter->en_mng_pt)
5162                *enable_wake = true;
5163
5164        if (netif_running(netdev))
5165                e1000_free_irq(adapter);
5166
5167        pci_disable_device(pdev);
5168
5169        return 0;
5170}
5171
5172#ifdef CONFIG_PM
5173static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5174{
5175        int retval;
5176        bool wake;
5177
5178        retval = __e1000_shutdown(pdev, &wake);
5179        if (retval)
5180                return retval;
5181
5182        if (wake) {
5183                pci_prepare_to_sleep(pdev);
5184        } else {
5185                pci_wake_from_d3(pdev, false);
5186                pci_set_power_state(pdev, PCI_D3hot);
5187        }
5188
5189        return 0;
5190}
5191
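    /* Legacy PM resume: restore PCI state, re-enable the device and bus
     * mastering, power the PHY back up, clear any pending wake-up status and
     * restart the interface if it was running when we suspended.
     */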
5192static int e1000_resume(struct pci_dev *pdev)
5193{
5194        struct net_device *netdev = pci_get_drvdata(pdev);
5195        struct e1000_adapter *adapter = netdev_priv(netdev);
5196        struct e1000_hw *hw = &adapter->hw;
5197        u32 err;
5198
5199        pci_set_power_state(pdev, PCI_D0);
5200        pci_restore_state(pdev);
5201        pci_save_state(pdev);
5202
5203        if (adapter->need_ioport)
5204                err = pci_enable_device(pdev);
5205        else
5206                err = pci_enable_device_mem(pdev);
5207        if (err) {
5208                pr_err("Cannot enable PCI device from suspend\n");
5209                return err;
5210        }
5211        pci_set_master(pdev);
5212
5213        pci_enable_wake(pdev, PCI_D3hot, 0);
5214        pci_enable_wake(pdev, PCI_D3cold, 0);
5215
5216        if (netif_running(netdev)) {
5217                err = e1000_request_irq(adapter);
5218                if (err)
5219                        return err;
5220        }
5221
5222        e1000_power_up_phy(adapter);
5223        e1000_reset(adapter);
5224        ew32(WUS, ~0);
5225
5226        e1000_init_manageability(adapter);
5227
5228        if (netif_running(netdev))
5229                e1000_up(adapter);
5230
5231        netif_device_attach(netdev);
5232
5233        return 0;
5234}
5235#endif
5236
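    /* Reboot/poweroff hook: run the common shutdown path and, when the system
     * is actually powering off, arm or disarm PME according to the wake-up
     * configuration before dropping the device into D3hot.
     */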
5237static void e1000_shutdown(struct pci_dev *pdev)
5238{
5239        bool wake;
5240
5241        __e1000_shutdown(pdev, &wake);
5242
5243        if (system_state == SYSTEM_POWER_OFF) {
5244                pci_wake_from_d3(pdev, wake);
5245                pci_set_power_state(pdev, PCI_D3hot);
5246        }
5247}
5248
5249#ifdef CONFIG_NET_POLL_CONTROLLER
5250/* Polling 'interrupt' - used by things like netconsole to send skbs
5251 * without having to re-enable interrupts. It's not called while
5252 * the interrupt routine is executing.
5253 */
5254static void e1000_netpoll(struct net_device *netdev)
5255{
5256        struct e1000_adapter *adapter = netdev_priv(netdev);
5257
5258        if (disable_hardirq(adapter->pdev->irq))
5259                e1000_intr(adapter->pdev->irq, netdev);
5260        enable_irq(adapter->pdev->irq);
5261}
5262#endif
5263
5264/**
5265 * e1000_io_error_detected - called when PCI error is detected
5266 * @pdev: Pointer to PCI device
5267 * @state: The current pci connection state
5268 *
5269 * This function is called after a PCI bus error affecting
5270 * this device has been detected.
5271 */
5272static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5273                                                pci_channel_state_t state)
5274{
5275        struct net_device *netdev = pci_get_drvdata(pdev);
5276        struct e1000_adapter *adapter = netdev_priv(netdev);
5277
5278        netif_device_detach(netdev);
5279
5280        if (state == pci_channel_io_perm_failure)
5281                return PCI_ERS_RESULT_DISCONNECT;
5282
5283        if (netif_running(netdev))
5284                e1000_down(adapter);
5285        pci_disable_device(pdev);
5286
5287        /* Request a slot reset. */
5288        return PCI_ERS_RESULT_NEED_RESET;
5289}
5290
5291/**
5292 * e1000_io_slot_reset - called after the pci bus has been reset.
5293 * @pdev: Pointer to PCI device
5294 *
5295 * Restart the card from scratch, as if from a cold-boot. Implementation
5296 * resembles the first-half of the e1000_resume routine.
5297 */
5298static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5299{
5300        struct net_device *netdev = pci_get_drvdata(pdev);
5301        struct e1000_adapter *adapter = netdev_priv(netdev);
5302        struct e1000_hw *hw = &adapter->hw;
5303        int err;
5304
5305        if (adapter->need_ioport)
5306                err = pci_enable_device(pdev);
5307        else
5308                err = pci_enable_device_mem(pdev);
5309        if (err) {
5310                pr_err("Cannot re-enable PCI device after reset.\n");
5311                return PCI_ERS_RESULT_DISCONNECT;
5312        }
5313        pci_set_master(pdev);
5314
5315        pci_enable_wake(pdev, PCI_D3hot, 0);
5316        pci_enable_wake(pdev, PCI_D3cold, 0);
5317
5318        e1000_reset(adapter);
5319        ew32(WUS, ~0);
5320
5321        return PCI_ERS_RESULT_RECOVERED;
5322}
5323
5324/**
5325 * e1000_io_resume - called when traffic can start flowing again.
5326 * @pdev: Pointer to PCI device
5327 *
5328 * This callback is called when the error recovery driver tells us that
5329 * it's OK to resume normal operation. Implementation resembles the
5330 * second-half of the e1000_resume routine.
5331 */
5332static void e1000_io_resume(struct pci_dev *pdev)
5333{
5334        struct net_device *netdev = pci_get_drvdata(pdev);
5335        struct e1000_adapter *adapter = netdev_priv(netdev);
5336
5337        e1000_init_manageability(adapter);
5338
5339        if (netif_running(netdev)) {
5340                if (e1000_up(adapter)) {
5341                        pr_info("can't bring device back up after reset\n");
5342                        return;
5343                }
5344        }
5345
5346        netif_device_attach(netdev);
5347}
5348
5349/* e1000_main.c */
5350