linux/drivers/net/ethernet/intel/e1000/e1000_main.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*******************************************************************************
   3
   4  Intel PRO/1000 Linux driver
   5  Copyright(c) 1999 - 2006 Intel Corporation.
   6
   7  This program is free software; you can redistribute it and/or modify it
   8  under the terms and conditions of the GNU General Public License,
   9  version 2, as published by the Free Software Foundation.
  10
  11  This program is distributed in the hope it will be useful, but WITHOUT
  12  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14  more details.
  15
  16  You should have received a copy of the GNU General Public License along with
  17  this program; if not, write to the Free Software Foundation, Inc.,
  18  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  19
  20  The full GNU General Public License is included in this distribution in
  21  the file called "COPYING".
  22
  23  Contact Information:
  24  Linux NICS <linux.nics@intel.com>
  25  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  26  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  27
  28*******************************************************************************/
  29
  30#include "e1000.h"
  31#include <net/ip6_checksum.h>
  32#include <linux/io.h>
  33#include <linux/prefetch.h>
  34#include <linux/bitops.h>
  35#include <linux/if_vlan.h>
  36
  37char e1000_driver_name[] = "e1000";
  38static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
  39#define DRV_VERSION "7.3.21-k8-NAPI"
  40const char e1000_driver_version[] = DRV_VERSION;
  41static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  42
  43/* e1000_pci_tbl - PCI Device ID Table
  44 *
  45 * Last entry must be all 0s
  46 *
  47 * Macro expands to...
  48 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  49 */
  50static const struct pci_device_id e1000_pci_tbl[] = {
  51        INTEL_E1000_ETHERNET_DEVICE(0x1000),
  52        INTEL_E1000_ETHERNET_DEVICE(0x1001),
  53        INTEL_E1000_ETHERNET_DEVICE(0x1004),
  54        INTEL_E1000_ETHERNET_DEVICE(0x1008),
  55        INTEL_E1000_ETHERNET_DEVICE(0x1009),
  56        INTEL_E1000_ETHERNET_DEVICE(0x100C),
  57        INTEL_E1000_ETHERNET_DEVICE(0x100D),
  58        INTEL_E1000_ETHERNET_DEVICE(0x100E),
  59        INTEL_E1000_ETHERNET_DEVICE(0x100F),
  60        INTEL_E1000_ETHERNET_DEVICE(0x1010),
  61        INTEL_E1000_ETHERNET_DEVICE(0x1011),
  62        INTEL_E1000_ETHERNET_DEVICE(0x1012),
  63        INTEL_E1000_ETHERNET_DEVICE(0x1013),
  64        INTEL_E1000_ETHERNET_DEVICE(0x1014),
  65        INTEL_E1000_ETHERNET_DEVICE(0x1015),
  66        INTEL_E1000_ETHERNET_DEVICE(0x1016),
  67        INTEL_E1000_ETHERNET_DEVICE(0x1017),
  68        INTEL_E1000_ETHERNET_DEVICE(0x1018),
  69        INTEL_E1000_ETHERNET_DEVICE(0x1019),
  70        INTEL_E1000_ETHERNET_DEVICE(0x101A),
  71        INTEL_E1000_ETHERNET_DEVICE(0x101D),
  72        INTEL_E1000_ETHERNET_DEVICE(0x101E),
  73        INTEL_E1000_ETHERNET_DEVICE(0x1026),
  74        INTEL_E1000_ETHERNET_DEVICE(0x1027),
  75        INTEL_E1000_ETHERNET_DEVICE(0x1028),
  76        INTEL_E1000_ETHERNET_DEVICE(0x1075),
  77        INTEL_E1000_ETHERNET_DEVICE(0x1076),
  78        INTEL_E1000_ETHERNET_DEVICE(0x1077),
  79        INTEL_E1000_ETHERNET_DEVICE(0x1078),
  80        INTEL_E1000_ETHERNET_DEVICE(0x1079),
  81        INTEL_E1000_ETHERNET_DEVICE(0x107A),
  82        INTEL_E1000_ETHERNET_DEVICE(0x107B),
  83        INTEL_E1000_ETHERNET_DEVICE(0x107C),
  84        INTEL_E1000_ETHERNET_DEVICE(0x108A),
  85        INTEL_E1000_ETHERNET_DEVICE(0x1099),
  86        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
  87        INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
  88        /* required last entry */
  89        {0,}
  90};
  91
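/* MODULE_DEVICE_TABLE exports the ID table above as module alias data so that
 * userspace (udev/modprobe) can autoload this driver when a matching PCI
 * device is discovered.
 */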
  92MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  93
  94int e1000_up(struct e1000_adapter *adapter);
  95void e1000_down(struct e1000_adapter *adapter);
  96void e1000_reinit_locked(struct e1000_adapter *adapter);
  97void e1000_reset(struct e1000_adapter *adapter);
  98int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
  99int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
 100void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
 101void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
 102static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 103                                    struct e1000_tx_ring *txdr);
 104static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 105                                    struct e1000_rx_ring *rxdr);
 106static void e1000_free_tx_resources(struct e1000_adapter *adapter,
 107                                    struct e1000_tx_ring *tx_ring);
 108static void e1000_free_rx_resources(struct e1000_adapter *adapter,
 109                                    struct e1000_rx_ring *rx_ring);
 110void e1000_update_stats(struct e1000_adapter *adapter);
 111
 112static int e1000_init_module(void);
 113static void e1000_exit_module(void);
 114static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 115static void e1000_remove(struct pci_dev *pdev);
 116static int e1000_alloc_queues(struct e1000_adapter *adapter);
 117static int e1000_sw_init(struct e1000_adapter *adapter);
 118int e1000_open(struct net_device *netdev);
 119int e1000_close(struct net_device *netdev);
 120static void e1000_configure_tx(struct e1000_adapter *adapter);
 121static void e1000_configure_rx(struct e1000_adapter *adapter);
 122static void e1000_setup_rctl(struct e1000_adapter *adapter);
 123static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
 124static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
 125static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 126                                struct e1000_tx_ring *tx_ring);
 127static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 128                                struct e1000_rx_ring *rx_ring);
 129static void e1000_set_rx_mode(struct net_device *netdev);
 130static void e1000_update_phy_info_task(struct work_struct *work);
 131static void e1000_watchdog(struct work_struct *work);
 132static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 133static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 134                                    struct net_device *netdev);
 135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 136static int e1000_set_mac(struct net_device *netdev, void *p);
 137static irqreturn_t e1000_intr(int irq, void *data);
 138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 139                               struct e1000_tx_ring *tx_ring);
 140static int e1000_clean(struct napi_struct *napi, int budget);
 141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 142                               struct e1000_rx_ring *rx_ring,
 143                               int *work_done, int work_to_do);
 144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 145                                     struct e1000_rx_ring *rx_ring,
 146                                     int *work_done, int work_to_do);
 147static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
 148                                         struct e1000_rx_ring *rx_ring,
 149                                         int cleaned_count)
 150{
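        /* deliberately empty: a stub that can be installed as
         * adapter->alloc_rx_buf when no Rx buffers should actually be allocated
         */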
 151}
 152static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 153                                   struct e1000_rx_ring *rx_ring,
 154                                   int cleaned_count);
 155static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 156                                         struct e1000_rx_ring *rx_ring,
 157                                         int cleaned_count);
 158static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 159static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 160                           int cmd);
 161static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 162static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 163static void e1000_tx_timeout(struct net_device *dev);
 164static void e1000_reset_task(struct work_struct *work);
 165static void e1000_smartspeed(struct e1000_adapter *adapter);
 166static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 167                                       struct sk_buff *skb);
 168
 169static bool e1000_vlan_used(struct e1000_adapter *adapter);
 170static void e1000_vlan_mode(struct net_device *netdev,
 171                            netdev_features_t features);
 172static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
 173                                     bool filter_on);
 174static int e1000_vlan_rx_add_vid(struct net_device *netdev,
 175                                 __be16 proto, u16 vid);
 176static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
 177                                  __be16 proto, u16 vid);
 178static void e1000_restore_vlan(struct e1000_adapter *adapter);
 179
 180#ifdef CONFIG_PM
 181static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 182static int e1000_resume(struct pci_dev *pdev);
 183#endif
 184static void e1000_shutdown(struct pci_dev *pdev);
 185
 186#ifdef CONFIG_NET_POLL_CONTROLLER
 187/* for netdump / net console */
  188static void e1000_netpoll(struct net_device *netdev);
 189#endif
 190
 191#define COPYBREAK_DEFAULT 256
 192static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
 193module_param(copybreak, uint, 0644);
 194MODULE_PARM_DESC(copybreak,
 195        "Maximum size of packet that is copied to a new buffer on receive");
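/* mode 0644 exposes copybreak under /sys/module/e1000/parameters/, so it can
 * be changed at runtime as well as at load time (e.g. "modprobe e1000
 * copybreak=0" disables the small-packet copy path entirely).
 */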
 196
 197static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 198                                                pci_channel_state_t state);
 199static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
 200static void e1000_io_resume(struct pci_dev *pdev);
 201
 202static const struct pci_error_handlers e1000_err_handler = {
 203        .error_detected = e1000_io_error_detected,
 204        .slot_reset = e1000_io_slot_reset,
 205        .resume = e1000_io_resume,
 206};
 207
 208static struct pci_driver e1000_driver = {
 209        .name     = e1000_driver_name,
 210        .id_table = e1000_pci_tbl,
 211        .probe    = e1000_probe,
 212        .remove   = e1000_remove,
 213#ifdef CONFIG_PM
 214        /* Power Management Hooks */
 215        .suspend  = e1000_suspend,
 216        .resume   = e1000_resume,
 217#endif
 218        .shutdown = e1000_shutdown,
 219        .err_handler = &e1000_err_handler
 220};
 221
 222MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 223MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 224MODULE_LICENSE("GPL");
 225MODULE_VERSION(DRV_VERSION);
 226
 227#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 228static int debug = -1;
 229module_param(debug, int, 0);
 230MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
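/* the -1 default is out of range for netif_msg_init(), which then falls back
 * to DEFAULT_MSG_ENABLE above (driver, probe and link messages)
 */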
 231
  232/**
  233 * e1000_get_hw_dev - return the net_device associated with this hw struct
  234 * @hw: pointer to the HW struct
  235 * used by the hardware layer to print debugging information
  236 **/
 237struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
 238{
 239        struct e1000_adapter *adapter = hw->back;
 240        return adapter->netdev;
 241}
 242
 243/**
 244 * e1000_init_module - Driver Registration Routine
 245 *
 246 * e1000_init_module is the first routine called when the driver is
 247 * loaded. All it does is register with the PCI subsystem.
 248 **/
 249static int __init e1000_init_module(void)
 250{
 251        int ret;
 252        pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
 253
 254        pr_info("%s\n", e1000_copyright);
 255
 256        ret = pci_register_driver(&e1000_driver);
 257        if (copybreak != COPYBREAK_DEFAULT) {
 258                if (copybreak == 0)
 259                        pr_info("copybreak disabled\n");
 260                else
  261                        pr_info("copybreak enabled for packets <= %u bytes\n",
  262                                copybreak);
 263        }
 264        return ret;
 265}
 266
 267module_init(e1000_init_module);
 268
 269/**
 270 * e1000_exit_module - Driver Exit Cleanup Routine
 271 *
 272 * e1000_exit_module is called just before the driver is removed
 273 * from memory.
 274 **/
 275static void __exit e1000_exit_module(void)
 276{
 277        pci_unregister_driver(&e1000_driver);
 278}
 279
 280module_exit(e1000_exit_module);
 281
 282static int e1000_request_irq(struct e1000_adapter *adapter)
 283{
 284        struct net_device *netdev = adapter->netdev;
 285        irq_handler_t handler = e1000_intr;
 286        int irq_flags = IRQF_SHARED;
 287        int err;
 288
 289        err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 290                          netdev);
 291        if (err) {
  292                e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
 293        }
 294
 295        return err;
 296}
 297
 298static void e1000_free_irq(struct e1000_adapter *adapter)
 299{
 300        struct net_device *netdev = adapter->netdev;
 301
 302        free_irq(adapter->pdev->irq, netdev);
 303}
 304
 305/**
 306 * e1000_irq_disable - Mask off interrupt generation on the NIC
 307 * @adapter: board private structure
 308 **/
 309static void e1000_irq_disable(struct e1000_adapter *adapter)
 310{
 311        struct e1000_hw *hw = &adapter->hw;
 312
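        /* mask every interrupt cause, flush the posted register write, then
         * wait for any handler already running on another CPU to finish
         */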
 313        ew32(IMC, ~0);
 314        E1000_WRITE_FLUSH();
 315        synchronize_irq(adapter->pdev->irq);
 316}
 317
 318/**
 319 * e1000_irq_enable - Enable default interrupt generation settings
 320 * @adapter: board private structure
 321 **/
 322static void e1000_irq_enable(struct e1000_adapter *adapter)
 323{
 324        struct e1000_hw *hw = &adapter->hw;
 325
 326        ew32(IMS, IMS_ENABLE_MASK);
 327        E1000_WRITE_FLUSH();
 328}
 329
 330static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
 331{
 332        struct e1000_hw *hw = &adapter->hw;
 333        struct net_device *netdev = adapter->netdev;
 334        u16 vid = hw->mng_cookie.vlan_id;
 335        u16 old_vid = adapter->mng_vlan_id;
 336
 337        if (!e1000_vlan_used(adapter))
 338                return;
 339
 340        if (!test_bit(vid, adapter->active_vlans)) {
 341                if (hw->mng_cookie.status &
 342                    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 343                        e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
 344                        adapter->mng_vlan_id = vid;
 345                } else {
 346                        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 347                }
 348                if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
 349                    (vid != old_vid) &&
 350                    !test_bit(old_vid, adapter->active_vlans))
 351                        e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
 352                                               old_vid);
 353        } else {
 354                adapter->mng_vlan_id = vid;
 355        }
 356}
 357
 358static void e1000_init_manageability(struct e1000_adapter *adapter)
 359{
 360        struct e1000_hw *hw = &adapter->hw;
 361
 362        if (adapter->en_mng_pt) {
 363                u32 manc = er32(MANC);
 364
 365                /* disable hardware interception of ARP */
 366                manc &= ~(E1000_MANC_ARP_EN);
 367
 368                ew32(MANC, manc);
 369        }
 370}
 371
 372static void e1000_release_manageability(struct e1000_adapter *adapter)
 373{
 374        struct e1000_hw *hw = &adapter->hw;
 375
 376        if (adapter->en_mng_pt) {
 377                u32 manc = er32(MANC);
 378
 379                /* re-enable hardware interception of ARP */
 380                manc |= E1000_MANC_ARP_EN;
 381
 382                ew32(MANC, manc);
 383        }
 384}
 385
 386/**
 387 * e1000_configure - configure the hardware for RX and TX
  388 * @adapter: board private structure
 389 **/
 390static void e1000_configure(struct e1000_adapter *adapter)
 391{
 392        struct net_device *netdev = adapter->netdev;
 393        int i;
 394
 395        e1000_set_rx_mode(netdev);
 396
 397        e1000_restore_vlan(adapter);
 398        e1000_init_manageability(adapter);
 399
 400        e1000_configure_tx(adapter);
 401        e1000_setup_rctl(adapter);
 402        e1000_configure_rx(adapter);
 403        /* call E1000_DESC_UNUSED which always leaves
 404         * at least 1 descriptor unused to make sure
 405         * next_to_use != next_to_clean
 406         */
 407        for (i = 0; i < adapter->num_rx_queues; i++) {
 408                struct e1000_rx_ring *ring = &adapter->rx_ring[i];
 409                adapter->alloc_rx_buf(adapter, ring,
 410                                      E1000_DESC_UNUSED(ring));
 411        }
 412}
 413
 414int e1000_up(struct e1000_adapter *adapter)
 415{
 416        struct e1000_hw *hw = &adapter->hw;
 417
 418        /* hardware has been reset, we need to reload some things */
 419        e1000_configure(adapter);
 420
 421        clear_bit(__E1000_DOWN, &adapter->flags);
 422
 423        napi_enable(&adapter->napi);
 424
 425        e1000_irq_enable(adapter);
 426
 427        netif_wake_queue(adapter->netdev);
 428
 429        /* fire a link change interrupt to start the watchdog */
 430        ew32(ICS, E1000_ICS_LSC);
 431        return 0;
 432}
 433
 434/**
 435 * e1000_power_up_phy - restore link in case the phy was powered down
 436 * @adapter: address of board private structure
 437 *
 438 * The phy may be powered down to save power and turn off link when the
 439 * driver is unloaded and wake on lan is not enabled (among others)
 440 * *** this routine MUST be followed by a call to e1000_reset ***
 441 **/
 442void e1000_power_up_phy(struct e1000_adapter *adapter)
 443{
 444        struct e1000_hw *hw = &adapter->hw;
 445        u16 mii_reg = 0;
 446
 447        /* Just clear the power down bit to wake the phy back up */
 448        if (hw->media_type == e1000_media_type_copper) {
 449                /* according to the manual, the phy will retain its
 450                 * settings across a power-down/up cycle
 451                 */
 452                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 453                mii_reg &= ~MII_CR_POWER_DOWN;
 454                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 455        }
 456}
 457
 458static void e1000_power_down_phy(struct e1000_adapter *adapter)
 459{
 460        struct e1000_hw *hw = &adapter->hw;
 461
  462        /* Power down the PHY so no link is implied when interface is down.
  463         * The PHY cannot be powered down if any of the following is true:
 464         * (a) WoL is enabled
 465         * (b) AMT is active
 466         * (c) SoL/IDER session is active
 467         */
 468        if (!adapter->wol && hw->mac_type >= e1000_82540 &&
 469           hw->media_type == e1000_media_type_copper) {
 470                u16 mii_reg = 0;
 471
 472                switch (hw->mac_type) {
 473                case e1000_82540:
 474                case e1000_82545:
 475                case e1000_82545_rev_3:
 476                case e1000_82546:
 477                case e1000_ce4100:
 478                case e1000_82546_rev_3:
 479                case e1000_82541:
 480                case e1000_82541_rev_2:
 481                case e1000_82547:
 482                case e1000_82547_rev_2:
 483                        if (er32(MANC) & E1000_MANC_SMBUS_EN)
 484                                goto out;
 485                        break;
 486                default:
 487                        goto out;
 488                }
 489                e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 490                mii_reg |= MII_CR_POWER_DOWN;
 491                e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 492                msleep(1);
 493        }
 494out:
 495        return;
 496}
 497
 498static void e1000_down_and_stop(struct e1000_adapter *adapter)
 499{
 500        set_bit(__E1000_DOWN, &adapter->flags);
 501
 502        cancel_delayed_work_sync(&adapter->watchdog_task);
 503
 504        /*
 505         * Since the watchdog task can reschedule other tasks, we should cancel
  506         * it first; otherwise we can end up with a work item that is
  507         * still running after the adapter has been turned down.
 508         */
 509
 510        cancel_delayed_work_sync(&adapter->phy_info_task);
 511        cancel_delayed_work_sync(&adapter->fifo_stall_task);
 512
 513        /* Only kill reset task if adapter is not resetting */
 514        if (!test_bit(__E1000_RESETTING, &adapter->flags))
 515                cancel_work_sync(&adapter->reset_task);
 516}
 517
 518void e1000_down(struct e1000_adapter *adapter)
 519{
 520        struct e1000_hw *hw = &adapter->hw;
 521        struct net_device *netdev = adapter->netdev;
 522        u32 rctl, tctl;
 523
 524        /* disable receives in the hardware */
 525        rctl = er32(RCTL);
 526        ew32(RCTL, rctl & ~E1000_RCTL_EN);
 527        /* flush and sleep below */
 528
 529        netif_tx_disable(netdev);
 530
 531        /* disable transmits in the hardware */
 532        tctl = er32(TCTL);
 533        tctl &= ~E1000_TCTL_EN;
 534        ew32(TCTL, tctl);
 535        /* flush both disables and wait for them to finish */
 536        E1000_WRITE_FLUSH();
 537        msleep(10);
 538
 539        /* Set the carrier off after transmits have been disabled in the
 540         * hardware, to avoid race conditions with e1000_watchdog() (which
 541         * may be running concurrently to us, checking for the carrier
 542         * bit to decide whether it should enable transmits again). Such
  543         * a race condition would result in transmission being disabled
 544         * in the hardware until the next IFF_DOWN+IFF_UP cycle.
 545         */
 546        netif_carrier_off(netdev);
 547
 548        napi_disable(&adapter->napi);
 549
 550        e1000_irq_disable(adapter);
 551
 552        /* Setting DOWN must be after irq_disable to prevent
 553         * a screaming interrupt.  Setting DOWN also prevents
 554         * tasks from rescheduling.
 555         */
 556        e1000_down_and_stop(adapter);
 557
 558        adapter->link_speed = 0;
 559        adapter->link_duplex = 0;
 560
 561        e1000_reset(adapter);
 562        e1000_clean_all_tx_rings(adapter);
 563        e1000_clean_all_rx_rings(adapter);
 564}
 565
 566void e1000_reinit_locked(struct e1000_adapter *adapter)
 567{
 568        WARN_ON(in_interrupt());
 569        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 570                msleep(1);
 571        e1000_down(adapter);
 572        e1000_up(adapter);
 573        clear_bit(__E1000_RESETTING, &adapter->flags);
 574}
 575
 576void e1000_reset(struct e1000_adapter *adapter)
 577{
 578        struct e1000_hw *hw = &adapter->hw;
 579        u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 580        bool legacy_pba_adjust = false;
 581        u16 hwm;
 582
 583        /* Repartition Pba for greater than 9k mtu
 584         * To take effect CTRL.RST is required.
 585         */
 586
 587        switch (hw->mac_type) {
 588        case e1000_82542_rev2_0:
 589        case e1000_82542_rev2_1:
 590        case e1000_82543:
 591        case e1000_82544:
 592        case e1000_82540:
 593        case e1000_82541:
 594        case e1000_82541_rev_2:
 595                legacy_pba_adjust = true;
 596                pba = E1000_PBA_48K;
 597                break;
 598        case e1000_82545:
 599        case e1000_82545_rev_3:
 600        case e1000_82546:
 601        case e1000_ce4100:
 602        case e1000_82546_rev_3:
 603                pba = E1000_PBA_48K;
 604                break;
 605        case e1000_82547:
 606        case e1000_82547_rev_2:
 607                legacy_pba_adjust = true;
 608                pba = E1000_PBA_30K;
 609                break;
 610        case e1000_undefined:
 611        case e1000_num_macs:
 612                break;
 613        }
 614
 615        if (legacy_pba_adjust) {
 616                if (hw->max_frame_size > E1000_RXBUFFER_8192)
 617                        pba -= 8; /* allocate more FIFO for Tx */
 618
 619                if (hw->mac_type == e1000_82547) {
 620                        adapter->tx_fifo_head = 0;
 621                        adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 622                        adapter->tx_fifo_size =
 623                                (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
 624                        atomic_set(&adapter->tx_fifo_stall, 0);
 625                }
 626        } else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
 627                /* adjust PBA for jumbo frames */
 628                ew32(PBA, pba);
 629
 630                /* To maintain wire speed transmits, the Tx FIFO should be
 631                 * large enough to accommodate two full transmit packets,
 632                 * rounded up to the next 1KB and expressed in KB.  Likewise,
 633                 * the Rx FIFO should be large enough to accommodate at least
 634                 * one full receive packet and is similarly rounded up and
 635                 * expressed in KB.
 636                 */
 637                pba = er32(PBA);
 638                /* upper 16 bits has Tx packet buffer allocation size in KB */
 639                tx_space = pba >> 16;
 640                /* lower 16 bits has Rx packet buffer allocation size in KB */
 641                pba &= 0xffff;
 642                /* the Tx fifo also stores 16 bytes of information about the Tx
 643                 * but don't include ethernet FCS because hardware appends it
 644                 */
 645                min_tx_space = (hw->max_frame_size +
 646                                sizeof(struct e1000_tx_desc) -
 647                                ETH_FCS_LEN) * 2;
 648                min_tx_space = ALIGN(min_tx_space, 1024);
 649                min_tx_space >>= 10;
 650                /* software strips receive CRC, so leave room for it */
 651                min_rx_space = hw->max_frame_size;
 652                min_rx_space = ALIGN(min_rx_space, 1024);
 653                min_rx_space >>= 10;
 654
 655                /* If current Tx allocation is less than the min Tx FIFO size,
 656                 * and the min Tx FIFO size is less than the current Rx FIFO
 657                 * allocation, take space away from current Rx allocation
 658                 */
 659                if (tx_space < min_tx_space &&
 660                    ((min_tx_space - tx_space) < pba)) {
 661                        pba = pba - (min_tx_space - tx_space);
 662
 663                        /* PCI/PCIx hardware has PBA alignment constraints */
 664                        switch (hw->mac_type) {
 665                        case e1000_82545 ... e1000_82546_rev_3:
 666                                pba &= ~(E1000_PBA_8K - 1);
 667                                break;
 668                        default:
 669                                break;
 670                        }
 671
 672                        /* if short on Rx space, Rx wins and must trump Tx
 673                         * adjustment or use Early Receive if available
 674                         */
 675                        if (pba < min_rx_space)
 676                                pba = min_rx_space;
 677                }
 678        }
 679
 680        ew32(PBA, pba);
 681
 682        /* flow control settings:
 683         * The high water mark must be low enough to fit one full frame
 684         * (or the size used for early receive) above it in the Rx FIFO.
 685         * Set it to the lower of:
 686         * - 90% of the Rx FIFO size, and
 687         * - the full Rx FIFO size minus the early receive size (for parts
 688         *   with ERT support assuming ERT set to E1000_ERT_2048), or
 689         * - the full Rx FIFO size minus one full frame
 690         */
 691        hwm = min(((pba << 10) * 9 / 10),
 692                  ((pba << 10) - hw->max_frame_size));
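        /* worked example (assuming pba = 48 KB and a 1518-byte max frame):
         * min(49152 * 9 / 10, 49152 - 1518) = min(44236, 47634) = 44236;
         * the 8-byte masking below then gives fc_high_water = 44232 and
         * fc_low_water = 44224
         */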
 693
 694        hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
 695        hw->fc_low_water = hw->fc_high_water - 8;
 696        hw->fc_pause_time = E1000_FC_PAUSE_TIME;
 697        hw->fc_send_xon = 1;
 698        hw->fc = hw->original_fc;
 699
 700        /* Allow time for pending master requests to run */
 701        e1000_reset_hw(hw);
 702        if (hw->mac_type >= e1000_82544)
 703                ew32(WUC, 0);
 704
 705        if (e1000_init_hw(hw))
 706                e_dev_err("Hardware Error\n");
 707        e1000_update_mng_vlan(adapter);
 708
 709        /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
 710        if (hw->mac_type >= e1000_82544 &&
 711            hw->autoneg == 1 &&
 712            hw->autoneg_advertised == ADVERTISE_1000_FULL) {
 713                u32 ctrl = er32(CTRL);
 714                /* clear phy power management bit if we are in gig only mode,
 715                 * which if enabled will attempt negotiation to 100Mb, which
 716                 * can cause a loss of link at power off or driver unload
 717                 */
 718                ctrl &= ~E1000_CTRL_SWDPIN3;
 719                ew32(CTRL, ctrl);
 720        }
 721
 722        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 723        ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
 724
 725        e1000_reset_adaptive(hw);
 726        e1000_phy_get_info(hw, &adapter->phy_info);
 727
 728        e1000_release_manageability(adapter);
 729}
 730
 731/* Dump the eeprom for users having checksum issues */
 732static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 733{
 734        struct net_device *netdev = adapter->netdev;
 735        struct ethtool_eeprom eeprom;
 736        const struct ethtool_ops *ops = netdev->ethtool_ops;
 737        u8 *data;
 738        int i;
 739        u16 csum_old, csum_new = 0;
 740
 741        eeprom.len = ops->get_eeprom_len(netdev);
 742        eeprom.offset = 0;
 743
 744        data = kmalloc(eeprom.len, GFP_KERNEL);
 745        if (!data)
 746                return;
 747
 748        ops->get_eeprom(netdev, &eeprom, data);
 749
 750        csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
 751                   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
 752        for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
 753                csum_new += data[i] + (data[i + 1] << 8);
 754        csum_new = EEPROM_SUM - csum_new;
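        /* the image validates when all words from offset 0 through the
         * checksum word (EEPROM_CHECKSUM_REG) sum to EEPROM_SUM (0xBABA),
         * so csum_new is the value the checksum word would have to hold
         * for the stored image to be consistent
         */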
 755
 756        pr_err("/*********************/\n");
 757        pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
 758        pr_err("Calculated              : 0x%04x\n", csum_new);
 759
 760        pr_err("Offset    Values\n");
 761        pr_err("========  ======\n");
 762        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 763
 764        pr_err("Include this output when contacting your support provider.\n");
 765        pr_err("This is not a software error! Something bad happened to\n");
 766        pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
 767        pr_err("result in further problems, possibly loss of data,\n");
 768        pr_err("corruption or system hangs!\n");
 769        pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
 770        pr_err("which is invalid and requires you to set the proper MAC\n");
 771        pr_err("address manually before continuing to enable this network\n");
 772        pr_err("device. Please inspect the EEPROM dump and report the\n");
 773        pr_err("issue to your hardware vendor or Intel Customer Support.\n");
 774        pr_err("/*********************/\n");
 775
 776        kfree(data);
 777}
 778
 779/**
 780 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 781 * @pdev: PCI device information struct
 782 *
 783 * Return true if an adapter needs ioport resources
 784 **/
 785static int e1000_is_need_ioport(struct pci_dev *pdev)
 786{
 787        switch (pdev->device) {
 788        case E1000_DEV_ID_82540EM:
 789        case E1000_DEV_ID_82540EM_LOM:
 790        case E1000_DEV_ID_82540EP:
 791        case E1000_DEV_ID_82540EP_LOM:
 792        case E1000_DEV_ID_82540EP_LP:
 793        case E1000_DEV_ID_82541EI:
 794        case E1000_DEV_ID_82541EI_MOBILE:
 795        case E1000_DEV_ID_82541ER:
 796        case E1000_DEV_ID_82541ER_LOM:
 797        case E1000_DEV_ID_82541GI:
 798        case E1000_DEV_ID_82541GI_LF:
 799        case E1000_DEV_ID_82541GI_MOBILE:
 800        case E1000_DEV_ID_82544EI_COPPER:
 801        case E1000_DEV_ID_82544EI_FIBER:
 802        case E1000_DEV_ID_82544GC_COPPER:
 803        case E1000_DEV_ID_82544GC_LOM:
 804        case E1000_DEV_ID_82545EM_COPPER:
 805        case E1000_DEV_ID_82545EM_FIBER:
 806        case E1000_DEV_ID_82546EB_COPPER:
 807        case E1000_DEV_ID_82546EB_FIBER:
 808        case E1000_DEV_ID_82546EB_QUAD_COPPER:
 809                return true;
 810        default:
 811                return false;
 812        }
 813}
 814
 815static netdev_features_t e1000_fix_features(struct net_device *netdev,
 816        netdev_features_t features)
 817{
 818        /* Since there is no support for separate Rx/Tx vlan accel
 819         * enable/disable make sure Tx flag is always in same state as Rx.
 820         */
 821        if (features & NETIF_F_HW_VLAN_CTAG_RX)
 822                features |= NETIF_F_HW_VLAN_CTAG_TX;
 823        else
 824                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 825
 826        return features;
 827}
 828
 829static int e1000_set_features(struct net_device *netdev,
 830        netdev_features_t features)
 831{
 832        struct e1000_adapter *adapter = netdev_priv(netdev);
 833        netdev_features_t changed = features ^ netdev->features;
 834
 835        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 836                e1000_vlan_mode(netdev, features);
 837
 838        if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 839                return 0;
 840
 841        netdev->features = features;
 842        adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 843
 844        if (netif_running(netdev))
 845                e1000_reinit_locked(adapter);
 846        else
 847                e1000_reset(adapter);
 848
 849        return 0;
 850}
 851
 852static const struct net_device_ops e1000_netdev_ops = {
 853        .ndo_open               = e1000_open,
 854        .ndo_stop               = e1000_close,
 855        .ndo_start_xmit         = e1000_xmit_frame,
 856        .ndo_set_rx_mode        = e1000_set_rx_mode,
 857        .ndo_set_mac_address    = e1000_set_mac,
 858        .ndo_tx_timeout         = e1000_tx_timeout,
 859        .ndo_change_mtu         = e1000_change_mtu,
 860        .ndo_do_ioctl           = e1000_ioctl,
 861        .ndo_validate_addr      = eth_validate_addr,
 862        .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
 863        .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
 864#ifdef CONFIG_NET_POLL_CONTROLLER
 865        .ndo_poll_controller    = e1000_netpoll,
 866#endif
 867        .ndo_fix_features       = e1000_fix_features,
 868        .ndo_set_features       = e1000_set_features,
 869};
 870
 871/**
 872 * e1000_init_hw_struct - initialize members of hw struct
 873 * @adapter: board private struct
 874 * @hw: structure used by e1000_hw.c
 875 *
 876 * Factors out initialization of the e1000_hw struct to its own function
 877 * that can be called very early at init (just after struct allocation).
 878 * Fields are initialized based on PCI device information and
 879 * OS network device settings (MTU size).
 880 * Returns negative error codes if MAC type setup fails.
 881 */
 882static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 883                                struct e1000_hw *hw)
 884{
 885        struct pci_dev *pdev = adapter->pdev;
 886
 887        /* PCI config space info */
 888        hw->vendor_id = pdev->vendor;
 889        hw->device_id = pdev->device;
 890        hw->subsystem_vendor_id = pdev->subsystem_vendor;
 891        hw->subsystem_id = pdev->subsystem_device;
 892        hw->revision_id = pdev->revision;
 893
 894        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 895
 896        hw->max_frame_size = adapter->netdev->mtu +
 897                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 898        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
 899
 900        /* identify the MAC */
 901        if (e1000_set_mac_type(hw)) {
 902                e_err(probe, "Unknown MAC Type\n");
 903                return -EIO;
 904        }
 905
 906        switch (hw->mac_type) {
 907        default:
 908                break;
 909        case e1000_82541:
 910        case e1000_82547:
 911        case e1000_82541_rev_2:
 912        case e1000_82547_rev_2:
 913                hw->phy_init_script = 1;
 914                break;
 915        }
 916
 917        e1000_set_media_type(hw);
 918        e1000_get_bus_info(hw);
 919
 920        hw->wait_autoneg_complete = false;
 921        hw->tbi_compatibility_en = true;
 922        hw->adaptive_ifs = true;
 923
 924        /* Copper options */
 925
 926        if (hw->media_type == e1000_media_type_copper) {
 927                hw->mdix = AUTO_ALL_MODES;
 928                hw->disable_polarity_correction = false;
 929                hw->master_slave = E1000_MASTER_SLAVE;
 930        }
 931
 932        return 0;
 933}
 934
 935/**
 936 * e1000_probe - Device Initialization Routine
 937 * @pdev: PCI device information struct
 938 * @ent: entry in e1000_pci_tbl
 939 *
 940 * Returns 0 on success, negative on failure
 941 *
 942 * e1000_probe initializes an adapter identified by a pci_dev structure.
 943 * The OS initialization, configuring of the adapter private structure,
 944 * and a hardware reset occur.
 945 **/
 946static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 947{
 948        struct net_device *netdev;
 949        struct e1000_adapter *adapter = NULL;
 950        struct e1000_hw *hw;
 951
 952        static int cards_found;
 953        static int global_quad_port_a; /* global ksp3 port a indication */
 954        int i, err, pci_using_dac;
 955        u16 eeprom_data = 0;
 956        u16 tmp = 0;
 957        u16 eeprom_apme_mask = E1000_EEPROM_APME;
 958        int bars, need_ioport;
 959        bool disable_dev = false;
 960
 961        /* do not allocate ioport bars when not needed */
 962        need_ioport = e1000_is_need_ioport(pdev);
 963        if (need_ioport) {
 964                bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 965                err = pci_enable_device(pdev);
 966        } else {
 967                bars = pci_select_bars(pdev, IORESOURCE_MEM);
 968                err = pci_enable_device_mem(pdev);
 969        }
 970        if (err)
 971                return err;
 972
 973        err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
 974        if (err)
 975                goto err_pci_reg;
 976
 977        pci_set_master(pdev);
 978        err = pci_save_state(pdev);
 979        if (err)
 980                goto err_alloc_etherdev;
 981
 982        err = -ENOMEM;
 983        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
 984        if (!netdev)
 985                goto err_alloc_etherdev;
 986
 987        SET_NETDEV_DEV(netdev, &pdev->dev);
 988
 989        pci_set_drvdata(pdev, netdev);
 990        adapter = netdev_priv(netdev);
 991        adapter->netdev = netdev;
 992        adapter->pdev = pdev;
 993        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 994        adapter->bars = bars;
 995        adapter->need_ioport = need_ioport;
 996
 997        hw = &adapter->hw;
 998        hw->back = adapter;
 999
1000        err = -EIO;
1001        hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
1002        if (!hw->hw_addr)
1003                goto err_ioremap;
1004
1005        if (adapter->need_ioport) {
1006                for (i = BAR_1; i <= BAR_5; i++) {
1007                        if (pci_resource_len(pdev, i) == 0)
1008                                continue;
1009                        if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1010                                hw->io_base = pci_resource_start(pdev, i);
1011                                break;
1012                        }
1013                }
1014        }
1015
1016        /* make ready for any if (hw->...) below */
1017        err = e1000_init_hw_struct(adapter, hw);
1018        if (err)
1019                goto err_sw_init;
1020
1021        /* there is a workaround being applied below that limits
1022         * 64-bit DMA addresses to 64-bit hardware.  There are some
 1023         * 32-bit adapters whose Tx hangs when given 64-bit DMA addresses
1024         */
1025        pci_using_dac = 0;
1026        if ((hw->bus_type == e1000_bus_type_pcix) &&
1027            !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1028                pci_using_dac = 1;
1029        } else {
1030                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1031                if (err) {
1032                        pr_err("No usable DMA config, aborting\n");
1033                        goto err_dma;
1034                }
1035        }
1036
1037        netdev->netdev_ops = &e1000_netdev_ops;
1038        e1000_set_ethtool_ops(netdev);
1039        netdev->watchdog_timeo = 5 * HZ;
1040        netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1041
1042        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1043
1044        adapter->bd_number = cards_found;
1045
1046        /* setup the private structure */
1047
1048        err = e1000_sw_init(adapter);
1049        if (err)
1050                goto err_sw_init;
1051
1052        err = -EIO;
1053        if (hw->mac_type == e1000_ce4100) {
1054                hw->ce4100_gbe_mdio_base_virt =
1055                                        ioremap(pci_resource_start(pdev, BAR_1),
1056                                                pci_resource_len(pdev, BAR_1));
1057
1058                if (!hw->ce4100_gbe_mdio_base_virt)
1059                        goto err_mdio_ioremap;
1060        }
1061
1062        if (hw->mac_type >= e1000_82543) {
1063                netdev->hw_features = NETIF_F_SG |
1064                                   NETIF_F_HW_CSUM |
1065                                   NETIF_F_HW_VLAN_CTAG_RX;
1066                netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1067                                   NETIF_F_HW_VLAN_CTAG_FILTER;
1068        }
1069
1070        if ((hw->mac_type >= e1000_82544) &&
1071           (hw->mac_type != e1000_82547))
1072                netdev->hw_features |= NETIF_F_TSO;
1073
1074        netdev->priv_flags |= IFF_SUPP_NOFCS;
1075
1076        netdev->features |= netdev->hw_features;
1077        netdev->hw_features |= (NETIF_F_RXCSUM |
1078                                NETIF_F_RXALL |
1079                                NETIF_F_RXFCS);
1080
1081        if (pci_using_dac) {
1082                netdev->features |= NETIF_F_HIGHDMA;
1083                netdev->vlan_features |= NETIF_F_HIGHDMA;
1084        }
1085
1086        netdev->vlan_features |= (NETIF_F_TSO |
1087                                  NETIF_F_HW_CSUM |
1088                                  NETIF_F_SG);
1089
1090        /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1091        if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1092            hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1093                netdev->priv_flags |= IFF_UNICAST_FLT;
1094
1095        /* MTU range: 46 - 16110 */
1096        netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1097        netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
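        /* i.e. ETH_ZLEN (60) - ETH_HLEN (14) = 46, and with the 16128-byte
         * jumbo limit 16128 - (14 + 4) = 16110, matching the range above
         */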
1098
1099        adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1100
1101        /* initialize eeprom parameters */
1102        if (e1000_init_eeprom_params(hw)) {
1103                e_err(probe, "EEPROM initialization failed\n");
1104                goto err_eeprom;
1105        }
1106
1107        /* before reading the EEPROM, reset the controller to
1108         * put the device in a known good starting state
1109         */
1110
1111        e1000_reset_hw(hw);
1112
1113        /* make sure the EEPROM is good */
1114        if (e1000_validate_eeprom_checksum(hw) < 0) {
1115                e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1116                e1000_dump_eeprom(adapter);
 1117                /* set MAC address to all zeroes to invalidate and temporarily
1118                 * disable this device for the user. This blocks regular
1119                 * traffic while still permitting ethtool ioctls from reaching
1120                 * the hardware as well as allowing the user to run the
1121                 * interface after manually setting a hw addr using
 1122                 * `ip link set address`
1123                 */
1124                memset(hw->mac_addr, 0, netdev->addr_len);
1125        } else {
1126                /* copy the MAC address out of the EEPROM */
1127                if (e1000_read_mac_addr(hw))
1128                        e_err(probe, "EEPROM Read Error\n");
1129        }
1130        /* don't block initialization here due to bad MAC address */
1131        memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1132
1133        if (!is_valid_ether_addr(netdev->dev_addr))
1134                e_err(probe, "Invalid MAC Address\n");
1135
1136
1137        INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1138        INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1139                          e1000_82547_tx_fifo_stall_task);
1140        INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1141        INIT_WORK(&adapter->reset_task, e1000_reset_task);
1142
1143        e1000_check_options(adapter);
1144
1145        /* Initial Wake on LAN setting
1146         * If APM wake is enabled in the EEPROM,
1147         * enable the ACPI Magic Packet filter
1148         */
1149
1150        switch (hw->mac_type) {
1151        case e1000_82542_rev2_0:
1152        case e1000_82542_rev2_1:
1153        case e1000_82543:
1154                break;
1155        case e1000_82544:
1156                e1000_read_eeprom(hw,
1157                        EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1158                eeprom_apme_mask = E1000_EEPROM_82544_APM;
1159                break;
1160        case e1000_82546:
1161        case e1000_82546_rev_3:
1162                if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1163                        e1000_read_eeprom(hw,
1164                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1165                        break;
1166                }
1167                /* Fall Through */
1168        default:
1169                e1000_read_eeprom(hw,
1170                        EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1171                break;
1172        }
1173        if (eeprom_data & eeprom_apme_mask)
1174                adapter->eeprom_wol |= E1000_WUFC_MAG;
1175
1176        /* now that we have the eeprom settings, apply the special cases
1177         * where the eeprom may be wrong or the board simply won't support
1178         * wake on lan on a particular port
1179         */
1180        switch (pdev->device) {
1181        case E1000_DEV_ID_82546GB_PCIE:
1182                adapter->eeprom_wol = 0;
1183                break;
1184        case E1000_DEV_ID_82546EB_FIBER:
1185        case E1000_DEV_ID_82546GB_FIBER:
1186                /* Wake events only supported on port A for dual fiber
1187                 * regardless of eeprom setting
1188                 */
1189                if (er32(STATUS) & E1000_STATUS_FUNC_1)
1190                        adapter->eeprom_wol = 0;
1191                break;
1192        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1193                /* if quad port adapter, disable WoL on all but port A */
1194                if (global_quad_port_a != 0)
1195                        adapter->eeprom_wol = 0;
1196                else
1197                        adapter->quad_port_a = true;
1198                /* Reset for multiple quad port adapters */
1199                if (++global_quad_port_a == 4)
1200                        global_quad_port_a = 0;
1201                break;
1202        }
1203
1204        /* initialize the wol settings based on the eeprom settings */
1205        adapter->wol = adapter->eeprom_wol;
1206        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1207
1208        /* Auto detect PHY address */
1209        if (hw->mac_type == e1000_ce4100) {
1210                for (i = 0; i < 32; i++) {
1211                        hw->phy_addr = i;
1212                        e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1213
1214                        if (tmp != 0 && tmp != 0xFF)
1215                                break;
1216                }
1217
1218                if (i >= 32)
1219                        goto err_eeprom;
1220        }
1221
1222        /* reset the hardware with the new settings */
1223        e1000_reset(adapter);
1224
1225        strcpy(netdev->name, "eth%d");
1226        err = register_netdev(netdev);
1227        if (err)
1228                goto err_register;
1229
1230        e1000_vlan_filter_on_off(adapter, false);
1231
1232        /* print bus type/speed/width info */
1233        e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1234               ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1235               ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1236                (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1237                (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1238                (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1239               ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1240               netdev->dev_addr);
1241
1242        /* carrier off reporting is important to ethtool even BEFORE open */
1243        netif_carrier_off(netdev);
1244
1245        e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1246
1247        cards_found++;
1248        return 0;
1249
1250err_register:
1251err_eeprom:
1252        e1000_phy_hw_reset(hw);
1253
1254        if (hw->flash_address)
1255                iounmap(hw->flash_address);
1256        kfree(adapter->tx_ring);
1257        kfree(adapter->rx_ring);
1258err_dma:
1259err_sw_init:
1260err_mdio_ioremap:
1261        iounmap(hw->ce4100_gbe_mdio_base_virt);
1262        iounmap(hw->hw_addr);
1263err_ioremap:
1264        disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1265        free_netdev(netdev);
1266err_alloc_etherdev:
1267        pci_release_selected_regions(pdev, bars);
1268err_pci_reg:
1269        if (!adapter || disable_dev)
1270                pci_disable_device(pdev);
1271        return err;
1272}
1273
1274/**
1275 * e1000_remove - Device Removal Routine
1276 * @pdev: PCI device information struct
1277 *
1278 * e1000_remove is called by the PCI subsystem to alert the driver
1279 * that it should release a PCI device. That could be caused by a
1280 * Hot-Plug event, or because the driver is going to be removed from
1281 * memory.
1282 **/
1283static void e1000_remove(struct pci_dev *pdev)
1284{
1285        struct net_device *netdev = pci_get_drvdata(pdev);
1286        struct e1000_adapter *adapter = netdev_priv(netdev);
1287        struct e1000_hw *hw = &adapter->hw;
1288        bool disable_dev;
1289
1290        e1000_down_and_stop(adapter);
1291        e1000_release_manageability(adapter);
1292
1293        unregister_netdev(netdev);
1294
1295        e1000_phy_hw_reset(hw);
1296
1297        kfree(adapter->tx_ring);
1298        kfree(adapter->rx_ring);
1299
1300        if (hw->mac_type == e1000_ce4100)
1301                iounmap(hw->ce4100_gbe_mdio_base_virt);
1302        iounmap(hw->hw_addr);
1303        if (hw->flash_address)
1304                iounmap(hw->flash_address);
1305        pci_release_selected_regions(pdev, adapter->bars);
1306
1307        disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1308        free_netdev(netdev);
1309
1310        if (disable_dev)
1311                pci_disable_device(pdev);
1312}
1313
1314/**
1315 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1316 * @adapter: board private structure to initialize
1317 *
1318 * e1000_sw_init initializes the Adapter private data structure.
1319 * e1000_init_hw_struct MUST be called before this function
1320 **/
1321static int e1000_sw_init(struct e1000_adapter *adapter)
1322{
1323        adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1324
1325        adapter->num_tx_queues = 1;
1326        adapter->num_rx_queues = 1;
1327
1328        if (e1000_alloc_queues(adapter)) {
1329                e_err(probe, "Unable to allocate memory for queues\n");
1330                return -ENOMEM;
1331        }
1332
1333        /* Explicitly disable IRQ since the NIC can be in any state. */
1334        e1000_irq_disable(adapter);
1335
1336        spin_lock_init(&adapter->stats_lock);
1337
1338        set_bit(__E1000_DOWN, &adapter->flags);
1339
1340        return 0;
1341}
1342
1343/**
1344 * e1000_alloc_queues - Allocate memory for all rings
1345 * @adapter: board private structure to initialize
1346 *
1347 * We allocate one ring per queue at run-time since we don't know the
1348 * number of queues at compile-time.
1349 **/
1350static int e1000_alloc_queues(struct e1000_adapter *adapter)
1351{
1352        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1353                                   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1354        if (!adapter->tx_ring)
1355                return -ENOMEM;
1356
1357        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1358                                   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1359        if (!adapter->rx_ring) {
1360                kfree(adapter->tx_ring);
1361                return -ENOMEM;
1362        }
1363
1364        return E1000_SUCCESS;
1365}
1366
1367/**
1368 * e1000_open - Called when a network interface is made active
1369 * @netdev: network interface device structure
1370 *
1371 * Returns 0 on success, negative value on failure
1372 *
1373 * The open entry point is called when a network interface is made
1374 * active by the system (IFF_UP).  At this point all resources needed
1375 * for transmit and receive operations are allocated, the interrupt
1376 * handler is registered with the OS, the watchdog task is started,
1377 * and the stack is notified that the interface is ready.
1378 **/
1379int e1000_open(struct net_device *netdev)
1380{
1381        struct e1000_adapter *adapter = netdev_priv(netdev);
1382        struct e1000_hw *hw = &adapter->hw;
1383        int err;
1384
1385        /* disallow open during test */
1386        if (test_bit(__E1000_TESTING, &adapter->flags))
1387                return -EBUSY;
1388
1389        netif_carrier_off(netdev);
1390
1391        /* allocate transmit descriptors */
1392        err = e1000_setup_all_tx_resources(adapter);
1393        if (err)
1394                goto err_setup_tx;
1395
1396        /* allocate receive descriptors */
1397        err = e1000_setup_all_rx_resources(adapter);
1398        if (err)
1399                goto err_setup_rx;
1400
1401        e1000_power_up_phy(adapter);
1402
1403        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1404        if ((hw->mng_cookie.status &
1405                          E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1406                e1000_update_mng_vlan(adapter);
1407        }
1408
1409        /* before we allocate an interrupt, we must be ready to handle it.
1410         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 1411         * as soon as we call request_irq, so we have to set up our
1412         * clean_rx handler before we do so.
1413         */
1414        e1000_configure(adapter);
1415
1416        err = e1000_request_irq(adapter);
1417        if (err)
1418                goto err_req_irq;
1419
1420        /* From here on the code is the same as e1000_up() */
1421        clear_bit(__E1000_DOWN, &adapter->flags);
1422
1423        napi_enable(&adapter->napi);
1424
1425        e1000_irq_enable(adapter);
1426
1427        netif_start_queue(netdev);
1428
1429        /* fire a link status change interrupt to start the watchdog */
1430        ew32(ICS, E1000_ICS_LSC);
1431
1432        return E1000_SUCCESS;
1433
1434err_req_irq:
1435        e1000_power_down_phy(adapter);
1436        e1000_free_all_rx_resources(adapter);
1437err_setup_rx:
1438        e1000_free_all_tx_resources(adapter);
1439err_setup_tx:
1440        e1000_reset(adapter);
1441
1442        return err;
1443}
1444
1445/**
1446 * e1000_close - Disables a network interface
1447 * @netdev: network interface device structure
1448 *
1449 * Returns 0, this is not allowed to fail
1450 *
1451 * The close entry point is called when an interface is de-activated
1452 * by the OS.  The hardware is still under the drivers control, but
1453 * needs to be disabled.  A global MAC reset is issued to stop the
1454 * hardware, and all transmit and receive resources are freed.
1455 **/
1456int e1000_close(struct net_device *netdev)
1457{
1458        struct e1000_adapter *adapter = netdev_priv(netdev);
1459        struct e1000_hw *hw = &adapter->hw;
1460        int count = E1000_CHECK_RESET_COUNT;
1461
1462        while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1463                usleep_range(10000, 20000);
1464
1465        WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1466        e1000_down(adapter);
1467        e1000_power_down_phy(adapter);
1468        e1000_free_irq(adapter);
1469
1470        e1000_free_all_tx_resources(adapter);
1471        e1000_free_all_rx_resources(adapter);
1472
1473        /* kill manageability vlan ID if supported, but not if a vlan with
1474         * the same ID is registered on the host OS (let 8021q kill it)
1475         */
1476        if ((hw->mng_cookie.status &
1477             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1478            !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1479                e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1480                                       adapter->mng_vlan_id);
1481        }
1482
1483        return 0;
1484}
1485
1486/**
1487 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1488 * @adapter: address of board private structure
1489 * @start: address of beginning of memory
1490 * @len: length of memory
1491 **/
1492static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1493                                  unsigned long len)
1494{
1495        struct e1000_hw *hw = &adapter->hw;
1496        unsigned long begin = (unsigned long)start;
1497        unsigned long end = begin + len;
1498
1499        /* First revisions of the 82545 and 82546 must not allow any
1500         * memory write to cross a 64 KiB boundary, due to errata 23
1501         */
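        /* Example: begin = 0x0001f000 and len = 0x2000 give end - 1 =
         * 0x00020fff; begin ^ (end - 1) has bits above 15 set, so the
         * range spans a 64 KiB boundary and the check returns false.
         */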
1502        if (hw->mac_type == e1000_82545 ||
1503            hw->mac_type == e1000_ce4100 ||
1504            hw->mac_type == e1000_82546) {
1505                return ((begin ^ (end - 1)) >> 16) == 0;
1506        }
1507
1508        return true;
1509}
1510
1511/**
1512 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1513 * @adapter: board private structure
1514 * @txdr:    tx descriptor ring (for a specific queue) to setup
1515 *
1516 * Return 0 on success, negative on failure
1517 **/
1518static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1519                                    struct e1000_tx_ring *txdr)
1520{
1521        struct pci_dev *pdev = adapter->pdev;
1522        int size;
1523
1524        size = sizeof(struct e1000_tx_buffer) * txdr->count;
1525        txdr->buffer_info = vzalloc(size);
1526        if (!txdr->buffer_info)
1527                return -ENOMEM;
1528
1529        /* round up to nearest 4K */
1530
1531        txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1532        txdr->size = ALIGN(txdr->size, 4096);
1533
1534        txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1535                                        GFP_KERNEL);
1536        if (!txdr->desc) {
1537setup_tx_desc_die:
1538                vfree(txdr->buffer_info);
1539                return -ENOMEM;
1540        }
1541
1542        /* Fix for errata 23, can't cross 64kB boundary */
1543        if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1544                void *olddesc = txdr->desc;
1545                dma_addr_t olddma = txdr->dma;
1546                e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1547                      txdr->size, txdr->desc);
1548                /* Try again, without freeing the previous, so we get a different block */
1549                txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1550                                                &txdr->dma, GFP_KERNEL);
1551                /* Failed allocation, critical failure */
1552                if (!txdr->desc) {
1553                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1554                                          olddma);
1555                        goto setup_tx_desc_die;
1556                }
1557
1558                if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1559                        /* give up */
1560                        dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1561                                          txdr->dma);
1562                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1563                                          olddma);
1564                        e_err(probe, "Unable to allocate aligned memory "
1565                              "for the transmit descriptor ring\n");
1566                        vfree(txdr->buffer_info);
1567                        return -ENOMEM;
1568                } else {
1569                        /* Free old allocation, new allocation was successful */
1570                        dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1571                                          olddma);
1572                }
1573        }
1574        memset(txdr->desc, 0, txdr->size);
1575
1576        txdr->next_to_use = 0;
1577        txdr->next_to_clean = 0;
1578
1579        return 0;
1580}
1581
1582/**
1583 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1584 *                                (Descriptors) for all queues
1585 * @adapter: board private structure
1586 *
1587 * Return 0 on success, negative on failure
1588 **/
1589int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1590{
1591        int i, err = 0;
1592
1593        for (i = 0; i < adapter->num_tx_queues; i++) {
1594                err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1595                if (err) {
1596                        e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1597                        for (i--; i >= 0; i--)
1598                                e1000_free_tx_resources(adapter,
1599                                                        &adapter->tx_ring[i]);
1600                        break;
1601                }
1602        }
1603
1604        return err;
1605}
1606
1607/**
1608 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1609 * @adapter: board private structure
1610 *
1611 * Configure the Tx unit of the MAC after a reset.
1612 **/
1613static void e1000_configure_tx(struct e1000_adapter *adapter)
1614{
1615        u64 tdba;
1616        struct e1000_hw *hw = &adapter->hw;
1617        u32 tdlen, tctl, tipg;
1618        u32 ipgr1, ipgr2;
1619
1620        /* Setup the HW Tx Head and Tail descriptor pointers */
1621
1622        switch (adapter->num_tx_queues) {
1623        case 1:
1624        default:
1625                tdba = adapter->tx_ring[0].dma;
1626                tdlen = adapter->tx_ring[0].count *
1627                        sizeof(struct e1000_tx_desc);
1628                ew32(TDLEN, tdlen);
1629                ew32(TDBAH, (tdba >> 32));
1630                ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1631                ew32(TDT, 0);
1632                ew32(TDH, 0);
1633                adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1634                                           E1000_TDH : E1000_82542_TDH);
1635                adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1636                                           E1000_TDT : E1000_82542_TDT);
1637                break;
1638        }
1639
1640        /* Set the default values for the Tx Inter Packet Gap timer */
1641        if ((hw->media_type == e1000_media_type_fiber ||
1642             hw->media_type == e1000_media_type_internal_serdes))
1643                tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1644        else
1645                tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1646
1647        switch (hw->mac_type) {
1648        case e1000_82542_rev2_0:
1649        case e1000_82542_rev2_1:
1650                tipg = DEFAULT_82542_TIPG_IPGT;
1651                ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1652                ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1653                break;
1654        default:
1655                ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1656                ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1657                break;
1658        }
1659        tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1660        tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1661        ew32(TIPG, tipg);
1662
1663        /* Set the Tx Interrupt Delay register */
1664
1665        ew32(TIDV, adapter->tx_int_delay);
1666        if (hw->mac_type >= e1000_82540)
1667                ew32(TADV, adapter->tx_abs_int_delay);
1668
1669        /* Program the Transmit Control Register */
1670
1671        tctl = er32(TCTL);
1672        tctl &= ~E1000_TCTL_CT;
1673        tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1674                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1675
1676        e1000_config_collision_dist(hw);
1677
1678        /* Setup Transmit Descriptor Settings for eop descriptor */
1679        adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1680
1681        /* only set IDE if we are delaying interrupts using the timers */
1682        if (adapter->tx_int_delay)
1683                adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1684
1685        if (hw->mac_type < e1000_82543)
1686                adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1687        else
1688                adapter->txd_cmd |= E1000_TXD_CMD_RS;
1689
1690        /* Cache if we're 82544 running in PCI-X because we'll
1691         * need this to apply a workaround later in the send path.
1692         */
1693        if (hw->mac_type == e1000_82544 &&
1694            hw->bus_type == e1000_bus_type_pcix)
1695                adapter->pcix_82544 = true;
1696
1697        ew32(TCTL, tctl);
1698
1699}
1700
1701/**
1702 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1703 * @adapter: board private structure
1704 * @rxdr:    rx descriptor ring (for a specific queue) to setup
1705 *
1706 * Returns 0 on success, negative on failure
1707 **/
1708static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1709                                    struct e1000_rx_ring *rxdr)
1710{
1711        struct pci_dev *pdev = adapter->pdev;
1712        int size, desc_len;
1713
1714        size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1715        rxdr->buffer_info = vzalloc(size);
1716        if (!rxdr->buffer_info)
1717                return -ENOMEM;
1718
1719        desc_len = sizeof(struct e1000_rx_desc);
1720
1721        /* Round up to nearest 4K */
1722
1723        rxdr->size = rxdr->count * desc_len;
1724        rxdr->size = ALIGN(rxdr->size, 4096);
1725
1726        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1727                                        GFP_KERNEL);
1728        if (!rxdr->desc) {
1729setup_rx_desc_die:
1730                vfree(rxdr->buffer_info);
1731                return -ENOMEM;
1732        }
1733
1734        /* Fix for errata 23, can't cross 64kB boundary */
1735        if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1736                void *olddesc = rxdr->desc;
1737                dma_addr_t olddma = rxdr->dma;
1738                e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1739                      rxdr->size, rxdr->desc);
1740                /* Try again, without freeing the previous, so we get a different block */
1741                rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1742                                                &rxdr->dma, GFP_KERNEL);
1743                /* Failed allocation, critical failure */
1744                if (!rxdr->desc) {
1745                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1746                                          olddma);
1747                        goto setup_rx_desc_die;
1748                }
1749
1750                if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1751                        /* give up */
1752                        dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1753                                          rxdr->dma);
1754                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1755                                          olddma);
1756                        e_err(probe, "Unable to allocate aligned memory for "
1757                              "the Rx descriptor ring\n");
1758                        goto setup_rx_desc_die;
1759                } else {
1760                        /* Free old allocation, new allocation was successful */
1761                        dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1762                                          olddma);
1763                }
1764        }
1765        memset(rxdr->desc, 0, rxdr->size);
1766
1767        rxdr->next_to_clean = 0;
1768        rxdr->next_to_use = 0;
1769        rxdr->rx_skb_top = NULL;
1770
1771        return 0;
1772}
1773
1774/**
1775 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1776 *                                (Descriptors) for all queues
1777 * @adapter: board private structure
1778 *
1779 * Return 0 on success, negative on failure
1780 **/
1781int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1782{
1783        int i, err = 0;
1784
1785        for (i = 0; i < adapter->num_rx_queues; i++) {
1786                err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1787                if (err) {
1788                        e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1789                        for (i--; i >= 0; i--)
1790                                e1000_free_rx_resources(adapter,
1791                                                        &adapter->rx_ring[i]);
1792                        break;
1793                }
1794        }
1795
1796        return err;
1797}
1798
1799/**
1800 * e1000_setup_rctl - configure the receive control registers
1801 * @adapter: Board private structure
1802 **/
1803static void e1000_setup_rctl(struct e1000_adapter *adapter)
1804{
1805        struct e1000_hw *hw = &adapter->hw;
1806        u32 rctl;
1807
1808        rctl = er32(RCTL);
1809
1810        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1811
1812        rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1813                E1000_RCTL_RDMTS_HALF |
1814                (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1815
1816        if (hw->tbi_compatibility_on == 1)
1817                rctl |= E1000_RCTL_SBP;
1818        else
1819                rctl &= ~E1000_RCTL_SBP;
1820
1821        if (adapter->netdev->mtu <= ETH_DATA_LEN)
1822                rctl &= ~E1000_RCTL_LPE;
1823        else
1824                rctl |= E1000_RCTL_LPE;
1825
1826        /* Setup buffer sizes */
1827        rctl &= ~E1000_RCTL_SZ_4096;
1828        rctl |= E1000_RCTL_BSEX;
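        /* BSEX (buffer size extension) selects the scaled-up SZ encodings
         * used for the 4096/8192/16384 cases below; it is cleared again
         * for the default 2048-byte buffers.
         */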
1829        switch (adapter->rx_buffer_len) {
1830        case E1000_RXBUFFER_2048:
1831        default:
1832                rctl |= E1000_RCTL_SZ_2048;
1833                rctl &= ~E1000_RCTL_BSEX;
1834                break;
1835        case E1000_RXBUFFER_4096:
1836                rctl |= E1000_RCTL_SZ_4096;
1837                break;
1838        case E1000_RXBUFFER_8192:
1839                rctl |= E1000_RCTL_SZ_8192;
1840                break;
1841        case E1000_RXBUFFER_16384:
1842                rctl |= E1000_RCTL_SZ_16384;
1843                break;
1844        }
1845
1846        /* This is useful for sniffing bad packets. */
1847        if (adapter->netdev->features & NETIF_F_RXALL) {
1848                /* UPE and MPE will be handled by normal PROMISC logic
1849                 * in e1000_set_rx_mode
1850                 */
1851                rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1852                         E1000_RCTL_BAM | /* RX All Bcast Pkts */
1853                         E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1854
1855                rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1856                          E1000_RCTL_DPF | /* Allow filtered pause */
1857                          E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1858                /* Do not mess with E1000_CTRL_VME; it affects transmit as well,
1859                 * and that breaks VLANs.
1860                 */
1861        }
1862
1863        ew32(RCTL, rctl);
1864}
1865
1866/**
1867 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1868 * @adapter: board private structure
1869 *
1870 * Configure the Rx unit of the MAC after a reset.
1871 **/
1872static void e1000_configure_rx(struct e1000_adapter *adapter)
1873{
1874        u64 rdba;
1875        struct e1000_hw *hw = &adapter->hw;
1876        u32 rdlen, rctl, rxcsum;
1877
1878        if (adapter->netdev->mtu > ETH_DATA_LEN) {
1879                rdlen = adapter->rx_ring[0].count *
1880                        sizeof(struct e1000_rx_desc);
1881                adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1882                adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1883        } else {
1884                rdlen = adapter->rx_ring[0].count *
1885                        sizeof(struct e1000_rx_desc);
1886                adapter->clean_rx = e1000_clean_rx_irq;
1887                adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1888        }
1889
1890        /* disable receives while setting up the descriptors */
1891        rctl = er32(RCTL);
1892        ew32(RCTL, rctl & ~E1000_RCTL_EN);
1893
1894        /* set the Receive Delay Timer Register */
1895        ew32(RDTR, adapter->rx_int_delay);
1896
1897        if (hw->mac_type >= e1000_82540) {
1898                ew32(RADV, adapter->rx_abs_int_delay);
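                /* ITR is programmed in 256 ns units, so an interrupts/sec
                 * target converts as 10^9 / (target * 256); e.g. a target
                 * of 8000 ints/s programs a value of ~488.
                 */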
1899                if (adapter->itr_setting != 0)
1900                        ew32(ITR, 1000000000 / (adapter->itr * 256));
1901        }
1902
1903        /* Setup the HW Rx Head and Tail Descriptor Pointers and
1904         * the Base and Length of the Rx Descriptor Ring
1905         */
1906        switch (adapter->num_rx_queues) {
1907        case 1:
1908        default:
1909                rdba = adapter->rx_ring[0].dma;
1910                ew32(RDLEN, rdlen);
1911                ew32(RDBAH, (rdba >> 32));
1912                ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1913                ew32(RDT, 0);
1914                ew32(RDH, 0);
1915                adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1916                                           E1000_RDH : E1000_82542_RDH);
1917                adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1918                                           E1000_RDT : E1000_82542_RDT);
1919                break;
1920        }
1921
1922        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1923        if (hw->mac_type >= e1000_82543) {
1924                rxcsum = er32(RXCSUM);
1925                if (adapter->rx_csum)
1926                        rxcsum |= E1000_RXCSUM_TUOFL;
1927                else
1928                        /* don't need to clear IPPCSE as it defaults to 0 */
1929                        rxcsum &= ~E1000_RXCSUM_TUOFL;
1930                ew32(RXCSUM, rxcsum);
1931        }
1932
1933        /* Enable Receives */
1934        ew32(RCTL, rctl | E1000_RCTL_EN);
1935}
1936
1937/**
1938 * e1000_free_tx_resources - Free Tx Resources per Queue
1939 * @adapter: board private structure
1940 * @tx_ring: Tx descriptor ring for a specific queue
1941 *
1942 * Free all transmit software resources
1943 **/
1944static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1945                                    struct e1000_tx_ring *tx_ring)
1946{
1947        struct pci_dev *pdev = adapter->pdev;
1948
1949        e1000_clean_tx_ring(adapter, tx_ring);
1950
1951        vfree(tx_ring->buffer_info);
1952        tx_ring->buffer_info = NULL;
1953
1954        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1955                          tx_ring->dma);
1956
1957        tx_ring->desc = NULL;
1958}
1959
1960/**
1961 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1962 * @adapter: board private structure
1963 *
1964 * Free all transmit software resources
1965 **/
1966void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1967{
1968        int i;
1969
1970        for (i = 0; i < adapter->num_tx_queues; i++)
1971                e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1972}
1973
1974static void
1975e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1976                                 struct e1000_tx_buffer *buffer_info)
1977{
1978        if (buffer_info->dma) {
1979                if (buffer_info->mapped_as_page)
1980                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1981                                       buffer_info->length, DMA_TO_DEVICE);
1982                else
1983                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1984                                         buffer_info->length,
1985                                         DMA_TO_DEVICE);
1986                buffer_info->dma = 0;
1987        }
1988        if (buffer_info->skb) {
1989                dev_kfree_skb_any(buffer_info->skb);
1990                buffer_info->skb = NULL;
1991        }
1992        buffer_info->time_stamp = 0;
1993        /* buffer_info must be completely set up in the transmit path */
1994}
1995
1996/**
1997 * e1000_clean_tx_ring - Free Tx Buffers
1998 * @adapter: board private structure
1999 * @tx_ring: ring to be cleaned
2000 **/
2001static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2002                                struct e1000_tx_ring *tx_ring)
2003{
2004        struct e1000_hw *hw = &adapter->hw;
2005        struct e1000_tx_buffer *buffer_info;
2006        unsigned long size;
2007        unsigned int i;
2008
2009        /* Free all the Tx ring sk_buffs */
2010
2011        for (i = 0; i < tx_ring->count; i++) {
2012                buffer_info = &tx_ring->buffer_info[i];
2013                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2014        }
2015
2016        netdev_reset_queue(adapter->netdev);
2017        size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2018        memset(tx_ring->buffer_info, 0, size);
2019
2020        /* Zero out the descriptor ring */
2021
2022        memset(tx_ring->desc, 0, tx_ring->size);
2023
2024        tx_ring->next_to_use = 0;
2025        tx_ring->next_to_clean = 0;
2026        tx_ring->last_tx_tso = false;
2027
2028        writel(0, hw->hw_addr + tx_ring->tdh);
2029        writel(0, hw->hw_addr + tx_ring->tdt);
2030}
2031
2032/**
2033 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2034 * @adapter: board private structure
2035 **/
2036static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2037{
2038        int i;
2039
2040        for (i = 0; i < adapter->num_tx_queues; i++)
2041                e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2042}
2043
2044/**
2045 * e1000_free_rx_resources - Free Rx Resources
2046 * @adapter: board private structure
2047 * @rx_ring: ring to clean the resources from
2048 *
2049 * Free all receive software resources
2050 **/
2051static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2052                                    struct e1000_rx_ring *rx_ring)
2053{
2054        struct pci_dev *pdev = adapter->pdev;
2055
2056        e1000_clean_rx_ring(adapter, rx_ring);
2057
2058        vfree(rx_ring->buffer_info);
2059        rx_ring->buffer_info = NULL;
2060
2061        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2062                          rx_ring->dma);
2063
2064        rx_ring->desc = NULL;
2065}
2066
2067/**
2068 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2069 * @adapter: board private structure
2070 *
2071 * Free all receive software resources
2072 **/
2073void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2074{
2075        int i;
2076
2077        for (i = 0; i < adapter->num_rx_queues; i++)
2078                e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2079}
2080
2081#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
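/* Each Rx fragment is laid out as E1000_HEADROOM bytes of headroom,
 * rx_buffer_len bytes of packet data, and room for the skb_shared_info
 * needed by an skb later built on top of it; e1000_alloc_frag() returns
 * a pointer just past the headroom.
 */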
2082static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2083{
2084        return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2085                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2086}
2087
2088static void *e1000_alloc_frag(const struct e1000_adapter *a)
2089{
2090        unsigned int len = e1000_frag_len(a);
2091        u8 *data = netdev_alloc_frag(len);
2092
2093        if (likely(data))
2094                data += E1000_HEADROOM;
2095        return data;
2096}
2097
2098/**
2099 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2100 * @adapter: board private structure
2101 * @rx_ring: ring to free buffers from
2102 **/
2103static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2104                                struct e1000_rx_ring *rx_ring)
2105{
2106        struct e1000_hw *hw = &adapter->hw;
2107        struct e1000_rx_buffer *buffer_info;
2108        struct pci_dev *pdev = adapter->pdev;
2109        unsigned long size;
2110        unsigned int i;
2111
2112        /* Free all the Rx netfrags */
2113        for (i = 0; i < rx_ring->count; i++) {
2114                buffer_info = &rx_ring->buffer_info[i];
2115                if (adapter->clean_rx == e1000_clean_rx_irq) {
2116                        if (buffer_info->dma)
2117                                dma_unmap_single(&pdev->dev, buffer_info->dma,
2118                                                 adapter->rx_buffer_len,
2119                                                 DMA_FROM_DEVICE);
2120                        if (buffer_info->rxbuf.data) {
2121                                skb_free_frag(buffer_info->rxbuf.data);
2122                                buffer_info->rxbuf.data = NULL;
2123                        }
2124                } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2125                        if (buffer_info->dma)
2126                                dma_unmap_page(&pdev->dev, buffer_info->dma,
2127                                               adapter->rx_buffer_len,
2128                                               DMA_FROM_DEVICE);
2129                        if (buffer_info->rxbuf.page) {
2130                                put_page(buffer_info->rxbuf.page);
2131                                buffer_info->rxbuf.page = NULL;
2132                        }
2133                }
2134
2135                buffer_info->dma = 0;
2136        }
2137
2138        /* there may also be some cached data from a chained receive */
2139        napi_free_frags(&adapter->napi);
2140        rx_ring->rx_skb_top = NULL;
2141
2142        size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2143        memset(rx_ring->buffer_info, 0, size);
2144
2145        /* Zero out the descriptor ring */
2146        memset(rx_ring->desc, 0, rx_ring->size);
2147
2148        rx_ring->next_to_clean = 0;
2149        rx_ring->next_to_use = 0;
2150
2151        writel(0, hw->hw_addr + rx_ring->rdh);
2152        writel(0, hw->hw_addr + rx_ring->rdt);
2153}
2154
2155/**
2156 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2157 * @adapter: board private structure
2158 **/
2159static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2160{
2161        int i;
2162
2163        for (i = 0; i < adapter->num_rx_queues; i++)
2164                e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2165}
2166
2167/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2168 * and memory write and invalidate disabled for certain operations
2169 */
2170static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2171{
2172        struct e1000_hw *hw = &adapter->hw;
2173        struct net_device *netdev = adapter->netdev;
2174        u32 rctl;
2175
2176        e1000_pci_clear_mwi(hw);
2177
2178        rctl = er32(RCTL);
2179        rctl |= E1000_RCTL_RST;
2180        ew32(RCTL, rctl);
2181        E1000_WRITE_FLUSH();
2182        mdelay(5);
2183
2184        if (netif_running(netdev))
2185                e1000_clean_all_rx_rings(adapter);
2186}
2187
2188static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2189{
2190        struct e1000_hw *hw = &adapter->hw;
2191        struct net_device *netdev = adapter->netdev;
2192        u32 rctl;
2193
2194        rctl = er32(RCTL);
2195        rctl &= ~E1000_RCTL_RST;
2196        ew32(RCTL, rctl);
2197        E1000_WRITE_FLUSH();
2198        mdelay(5);
2199
2200        if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2201                e1000_pci_set_mwi(hw);
2202
2203        if (netif_running(netdev)) {
2204                /* No need to loop, because 82542 supports only 1 queue */
2205                struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2206                e1000_configure_rx(adapter);
2207                adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2208        }
2209}
2210
2211/**
2212 * e1000_set_mac - Change the Ethernet Address of the NIC
2213 * @netdev: network interface device structure
2214 * @p: pointer to an address structure
2215 *
2216 * Returns 0 on success, negative on failure
2217 **/
2218static int e1000_set_mac(struct net_device *netdev, void *p)
2219{
2220        struct e1000_adapter *adapter = netdev_priv(netdev);
2221        struct e1000_hw *hw = &adapter->hw;
2222        struct sockaddr *addr = p;
2223
2224        if (!is_valid_ether_addr(addr->sa_data))
2225                return -EADDRNOTAVAIL;
2226
2227        /* 82542 2.0 needs to be in reset to write receive address registers */
2228
2229        if (hw->mac_type == e1000_82542_rev2_0)
2230                e1000_enter_82542_rst(adapter);
2231
2232        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2233        memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2234
2235        e1000_rar_set(hw, hw->mac_addr, 0);
2236
2237        if (hw->mac_type == e1000_82542_rev2_0)
2238                e1000_leave_82542_rst(adapter);
2239
2240        return 0;
2241}
2242
2243/**
2244 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2245 * @netdev: network interface device structure
2246 *
2247 * The set_rx_mode entry point is called whenever the unicast or multicast
2248 * address lists or the network interface flags are updated. This routine is
2249 * responsible for configuring the hardware for proper unicast, multicast,
2250 * promiscuous mode, and all-multi behavior.
2251 **/
2252static void e1000_set_rx_mode(struct net_device *netdev)
2253{
2254        struct e1000_adapter *adapter = netdev_priv(netdev);
2255        struct e1000_hw *hw = &adapter->hw;
2256        struct netdev_hw_addr *ha;
2257        bool use_uc = false;
2258        u32 rctl;
2259        u32 hash_value;
2260        int i, rar_entries = E1000_RAR_ENTRIES;
2261        int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2262        u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2263
2264        if (!mcarray)
2265                return;
2266
2267        /* Check for Promiscuous and All Multicast modes */
2268
2269        rctl = er32(RCTL);
2270
2271        if (netdev->flags & IFF_PROMISC) {
2272                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2273                rctl &= ~E1000_RCTL_VFE;
2274        } else {
2275                if (netdev->flags & IFF_ALLMULTI)
2276                        rctl |= E1000_RCTL_MPE;
2277                else
2278                        rctl &= ~E1000_RCTL_MPE;
2279                /* Enable VLAN filter if there is a VLAN */
2280                if (e1000_vlan_used(adapter))
2281                        rctl |= E1000_RCTL_VFE;
2282        }
2283
2284        if (netdev_uc_count(netdev) > rar_entries - 1) {
2285                rctl |= E1000_RCTL_UPE;
2286        } else if (!(netdev->flags & IFF_PROMISC)) {
2287                rctl &= ~E1000_RCTL_UPE;
2288                use_uc = true;
2289        }
2290
2291        ew32(RCTL, rctl);
2292
2293        /* 82542 2.0 needs to be in reset to write receive address registers */
2294
2295        if (hw->mac_type == e1000_82542_rev2_0)
2296                e1000_enter_82542_rst(adapter);
2297
2298        /* load the first 14 addresses into the exact filters 1-14. Unicast
2299         * addresses take precedence to avoid disabling unicast filtering
2300         * when possible.
2301         *
2302         * RAR 0 is used for the station MAC address.
2303         * If there are not 14 addresses, go ahead and clear the remaining filters.
2304         */
2305        i = 1;
2306        if (use_uc)
2307                netdev_for_each_uc_addr(ha, netdev) {
2308                        if (i == rar_entries)
2309                                break;
2310                        e1000_rar_set(hw, ha->addr, i++);
2311                }
2312
2313        netdev_for_each_mc_addr(ha, netdev) {
2314                if (i == rar_entries) {
2315                        /* load any remaining addresses into the hash table */
2316                        u32 hash_reg, hash_bit, mta;
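                        /* The hash selects one bit in the multicast table:
                         * bits 11:5 pick one of the 32-bit MTA registers,
                         * bits 4:0 pick the bit within that register.
                         */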
2317                        hash_value = e1000_hash_mc_addr(hw, ha->addr);
2318                        hash_reg = (hash_value >> 5) & 0x7F;
2319                        hash_bit = hash_value & 0x1F;
2320                        mta = (1 << hash_bit);
2321                        mcarray[hash_reg] |= mta;
2322                } else {
2323                        e1000_rar_set(hw, ha->addr, i++);
2324                }
2325        }
2326
2327        for (; i < rar_entries; i++) {
2328                E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2329                E1000_WRITE_FLUSH();
2330                E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2331                E1000_WRITE_FLUSH();
2332        }
2333
2334        /* write the hash table completely; write from the bottom to avoid
2335         * both stupid write-combining chipsets and flushing each write
2336         */
2337        for (i = mta_reg_count - 1; i >= 0 ; i--) {
2338                /* The 82544 has an erratum where writing odd
2339                 * offsets overwrites the previous even offset; writing
2340                 * backwards over the range avoids the issue by always
2341                 * writing the odd offset first.
2342                 */
2343                E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2344        }
2345        E1000_WRITE_FLUSH();
2346
2347        if (hw->mac_type == e1000_82542_rev2_0)
2348                e1000_leave_82542_rst(adapter);
2349
2350        kfree(mcarray);
2351}
2352
2353/**
2354 * e1000_update_phy_info_task - get phy info
2355 * @work: work struct contained inside adapter struct
2356 *
2357 * Need to wait a few seconds after link up to get diagnostic information from
2358 * the phy
2359 */
2360static void e1000_update_phy_info_task(struct work_struct *work)
2361{
2362        struct e1000_adapter *adapter = container_of(work,
2363                                                     struct e1000_adapter,
2364                                                     phy_info_task.work);
2365
2366        e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2367}
2368
2369/**
2370 * e1000_82547_tx_fifo_stall_task - task to complete work
2371 * @work: work struct contained inside adapter struct
2372 **/
2373static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2374{
2375        struct e1000_adapter *adapter = container_of(work,
2376                                                     struct e1000_adapter,
2377                                                     fifo_stall_task.work);
2378        struct e1000_hw *hw = &adapter->hw;
2379        struct net_device *netdev = adapter->netdev;
2380        u32 tctl;
2381
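        /* Only reset the Tx FIFO pointers once both the descriptor ring
         * (TDH == TDT) and the on-chip FIFO have fully drained; otherwise
         * the task reschedules itself and checks again.
         */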
2382        if (atomic_read(&adapter->tx_fifo_stall)) {
2383                if ((er32(TDT) == er32(TDH)) &&
2384                   (er32(TDFT) == er32(TDFH)) &&
2385                   (er32(TDFTS) == er32(TDFHS))) {
2386                        tctl = er32(TCTL);
2387                        ew32(TCTL, tctl & ~E1000_TCTL_EN);
2388                        ew32(TDFT, adapter->tx_head_addr);
2389                        ew32(TDFH, adapter->tx_head_addr);
2390                        ew32(TDFTS, adapter->tx_head_addr);
2391                        ew32(TDFHS, adapter->tx_head_addr);
2392                        ew32(TCTL, tctl);
2393                        E1000_WRITE_FLUSH();
2394
2395                        adapter->tx_fifo_head = 0;
2396                        atomic_set(&adapter->tx_fifo_stall, 0);
2397                        netif_wake_queue(netdev);
2398                } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2399                        schedule_delayed_work(&adapter->fifo_stall_task, 1);
2400                }
2401        }
2402}
2403
2404bool e1000_has_link(struct e1000_adapter *adapter)
2405{
2406        struct e1000_hw *hw = &adapter->hw;
2407        bool link_active = false;
2408
2409        /* get_link_status is set on LSC (link status) interrupt or rx
2410         * sequence error interrupt (except on intel ce4100).
2411         * get_link_status will stay false until the
2412         * e1000_check_for_link establishes link for copper adapters
2413         * ONLY
2414         */
2415        switch (hw->media_type) {
2416        case e1000_media_type_copper:
2417                if (hw->mac_type == e1000_ce4100)
2418                        hw->get_link_status = 1;
2419                if (hw->get_link_status) {
2420                        e1000_check_for_link(hw);
2421                        link_active = !hw->get_link_status;
2422                } else {
2423                        link_active = true;
2424                }
2425                break;
2426        case e1000_media_type_fiber:
2427                e1000_check_for_link(hw);
2428                link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2429                break;
2430        case e1000_media_type_internal_serdes:
2431                e1000_check_for_link(hw);
2432                link_active = hw->serdes_has_link;
2433                break;
2434        default:
2435                break;
2436        }
2437
2438        return link_active;
2439}
2440
2441/**
2442 * e1000_watchdog - work function
2443 * @work: work struct contained inside adapter struct
2444 **/
2445static void e1000_watchdog(struct work_struct *work)
2446{
2447        struct e1000_adapter *adapter = container_of(work,
2448                                                     struct e1000_adapter,
2449                                                     watchdog_task.work);
2450        struct e1000_hw *hw = &adapter->hw;
2451        struct net_device *netdev = adapter->netdev;
2452        struct e1000_tx_ring *txdr = adapter->tx_ring;
2453        u32 link, tctl;
2454
2455        link = e1000_has_link(adapter);
2456        if ((netif_carrier_ok(netdev)) && link)
2457                goto link_up;
2458
2459        if (link) {
2460                if (!netif_carrier_ok(netdev)) {
2461                        u32 ctrl;
2462                        bool txb2b = true;
2463                        /* update snapshot of PHY registers on LSC */
2464                        e1000_get_speed_and_duplex(hw,
2465                                                   &adapter->link_speed,
2466                                                   &adapter->link_duplex);
2467
2468                        ctrl = er32(CTRL);
2469                        pr_info("%s NIC Link is Up %d Mbps %s, "
2470                                "Flow Control: %s\n",
2471                                netdev->name,
2472                                adapter->link_speed,
2473                                adapter->link_duplex == FULL_DUPLEX ?
2474                                "Full Duplex" : "Half Duplex",
2475                                ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2476                                E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2477                                E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2478                                E1000_CTRL_TFCE) ? "TX" : "None")));
2479
2480                        /* adjust timeout factor according to speed/duplex */
2481                        adapter->tx_timeout_factor = 1;
2482                        switch (adapter->link_speed) {
2483                        case SPEED_10:
2484                                txb2b = false;
2485                                adapter->tx_timeout_factor = 16;
2486                                break;
2487                        case SPEED_100:
2488                                txb2b = false;
2489                                /* maybe add some timeout factor ? */
2490                                break;
2491                        }
2492
2493                        /* enable transmits in the hardware */
2494                        tctl = er32(TCTL);
2495                        tctl |= E1000_TCTL_EN;
2496                        ew32(TCTL, tctl);
2497
2498                        netif_carrier_on(netdev);
2499                        if (!test_bit(__E1000_DOWN, &adapter->flags))
2500                                schedule_delayed_work(&adapter->phy_info_task,
2501                                                      2 * HZ);
2502                        adapter->smartspeed = 0;
2503                }
2504        } else {
2505                if (netif_carrier_ok(netdev)) {
2506                        adapter->link_speed = 0;
2507                        adapter->link_duplex = 0;
2508                        pr_info("%s NIC Link is Down\n",
2509                                netdev->name);
2510                        netif_carrier_off(netdev);
2511
2512                        if (!test_bit(__E1000_DOWN, &adapter->flags))
2513                                schedule_delayed_work(&adapter->phy_info_task,
2514                                                      2 * HZ);
2515                }
2516
2517                e1000_smartspeed(adapter);
2518        }
2519
2520link_up:
2521        e1000_update_stats(adapter);
2522
2523        hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2524        adapter->tpt_old = adapter->stats.tpt;
2525        hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2526        adapter->colc_old = adapter->stats.colc;
2527
2528        adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2529        adapter->gorcl_old = adapter->stats.gorcl;
2530        adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2531        adapter->gotcl_old = adapter->stats.gotcl;
2532
2533        e1000_update_adaptive(hw);
2534
2535        if (!netif_carrier_ok(netdev)) {
2536                if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2537                        /* We've lost link, so the controller stops DMA,
2538                         * but we've got queued Tx work that's never going
2539                         * to get done, so reset controller to flush Tx.
2540                         * (Do the reset outside of interrupt context).
2541                         */
2542                        adapter->tx_timeout_count++;
2543                        schedule_work(&adapter->reset_task);
2544                        /* exit immediately since reset is imminent */
2545                        return;
2546                }
2547        }
2548
2549        /* Simple mode for Interrupt Throttle Rate (ITR) */
2550        if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2551                /* Symmetric Tx/Rx gets a reduced ITR=2000;
2552                 * Total asymmetrical Tx or Rx gets ITR=8000;
2553                 * everyone else is between 2000-8000.
2554                 */
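                /* e.g. perfectly symmetric traffic gives dif ~= 0 and so
                 * itr ~= 2000, fully one-sided traffic gives dif == goc
                 * and so itr == 8000; mixed loads land in between.
                 */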
2555                u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2556                u32 dif = (adapter->gotcl > adapter->gorcl ?
2557                            adapter->gotcl - adapter->gorcl :
2558                            adapter->gorcl - adapter->gotcl) / 10000;
2559                u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2560
2561                ew32(ITR, 1000000000 / (itr * 256));
2562        }
2563
2564        /* Cause software interrupt to ensure rx ring is cleaned */
2565        ew32(ICS, E1000_ICS_RXDMT0);
2566
2567        /* Force detection of hung controller every watchdog period */
2568        adapter->detect_tx_hung = true;
2569
2570        /* Reschedule the task */
2571        if (!test_bit(__E1000_DOWN, &adapter->flags))
2572                schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2573}
2574
2575enum latency_range {
2576        lowest_latency = 0,
2577        low_latency = 1,
2578        bulk_latency = 2,
2579        latency_invalid = 255
2580};
2581
2582/**
2583 * e1000_update_itr - update the dynamic ITR value based on statistics
2584 * @adapter: pointer to adapter
2585 * @itr_setting: current adapter->itr
2586 * @packets: the number of packets during this measurement interval
2587 * @bytes: the number of bytes during this measurement interval
2588 *
2589 *      Stores a new ITR value based on packets and byte
2590 *      counts during the last interrupt.  The advantage of per interrupt
2591 *      computation is faster updates and more accurate ITR for the current
2592 *      traffic pattern.  Constants in this function were computed
2593 *      based on theoretical maximum wire speed and thresholds were set based
2594 *      on testing data as well as attempting to minimize response time
2595 *      while increasing bulk throughput.
2596 *      This functionality is controlled by the InterruptThrottleRate module
2597 *      parameter (see e1000_param.c).
2598 **/
2599static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2600                                     u16 itr_setting, int packets, int bytes)
2601{
2602        unsigned int retval = itr_setting;
2603        struct e1000_hw *hw = &adapter->hw;
2604
2605        if (unlikely(hw->mac_type < e1000_82540))
2606                goto update_itr_done;
2607
2608        if (packets == 0)
2609                goto update_itr_done;
2610
2611        switch (itr_setting) {
2612        case lowest_latency:
2613                /* jumbo frames get bulk treatment */
2614                if (bytes/packets > 8000)
2615                        retval = bulk_latency;
2616                else if ((packets < 5) && (bytes > 512))
2617                        retval = low_latency;
2618                break;
2619        case low_latency:  /* 50 usec aka 20000 ints/s */
2620                if (bytes > 10000) {
2621                        /* jumbo frames need bulk latency setting */
2622                        if (bytes/packets > 8000)
2623                                retval = bulk_latency;
2624                        else if ((packets < 10) || ((bytes/packets) > 1200))
2625                                retval = bulk_latency;
2626                        else if ((packets > 35))
2627                                retval = lowest_latency;
2628                } else if (bytes/packets > 2000)
2629                        retval = bulk_latency;
2630                else if (packets <= 2 && bytes < 512)
2631                        retval = lowest_latency;
2632                break;
2633        case bulk_latency: /* 250 usec aka 4000 ints/s */
2634                if (bytes > 25000) {
2635                        if (packets > 35)
2636                                retval = low_latency;
2637                } else if (bytes < 6000) {
2638                        retval = low_latency;
2639                }
2640                break;
2641        }
2642
2643update_itr_done:
2644        return retval;
2645}
2646
2647static void e1000_set_itr(struct e1000_adapter *adapter)
2648{
2649        struct e1000_hw *hw = &adapter->hw;
2650        u16 current_itr;
2651        u32 new_itr = adapter->itr;
2652
2653        if (unlikely(hw->mac_type < e1000_82540))
2654                return;
2655
2656        /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2657        if (unlikely(adapter->link_speed != SPEED_1000)) {
2658                current_itr = 0;
2659                new_itr = 4000;
2660                goto set_itr_now;
2661        }
2662
2663        adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2664                                           adapter->total_tx_packets,
2665                                           adapter->total_tx_bytes);
2666        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2667        if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2668                adapter->tx_itr = low_latency;
2669
2670        adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2671                                           adapter->total_rx_packets,
2672                                           adapter->total_rx_bytes);
2673        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2674        if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2675                adapter->rx_itr = low_latency;
2676
2677        current_itr = max(adapter->rx_itr, adapter->tx_itr);
2678
2679        switch (current_itr) {
2680        /* counts and packets in update_itr are dependent on these numbers */
2681        case lowest_latency:
2682                new_itr = 70000;
2683                break;
2684        case low_latency:
2685                new_itr = 20000; /* aka hwitr = ~200 */
2686                break;
2687        case bulk_latency:
2688                new_itr = 4000;
2689                break;
2690        default:
2691                break;
2692        }
2693
2694set_itr_now:
2695        if (new_itr != adapter->itr) {
2696                /* this attempts to bias the interrupt rate towards Bulk
2697                 * by adding intermediate steps when interrupt rate is
2698                 * increasing
2699                 */
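                /* e.g. stepping up from itr == 4000 towards 20000 first
                 * programs min(4000 + 20000/4, 20000) == 9000, so several
                 * passes are needed to reach the low-latency rate.
                 */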
2700                new_itr = new_itr > adapter->itr ?
2701                          min(adapter->itr + (new_itr >> 2), new_itr) :
2702                          new_itr;
2703                adapter->itr = new_itr;
2704                ew32(ITR, 1000000000 / (new_itr * 256));
2705        }
2706}
2707
2708#define E1000_TX_FLAGS_CSUM             0x00000001
2709#define E1000_TX_FLAGS_VLAN             0x00000002
2710#define E1000_TX_FLAGS_TSO              0x00000004
2711#define E1000_TX_FLAGS_IPV4             0x00000008
2712#define E1000_TX_FLAGS_NO_FCS           0x00000010
2713#define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2714#define E1000_TX_FLAGS_VLAN_SHIFT       16
2715
2716static int e1000_tso(struct e1000_adapter *adapter,
2717                     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2718                     __be16 protocol)
2719{
2720        struct e1000_context_desc *context_desc;
2721        struct e1000_tx_buffer *buffer_info;
2722        unsigned int i;
2723        u32 cmd_length = 0;
2724        u16 ipcse = 0, tucse, mss;
2725        u8 ipcss, ipcso, tucss, tucso, hdr_len;
2726
2727        if (skb_is_gso(skb)) {
2728                int err;
2729
2730                err = skb_cow_head(skb, 0);
2731                if (err < 0)
2732                        return err;
2733
2734                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2735                mss = skb_shinfo(skb)->gso_size;
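                /* hdr_len covers the MAC, IP and TCP headers; skb->len -
                 * hdr_len, OR'd into cmd_length below, is the payload the
                 * hardware will segment into mss-sized chunks.
                 */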
2736                if (protocol == htons(ETH_P_IP)) {
2737                        struct iphdr *iph = ip_hdr(skb);
2738                        iph->tot_len = 0;
2739                        iph->check = 0;
2740                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2741                                                                 iph->daddr, 0,
2742                                                                 IPPROTO_TCP,
2743                                                                 0);
2744                        cmd_length = E1000_TXD_CMD_IP;
2745                        ipcse = skb_transport_offset(skb) - 1;
2746                } else if (skb_is_gso_v6(skb)) {
2747                        ipv6_hdr(skb)->payload_len = 0;
2748                        tcp_hdr(skb)->check =
2749                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2750                                                 &ipv6_hdr(skb)->daddr,
2751                                                 0, IPPROTO_TCP, 0);
2752                        ipcse = 0;
2753                }
2754                ipcss = skb_network_offset(skb);
2755                ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2756                tucss = skb_transport_offset(skb);
2757                tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2758                tucse = 0;
2759
2760                cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2761                               E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2762
2763                i = tx_ring->next_to_use;
2764                context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2765                buffer_info = &tx_ring->buffer_info[i];
2766
2767                context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2768                context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2769                context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2770                context_desc->upper_setup.tcp_fields.tucss = tucss;
2771                context_desc->upper_setup.tcp_fields.tucso = tucso;
2772                context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2773                context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2774                context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2775                context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2776
2777                buffer_info->time_stamp = jiffies;
2778                buffer_info->next_to_watch = i;
2779
2780                if (++i == tx_ring->count)
2781                        i = 0;
2782
2783                tx_ring->next_to_use = i;
2784
2785                return true;
2786        }
2787        return false;
2788}
2789
2790static bool e1000_tx_csum(struct e1000_adapter *adapter,
2791                          struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2792                          __be16 protocol)
2793{
2794        struct e1000_context_desc *context_desc;
2795        struct e1000_tx_buffer *buffer_info;
2796        unsigned int i;
2797        u8 css;
2798        u32 cmd_len = E1000_TXD_CMD_DEXT;
2799
2800        if (skb->ip_summed != CHECKSUM_PARTIAL)
2801                return false;
2802
2803        switch (protocol) {
2804        case cpu_to_be16(ETH_P_IP):
2805                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2806                        cmd_len |= E1000_TXD_CMD_TCP;
2807                break;
2808        case cpu_to_be16(ETH_P_IPV6):
2809                /* XXX not handling all IPV6 headers */
2810                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2811                        cmd_len |= E1000_TXD_CMD_TCP;
2812                break;
2813        default:
2814                if (unlikely(net_ratelimit()))
2815                        e_warn(drv, "checksum_partial proto=%x!\n",
2816                               skb->protocol);
2817                break;
2818        }
2819
2820        css = skb_checksum_start_offset(skb);
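        /* css is where hardware checksumming starts (tucss below); the
         * result is inserted at css + skb->csum_offset (tucso).
         */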
2821
2822        i = tx_ring->next_to_use;
2823        buffer_info = &tx_ring->buffer_info[i];
2824        context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2825
2826        context_desc->lower_setup.ip_config = 0;
2827        context_desc->upper_setup.tcp_fields.tucss = css;
2828        context_desc->upper_setup.tcp_fields.tucso =
2829                css + skb->csum_offset;
2830        context_desc->upper_setup.tcp_fields.tucse = 0;
2831        context_desc->tcp_seg_setup.data = 0;
2832        context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2833
2834        buffer_info->time_stamp = jiffies;
2835        buffer_info->next_to_watch = i;
2836
2837        if (unlikely(++i == tx_ring->count))
2838                i = 0;
2839
2840        tx_ring->next_to_use = i;
2841
2842        return true;
2843}
2844
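    /* the driver limits each Tx data descriptor to 2^12 = 4096 bytes of data */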
2845#define E1000_MAX_TXD_PWR       12
2846#define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2847
2848static int e1000_tx_map(struct e1000_adapter *adapter,
2849                        struct e1000_tx_ring *tx_ring,
2850                        struct sk_buff *skb, unsigned int first,
2851                        unsigned int max_per_txd, unsigned int nr_frags,
2852                        unsigned int mss)
2853{
2854        struct e1000_hw *hw = &adapter->hw;
2855        struct pci_dev *pdev = adapter->pdev;
2856        struct e1000_tx_buffer *buffer_info;
2857        unsigned int len = skb_headlen(skb);
2858        unsigned int offset = 0, size, count = 0, i;
2859        unsigned int f, bytecount, segs;
2860
2861        i = tx_ring->next_to_use;
2862
2863        while (len) {
2864                buffer_info = &tx_ring->buffer_info[i];
2865                size = min(len, max_per_txd);
2866                /* Workaround for Controller erratum --
2867                 * descriptor for non-tso packet in a linear SKB that follows a
2868                 * tso gets written back prematurely before the data is fully
2869                 * DMA'd to the controller
2870                 */
2871                if (!skb->data_len && tx_ring->last_tx_tso &&
2872                    !skb_is_gso(skb)) {
2873                        tx_ring->last_tx_tso = false;
2874                        size -= 4;
2875                }
2876
2877                /* Workaround for premature desc write-backs
2878                 * in TSO mode.  Append 4-byte sentinel desc
2879                 */
2880                if (unlikely(mss && !nr_frags && size == len && size > 8))
2881                        size -= 4;
2882                /* Work around errata 10, which applies to all
2883                 * controllers in PCI-X mode.  The fix is to make sure
2884                 * that the first descriptor of a packet is smaller
2885                 * than 2048 - 16 - 16 (or 2016) bytes.
2886                 */
2887                if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2888                             (size > 2015) && count == 0))
2889                        size = 2015;
2890
2891                /* Workaround for potential 82544 hang in PCI-X.  Avoid
2892                 * terminating buffers within evenly-aligned dwords.
2893                 */
2894                if (unlikely(adapter->pcix_82544 &&
2895                   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2896                   size > 4))
2897                        size -= 4;
2898
2899                buffer_info->length = size;
2900                /* set time_stamp *before* dma to help avoid a possible race */
2901                buffer_info->time_stamp = jiffies;
2902                buffer_info->mapped_as_page = false;
2903                buffer_info->dma = dma_map_single(&pdev->dev,
2904                                                  skb->data + offset,
2905                                                  size, DMA_TO_DEVICE);
2906                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2907                        goto dma_error;
2908                buffer_info->next_to_watch = i;
2909
2910                len -= size;
2911                offset += size;
2912                count++;
2913                if (len) {
2914                        i++;
2915                        if (unlikely(i == tx_ring->count))
2916                                i = 0;
2917                }
2918        }
2919
2920        for (f = 0; f < nr_frags; f++) {
2921                const struct skb_frag_struct *frag;
2922
2923                frag = &skb_shinfo(skb)->frags[f];
2924                len = skb_frag_size(frag);
2925                offset = 0;
2926
2927                while (len) {
2928                        unsigned long bufend;
2929                        i++;
2930                        if (unlikely(i == tx_ring->count))
2931                                i = 0;
2932
2933                        buffer_info = &tx_ring->buffer_info[i];
2934                        size = min(len, max_per_txd);
2935                        /* Workaround for premature desc write-backs
2936                         * in TSO mode.  Append 4-byte sentinel desc
2937                         */
2938                        if (unlikely(mss && f == (nr_frags-1) &&
2939                            size == len && size > 8))
2940                                size -= 4;
2941                        /* Workaround for potential 82544 hang in PCI-X.
2942                         * Avoid terminating buffers within evenly-aligned
2943                         * dwords.
2944                         */
2945                        bufend = (unsigned long)
2946                                page_to_phys(skb_frag_page(frag));
2947                        bufend += offset + size - 1;
2948                        if (unlikely(adapter->pcix_82544 &&
2949                                     !(bufend & 4) &&
2950                                     size > 4))
2951                                size -= 4;
2952
2953                        buffer_info->length = size;
2954                        buffer_info->time_stamp = jiffies;
2955                        buffer_info->mapped_as_page = true;
2956                        buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2957                                                offset, size, DMA_TO_DEVICE);
2958                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2959                                goto dma_error;
2960                        buffer_info->next_to_watch = i;
2961
2962                        len -= size;
2963                        offset += size;
2964                        count++;
2965                }
2966        }
2967
2968        segs = skb_shinfo(skb)->gso_segs ?: 1;
2969        /* each additional GSO segment carries another copy of the headers */
2970        bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2971
2972        tx_ring->buffer_info[i].skb = skb;
2973        tx_ring->buffer_info[i].segs = segs;
2974        tx_ring->buffer_info[i].bytecount = bytecount;
2975        tx_ring->buffer_info[first].next_to_watch = i;
2976
2977        return count;
2978
2979dma_error:
2980        dev_err(&pdev->dev, "TX DMA map failed\n");
2981        buffer_info->dma = 0;
2982        if (count)
2983                count--;
2984
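            /* unwind: walk backwards and release every buffer mapped so far */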
2985        while (count--) {
2986                if (i == 0)
2987                        i += tx_ring->count;
2988                i--;
2989                buffer_info = &tx_ring->buffer_info[i];
2990                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2991        }
2992
2993        return 0;
2994}
2995
2996static void e1000_tx_queue(struct e1000_adapter *adapter,
2997                           struct e1000_tx_ring *tx_ring, int tx_flags,
2998                           int count)
2999{
3000        struct e1000_tx_desc *tx_desc = NULL;
3001        struct e1000_tx_buffer *buffer_info;
3002        u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3003        unsigned int i;
3004
3005        if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3006                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3007                             E1000_TXD_CMD_TSE;
3008                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3009
3010                if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3011                        txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3012        }
3013
3014        if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3015                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3016                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3017        }
3018
3019        if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3020                txd_lower |= E1000_TXD_CMD_VLE;
3021                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3022        }
3023
3024        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3025                txd_lower &= ~(E1000_TXD_CMD_IFCS);
3026
3027        i = tx_ring->next_to_use;
3028
3029        while (count--) {
3030                buffer_info = &tx_ring->buffer_info[i];
3031                tx_desc = E1000_TX_DESC(*tx_ring, i);
3032                tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3033                tx_desc->lower.data =
3034                        cpu_to_le32(txd_lower | buffer_info->length);
3035                tx_desc->upper.data = cpu_to_le32(txd_upper);
3036                if (unlikely(++i == tx_ring->count))
3037                        i = 0;
3038        }
3039
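            /* only the last descriptor of the packet gets the final
             * command bits from adapter->txd_cmd
             */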
3040        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3041
3042        /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3043        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3044                tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3045
3046        /* Force memory writes to complete before letting h/w
3047         * know there are new descriptors to fetch.  (Only
3048         * applicable for weak-ordered memory model archs,
3049         * such as IA-64).
3050         */
3051        wmb();
3052
3053        tx_ring->next_to_use = i;
3054}
3055
3056/* 82547 workaround to avoid controller hang in half-duplex environment.
3057 * The workaround is to avoid queuing a large packet that would span
3058 * the internal Tx FIFO ring boundary by notifying the stack to resend
3059 * the packet at a later time.  This gives the Tx FIFO an opportunity to
3060 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3061 * to the beginning of the Tx FIFO.
3062 */
3063
3064#define E1000_FIFO_HDR                  0x10
3065#define E1000_82547_PAD_LEN             0x3E0
3066
3067static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3068                                       struct sk_buff *skb)
3069{
3070        u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3071        u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3072
3073        skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3074
3075        if (adapter->link_duplex != HALF_DUPLEX)
3076                goto no_fifo_stall_required;
3077
3078        if (atomic_read(&adapter->tx_fifo_stall))
3079                return 1;
3080
3081        if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3082                atomic_set(&adapter->tx_fifo_stall, 1);
3083                return 1;
3084        }
3085
3086no_fifo_stall_required:
3087        adapter->tx_fifo_head += skb_fifo_len;
3088        if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3089                adapter->tx_fifo_head -= adapter->tx_fifo_size;
3090        return 0;
3091}
3092
3093static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3094{
3095        struct e1000_adapter *adapter = netdev_priv(netdev);
3096        struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3097
3098        netif_stop_queue(netdev);
3099        /* Herbert's original patch had:
3100         *  smp_mb__after_netif_stop_queue();
3101         * but since that doesn't exist yet, just open code it.
3102         */
3103        smp_mb();
3104
3105        /* We need to check again in a case another CPU has just
3106         * made room available.
3107         */
3108        if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3109                return -EBUSY;
3110
3111        /* A reprieve! */
3112        netif_start_queue(netdev);
3113        ++adapter->restart_queue;
3114        return 0;
3115}
3116
3117static int e1000_maybe_stop_tx(struct net_device *netdev,
3118                               struct e1000_tx_ring *tx_ring, int size)
3119{
3120        if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3121                return 0;
3122        return __e1000_maybe_stop_tx(netdev, size);
3123}
3124
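    /* number of Tx descriptors needed to carry S bytes when each descriptor
     * holds at most 2^X bytes, i.e. DIV_ROUND_UP(S, 1 << X)
     */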
3125#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3126static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3127                                    struct net_device *netdev)
3128{
3129        struct e1000_adapter *adapter = netdev_priv(netdev);
3130        struct e1000_hw *hw = &adapter->hw;
3131        struct e1000_tx_ring *tx_ring;
3132        unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3133        unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3134        unsigned int tx_flags = 0;
3135        unsigned int len = skb_headlen(skb);
3136        unsigned int nr_frags;
3137        unsigned int mss;
3138        int count = 0;
3139        int tso;
3140        unsigned int f;
3141        __be16 protocol = vlan_get_protocol(skb);
3142
3143        /* This goes back to the question of how to logically map a Tx queue
3144         * to a flow.  Right now, performance is impacted slightly negatively
3145         * if using multiple Tx queues.  If the stack breaks away from a
3146         * single qdisc implementation, we can look at this again.
3147         */
3148        tx_ring = adapter->tx_ring;
3149
3150        /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3151         * packets may get corrupted during padding by HW.
3152         * To work around this issue, pad all small packets manually.
3153         */
3154        if (eth_skb_pad(skb))
3155                return NETDEV_TX_OK;
3156
3157        mss = skb_shinfo(skb)->gso_size;
3158        /* The controller does a simple calculation to make sure
3159         * there is enough room in the FIFO before initiating the
3160         * DMA for each buffer; it assumes each buffer spans at
3161         * most 4 MSS-sized chunks, i.e. ceil(buffer len / mss) <= 4.
3162         * To make sure we don't overrun the FIFO, cap the max
3163         * buffer length at 4 * mss if the MSS drops.
3164         */
3165        if (mss) {
3166                u8 hdr_len;
3167                max_per_txd = min(mss << 2, max_per_txd);
3168                max_txd_pwr = fls(max_per_txd) - 1;
3169
3170                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3171                if (skb->data_len && hdr_len == len) {
3172                        switch (hw->mac_type) {
3173                                unsigned int pull_size;
3174                        case e1000_82544:
3175                                /* Make sure we have room to chop off 4 bytes,
3176                                 * and that the end alignment will work out to
3177                                 * this hardware's requirements
3178                                 * NOTE: this is a TSO only workaround
3179                                 * if end byte alignment not correct move us
3180                                 * into the next dword
3181                                 */
3182                                if ((unsigned long)(skb_tail_pointer(skb) - 1)
3183                                    & 4)
3184                                        break;
3185                                /* pull up to 4 bytes of payload into the linear area */
3186                                pull_size = min((unsigned int)4, skb->data_len);
3187                                if (!__pskb_pull_tail(skb, pull_size)) {
3188                                        e_err(drv, "__pskb_pull_tail "
3189                                              "failed.\n");
3190                                        dev_kfree_skb_any(skb);
3191                                        return NETDEV_TX_OK;
3192                                }
3193                                len = skb_headlen(skb);
3194                                break;
3195                        default:
3196                                /* do nothing */
3197                                break;
3198                        }
3199                }
3200        }
3201
3202        /* reserve a descriptor for the offload context */
3203        if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3204                count++;
3205        count++;
3206
3207        /* Controller Erratum workaround */
3208        if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3209                count++;
3210
3211        count += TXD_USE_COUNT(len, max_txd_pwr);
3212
3213        if (adapter->pcix_82544)
3214                count++;
3215
3216        /* Work around errata 10, which applies to all controllers
3217         * in PCI-X mode, by adding one more descriptor to the count.
3218         */
3219        if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3220                        (len > 2015)))
3221                count++;
3222
3223        nr_frags = skb_shinfo(skb)->nr_frags;
3224        for (f = 0; f < nr_frags; f++)
3225                count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3226                                       max_txd_pwr);
3227        if (adapter->pcix_82544)
3228                count += nr_frags;
3229
3230        /* need: count + 2 desc gap to keep tail from touching
3231         * head, otherwise try next time
3232         */
3233        if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3234                return NETDEV_TX_BUSY;
3235
3236        if (unlikely((hw->mac_type == e1000_82547) &&
3237                     (e1000_82547_fifo_workaround(adapter, skb)))) {
3238                netif_stop_queue(netdev);
3239                if (!test_bit(__E1000_DOWN, &adapter->flags))
3240                        schedule_delayed_work(&adapter->fifo_stall_task, 1);
3241                return NETDEV_TX_BUSY;
3242        }
3243
3244        if (skb_vlan_tag_present(skb)) {
3245                tx_flags |= E1000_TX_FLAGS_VLAN;
3246                tx_flags |= (skb_vlan_tag_get(skb) <<
3247                             E1000_TX_FLAGS_VLAN_SHIFT);
3248        }
3249
3250        first = tx_ring->next_to_use;
3251
3252        tso = e1000_tso(adapter, tx_ring, skb, protocol);
3253        if (tso < 0) {
3254                dev_kfree_skb_any(skb);
3255                return NETDEV_TX_OK;
3256        }
3257
3258        if (likely(tso)) {
3259                if (likely(hw->mac_type != e1000_82544))
3260                        tx_ring->last_tx_tso = true;
3261                tx_flags |= E1000_TX_FLAGS_TSO;
3262        } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3263                tx_flags |= E1000_TX_FLAGS_CSUM;
3264
3265        if (protocol == htons(ETH_P_IP))
3266                tx_flags |= E1000_TX_FLAGS_IPV4;
3267
3268        if (unlikely(skb->no_fcs))
3269                tx_flags |= E1000_TX_FLAGS_NO_FCS;
3270
3271        count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3272                             nr_frags, mss);
3273
3274        if (count) {
3275                /* We need more descriptors than other Intel drivers due
3276                 * to a number of workarounds.  The breakdown is below:
3277                 * Data descriptors: MAX_SKB_FRAGS + 1
3278                 * Context Descriptor: 1
3279                 * Keep head from touching tail: 2
3280                 * Workarounds: 3
3281                 */
3282                int desc_needed = MAX_SKB_FRAGS + 7;
3283
3284                netdev_sent_queue(netdev, skb->len);
3285                skb_tx_timestamp(skb);
3286
3287                e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3288
3289                /* 82544 potentially requires twice as many data descriptors
3290                 * in order to guarantee buffers don't end on evenly-aligned
3291                 * dwords
3292                 */
3293                if (adapter->pcix_82544)
3294                        desc_needed += MAX_SKB_FRAGS + 1;
3295
3296                /* Make sure there is space in the ring for the next send. */
3297                e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3298
3299                if (!skb->xmit_more ||
3300                    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3301                        writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3302                        /* we need this if more than one processor can write to
3303                         * our tail at a time, it synchronizes IO on IA64/Altix
3304                         * systems
3305                         */
3306                        mmiowb();
3307                }
3308        } else {
3309                dev_kfree_skb_any(skb);
3310                tx_ring->buffer_info[first].time_stamp = 0;
3311                tx_ring->next_to_use = first;
3312        }
3313
3314        return NETDEV_TX_OK;
3315}
3316
3317#define NUM_REGS 38 /* 1 based count */
3318static void e1000_regdump(struct e1000_adapter *adapter)
3319{
3320        struct e1000_hw *hw = &adapter->hw;
3321        u32 regs[NUM_REGS];
3322        u32 *regs_buff = regs;
3323        int i = 0;
3324
3325        static const char * const reg_name[] = {
3326                "CTRL",  "STATUS",
3327                "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3328                "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3329                "TIDV", "TXDCTL", "TADV", "TARC0",
3330                "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3331                "TXDCTL1", "TARC1",
3332                "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3333                "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3334                "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3335        };
3336
3337        regs_buff[0]  = er32(CTRL);
3338        regs_buff[1]  = er32(STATUS);
3339
3340        regs_buff[2]  = er32(RCTL);
3341        regs_buff[3]  = er32(RDLEN);
3342        regs_buff[4]  = er32(RDH);
3343        regs_buff[5]  = er32(RDT);
3344        regs_buff[6]  = er32(RDTR);
3345
3346        regs_buff[7]  = er32(TCTL);
3347        regs_buff[8]  = er32(TDBAL);
3348        regs_buff[9]  = er32(TDBAH);
3349        regs_buff[10] = er32(TDLEN);
3350        regs_buff[11] = er32(TDH);
3351        regs_buff[12] = er32(TDT);
3352        regs_buff[13] = er32(TIDV);
3353        regs_buff[14] = er32(TXDCTL);
3354        regs_buff[15] = er32(TADV);
3355        regs_buff[16] = er32(TARC0);
3356
3357        regs_buff[17] = er32(TDBAL1);
3358        regs_buff[18] = er32(TDBAH1);
3359        regs_buff[19] = er32(TDLEN1);
3360        regs_buff[20] = er32(TDH1);
3361        regs_buff[21] = er32(TDT1);
3362        regs_buff[22] = er32(TXDCTL1);
3363        regs_buff[23] = er32(TARC1);
3364        regs_buff[24] = er32(CTRL_EXT);
3365        regs_buff[25] = er32(ERT);
3366        regs_buff[26] = er32(RDBAL0);
3367        regs_buff[27] = er32(RDBAH0);
3368        regs_buff[28] = er32(TDFH);
3369        regs_buff[29] = er32(TDFT);
3370        regs_buff[30] = er32(TDFHS);
3371        regs_buff[31] = er32(TDFTS);
3372        regs_buff[32] = er32(TDFPC);
3373        regs_buff[33] = er32(RDFH);
3374        regs_buff[34] = er32(RDFT);
3375        regs_buff[35] = er32(RDFHS);
3376        regs_buff[36] = er32(RDFTS);
3377        regs_buff[37] = er32(RDFPC);
3378
3379        pr_info("Register dump\n");
3380        for (i = 0; i < NUM_REGS; i++)
3381                pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3382}
3383
3384/**
3385 * e1000_dump - Print registers, Tx ring and Rx ring
3386 **/
3387static void e1000_dump(struct e1000_adapter *adapter)
3388{
3389        /* this code doesn't handle multiple rings */
3390        struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3391        struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3392        int i;
3393
3394        if (!netif_msg_hw(adapter))
3395                return;
3396
3397        /* Print Registers */
3398        e1000_regdump(adapter);
3399
3400        /* transmit dump */
3401        pr_info("TX Desc ring0 dump\n");
3402
3403        /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3404         *
3405         * Legacy Transmit Descriptor
3406         *   +--------------------------------------------------------------+
3407         * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3408         *   +--------------------------------------------------------------+
3409         * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3410         *   +--------------------------------------------------------------+
3411         *   63       48 47        36 35    32 31     24 23    16 15        0
3412         *
3413         * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3414         *   63      48 47    40 39       32 31             16 15    8 7      0
3415         *   +----------------------------------------------------------------+
3416         * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3417         *   +----------------------------------------------------------------+
3418         * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3419         *   +----------------------------------------------------------------+
3420         *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3421         *
3422         * Extended Data Descriptor (DTYP=0x1)
3423         *   +----------------------------------------------------------------+
3424         * 0 |                     Buffer Address [63:0]                      |
3425         *   +----------------------------------------------------------------+
3426         * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3427         *   +----------------------------------------------------------------+
3428         *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3429         */
3430        pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3431        pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3432
3433        if (!netif_msg_tx_done(adapter))
3434                goto rx_ring_summary;
3435
3436        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3437                struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3438                struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3439                struct my_u { __le64 a; __le64 b; };
3440                struct my_u *u = (struct my_u *)tx_desc;
3441                const char *type;
3442
3443                if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3444                        type = "NTC/U";
3445                else if (i == tx_ring->next_to_use)
3446                        type = "NTU";
3447                else if (i == tx_ring->next_to_clean)
3448                        type = "NTC";
3449                else
3450                        type = "";
3451
3452                pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3453                        ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3454                        le64_to_cpu(u->a), le64_to_cpu(u->b),
3455                        (u64)buffer_info->dma, buffer_info->length,
3456                        buffer_info->next_to_watch,
3457                        (u64)buffer_info->time_stamp, buffer_info->skb, type);
3458        }
3459
3460rx_ring_summary:
3461        /* receive dump */
3462        pr_info("\nRX Desc ring dump\n");
3463
3464        /* Legacy Receive Descriptor Format
3465         *
3466         * +-----------------------------------------------------+
3467         * |                Buffer Address [63:0]                |
3468         * +-----------------------------------------------------+
3469         * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3470         * +-----------------------------------------------------+
3471         * 63       48 47    40 39      32 31         16 15      0
3472         */
3473        pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3474
3475        if (!netif_msg_rx_status(adapter))
3476                goto exit;
3477
3478        for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3479                struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3480                struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3481                struct my_u { __le64 a; __le64 b; };
3482                struct my_u *u = (struct my_u *)rx_desc;
3483                const char *type;
3484
3485                if (i == rx_ring->next_to_use)
3486                        type = "NTU";
3487                else if (i == rx_ring->next_to_clean)
3488                        type = "NTC";
3489                else
3490                        type = "";
3491
3492                pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3493                        i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3494                        (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3495        } /* for */
3496
3497        /* dump the descriptor caches */
3498        /* rx */
3499        pr_info("Rx descriptor cache in 64bit format\n");
3500        for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3501                pr_info("R%04X: %08X|%08X %08X|%08X\n",
3502                        i,
3503                        readl(adapter->hw.hw_addr + i+4),
3504                        readl(adapter->hw.hw_addr + i),
3505                        readl(adapter->hw.hw_addr + i+12),
3506                        readl(adapter->hw.hw_addr + i+8));
3507        }
3508        /* tx */
3509        pr_info("Tx descriptor cache in 64bit format\n");
3510        for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3511                pr_info("T%04X: %08X|%08X %08X|%08X\n",
3512                        i,
3513                        readl(adapter->hw.hw_addr + i+4),
3514                        readl(adapter->hw.hw_addr + i),
3515                        readl(adapter->hw.hw_addr + i+12),
3516                        readl(adapter->hw.hw_addr + i+8));
3517        }
3518exit:
3519        return;
3520}
3521
3522/**
3523 * e1000_tx_timeout - Respond to a Tx Hang
3524 * @netdev: network interface device structure
3525 **/
3526static void e1000_tx_timeout(struct net_device *netdev)
3527{
3528        struct e1000_adapter *adapter = netdev_priv(netdev);
3529
3530        /* Do the reset outside of interrupt context */
3531        adapter->tx_timeout_count++;
3532        schedule_work(&adapter->reset_task);
3533}
3534
3535static void e1000_reset_task(struct work_struct *work)
3536{
3537        struct e1000_adapter *adapter =
3538                container_of(work, struct e1000_adapter, reset_task);
3539
3540        e_err(drv, "Reset adapter\n");
3541        e1000_reinit_locked(adapter);
3542}
3543
3544/**
3545 * e1000_change_mtu - Change the Maximum Transfer Unit
3546 * @netdev: network interface device structure
3547 * @new_mtu: new value for maximum frame size
3548 *
3549 * Returns 0 on success, negative on failure
3550 **/
3551static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3552{
3553        struct e1000_adapter *adapter = netdev_priv(netdev);
3554        struct e1000_hw *hw = &adapter->hw;
3555        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3556
3557        /* Adapter-specific max frame size limits. */
3558        switch (hw->mac_type) {
3559        case e1000_undefined ... e1000_82542_rev2_1:
3560                if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3561                        e_err(probe, "Jumbo Frames not supported.\n");
3562                        return -EINVAL;
3563                }
3564                break;
3565        default:
3566                /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3567                break;
3568        }
3569
3570        while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3571                msleep(1);
3572        /* e1000_down has a dependency on max_frame_size */
3573        hw->max_frame_size = max_frame;
3574        if (netif_running(netdev)) {
3575                /* prevent buffers from being reallocated */
3576                adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3577                e1000_down(adapter);
3578        }
3579
3580        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3581         * means we reserve 2 more; this pushes us to allocate from the next
3582         * larger slab size,
3583         * e.g. RXBUFFER_2048 --> size-4096 slab.
3584         * However, with the new *_jumbo_rx* routines, jumbo receives will
3585         * use fragmented skbs.
3586         */
3587
3588        if (max_frame <= E1000_RXBUFFER_2048)
3589                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3590        else
3591#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3592                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3593#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3594                adapter->rx_buffer_len = PAGE_SIZE;
3595#endif
3596
3597        /* adjust allocation if LPE protects us, and we aren't using SBP */
3598        if (!hw->tbi_compatibility_on &&
3599            ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3600             (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3601                adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3602
3603        pr_info("%s changing MTU from %d to %d\n",
3604                netdev->name, netdev->mtu, new_mtu);
3605        netdev->mtu = new_mtu;
3606
3607        if (netif_running(netdev))
3608                e1000_up(adapter);
3609        else
3610                e1000_reset(adapter);
3611
3612        clear_bit(__E1000_RESETTING, &adapter->flags);
3613
3614        return 0;
3615}
3616
3617/**
3618 * e1000_update_stats - Update the board statistics counters
3619 * @adapter: board private structure
3620 **/
3621void e1000_update_stats(struct e1000_adapter *adapter)
3622{
3623        struct net_device *netdev = adapter->netdev;
3624        struct e1000_hw *hw = &adapter->hw;
3625        struct pci_dev *pdev = adapter->pdev;
3626        unsigned long flags;
3627        u16 phy_tmp;
3628
3629#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3630
3631        /* Prevent stats update while adapter is being reset, or if the pci
3632         * connection is down.
3633         */
3634        if (adapter->link_speed == 0)
3635                return;
3636        if (pci_channel_offline(pdev))
3637                return;
3638
3639        spin_lock_irqsave(&adapter->stats_lock, flags);
3640
3641        /* these counters are modified from e1000_tbi_adjust_stats,
3642         * called from the interrupt context, so they must only
3643         * be written while holding adapter->stats_lock
3644         */
3645
3646        adapter->stats.crcerrs += er32(CRCERRS);
3647        adapter->stats.gprc += er32(GPRC);
3648        adapter->stats.gorcl += er32(GORCL);
3649        adapter->stats.gorch += er32(GORCH);
3650        adapter->stats.bprc += er32(BPRC);
3651        adapter->stats.mprc += er32(MPRC);
3652        adapter->stats.roc += er32(ROC);
3653
3654        adapter->stats.prc64 += er32(PRC64);
3655        adapter->stats.prc127 += er32(PRC127);
3656        adapter->stats.prc255 += er32(PRC255);
3657        adapter->stats.prc511 += er32(PRC511);
3658        adapter->stats.prc1023 += er32(PRC1023);
3659        adapter->stats.prc1522 += er32(PRC1522);
3660
3661        adapter->stats.symerrs += er32(SYMERRS);
3662        adapter->stats.mpc += er32(MPC);
3663        adapter->stats.scc += er32(SCC);
3664        adapter->stats.ecol += er32(ECOL);
3665        adapter->stats.mcc += er32(MCC);
3666        adapter->stats.latecol += er32(LATECOL);
3667        adapter->stats.dc += er32(DC);
3668        adapter->stats.sec += er32(SEC);
3669        adapter->stats.rlec += er32(RLEC);
3670        adapter->stats.xonrxc += er32(XONRXC);
3671        adapter->stats.xontxc += er32(XONTXC);
3672        adapter->stats.xoffrxc += er32(XOFFRXC);
3673        adapter->stats.xofftxc += er32(XOFFTXC);
3674        adapter->stats.fcruc += er32(FCRUC);
3675        adapter->stats.gptc += er32(GPTC);
3676        adapter->stats.gotcl += er32(GOTCL);
3677        adapter->stats.gotch += er32(GOTCH);
3678        adapter->stats.rnbc += er32(RNBC);
3679        adapter->stats.ruc += er32(RUC);
3680        adapter->stats.rfc += er32(RFC);
3681        adapter->stats.rjc += er32(RJC);
3682        adapter->stats.torl += er32(TORL);
3683        adapter->stats.torh += er32(TORH);
3684        adapter->stats.totl += er32(TOTL);
3685        adapter->stats.toth += er32(TOTH);
3686        adapter->stats.tpr += er32(TPR);
3687
3688        adapter->stats.ptc64 += er32(PTC64);
3689        adapter->stats.ptc127 += er32(PTC127);
3690        adapter->stats.ptc255 += er32(PTC255);
3691        adapter->stats.ptc511 += er32(PTC511);
3692        adapter->stats.ptc1023 += er32(PTC1023);
3693        adapter->stats.ptc1522 += er32(PTC1522);
3694
3695        adapter->stats.mptc += er32(MPTC);
3696        adapter->stats.bptc += er32(BPTC);
3697
3698        /* used for adaptive IFS */
3699
3700        hw->tx_packet_delta = er32(TPT);
3701        adapter->stats.tpt += hw->tx_packet_delta;
3702        hw->collision_delta = er32(COLC);
3703        adapter->stats.colc += hw->collision_delta;
3704
3705        if (hw->mac_type >= e1000_82543) {
3706                adapter->stats.algnerrc += er32(ALGNERRC);
3707                adapter->stats.rxerrc += er32(RXERRC);
3708                adapter->stats.tncrs += er32(TNCRS);
3709                adapter->stats.cexterr += er32(CEXTERR);
3710                adapter->stats.tsctc += er32(TSCTC);
3711                adapter->stats.tsctfc += er32(TSCTFC);
3712        }
3713
3714        /* Fill out the OS statistics structure */
3715        netdev->stats.multicast = adapter->stats.mprc;
3716        netdev->stats.collisions = adapter->stats.colc;
3717
3718        /* Rx Errors */
3719
3720        /* RLEC on some newer hardware can be incorrect so build
3721         * our own version based on RUC and ROC
3722         */
3723        netdev->stats.rx_errors = adapter->stats.rxerrc +
3724                adapter->stats.crcerrs + adapter->stats.algnerrc +
3725                adapter->stats.ruc + adapter->stats.roc +
3726                adapter->stats.cexterr;
3727        adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3728        netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3729        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3730        netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3731        netdev->stats.rx_missed_errors = adapter->stats.mpc;
3732
3733        /* Tx Errors */
3734        adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3735        netdev->stats.tx_errors = adapter->stats.txerrc;
3736        netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3737        netdev->stats.tx_window_errors = adapter->stats.latecol;
3738        netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3739        if (hw->bad_tx_carr_stats_fd &&
3740            adapter->link_duplex == FULL_DUPLEX) {
3741                netdev->stats.tx_carrier_errors = 0;
3742                adapter->stats.tncrs = 0;
3743        }
3744
3745        /* Tx Dropped needs to be maintained elsewhere */
3746
3747        /* Phy Stats */
3748        if (hw->media_type == e1000_media_type_copper) {
3749                if ((adapter->link_speed == SPEED_1000) &&
3750                   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3751                        phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3752                        adapter->phy_stats.idle_errors += phy_tmp;
3753                }
3754
3755                if ((hw->mac_type <= e1000_82546) &&
3756                   (hw->phy_type == e1000_phy_m88) &&
3757                   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3758                        adapter->phy_stats.receive_errors += phy_tmp;
3759        }
3760
3761        /* Management Stats */
3762        if (hw->has_smbus) {
3763                adapter->stats.mgptc += er32(MGTPTC);
3764                adapter->stats.mgprc += er32(MGTPRC);
3765                adapter->stats.mgpdc += er32(MGTPDC);
3766        }
3767
3768        spin_unlock_irqrestore(&adapter->stats_lock, flags);
3769}
3770
3771/**
3772 * e1000_intr - Interrupt Handler
3773 * @irq: interrupt number
3774 * @data: pointer to a network interface device structure
3775 **/
3776static irqreturn_t e1000_intr(int irq, void *data)
3777{
3778        struct net_device *netdev = data;
3779        struct e1000_adapter *adapter = netdev_priv(netdev);
3780        struct e1000_hw *hw = &adapter->hw;
3781        u32 icr = er32(ICR);
3782
3783        if (unlikely((!icr)))
3784                return IRQ_NONE;  /* Not our interrupt */
3785
3786        /* We might have caused the interrupt, but the above read
3787         * cleared it.  If the driver is going down there is nothing
3788         * to do, so return IRQ_HANDLED.
3789         */
3790        if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3791                return IRQ_HANDLED;
3792
3793        if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3794                hw->get_link_status = 1;
3795                /* guard against interrupt when we're going down */
3796                if (!test_bit(__E1000_DOWN, &adapter->flags))
3797                        schedule_delayed_work(&adapter->watchdog_task, 1);
3798        }
3799
3800        /* disable interrupts, without the synchronize_irq bit */
3801        ew32(IMC, ~0);
3802        E1000_WRITE_FLUSH();
3803
3804        if (likely(napi_schedule_prep(&adapter->napi))) {
3805                adapter->total_tx_bytes = 0;
3806                adapter->total_tx_packets = 0;
3807                adapter->total_rx_bytes = 0;
3808                adapter->total_rx_packets = 0;
3809                __napi_schedule(&adapter->napi);
3810        } else {
3811                /* this really should not happen! if it does it is basically a
3812                 * bug, but not a hard error, so enable ints and continue
3813                 */
3814                if (!test_bit(__E1000_DOWN, &adapter->flags))
3815                        e1000_irq_enable(adapter);
3816        }
3817
3818        return IRQ_HANDLED;
3819}
3820
3821/**
3822 * e1000_clean - NAPI Rx polling callback
3823 * @napi: napi struct embedded in the board private structure
3824 **/
3825static int e1000_clean(struct napi_struct *napi, int budget)
3826{
3827        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3828                                                     napi);
3829        int tx_clean_complete = 0, work_done = 0;
3830
3831        tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3832
3833        adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3834
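            /* Tx cleanup incomplete: claim the full budget so NAPI keeps polling */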
3835        if (!tx_clean_complete)
3836                work_done = budget;
3837
3838        /* If budget not fully consumed, exit the polling mode */
3839        if (work_done < budget) {
3840                if (likely(adapter->itr_setting & 3))
3841                        e1000_set_itr(adapter);
3842                napi_complete_done(napi, work_done);
3843                if (!test_bit(__E1000_DOWN, &adapter->flags))
3844                        e1000_irq_enable(adapter);
3845        }
3846
3847        return work_done;
3848}
3849
3850/**
3851 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3852 * @adapter: board private structure
3853 **/
3854static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3855                               struct e1000_tx_ring *tx_ring)
3856{
3857        struct e1000_hw *hw = &adapter->hw;
3858        struct net_device *netdev = adapter->netdev;
3859        struct e1000_tx_desc *tx_desc, *eop_desc;
3860        struct e1000_tx_buffer *buffer_info;
3861        unsigned int i, eop;
3862        unsigned int count = 0;
3863        unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3864        unsigned int bytes_compl = 0, pkts_compl = 0;
3865
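            /* next_to_watch of the oldest pending packet points at its last
             * descriptor; DD set there means the whole packet has completed
             */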
3866        i = tx_ring->next_to_clean;
3867        eop = tx_ring->buffer_info[i].next_to_watch;
3868        eop_desc = E1000_TX_DESC(*tx_ring, eop);
3869
3870        while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3871               (count < tx_ring->count)) {
3872                bool cleaned = false;
3873                dma_rmb();      /* read buffer_info after eop_desc */
3874                for ( ; !cleaned; count++) {
3875                        tx_desc = E1000_TX_DESC(*tx_ring, i);
3876                        buffer_info = &tx_ring->buffer_info[i];
3877                        cleaned = (i == eop);
3878
3879                        if (cleaned) {
3880                                total_tx_packets += buffer_info->segs;
3881                                total_tx_bytes += buffer_info->bytecount;
3882                                if (buffer_info->skb) {
3883                                        bytes_compl += buffer_info->skb->len;
3884                                        pkts_compl++;
3885                                }
3886
3887                        }
3888                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3889                        tx_desc->upper.data = 0;
3890
3891                        if (unlikely(++i == tx_ring->count))
3892                                i = 0;
3893                }
3894
3895                eop = tx_ring->buffer_info[i].next_to_watch;
3896                eop_desc = E1000_TX_DESC(*tx_ring, eop);
3897        }
3898
3899        /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3900         * which will reuse the cleaned buffers.
3901         */
3902        smp_store_release(&tx_ring->next_to_clean, i);
3903
3904        netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3905
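    /* minimum number of free descriptors required before waking the queue */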
3906#define TX_WAKE_THRESHOLD 32
3907        if (unlikely(count && netif_carrier_ok(netdev) &&
3908                     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3909                /* Make sure that anybody stopping the queue after this
3910                 * sees the new next_to_clean.
3911                 */
3912                smp_mb();
3913
3914                if (netif_queue_stopped(netdev) &&
3915                    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3916                        netif_wake_queue(netdev);
3917                        ++adapter->restart_queue;
3918                }
3919        }
3920
3921        if (adapter->detect_tx_hung) {
3922                /* Detect a transmit hang in hardware.  This serializes the
3923                 * check with the clearing of time_stamp and the movement of i.
3924                 */
3925                adapter->detect_tx_hung = false;
3926                if (tx_ring->buffer_info[eop].time_stamp &&
3927                    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3928                               (adapter->tx_timeout_factor * HZ)) &&
3929                    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3930
3931                        /* detected Tx unit hang */
3932                        e_err(drv, "Detected Tx Unit Hang\n"
3933                              "  Tx Queue             <%lu>\n"
3934                              "  TDH                  <%x>\n"
3935                              "  TDT                  <%x>\n"
3936                              "  next_to_use          <%x>\n"
3937                              "  next_to_clean        <%x>\n"
3938                              "buffer_info[next_to_clean]\n"
3939                              "  time_stamp           <%lx>\n"
3940                              "  next_to_watch        <%x>\n"
3941                              "  jiffies              <%lx>\n"
3942                              "  next_to_watch.status <%x>\n",
3943                                (unsigned long)(tx_ring - adapter->tx_ring),
3944                                readl(hw->hw_addr + tx_ring->tdh),
3945                                readl(hw->hw_addr + tx_ring->tdt),
3946                                tx_ring->next_to_use,
3947                                tx_ring->next_to_clean,
3948                                tx_ring->buffer_info[eop].time_stamp,
3949                                eop,
3950                                jiffies,
3951                                eop_desc->upper.fields.status);
3952                        e1000_dump(adapter);
3953                        netif_stop_queue(netdev);
3954                }
3955        }
3956        adapter->total_tx_bytes += total_tx_bytes;
3957        adapter->total_tx_packets += total_tx_packets;
3958        netdev->stats.tx_bytes += total_tx_bytes;
3959        netdev->stats.tx_packets += total_tx_packets;
3960        return count < tx_ring->count;
3961}
3962
3963/**
3964 * e1000_rx_checksum - Receive Checksum Offload for 82543
3965 * @adapter:     board private structure
3966 * @status_err:  receive descriptor status and error fields
3967 * @csum:        receive descriptor csum field
3968 * @skb:         socket buffer with received data
3969 **/
3970static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3971                              u32 csum, struct sk_buff *skb)
3972{
3973        struct e1000_hw *hw = &adapter->hw;
3974        u16 status = (u16)status_err;
3975        u8 errors = (u8)(status_err >> 24);
3976
3977        skb_checksum_none_assert(skb);
3978
3979        /* 82543 or newer only */
3980        if (unlikely(hw->mac_type < e1000_82543))
3981                return;
3982        /* Ignore Checksum bit is set */
3983        if (unlikely(status & E1000_RXD_STAT_IXSM))
3984                return;
3985        /* TCP/UDP checksum error bit is set */
3986        if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3987                /* let the stack verify checksum errors */
3988                adapter->hw_csum_err++;
3989                return;
3990        }
3991        /* TCP/UDP Checksum has not been calculated */
3992        if (!(status & E1000_RXD_STAT_TCPCS))
3993                return;
3994
3995        /* It must be a TCP or UDP packet with a valid checksum */
3996        if (likely(status & E1000_RXD_STAT_TCPCS)) {
3997                /* TCP checksum is good */
3998                skb->ip_summed = CHECKSUM_UNNECESSARY;
3999        }
4000        adapter->hw_csum_good++;
4001}
4002
4003/**
4004 * e1000_consume_page - helper function for jumbo Rx path
4005 **/
4006static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
4007                               u16 length)
4008{
4009        bi->rxbuf.page = NULL;
4010        skb->len += length;
4011        skb->data_len += length;
4012        skb->truesize += PAGE_SIZE;
4013}
4014
4015/**
4016 * e1000_receive_skb - helper function to handle rx indications
4017 * @adapter: board private structure
4018 * @status: descriptor status field as written by hardware
4019 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4020 * @skb: pointer to sk_buff to be indicated to stack
4021 */
4022static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4023                              __le16 vlan, struct sk_buff *skb)
4024{
4025        skb->protocol = eth_type_trans(skb, adapter->netdev);
4026
4027        if (status & E1000_RXD_STAT_VP) {
4028                u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4029
4030                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4031        }
4032        napi_gro_receive(&adapter->napi, skb);
4033}
4034
4035/**
4036 * e1000_tbi_adjust_stats - adjust the statistic counters when a frame
4037 *                          is accepted by TBI_ACCEPT
4038 * @hw: Struct containing variables accessed by shared code
4039 * @stats: hardware statistics structure to be adjusted
4040 * @frame_len: The length of the frame in question
4041 * @mac_addr: The Ethernet destination address of the frame in question
4042 */
4043static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4044                                   struct e1000_hw_stats *stats,
4045                                   u32 frame_len, const u8 *mac_addr)
4046{
4047        u64 carry_bit;
4048
4049        /* First adjust the frame length. */
4050        frame_len--;
4051        /* We need to adjust the statistics counters, since the hardware
4052         * counters overcount this packet as a CRC error and undercount
4053         * the packet as a good packet
4054         */
4055        /* This packet should not be counted as a CRC error. */
4056        stats->crcerrs--;
4057        /* This packet does count as a Good Packet Received. */
4058        stats->gprc++;
4059
4060        /* Adjust the Good Octets received counters */
4061        carry_bit = 0x80000000 & stats->gorcl;
4062        stats->gorcl += frame_len;
4063        /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4064         * Received Count) was one before the addition,
4065         * AND it is zero after, then we lost the carry out,
4066         * need to add one to Gorch (Good Octets Received Count High).
4067         * This could be simplified if all environments supported
4068         * 64-bit integers.
4069         */
4070        if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4071                stats->gorch++;
4072        /* Is this a broadcast or multicast?  Check broadcast first,
4073         * since the test for a multicast frame will test positive on
4074         * a broadcast frame.
4075         */
4076        if (is_broadcast_ether_addr(mac_addr))
4077                stats->bprc++;
4078        else if (is_multicast_ether_addr(mac_addr))
4079                stats->mprc++;
4080
4081        if (frame_len == hw->max_frame_size) {
4082                /* In this case, the hardware has overcounted the number of
4083                 * oversize frames.
4084                 */
4085                if (stats->roc > 0)
4086                        stats->roc--;
4087        }
4088
4089        /* Adjust the bin counters when the extra byte put the frame in the
4090         * wrong bin. Remember that the frame_len was adjusted above.
4091         */
4092        if (frame_len == 64) {
4093                stats->prc64++;
4094                stats->prc127--;
4095        } else if (frame_len == 127) {
4096                stats->prc127++;
4097                stats->prc255--;
4098        } else if (frame_len == 255) {
4099                stats->prc255++;
4100                stats->prc511--;
4101        } else if (frame_len == 511) {
4102                stats->prc511++;
4103                stats->prc1023--;
4104        } else if (frame_len == 1023) {
4105                stats->prc1023++;
4106                stats->prc1522--;
4107        } else if (frame_len == 1522) {
4108                stats->prc1522++;
4109        }
4110}
4111
4112static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4113                                    u8 status, u8 errors,
4114                                    u32 length, const u8 *data)
4115{
4116        struct e1000_hw *hw = &adapter->hw;
4117        u8 last_byte = *(data + length - 1);
4118
4119        if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4120                unsigned long irq_flags;
4121
4122                spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4123                e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4124                spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4125
4126                return true;
4127        }
4128
4129        return false;
4130}
4131
4132static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4133                                          unsigned int bufsz)
4134{
4135        struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4136
4137        if (unlikely(!skb))
4138                adapter->alloc_rx_buff_failed++;
4139        return skb;
4140}
4141
4142/**
4143 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4144 * @adapter: board private structure
4145 * @rx_ring: ring to clean
4146 * @work_done: amount of napi work completed this call
4147 * @work_to_do: max amount of work allowed for this call to do
4148 *
4149 * the return value indicates whether actual cleaning was done, there
4150 * is no guarantee that everything was cleaned
4151 */
4152static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4153                                     struct e1000_rx_ring *rx_ring,
4154                                     int *work_done, int work_to_do)
4155{
4156        struct net_device *netdev = adapter->netdev;
4157        struct pci_dev *pdev = adapter->pdev;
4158        struct e1000_rx_desc *rx_desc, *next_rxd;
4159        struct e1000_rx_buffer *buffer_info, *next_buffer;
4160        u32 length;
4161        unsigned int i;
4162        int cleaned_count = 0;
4163        bool cleaned = false;
4164        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4165
4166        i = rx_ring->next_to_clean;
4167        rx_desc = E1000_RX_DESC(*rx_ring, i);
4168        buffer_info = &rx_ring->buffer_info[i];
4169
4170        while (rx_desc->status & E1000_RXD_STAT_DD) {
4171                struct sk_buff *skb;
4172                u8 status;
4173
4174                if (*work_done >= work_to_do)
4175                        break;
4176                (*work_done)++;
4177                dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4178
4179                status = rx_desc->status;
4180
4181                if (++i == rx_ring->count)
4182                        i = 0;
4183
4184                next_rxd = E1000_RX_DESC(*rx_ring, i);
4185                prefetch(next_rxd);
4186
4187                next_buffer = &rx_ring->buffer_info[i];
4188
4189                cleaned = true;
4190                cleaned_count++;
4191                dma_unmap_page(&pdev->dev, buffer_info->dma,
4192                               adapter->rx_buffer_len, DMA_FROM_DEVICE);
4193                buffer_info->dma = 0;
4194
4195                length = le16_to_cpu(rx_desc->length);
4196
4197                /* errors is only valid for DD + EOP descriptors */
4198                if (unlikely((status & E1000_RXD_STAT_EOP) &&
4199                    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4200                        u8 *mapped = page_address(buffer_info->rxbuf.page);
4201
4202                        if (e1000_tbi_should_accept(adapter, status,
4203                                                    rx_desc->errors,
4204                                                    length, mapped)) {
4205                                length--;
4206                        } else if (netdev->features & NETIF_F_RXALL) {
4207                                goto process_skb;
4208                        } else {
4209                                /* an error means any chain goes out the window
4210                                 * too
4211                                 */
4212                                if (rx_ring->rx_skb_top)
4213                                        dev_kfree_skb(rx_ring->rx_skb_top);
4214                                rx_ring->rx_skb_top = NULL;
4215                                goto next_desc;
4216                        }
4217                }
4218
4219#define rxtop rx_ring->rx_skb_top
4220process_skb:
4221                if (!(status & E1000_RXD_STAT_EOP)) {
4222                        /* this descriptor is only the beginning (or middle) */
4223                        if (!rxtop) {
4224                                /* this is the beginning of a chain */
4225                                rxtop = napi_get_frags(&adapter->napi);
4226                                if (!rxtop)
4227                                        break;
4228
4229                                skb_fill_page_desc(rxtop, 0,
4230                                                   buffer_info->rxbuf.page,
4231                                                   0, length);
4232                        } else {
4233                                /* this is the middle of a chain */
4234                                skb_fill_page_desc(rxtop,
4235                                    skb_shinfo(rxtop)->nr_frags,
4236                                    buffer_info->rxbuf.page, 0, length);
4237                        }
4238                        e1000_consume_page(buffer_info, rxtop, length);
4239                        goto next_desc;
4240                } else {
4241                        if (rxtop) {
4242                                /* end of the chain */
4243                                skb_fill_page_desc(rxtop,
4244                                    skb_shinfo(rxtop)->nr_frags,
4245                                    buffer_info->rxbuf.page, 0, length);
4246                                skb = rxtop;
4247                                rxtop = NULL;
4248                                e1000_consume_page(buffer_info, skb, length);
4249                        } else {
4250                                struct page *p;
4251                                /* no chain, got EOP: this buffer is the whole
4252                                 * packet; copybreak saves the put_page/alloc_page
4253                                 */
4254                                p = buffer_info->rxbuf.page;
4255                                if (length <= copybreak) {
4256                                        u8 *vaddr;
4257
4258                                        if (likely(!(netdev->features & NETIF_F_RXFCS)))
4259                                                length -= 4;
4260                                        skb = e1000_alloc_rx_skb(adapter,
4261                                                                 length);
4262                                        if (!skb)
4263                                                break;
4264
4265                                        vaddr = kmap_atomic(p);
4266                                        memcpy(skb_tail_pointer(skb), vaddr,
4267                                               length);
4268                                        kunmap_atomic(vaddr);
4269                                        /* re-use the page, so don't erase
4270                                         * buffer_info->rxbuf.page
4271                                         */
4272                                        skb_put(skb, length);
4273                                        e1000_rx_checksum(adapter,
4274                                                          status | rx_desc->errors << 24,
4275                                                          le16_to_cpu(rx_desc->csum), skb);
4276
4277                                        total_rx_bytes += skb->len;
4278                                        total_rx_packets++;
4279
4280                                        e1000_receive_skb(adapter, status,
4281                                                          rx_desc->special, skb);
4282                                        goto next_desc;
4283                                } else {
4284                                        skb = napi_get_frags(&adapter->napi);
4285                                        if (!skb) {
4286                                                adapter->alloc_rx_buff_failed++;
4287                                                break;
4288                                        }
4289                                        skb_fill_page_desc(skb, 0, p, 0,
4290                                                           length);
4291                                        e1000_consume_page(buffer_info, skb,
4292                                                           length);
4293                                }
4294                        }
4295                }
4296
4297                /* Receive Checksum Offload XXX recompute due to CRC strip? */
4298                e1000_rx_checksum(adapter,
4299                                  (u32)(status) |
4300                                  ((u32)(rx_desc->errors) << 24),
4301                                  le16_to_cpu(rx_desc->csum), skb);
4302
4303                total_rx_bytes += (skb->len - 4); /* don't count FCS */
4304                if (likely(!(netdev->features & NETIF_F_RXFCS)))
4305                        pskb_trim(skb, skb->len - 4);
4306                total_rx_packets++;
4307
4308                if (status & E1000_RXD_STAT_VP) {
4309                        __le16 vlan = rx_desc->special;
4310                        u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4311
4312                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4313                }
4314
4315                napi_gro_frags(&adapter->napi);
4316
4317next_desc:
4318                rx_desc->status = 0;
4319
4320                /* return some buffers to hardware, one at a time is too slow */
4321                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4322                        adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4323                        cleaned_count = 0;
4324                }
4325
4326                /* use prefetched values */
4327                rx_desc = next_rxd;
4328                buffer_info = next_buffer;
4329        }
4330        rx_ring->next_to_clean = i;
4331
4332        cleaned_count = E1000_DESC_UNUSED(rx_ring);
4333        if (cleaned_count)
4334                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4335
4336        adapter->total_rx_packets += total_rx_packets;
4337        adapter->total_rx_bytes += total_rx_bytes;
4338        netdev->stats.rx_bytes += total_rx_bytes;
4339        netdev->stats.rx_packets += total_rx_packets;
4340        return cleaned;
4341}
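
/* Note on the jumbo receive path above: each descriptor maps a whole
 * page, and a frame larger than one buffer spans several descriptors.
 * Fragments are collected on rx_ring->rx_skb_top via skb_fill_page_desc()
 * until a descriptor with E1000_RXD_STAT_EOP arrives, and the assembled
 * skb is then handed to GRO with napi_gro_frags().  Only frames at or
 * below 'copybreak' are copied into a freshly allocated skb so that the
 * page can be reused immediately.
 */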
4342
4343/* this should improve performance for small packets with large amounts
4344 * of reassembly being done in the stack
4345 */
4346static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4347                                       struct e1000_rx_buffer *buffer_info,
4348                                       u32 length, const void *data)
4349{
4350        struct sk_buff *skb;
4351
4352        if (length > copybreak)
4353                return NULL;
4354
4355        skb = e1000_alloc_rx_skb(adapter, length);
4356        if (!skb)
4357                return NULL;
4358
4359        dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4360                                length, DMA_FROM_DEVICE);
4361
4362        skb_put_data(skb, data, length);
4363
4364        return skb;
4365}
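
/* The 'copybreak' threshold compared against above is defined earlier in
 * this file and, in the in-tree driver, is exposed as a writable module
 * parameter; packets at or below it are copied into a small new skb so
 * the original receive buffer keeps its DMA mapping and can be handed
 * straight back to hardware.  It can typically be inspected or tuned at
 * runtime, e.g.:
 *
 *	# cat /sys/module/e1000/parameters/copybreak
 *	# echo 512 > /sys/module/e1000/parameters/copybreak
 */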
4366
4367/**
4368 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4369 * @adapter: board private structure
4370 * @rx_ring: ring to clean
4371 * @work_done: amount of napi work completed this call
4372 * @work_to_do: max amount of work allowed for this call to do
4373 */
4374static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4375                               struct e1000_rx_ring *rx_ring,
4376                               int *work_done, int work_to_do)
4377{
4378        struct net_device *netdev = adapter->netdev;
4379        struct pci_dev *pdev = adapter->pdev;
4380        struct e1000_rx_desc *rx_desc, *next_rxd;
4381        struct e1000_rx_buffer *buffer_info, *next_buffer;
4382        u32 length;
4383        unsigned int i;
4384        int cleaned_count = 0;
4385        bool cleaned = false;
4386        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4387
4388        i = rx_ring->next_to_clean;
4389        rx_desc = E1000_RX_DESC(*rx_ring, i);
4390        buffer_info = &rx_ring->buffer_info[i];
4391
4392        while (rx_desc->status & E1000_RXD_STAT_DD) {
4393                struct sk_buff *skb;
4394                u8 *data;
4395                u8 status;
4396
4397                if (*work_done >= work_to_do)
4398                        break;
4399                (*work_done)++;
4400                dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4401
4402                status = rx_desc->status;
4403                length = le16_to_cpu(rx_desc->length);
4404
4405                data = buffer_info->rxbuf.data;
4406                prefetch(data);
4407                skb = e1000_copybreak(adapter, buffer_info, length, data);
4408                if (!skb) {
4409                        unsigned int frag_len = e1000_frag_len(adapter);
4410
4411                        skb = build_skb(data - E1000_HEADROOM, frag_len);
4412                        if (!skb) {
4413                                adapter->alloc_rx_buff_failed++;
4414                                break;
4415                        }
4416
4417                        skb_reserve(skb, E1000_HEADROOM);
4418                        dma_unmap_single(&pdev->dev, buffer_info->dma,
4419                                         adapter->rx_buffer_len,
4420                                         DMA_FROM_DEVICE);
4421                        buffer_info->dma = 0;
4422                        buffer_info->rxbuf.data = NULL;
4423                }
4424
4425                if (++i == rx_ring->count)
4426                        i = 0;
4427
4428                next_rxd = E1000_RX_DESC(*rx_ring, i);
4429                prefetch(next_rxd);
4430
4431                next_buffer = &rx_ring->buffer_info[i];
4432
4433                cleaned = true;
4434                cleaned_count++;
4435
4436                /* !EOP means multiple descriptors were used to store a single
4437                 * packet; if that's the case we need to toss it.  In fact, we
4438                 * need to toss every packet with the EOP bit clear and the next
4439                 * frame that _does_ have the EOP bit set, as it is by
4440                 * definition only a frame fragment.
4441                 */
4442                if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4443                        adapter->discarding = true;
4444
4445                if (adapter->discarding) {
4446                        /* All receives must fit into a single buffer */
4447                        netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4448                        dev_kfree_skb(skb);
4449                        if (status & E1000_RXD_STAT_EOP)
4450                                adapter->discarding = false;
4451                        goto next_desc;
4452                }
4453
4454                if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4455                        if (e1000_tbi_should_accept(adapter, status,
4456                                                    rx_desc->errors,
4457                                                    length, data)) {
4458                                length--;
4459                        } else if (netdev->features & NETIF_F_RXALL) {
4460                                goto process_skb;
4461                        } else {
4462                                dev_kfree_skb(skb);
4463                                goto next_desc;
4464                        }
4465                }
4466
4467process_skb:
4468                total_rx_bytes += (length - 4); /* don't count FCS */
4469                total_rx_packets++;
4470
4471                if (likely(!(netdev->features & NETIF_F_RXFCS)))
4472                        /* adjust length to remove Ethernet CRC, this must be
4473                         * done after the TBI_ACCEPT workaround above
4474                         */
4475                        length -= 4;
4476
4477                if (buffer_info->rxbuf.data == NULL)
4478                        skb_put(skb, length);
4479                else /* copybreak skb */
4480                        skb_trim(skb, length);
4481
4482                /* Receive Checksum Offload */
4483                e1000_rx_checksum(adapter,
4484                                  (u32)(status) |
4485                                  ((u32)(rx_desc->errors) << 24),
4486                                  le16_to_cpu(rx_desc->csum), skb);
4487
4488                e1000_receive_skb(adapter, status, rx_desc->special, skb);
4489
4490next_desc:
4491                rx_desc->status = 0;
4492
4493                /* return some buffers to hardware, one at a time is too slow */
4494                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4495                        adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4496                        cleaned_count = 0;
4497                }
4498
4499                /* use prefetched values */
4500                rx_desc = next_rxd;
4501                buffer_info = next_buffer;
4502        }
4503        rx_ring->next_to_clean = i;
4504
4505        cleaned_count = E1000_DESC_UNUSED(rx_ring);
4506        if (cleaned_count)
4507                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4508
4509        adapter->total_rx_packets += total_rx_packets;
4510        adapter->total_rx_bytes += total_rx_bytes;
4511        netdev->stats.rx_bytes += total_rx_bytes;
4512        netdev->stats.rx_packets += total_rx_packets;
4513        return cleaned;
4514}
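
/* Note on the legacy receive path above: the buffer handed to hardware is
 * carved out of a page fragment with E1000_HEADROOM reserved in front of
 * the DMA area (assuming e1000_alloc_frag(), defined earlier in the file,
 * returns a pointer past that headroom, as the build_skb() call here
 * implies).  Roughly:
 *
 *	+-----------------+------------------------------+
 *	| E1000_HEADROOM  | DMA buffer (rx_buffer_len)   |
 *	+-----------------+------------------------------+
 *	^ frag start      ^ buffer_info->rxbuf.data
 *
 * which is why build_skb() is passed (data - E1000_HEADROOM) and the
 * headroom is then re-reserved on the new skb.  Small packets never take
 * this path; they are copied by e1000_copybreak() and the original
 * buffer stays mapped.
 */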
4515
4516/**
4517 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4518 * @adapter: address of board private structure
4519 * @rx_ring: pointer to receive ring structure
4520 * @cleaned_count: number of buffers to allocate this pass
4521 **/
4522static void
4523e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4524                             struct e1000_rx_ring *rx_ring, int cleaned_count)
4525{
4526        struct pci_dev *pdev = adapter->pdev;
4527        struct e1000_rx_desc *rx_desc;
4528        struct e1000_rx_buffer *buffer_info;
4529        unsigned int i;
4530
4531        i = rx_ring->next_to_use;
4532        buffer_info = &rx_ring->buffer_info[i];
4533
4534        while (cleaned_count--) {
4535                /* allocate a new page if necessary */
4536                if (!buffer_info->rxbuf.page) {
4537                        buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4538                        if (unlikely(!buffer_info->rxbuf.page)) {
4539                                adapter->alloc_rx_buff_failed++;
4540                                break;
4541                        }
4542                }
4543
4544                if (!buffer_info->dma) {
4545                        buffer_info->dma = dma_map_page(&pdev->dev,
4546                                                        buffer_info->rxbuf.page, 0,
4547                                                        adapter->rx_buffer_len,
4548                                                        DMA_FROM_DEVICE);
4549                        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4550                                put_page(buffer_info->rxbuf.page);
4551                                buffer_info->rxbuf.page = NULL;
4552                                buffer_info->dma = 0;
4553                                adapter->alloc_rx_buff_failed++;
4554                                break;
4555                        }
4556                }
4557
4558                rx_desc = E1000_RX_DESC(*rx_ring, i);
4559                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4560
4561                if (unlikely(++i == rx_ring->count))
4562                        i = 0;
4563                buffer_info = &rx_ring->buffer_info[i];
4564        }
4565
4566        if (likely(rx_ring->next_to_use != i)) {
4567                rx_ring->next_to_use = i;
4568                if (unlikely(i-- == 0))
4569                        i = (rx_ring->count - 1);
4570
4571                /* Force memory writes to complete before letting h/w
4572                 * know there are new descriptors to fetch.  (Only
4573                 * applicable for weak-ordered memory model archs,
4574                 * such as IA-64).
4575                 */
4576                wmb();
4577                writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4578        }
4579}
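
/* Note on the tail update above: next_to_use is advanced to the first
 * empty slot, while the value written to RDT trails it by one (the i--
 * with wrap), presumably so the descriptor at next_to_use, which software
 * has not refilled yet, is not advertised to hardware.  The wmb() makes
 * sure the descriptor contents are globally visible before the tail write
 * that allows the NIC to fetch them.
 */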
4580
4581/**
4582 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4583 * @adapter: address of board private structure
     * @rx_ring: pointer to receive ring structure
     * @cleaned_count: number of buffers to allocate this pass
4584 **/
4585static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4586                                   struct e1000_rx_ring *rx_ring,
4587                                   int cleaned_count)
4588{
4589        struct e1000_hw *hw = &adapter->hw;
4590        struct pci_dev *pdev = adapter->pdev;
4591        struct e1000_rx_desc *rx_desc;
4592        struct e1000_rx_buffer *buffer_info;
4593        unsigned int i;
4594        unsigned int bufsz = adapter->rx_buffer_len;
4595
4596        i = rx_ring->next_to_use;
4597        buffer_info = &rx_ring->buffer_info[i];
4598
4599        while (cleaned_count--) {
4600                void *data;
4601
4602                if (buffer_info->rxbuf.data)
4603                        goto skip;
4604
4605                data = e1000_alloc_frag(adapter);
4606                if (!data) {
4607                        /* Better luck next round */
4608                        adapter->alloc_rx_buff_failed++;
4609                        break;
4610                }
4611
4612                /* Fix for errata 23, can't cross 64kB boundary */
4613                if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4614                        void *olddata = data;
4615                        e_err(rx_err, "skb align check failed: %u bytes at "
4616                              "%p\n", bufsz, data);
4617                        /* Try again, without freeing the previous */
4618                        data = e1000_alloc_frag(adapter);
4619                        /* Failed allocation, critical failure */
4620                        if (!data) {
4621                                skb_free_frag(olddata);
4622                                adapter->alloc_rx_buff_failed++;
4623                                break;
4624                        }
4625
4626                        if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4627                                /* give up */
4628                                skb_free_frag(data);
4629                                skb_free_frag(olddata);
4630                                adapter->alloc_rx_buff_failed++;
4631                                break;
4632                        }
4633
4634                        /* Use new allocation */
4635                        skb_free_frag(olddata);
4636                }
4637                buffer_info->dma = dma_map_single(&pdev->dev,
4638                                                  data,
4639                                                  adapter->rx_buffer_len,
4640                                                  DMA_FROM_DEVICE);
4641                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4642                        skb_free_frag(data);
4643                        buffer_info->dma = 0;
4644                        adapter->alloc_rx_buff_failed++;
4645                        break;
4646                }
4647
4648                /* XXX if it was allocated cleanly it will never map to a
4649                 * boundary crossing
4650                 */
4651
4652                /* Fix for errata 23, can't cross 64kB boundary */
4653                if (!e1000_check_64k_bound(adapter,
4654                                        (void *)(unsigned long)buffer_info->dma,
4655                                        adapter->rx_buffer_len)) {
4656                        e_err(rx_err, "dma align check failed: %u bytes at "
4657                              "%p\n", adapter->rx_buffer_len,
4658                              (void *)(unsigned long)buffer_info->dma);
4659
4660                        dma_unmap_single(&pdev->dev, buffer_info->dma,
4661                                         adapter->rx_buffer_len,
4662                                         DMA_FROM_DEVICE);
4663
4664                        skb_free_frag(data);
4665                        buffer_info->rxbuf.data = NULL;
4666                        buffer_info->dma = 0;
4667
4668                        adapter->alloc_rx_buff_failed++;
4669                        break;
4670                }
4671                buffer_info->rxbuf.data = data;
4672 skip:
4673                rx_desc = E1000_RX_DESC(*rx_ring, i);
4674                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4675
4676                if (unlikely(++i == rx_ring->count))
4677                        i = 0;
4678                buffer_info = &rx_ring->buffer_info[i];
4679        }
4680
4681        if (likely(rx_ring->next_to_use != i)) {
4682                rx_ring->next_to_use = i;
4683                if (unlikely(i-- == 0))
4684                        i = (rx_ring->count - 1);
4685
4686                /* Force memory writes to complete before letting h/w
4687                 * know there are new descriptors to fetch.  (Only
4688                 * applicable for weak-ordered memory model archs,
4689                 * such as IA-64).
4690                 */
4691                wmb();
4692                writel(i, hw->hw_addr + rx_ring->rdt);
4693        }
4694}
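
/* Errata 23 background: on the affected MACs a receive buffer must not
 * straddle a 64 KiB boundary, which is why the allocation above is simply
 * retried (a second fragment usually lands in a different region).  The
 * helper e1000_check_64k_bound(), defined earlier in this file, is
 * assumed to implement the usual test for this, along the lines of:
 *
 *	unsigned long begin = (unsigned long)start;
 *	unsigned long end = begin + len - 1;
 *	bool ok = ((begin ^ end) >> 16) == 0;	// same 64 KiB segment
 */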
4695
4696/**
4697 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4698 * @adapter: board private structure
4699 **/
4700static void e1000_smartspeed(struct e1000_adapter *adapter)
4701{
4702        struct e1000_hw *hw = &adapter->hw;
4703        u16 phy_status;
4704        u16 phy_ctrl;
4705
4706        if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4707           !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4708                return;
4709
4710        if (adapter->smartspeed == 0) {
4711                /* If the Master/Slave config fault is asserted twice in
4712                 * a row, we assume it is a real back-to-back fault
4713                 */
4714                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4715                if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4716                        return;
4717                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4718                if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4719                        return;
4720                e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4721                if (phy_ctrl & CR_1000T_MS_ENABLE) {
4722                        phy_ctrl &= ~CR_1000T_MS_ENABLE;
4723                        e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4724                                            phy_ctrl);
4725                        adapter->smartspeed++;
4726                        if (!e1000_phy_setup_autoneg(hw) &&
4727                           !e1000_read_phy_reg(hw, PHY_CTRL,
4728                                               &phy_ctrl)) {
4729                                phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4730                                             MII_CR_RESTART_AUTO_NEG);
4731                                e1000_write_phy_reg(hw, PHY_CTRL,
4732                                                    phy_ctrl);
4733                        }
4734                }
4735                return;
4736        } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4737                /* If still no link, perhaps using 2/3 pair cable */
4738                e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4739                phy_ctrl |= CR_1000T_MS_ENABLE;
4740                e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4741                if (!e1000_phy_setup_autoneg(hw) &&
4742                   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4743                        phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4744                                     MII_CR_RESTART_AUTO_NEG);
4745                        e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4746                }
4747        }
4748        /* Restart process after E1000_SMARTSPEED_MAX iterations */
4749        if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4750                adapter->smartspeed = 0;
4751}
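
/* SmartSpeed summary: if 1000BASE-T master/slave resolution keeps
 * faulting, the code above first clears the manual master/slave enable
 * and restarts autonegotiation; if the link still is not up after
 * E1000_SMARTSPEED_DOWNSHIFT polls (for example on a 2-pair cable that
 * cannot carry gigabit), it re-enables the manual setting, and after
 * E1000_SMARTSPEED_MAX polls the whole sequence starts over.  It is
 * expected to be called periodically from the link watchdog while the
 * link is down.
 */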
4752
4753/**
4754 * e1000_ioctl - handle ioctl calls from user space
4755 * @netdev: network interface device structure
4756 * @ifr: pointer to the interface request block
4757 * @cmd: ioctl command to execute
4758 **/
4759static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4760{
4761        switch (cmd) {
4762        case SIOCGMIIPHY:
4763        case SIOCGMIIREG:
4764        case SIOCSMIIREG:
4765                return e1000_mii_ioctl(netdev, ifr, cmd);
4766        default:
4767                return -EOPNOTSUPP;
4768        }
4769}
4770
4771/**
4772 * e1000_mii_ioctl - handle the MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG)
4773 * @netdev: network interface device structure
4774 * @ifr: pointer to the interface request block containing the MII data
4775 * @cmd: ioctl command to execute
4776 **/
4777static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4778                           int cmd)
4779{
4780        struct e1000_adapter *adapter = netdev_priv(netdev);
4781        struct e1000_hw *hw = &adapter->hw;
4782        struct mii_ioctl_data *data = if_mii(ifr);
4783        int retval;
4784        u16 mii_reg;
4785        unsigned long flags;
4786
4787        if (hw->media_type != e1000_media_type_copper)
4788                return -EOPNOTSUPP;
4789
4790        switch (cmd) {
4791        case SIOCGMIIPHY:
4792                data->phy_id = hw->phy_addr;
4793                break;
4794        case SIOCGMIIREG:
4795                spin_lock_irqsave(&adapter->stats_lock, flags);
4796                if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4797                                   &data->val_out)) {
4798                        spin_unlock_irqrestore(&adapter->stats_lock, flags);
4799                        return -EIO;
4800                }
4801                spin_unlock_irqrestore(&adapter->stats_lock, flags);
4802                break;
4803        case SIOCSMIIREG:
4804                if (data->reg_num & ~(0x1F))
4805                        return -EFAULT;
4806                mii_reg = data->val_in;
4807                spin_lock_irqsave(&adapter->stats_lock, flags);
4808                if (e1000_write_phy_reg(hw, data->reg_num,
4809                                        mii_reg)) {
4810                        spin_unlock_irqrestore(&adapter->stats_lock, flags);
4811                        return -EIO;
4812                }
4813                spin_unlock_irqrestore(&adapter->stats_lock, flags);
4814                if (hw->media_type == e1000_media_type_copper) {
4815                        switch (data->reg_num) {
4816                        case PHY_CTRL:
4817                                if (mii_reg & MII_CR_POWER_DOWN)
4818                                        break;
4819                                if (mii_reg & MII_CR_AUTO_NEG_EN) {
4820                                        hw->autoneg = 1;
4821                                        hw->autoneg_advertised = 0x2F;
4822                                } else {
4823                                        u32 speed;
4824                                        if (mii_reg & 0x40)
4825                                                speed = SPEED_1000;
4826                                        else if (mii_reg & 0x2000)
4827                                                speed = SPEED_100;
4828                                        else
4829                                                speed = SPEED_10;
4830                                        retval = e1000_set_spd_dplx(
4831                                                adapter, speed,
4832                                                ((mii_reg & 0x100)
4833                                                 ? DUPLEX_FULL :
4834                                                 DUPLEX_HALF));
4835                                        if (retval)
4836                                                return retval;
4837                                }
4838                                if (netif_running(adapter->netdev))
4839                                        e1000_reinit_locked(adapter);
4840                                else
4841                                        e1000_reset(adapter);
4842                                break;
4843                        case M88E1000_PHY_SPEC_CTRL:
4844                        case M88E1000_EXT_PHY_SPEC_CTRL:
4845                                if (e1000_phy_reset(hw))
4846                                        return -EIO;
4847                                break;
4848                        }
4849                } else {
4850                        switch (data->reg_num) {
4851                        case PHY_CTRL:
4852                                if (mii_reg & MII_CR_POWER_DOWN)
4853                                        break;
4854                                if (netif_running(adapter->netdev))
4855                                        e1000_reinit_locked(adapter);
4856                                else
4857                                        e1000_reset(adapter);
4858                                break;
4859                        }
4860                }
4861                break;
4862        default:
4863                return -EOPNOTSUPP;
4864        }
4865        return E1000_SUCCESS;
4866}
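
/* For reference, these are the MII ioctls issued by tools such as
 * mii-tool.  A minimal userspace sketch (illustrative only; needs
 * <sys/ioctl.h>, <sys/socket.h>, <net/if.h>, <linux/mii.h> and
 * <linux/sockios.h>) that reads PHY register 1 (BMSR) could look like:
 *
 *	struct ifreq ifr = { };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// result lands in mii->val_out
 */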
4867
4868void e1000_pci_set_mwi(struct e1000_hw *hw)
4869{
4870        struct e1000_adapter *adapter = hw->back;
4871        int ret_val = pci_set_mwi(adapter->pdev);
4872
4873        if (ret_val)
4874                e_err(probe, "Error in setting MWI\n");
4875}
4876
4877void e1000_pci_clear_mwi(struct e1000_hw *hw)
4878{
4879        struct e1000_adapter *adapter = hw->back;
4880
4881        pci_clear_mwi(adapter->pdev);
4882}
4883
4884int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4885{
4886        struct e1000_adapter *adapter = hw->back;
4887        return pcix_get_mmrbc(adapter->pdev);
4888}
4889
4890void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4891{
4892        struct e1000_adapter *adapter = hw->back;
4893        pcix_set_mmrbc(adapter->pdev, mmrbc);
4894}
4895
4896void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4897{
4898        outl(value, port);
4899}
4900
4901static bool e1000_vlan_used(struct e1000_adapter *adapter)
4902{
4903        u16 vid;
4904
4905        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4906                return true;
4907        return false;
4908}
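
/* e1000_vlan_used() above is simply "is any VLAN bit set": the loop body
 * returns on the first set bit, making it equivalent to
 * !bitmap_empty(adapter->active_vlans, VLAN_N_VID).
 */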
4909
4910static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4911                              netdev_features_t features)
4912{
4913        struct e1000_hw *hw = &adapter->hw;
4914        u32 ctrl;
4915
4916        ctrl = er32(CTRL);
4917        if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4918                /* enable VLAN tag insert/strip */
4919                ctrl |= E1000_CTRL_VME;
4920        } else {
4921                /* disable VLAN tag insert/strip */
4922                ctrl &= ~E1000_CTRL_VME;
4923        }
4924        ew32(CTRL, ctrl);
4925}
4926static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4927                                     bool filter_on)
4928{
4929        struct e1000_hw *hw = &adapter->hw;
4930        u32 rctl;
4931
4932        if (!test_bit(__E1000_DOWN, &adapter->flags))
4933                e1000_irq_disable(adapter);
4934
4935        __e1000_vlan_mode(adapter, adapter->netdev->features);
4936        if (filter_on) {
4937                /* enable VLAN receive filtering */
4938                rctl = er32(RCTL);
4939                rctl &= ~E1000_RCTL_CFIEN;
4940                if (!(adapter->netdev->flags & IFF_PROMISC))
4941                        rctl |= E1000_RCTL_VFE;
4942                ew32(RCTL, rctl);
4943                e1000_update_mng_vlan(adapter);
4944        } else {
4945                /* disable VLAN receive filtering */
4946                rctl = er32(RCTL);
4947                rctl &= ~E1000_RCTL_VFE;
4948                ew32(RCTL, rctl);
4949        }
4950
4951        if (!test_bit(__E1000_DOWN, &adapter->flags))
4952                e1000_irq_enable(adapter);
4953}
4954
4955static void e1000_vlan_mode(struct net_device *netdev,
4956                            netdev_features_t features)
4957{
4958        struct e1000_adapter *adapter = netdev_priv(netdev);
4959
4960        if (!test_bit(__E1000_DOWN, &adapter->flags))
4961                e1000_irq_disable(adapter);
4962
4963        __e1000_vlan_mode(adapter, features);
4964
4965        if (!test_bit(__E1000_DOWN, &adapter->flags))
4966                e1000_irq_enable(adapter);
4967}
4968
4969static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4970                                 __be16 proto, u16 vid)
4971{
4972        struct e1000_adapter *adapter = netdev_priv(netdev);
4973        struct e1000_hw *hw = &adapter->hw;
4974        u32 vfta, index;
4975
4976        if ((hw->mng_cookie.status &
4977             E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4978            (vid == adapter->mng_vlan_id))
4979                return 0;
4980
4981        if (!e1000_vlan_used(adapter))
4982                e1000_vlan_filter_on_off(adapter, true);
4983
4984        /* add VID to filter table */
4985        index = (vid >> 5) & 0x7F;
4986        vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4987        vfta |= (1 << (vid & 0x1F));
4988        e1000_write_vfta(hw, index, vfta);
4989
4990        set_bit(vid, adapter->active_vlans);
4991
4992        return 0;
4993}
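
/* VFTA layout, as used above: the 4096 possible VLAN IDs are spread over
 * 128 32-bit filter registers, so bits [11:5] of the VID select the
 * register and bits [4:0] select the bit within it.  Worked example
 * (illustrative) for VID 100:
 *
 *	index = (100 >> 5) & 0x7F;	// = 3
 *	bit   = 100 & 0x1F;		// = 4
 *	vfta[3] |= 1 << 4;
 */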
4994
4995static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4996                                  __be16 proto, u16 vid)
4997{
4998        struct e1000_adapter *adapter = netdev_priv(netdev);
4999        struct e1000_hw *hw = &adapter->hw;
5000        u32 vfta, index;
5001
5002        if (!test_bit(__E1000_DOWN, &adapter->flags))
5003                e1000_irq_disable(adapter);
5004        if (!test_bit(__E1000_DOWN, &adapter->flags))
5005                e1000_irq_enable(adapter);
5006
5007        /* remove VID from filter table */
5008        index = (vid >> 5) & 0x7F;
5009        vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
5010        vfta &= ~(1 << (vid & 0x1F));
5011        e1000_write_vfta(hw, index, vfta);
5012
5013        clear_bit(vid, adapter->active_vlans);
5014
5015        if (!e1000_vlan_used(adapter))
5016                e1000_vlan_filter_on_off(adapter, false);
5017
5018        return 0;
5019}
5020
5021static void e1000_restore_vlan(struct e1000_adapter *adapter)
5022{
5023        u16 vid;
5024
5025        if (!e1000_vlan_used(adapter))
5026                return;
5027
5028        e1000_vlan_filter_on_off(adapter, true);
5029        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5030                e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5031}
5032
5033int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5034{
5035        struct e1000_hw *hw = &adapter->hw;
5036
5037        hw->autoneg = 0;
5038
5039        /* Make sure dplx is at most 1 bit and lsb of speed is not set
5040         * for the switch() below to work
5041         */
5042        if ((spd & 1) || (dplx & ~1))
5043                goto err_inval;
5044
5045        /* Fiber NICs only allow 1000 Mbps full duplex */
5046        if ((hw->media_type == e1000_media_type_fiber) &&
5047            spd != SPEED_1000 &&
5048            dplx != DUPLEX_FULL)
5049                goto err_inval;
5050
5051        switch (spd + dplx) {
5052        case SPEED_10 + DUPLEX_HALF:
5053                hw->forced_speed_duplex = e1000_10_half;
5054                break;
5055        case SPEED_10 + DUPLEX_FULL:
5056                hw->forced_speed_duplex = e1000_10_full;
5057                break;
5058        case SPEED_100 + DUPLEX_HALF:
5059                hw->forced_speed_duplex = e1000_100_half;
5060                break;
5061        case SPEED_100 + DUPLEX_FULL:
5062                hw->forced_speed_duplex = e1000_100_full;
5063                break;
5064        case SPEED_1000 + DUPLEX_FULL:
5065                hw->autoneg = 1;
5066                hw->autoneg_advertised = ADVERTISE_1000_FULL;
5067                break;
5068        case SPEED_1000 + DUPLEX_HALF: /* not supported */
5069        default:
5070                goto err_inval;
5071        }
5072
5073        /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5074        hw->mdix = AUTO_ALL_MODES;
5075
5076        return 0;
5077
5078err_inval:
5079        e_err(probe, "Unsupported Speed/Duplex configuration\n");
5080        return -EINVAL;
5081}
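
/* The switch (spd + dplx) trick above relies on the ethtool encodings:
 * DUPLEX_HALF is 0, DUPLEX_FULL is 1, and the valid speeds (10/100/1000)
 * are all even, which is exactly what the sanity check at the top of the
 * function enforces.  A request for 100 Mb/s full duplex, for instance,
 * arrives here as SPEED_100 + DUPLEX_FULL == 101 and cannot collide with
 * any other valid combination.
 */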
5082
5083static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5084{
5085        struct net_device *netdev = pci_get_drvdata(pdev);
5086        struct e1000_adapter *adapter = netdev_priv(netdev);
5087        struct e1000_hw *hw = &adapter->hw;
5088        u32 ctrl, ctrl_ext, rctl, status;
5089        u32 wufc = adapter->wol;
5090#ifdef CONFIG_PM
5091        int retval = 0;
5092#endif
5093
5094        netif_device_detach(netdev);
5095
5096        if (netif_running(netdev)) {
5097                int count = E1000_CHECK_RESET_COUNT;
5098
5099                while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5100                        usleep_range(10000, 20000);
5101
5102                WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5103                e1000_down(adapter);
5104        }
5105
5106#ifdef CONFIG_PM
5107        retval = pci_save_state(pdev);
5108        if (retval)
5109                return retval;
5110#endif
5111
5112        status = er32(STATUS);
5113        if (status & E1000_STATUS_LU)
5114                wufc &= ~E1000_WUFC_LNKC;
5115
5116        if (wufc) {
5117                e1000_setup_rctl(adapter);
5118                e1000_set_rx_mode(netdev);
5119
5120                rctl = er32(RCTL);
5121
5122                /* turn on all-multi mode if wake on multicast is enabled */
5123                if (wufc & E1000_WUFC_MC)
5124                        rctl |= E1000_RCTL_MPE;
5125
5126                /* enable receives in the hardware */
5127                ew32(RCTL, rctl | E1000_RCTL_EN);
5128
5129                if (hw->mac_type >= e1000_82540) {
5130                        ctrl = er32(CTRL);
5131                        /* advertise wake from D3Cold */
5132                        #define E1000_CTRL_ADVD3WUC 0x00100000
5133                        /* phy power management enable */
5134                        #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5135                        ctrl |= E1000_CTRL_ADVD3WUC |
5136                                E1000_CTRL_EN_PHY_PWR_MGMT;
5137                        ew32(CTRL, ctrl);
5138                }
5139
5140                if (hw->media_type == e1000_media_type_fiber ||
5141                    hw->media_type == e1000_media_type_internal_serdes) {
5142                        /* keep the laser running in D3 */
5143                        ctrl_ext = er32(CTRL_EXT);
5144                        ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5145                        ew32(CTRL_EXT, ctrl_ext);
5146                }
5147
5148                ew32(WUC, E1000_WUC_PME_EN);
5149                ew32(WUFC, wufc);
5150        } else {
5151                ew32(WUC, 0);
5152                ew32(WUFC, 0);
5153        }
5154
5155        e1000_release_manageability(adapter);
5156
5157        *enable_wake = !!wufc;
5158
5159        /* make sure adapter isn't asleep if manageability is enabled */
5160        if (adapter->en_mng_pt)
5161                *enable_wake = true;
5162
5163        if (netif_running(netdev))
5164                e1000_free_irq(adapter);
5165
5166        if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5167                pci_disable_device(pdev);
5168
5169        return 0;
5170}
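
/* Wake-on-LAN note: adapter->wol (copied into wufc above) holds the
 * E1000_WUFC_* filter bits selected through the driver's ethtool WoL
 * support, e.g. "ethtool -s ethX wol g" for magic-packet wake.  Link-
 * change wake (E1000_WUFC_LNKC) is masked off when the link is already
 * up, and receives are left enabled so the wake filters can inspect
 * incoming frames while the device sits in D3.
 */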
5171
5172#ifdef CONFIG_PM
5173static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5174{
5175        int retval;
5176        bool wake;
5177
5178        retval = __e1000_shutdown(pdev, &wake);
5179        if (retval)
5180                return retval;
5181
5182        if (wake) {
5183                pci_prepare_to_sleep(pdev);
5184        } else {
5185                pci_wake_from_d3(pdev, false);
5186                pci_set_power_state(pdev, PCI_D3hot);
5187        }
5188
5189        return 0;
5190}
5191
5192static int e1000_resume(struct pci_dev *pdev)
5193{
5194        struct net_device *netdev = pci_get_drvdata(pdev);
5195        struct e1000_adapter *adapter = netdev_priv(netdev);
5196        struct e1000_hw *hw = &adapter->hw;
5197        u32 err;
5198
5199        pci_set_power_state(pdev, PCI_D0);
5200        pci_restore_state(pdev);
5201        pci_save_state(pdev);
5202
5203        if (adapter->need_ioport)
5204                err = pci_enable_device(pdev);
5205        else
5206                err = pci_enable_device_mem(pdev);
5207        if (err) {
5208                pr_err("Cannot enable PCI device from suspend\n");
5209                return err;
5210        }
5211
5212        /* flush memory to make sure state is correct */
5213        smp_mb__before_atomic();
5214        clear_bit(__E1000_DISABLED, &adapter->flags);
5215        pci_set_master(pdev);
5216
5217        pci_enable_wake(pdev, PCI_D3hot, 0);
5218        pci_enable_wake(pdev, PCI_D3cold, 0);
5219
5220        if (netif_running(netdev)) {
5221                err = e1000_request_irq(adapter);
5222                if (err)
5223                        return err;
5224        }
5225
5226        e1000_power_up_phy(adapter);
5227        e1000_reset(adapter);
5228        ew32(WUS, ~0);
5229
5230        e1000_init_manageability(adapter);
5231
5232        if (netif_running(netdev))
5233                e1000_up(adapter);
5234
5235        netif_device_attach(netdev);
5236
5237        return 0;
5238}
5239#endif
5240
5241static void e1000_shutdown(struct pci_dev *pdev)
5242{
5243        bool wake;
5244
5245        __e1000_shutdown(pdev, &wake);
5246
5247        if (system_state == SYSTEM_POWER_OFF) {
5248                pci_wake_from_d3(pdev, wake);
5249                pci_set_power_state(pdev, PCI_D3hot);
5250        }
5251}
5252
5253#ifdef CONFIG_NET_POLL_CONTROLLER
5254/* Polling 'interrupt' - used by things like netconsole to send skbs
5255 * without having to re-enable interrupts. It's not called while
5256 * the interrupt routine is executing.
5257 */
5258static void e1000_netpoll(struct net_device *netdev)
5259{
5260        struct e1000_adapter *adapter = netdev_priv(netdev);
5261
5262        if (disable_hardirq(adapter->pdev->irq))
5263                e1000_intr(adapter->pdev->irq, netdev);
5264        enable_irq(adapter->pdev->irq);
5265}
5266#endif
5267
5268/**
5269 * e1000_io_error_detected - called when PCI error is detected
5270 * @pdev: Pointer to PCI device
5271 * @state: The current pci connection state
5272 *
5273 * This function is called after a PCI bus error affecting
5274 * this device has been detected.
5275 */
5276static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5277                                                pci_channel_state_t state)
5278{
5279        struct net_device *netdev = pci_get_drvdata(pdev);
5280        struct e1000_adapter *adapter = netdev_priv(netdev);
5281
5282        netif_device_detach(netdev);
5283
5284        if (state == pci_channel_io_perm_failure)
5285                return PCI_ERS_RESULT_DISCONNECT;
5286
5287        if (netif_running(netdev))
5288                e1000_down(adapter);
5289
5290        if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5291                pci_disable_device(pdev);
5292
5293        /* Request a slot reset. */
5294        return PCI_ERS_RESULT_NEED_RESET;
5295}
5296
5297/**
5298 * e1000_io_slot_reset - called after the pci bus has been reset.
5299 * @pdev: Pointer to PCI device
5300 *
5301 * Restart the card from scratch, as if from a cold-boot. Implementation
5302 * resembles the first-half of the e1000_resume routine.
5303 */
5304static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5305{
5306        struct net_device *netdev = pci_get_drvdata(pdev);
5307        struct e1000_adapter *adapter = netdev_priv(netdev);
5308        struct e1000_hw *hw = &adapter->hw;
5309        int err;
5310
5311        if (adapter->need_ioport)
5312                err = pci_enable_device(pdev);
5313        else
5314                err = pci_enable_device_mem(pdev);
5315        if (err) {
5316                pr_err("Cannot re-enable PCI device after reset.\n");
5317                return PCI_ERS_RESULT_DISCONNECT;
5318        }
5319
5320        /* flush memory to make sure state is correct */
5321        smp_mb__before_atomic();
5322        clear_bit(__E1000_DISABLED, &adapter->flags);
5323        pci_set_master(pdev);
5324
5325        pci_enable_wake(pdev, PCI_D3hot, 0);
5326        pci_enable_wake(pdev, PCI_D3cold, 0);
5327
5328        e1000_reset(adapter);
5329        ew32(WUS, ~0);
5330
5331        return PCI_ERS_RESULT_RECOVERED;
5332}
5333
5334/**
5335 * e1000_io_resume - called when traffic can start flowing again.
5336 * @pdev: Pointer to PCI device
5337 *
5338 * This callback is called when the error recovery driver tells us that
5339 * it's OK to resume normal operation. Implementation resembles the
5340 * second-half of the e1000_resume routine.
5341 */
5342static void e1000_io_resume(struct pci_dev *pdev)
5343{
5344        struct net_device *netdev = pci_get_drvdata(pdev);
5345        struct e1000_adapter *adapter = netdev_priv(netdev);
5346
5347        e1000_init_manageability(adapter);
5348
5349        if (netif_running(netdev)) {
5350                if (e1000_up(adapter)) {
5351                        pr_info("can't bring device back up after reset\n");
5352                        return;
5353                }
5354        }
5355
5356        netif_device_attach(netdev);
5357}
5358
5359/* e1000_main.c */
5360