linux/drivers/net/ethernet/intel/e1000e/netdev.c
<<
>>
Prefs
   1/*******************************************************************************
   2
   3  Intel PRO/1000 Linux driver
   4  Copyright(c) 1999 - 2012 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  30
  31#include <linux/module.h>
  32#include <linux/types.h>
  33#include <linux/init.h>
  34#include <linux/pci.h>
  35#include <linux/vmalloc.h>
  36#include <linux/pagemap.h>
  37#include <linux/delay.h>
  38#include <linux/netdevice.h>
  39#include <linux/interrupt.h>
  40#include <linux/tcp.h>
  41#include <linux/ipv6.h>
  42#include <linux/slab.h>
  43#include <net/checksum.h>
  44#include <net/ip6_checksum.h>
  45#include <linux/mii.h>
  46#include <linux/ethtool.h>
  47#include <linux/if_vlan.h>
  48#include <linux/cpu.h>
  49#include <linux/smp.h>
  50#include <linux/pm_qos.h>
  51#include <linux/pm_runtime.h>
  52#include <linux/aer.h>
  53#include <linux/prefetch.h>
  54
  55#include "e1000.h"
  56
  57#define DRV_EXTRAVERSION "-k"
  58
  59#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
  60char e1000e_driver_name[] = "e1000e";
  61const char e1000e_driver_version[] = DRV_VERSION;
  62
  63#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
  64static int debug = -1;
  65module_param(debug, int, 0);
  66MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  67
  68static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
  69
  70static const struct e1000_info *e1000_info_tbl[] = {
  71        [board_82571]           = &e1000_82571_info,
  72        [board_82572]           = &e1000_82572_info,
  73        [board_82573]           = &e1000_82573_info,
  74        [board_82574]           = &e1000_82574_info,
  75        [board_82583]           = &e1000_82583_info,
  76        [board_80003es2lan]     = &e1000_es2_info,
  77        [board_ich8lan]         = &e1000_ich8_info,
  78        [board_ich9lan]         = &e1000_ich9_info,
  79        [board_ich10lan]        = &e1000_ich10_info,
  80        [board_pchlan]          = &e1000_pch_info,
  81        [board_pch2lan]         = &e1000_pch2_info,
  82        [board_pch_lpt]         = &e1000_pch_lpt_info,
  83};
  84
  85struct e1000_reg_info {
  86        u32 ofs;
  87        char *name;
  88};
  89
  90#define E1000_RDFH      0x02410 /* Rx Data FIFO Head - RW */
  91#define E1000_RDFT      0x02418 /* Rx Data FIFO Tail - RW */
  92#define E1000_RDFHS     0x02420 /* Rx Data FIFO Head Saved - RW */
  93#define E1000_RDFTS     0x02428 /* Rx Data FIFO Tail Saved - RW */
  94#define E1000_RDFPC     0x02430 /* Rx Data FIFO Packet Count - RW */
  95
  96#define E1000_TDFH      0x03410 /* Tx Data FIFO Head - RW */
  97#define E1000_TDFT      0x03418 /* Tx Data FIFO Tail - RW */
  98#define E1000_TDFHS     0x03420 /* Tx Data FIFO Head Saved - RW */
  99#define E1000_TDFTS     0x03428 /* Tx Data FIFO Tail Saved - RW */
 100#define E1000_TDFPC     0x03430 /* Tx Data FIFO Packet Count - RW */
 101
 102static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 103
 104        /* General Registers */
 105        {E1000_CTRL, "CTRL"},
 106        {E1000_STATUS, "STATUS"},
 107        {E1000_CTRL_EXT, "CTRL_EXT"},
 108
 109        /* Interrupt Registers */
 110        {E1000_ICR, "ICR"},
 111
 112        /* Rx Registers */
 113        {E1000_RCTL, "RCTL"},
 114        {E1000_RDLEN(0), "RDLEN"},
 115        {E1000_RDH(0), "RDH"},
 116        {E1000_RDT(0), "RDT"},
 117        {E1000_RDTR, "RDTR"},
 118        {E1000_RXDCTL(0), "RXDCTL"},
 119        {E1000_ERT, "ERT"},
 120        {E1000_RDBAL(0), "RDBAL"},
 121        {E1000_RDBAH(0), "RDBAH"},
 122        {E1000_RDFH, "RDFH"},
 123        {E1000_RDFT, "RDFT"},
 124        {E1000_RDFHS, "RDFHS"},
 125        {E1000_RDFTS, "RDFTS"},
 126        {E1000_RDFPC, "RDFPC"},
 127
 128        /* Tx Registers */
 129        {E1000_TCTL, "TCTL"},
 130        {E1000_TDBAL(0), "TDBAL"},
 131        {E1000_TDBAH(0), "TDBAH"},
 132        {E1000_TDLEN(0), "TDLEN"},
 133        {E1000_TDH(0), "TDH"},
 134        {E1000_TDT(0), "TDT"},
 135        {E1000_TIDV, "TIDV"},
 136        {E1000_TXDCTL(0), "TXDCTL"},
 137        {E1000_TADV, "TADV"},
 138        {E1000_TARC(0), "TARC"},
 139        {E1000_TDFH, "TDFH"},
 140        {E1000_TDFT, "TDFT"},
 141        {E1000_TDFHS, "TDFHS"},
 142        {E1000_TDFTS, "TDFTS"},
 143        {E1000_TDFPC, "TDFPC"},
 144
 145        /* List Terminator */
 146        {0, NULL}
 147};
 148
 149/**
 150 * e1000_regdump - register printout routine
 151 * @hw: pointer to the HW structure
 152 * @reginfo: pointer to the register info table
 153 **/
 154static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
 155{
 156        int n = 0;
 157        char rname[16];
 158        u32 regs[8];
 159
 160        switch (reginfo->ofs) {
 161        case E1000_RXDCTL(0):
 162                for (n = 0; n < 2; n++)
 163                        regs[n] = __er32(hw, E1000_RXDCTL(n));
 164                break;
 165        case E1000_TXDCTL(0):
 166                for (n = 0; n < 2; n++)
 167                        regs[n] = __er32(hw, E1000_TXDCTL(n));
 168                break;
 169        case E1000_TARC(0):
 170                for (n = 0; n < 2; n++)
 171                        regs[n] = __er32(hw, E1000_TARC(n));
 172                break;
 173        default:
 174                pr_info("%-15s %08x\n",
 175                        reginfo->name, __er32(hw, reginfo->ofs));
 176                return;
 177        }
 178
 179        snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
 180        pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
 181}
 182
 183static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
 184                                 struct e1000_buffer *bi)
 185{
 186        int i;
 187        struct e1000_ps_page *ps_page;
 188
 189        for (i = 0; i < adapter->rx_ps_pages; i++) {
 190                ps_page = &bi->ps_pages[i];
 191
 192                if (ps_page->page) {
 193                        pr_info("packet dump for ps_page %d:\n", i);
 194                        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
 195                                       16, 1, page_address(ps_page->page),
 196                                       PAGE_SIZE, true);
 197                }
 198        }
 199}
 200
 201/**
 202 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 203 * @adapter: board private structure
 204 **/
 205static void e1000e_dump(struct e1000_adapter *adapter)
 206{
 207        struct net_device *netdev = adapter->netdev;
 208        struct e1000_hw *hw = &adapter->hw;
 209        struct e1000_reg_info *reginfo;
 210        struct e1000_ring *tx_ring = adapter->tx_ring;
 211        struct e1000_tx_desc *tx_desc;
 212        struct my_u0 {
 213                __le64 a;
 214                __le64 b;
 215        } *u0;
 216        struct e1000_buffer *buffer_info;
 217        struct e1000_ring *rx_ring = adapter->rx_ring;
 218        union e1000_rx_desc_packet_split *rx_desc_ps;
 219        union e1000_rx_desc_extended *rx_desc;
 220        struct my_u1 {
 221                __le64 a;
 222                __le64 b;
 223                __le64 c;
 224                __le64 d;
 225        } *u1;
 226        u32 staterr;
 227        int i = 0;
 228
 229        if (!netif_msg_hw(adapter))
 230                return;
 231
 232        /* Print netdevice Info */
 233        if (netdev) {
 234                dev_info(&adapter->pdev->dev, "Net device Info\n");
 235                pr_info("Device Name     state            trans_start      last_rx\n");
 236                pr_info("%-15s %016lX %016lX %016lX\n",
 237                        netdev->name, netdev->state, netdev->trans_start,
 238                        netdev->last_rx);
 239        }
 240
 241        /* Print Registers */
 242        dev_info(&adapter->pdev->dev, "Register Dump\n");
 243        pr_info(" Register Name   Value\n");
 244        for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
 245             reginfo->name; reginfo++) {
 246                e1000_regdump(hw, reginfo);
 247        }
 248
 249        /* Print Tx Ring Summary */
 250        if (!netdev || !netif_running(netdev))
 251                return;
 252
 253        dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
 254        pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
 255        buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
 256        pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
 257                0, tx_ring->next_to_use, tx_ring->next_to_clean,
 258                (unsigned long long)buffer_info->dma,
 259                buffer_info->length,
 260                buffer_info->next_to_watch,
 261                (unsigned long long)buffer_info->time_stamp);
 262
 263        /* Print Tx Ring */
 264        if (!netif_msg_tx_done(adapter))
 265                goto rx_ring_summary;
 266
 267        dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
 268
 269        /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
 270         *
 271         * Legacy Transmit Descriptor
 272         *   +--------------------------------------------------------------+
 273         * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
 274         *   +--------------------------------------------------------------+
 275         * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
 276         *   +--------------------------------------------------------------+
 277         *   63       48 47        36 35    32 31     24 23    16 15        0
 278         *
 279         * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
 280         *   63      48 47    40 39       32 31             16 15    8 7      0
 281         *   +----------------------------------------------------------------+
 282         * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
 283         *   +----------------------------------------------------------------+
 284         * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
 285         *   +----------------------------------------------------------------+
 286         *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
 287         *
 288         * Extended Data Descriptor (DTYP=0x1)
 289         *   +----------------------------------------------------------------+
 290         * 0 |                     Buffer Address [63:0]                      |
 291         *   +----------------------------------------------------------------+
 292         * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
 293         *   +----------------------------------------------------------------+
 294         *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
 295         */
 296        pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
 297        pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
 298        pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
 299        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 300                const char *next_desc;
 301                tx_desc = E1000_TX_DESC(*tx_ring, i);
 302                buffer_info = &tx_ring->buffer_info[i];
 303                u0 = (struct my_u0 *)tx_desc;
 304                if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
 305                        next_desc = " NTC/U";
 306                else if (i == tx_ring->next_to_use)
 307                        next_desc = " NTU";
 308                else if (i == tx_ring->next_to_clean)
 309                        next_desc = " NTC";
 310                else
 311                        next_desc = "";
 312                pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
 313                        (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
 314                         ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
 315                        i,
 316                        (unsigned long long)le64_to_cpu(u0->a),
 317                        (unsigned long long)le64_to_cpu(u0->b),
 318                        (unsigned long long)buffer_info->dma,
 319                        buffer_info->length, buffer_info->next_to_watch,
 320                        (unsigned long long)buffer_info->time_stamp,
 321                        buffer_info->skb, next_desc);
 322
 323                if (netif_msg_pktdata(adapter) && buffer_info->skb)
 324                        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
 325                                       16, 1, buffer_info->skb->data,
 326                                       buffer_info->skb->len, true);
 327        }
 328
 329        /* Print Rx Ring Summary */
 330rx_ring_summary:
 331        dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
 332        pr_info("Queue [NTU] [NTC]\n");
 333        pr_info(" %5d %5X %5X\n",
 334                0, rx_ring->next_to_use, rx_ring->next_to_clean);
 335
 336        /* Print Rx Ring */
 337        if (!netif_msg_rx_status(adapter))
 338                return;
 339
 340        dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
 341        switch (adapter->rx_ps_pages) {
 342        case 1:
 343        case 2:
 344        case 3:
 345                /* [Extended] Packet Split Receive Descriptor Format
 346                 *
 347                 *    +-----------------------------------------------------+
 348                 *  0 |                Buffer Address 0 [63:0]              |
 349                 *    +-----------------------------------------------------+
 350                 *  8 |                Buffer Address 1 [63:0]              |
 351                 *    +-----------------------------------------------------+
 352                 * 16 |                Buffer Address 2 [63:0]              |
 353                 *    +-----------------------------------------------------+
 354                 * 24 |                Buffer Address 3 [63:0]              |
 355                 *    +-----------------------------------------------------+
 356                 */
 357                pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
 358                /* [Extended] Receive Descriptor (Write-Back) Format
 359                 *
 360                 *   63       48 47    32 31     13 12    8 7    4 3        0
 361                 *   +------------------------------------------------------+
 362                 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
 363                 *   | Checksum | Ident  |         | Queue |      |  Type   |
 364                 *   +------------------------------------------------------+
 365                 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
 366                 *   +------------------------------------------------------+
 367                 *   63       48 47    32 31            20 19               0
 368                 */
 369                pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
 370                for (i = 0; i < rx_ring->count; i++) {
 371                        const char *next_desc;
 372                        buffer_info = &rx_ring->buffer_info[i];
 373                        rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
 374                        u1 = (struct my_u1 *)rx_desc_ps;
 375                        staterr =
 376                            le32_to_cpu(rx_desc_ps->wb.middle.status_error);
 377
 378                        if (i == rx_ring->next_to_use)
 379                                next_desc = " NTU";
 380                        else if (i == rx_ring->next_to_clean)
 381                                next_desc = " NTC";
 382                        else
 383                                next_desc = "";
 384
 385                        if (staterr & E1000_RXD_STAT_DD) {
 386                                /* Descriptor Done */
 387                                pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
 388                                        "RWB", i,
 389                                        (unsigned long long)le64_to_cpu(u1->a),
 390                                        (unsigned long long)le64_to_cpu(u1->b),
 391                                        (unsigned long long)le64_to_cpu(u1->c),
 392                                        (unsigned long long)le64_to_cpu(u1->d),
 393                                        buffer_info->skb, next_desc);
 394                        } else {
 395                                pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
 396                                        "R  ", i,
 397                                        (unsigned long long)le64_to_cpu(u1->a),
 398                                        (unsigned long long)le64_to_cpu(u1->b),
 399                                        (unsigned long long)le64_to_cpu(u1->c),
 400                                        (unsigned long long)le64_to_cpu(u1->d),
 401                                        (unsigned long long)buffer_info->dma,
 402                                        buffer_info->skb, next_desc);
 403
 404                                if (netif_msg_pktdata(adapter))
 405                                        e1000e_dump_ps_pages(adapter,
 406                                                             buffer_info);
 407                        }
 408                }
 409                break;
 410        default:
 411        case 0:
 412                /* Extended Receive Descriptor (Read) Format
 413                 *
 414                 *   +-----------------------------------------------------+
 415                 * 0 |                Buffer Address [63:0]                |
 416                 *   +-----------------------------------------------------+
 417                 * 8 |                      Reserved                       |
 418                 *   +-----------------------------------------------------+
 419                 */
 420                pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
 421                /* Extended Receive Descriptor (Write-Back) Format
 422                 *
 423                 *   63       48 47    32 31    24 23            4 3        0
 424                 *   +------------------------------------------------------+
 425                 *   |     RSS Hash      |        |               |         |
 426                 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
 427                 *   | Packet   | IP     |        |               |  Type   |
 428                 *   | Checksum | Ident  |        |               |         |
 429                 *   +------------------------------------------------------+
 430                 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
 431                 *   +------------------------------------------------------+
 432                 *   63       48 47    32 31            20 19               0
 433                 */
 434                pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");
 435
 436                for (i = 0; i < rx_ring->count; i++) {
 437                        const char *next_desc;
 438
 439                        buffer_info = &rx_ring->buffer_info[i];
 440                        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
 441                        u1 = (struct my_u1 *)rx_desc;
 442                        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 443
 444                        if (i == rx_ring->next_to_use)
 445                                next_desc = " NTU";
 446                        else if (i == rx_ring->next_to_clean)
 447                                next_desc = " NTC";
 448                        else
 449                                next_desc = "";
 450
 451                        if (staterr & E1000_RXD_STAT_DD) {
 452                                /* Descriptor Done */
 453                                pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
 454                                        "RWB", i,
 455                                        (unsigned long long)le64_to_cpu(u1->a),
 456                                        (unsigned long long)le64_to_cpu(u1->b),
 457                                        buffer_info->skb, next_desc);
 458                        } else {
 459                                pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
 460                                        "R  ", i,
 461                                        (unsigned long long)le64_to_cpu(u1->a),
 462                                        (unsigned long long)le64_to_cpu(u1->b),
 463                                        (unsigned long long)buffer_info->dma,
 464                                        buffer_info->skb, next_desc);
 465
 466                                if (netif_msg_pktdata(adapter) &&
 467                                    buffer_info->skb)
 468                                        print_hex_dump(KERN_INFO, "",
 469                                                       DUMP_PREFIX_ADDRESS, 16,
 470                                                       1,
 471                                                       buffer_info->skb->data,
 472                                                       adapter->rx_buffer_len,
 473                                                       true);
 474                        }
 475                }
 476        }
 477}
 478
 479/**
 480 * e1000_desc_unused - calculate if we have unused descriptors
 481 **/
 482static int e1000_desc_unused(struct e1000_ring *ring)
 483{
 484        if (ring->next_to_clean > ring->next_to_use)
 485                return ring->next_to_clean - ring->next_to_use - 1;
 486
 487        return ring->count + ring->next_to_clean - ring->next_to_use - 1;
 488}
 489
 490/**
 491 * e1000_receive_skb - helper function to handle Rx indications
 492 * @adapter: board private structure
 493 * @status: descriptor status field as written by hardware
 494 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 495 * @skb: pointer to sk_buff to be indicated to stack
 496 **/
 497static void e1000_receive_skb(struct e1000_adapter *adapter,
 498                              struct net_device *netdev, struct sk_buff *skb,
 499                              u8 status, __le16 vlan)
 500{
 501        u16 tag = le16_to_cpu(vlan);
 502        skb->protocol = eth_type_trans(skb, netdev);
 503
 504        if (status & E1000_RXD_STAT_VP)
 505                __vlan_hwaccel_put_tag(skb, tag);
 506
 507        napi_gro_receive(&adapter->napi, skb);
 508}
 509
 510/**
 511 * e1000_rx_checksum - Receive Checksum Offload
 512 * @adapter: board private structure
 513 * @status_err: receive descriptor status and error fields
 514 * @csum: receive descriptor csum field
 515 * @sk_buff: socket buffer with received data
 516 **/
 517static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 518                              struct sk_buff *skb)
 519{
 520        u16 status = (u16)status_err;
 521        u8 errors = (u8)(status_err >> 24);
 522
 523        skb_checksum_none_assert(skb);
 524
 525        /* Rx checksum disabled */
 526        if (!(adapter->netdev->features & NETIF_F_RXCSUM))
 527                return;
 528
 529        /* Ignore Checksum bit is set */
 530        if (status & E1000_RXD_STAT_IXSM)
 531                return;
 532
 533        /* TCP/UDP checksum error bit or IP checksum error bit is set */
 534        if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
 535                /* let the stack verify checksum errors */
 536                adapter->hw_csum_err++;
 537                return;
 538        }
 539
 540        /* TCP/UDP Checksum has not been calculated */
 541        if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
 542                return;
 543
 544        /* It must be a TCP or UDP packet with a valid checksum */
 545        skb->ip_summed = CHECKSUM_UNNECESSARY;
 546        adapter->hw_csum_good++;
 547}
 548
 549static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
 550{
 551        struct e1000_adapter *adapter = rx_ring->adapter;
 552        struct e1000_hw *hw = &adapter->hw;
 553        s32 ret_val = __ew32_prepare(hw);
 554
 555        writel(i, rx_ring->tail);
 556
 557        if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
 558                u32 rctl = er32(RCTL);
 559                ew32(RCTL, rctl & ~E1000_RCTL_EN);
 560                e_err("ME firmware caused invalid RDT - resetting\n");
 561                schedule_work(&adapter->reset_task);
 562        }
 563}
 564
 565static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
 566{
 567        struct e1000_adapter *adapter = tx_ring->adapter;
 568        struct e1000_hw *hw = &adapter->hw;
 569        s32 ret_val = __ew32_prepare(hw);
 570
 571        writel(i, tx_ring->tail);
 572
 573        if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
 574                u32 tctl = er32(TCTL);
 575                ew32(TCTL, tctl & ~E1000_TCTL_EN);
 576                e_err("ME firmware caused invalid TDT - resetting\n");
 577                schedule_work(&adapter->reset_task);
 578        }
 579}
 580
 581/**
 582 * e1000_alloc_rx_buffers - Replace used receive buffers
 583 * @rx_ring: Rx descriptor ring
 584 **/
 585static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
 586                                   int cleaned_count, gfp_t gfp)
 587{
 588        struct e1000_adapter *adapter = rx_ring->adapter;
 589        struct net_device *netdev = adapter->netdev;
 590        struct pci_dev *pdev = adapter->pdev;
 591        union e1000_rx_desc_extended *rx_desc;
 592        struct e1000_buffer *buffer_info;
 593        struct sk_buff *skb;
 594        unsigned int i;
 595        unsigned int bufsz = adapter->rx_buffer_len;
 596
 597        i = rx_ring->next_to_use;
 598        buffer_info = &rx_ring->buffer_info[i];
 599
 600        while (cleaned_count--) {
 601                skb = buffer_info->skb;
 602                if (skb) {
 603                        skb_trim(skb, 0);
 604                        goto map_skb;
 605                }
 606
 607                skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
 608                if (!skb) {
 609                        /* Better luck next round */
 610                        adapter->alloc_rx_buff_failed++;
 611                        break;
 612                }
 613
 614                buffer_info->skb = skb;
 615map_skb:
 616                buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 617                                                  adapter->rx_buffer_len,
 618                                                  DMA_FROM_DEVICE);
 619                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 620                        dev_err(&pdev->dev, "Rx DMA map failed\n");
 621                        adapter->rx_dma_failed++;
 622                        break;
 623                }
 624
 625                rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
 626                rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 627
 628                if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
 629                        /* Force memory writes to complete before letting h/w
 630                         * know there are new descriptors to fetch.  (Only
 631                         * applicable for weak-ordered memory model archs,
 632                         * such as IA-64).
 633                         */
 634                        wmb();
 635                        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 636                                e1000e_update_rdt_wa(rx_ring, i);
 637                        else
 638                                writel(i, rx_ring->tail);
 639                }
 640                i++;
 641                if (i == rx_ring->count)
 642                        i = 0;
 643                buffer_info = &rx_ring->buffer_info[i];
 644        }
 645
 646        rx_ring->next_to_use = i;
 647}
 648
 649/**
 650 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 651 * @rx_ring: Rx descriptor ring
 652 **/
 653static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
 654                                      int cleaned_count, gfp_t gfp)
 655{
 656        struct e1000_adapter *adapter = rx_ring->adapter;
 657        struct net_device *netdev = adapter->netdev;
 658        struct pci_dev *pdev = adapter->pdev;
 659        union e1000_rx_desc_packet_split *rx_desc;
 660        struct e1000_buffer *buffer_info;
 661        struct e1000_ps_page *ps_page;
 662        struct sk_buff *skb;
 663        unsigned int i, j;
 664
 665        i = rx_ring->next_to_use;
 666        buffer_info = &rx_ring->buffer_info[i];
 667
 668        while (cleaned_count--) {
 669                rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 670
 671                for (j = 0; j < PS_PAGE_BUFFERS; j++) {
 672                        ps_page = &buffer_info->ps_pages[j];
 673                        if (j >= adapter->rx_ps_pages) {
 674                                /* all unused desc entries get hw null ptr */
 675                                rx_desc->read.buffer_addr[j + 1] =
 676                                    ~cpu_to_le64(0);
 677                                continue;
 678                        }
 679                        if (!ps_page->page) {
 680                                ps_page->page = alloc_page(gfp);
 681                                if (!ps_page->page) {
 682                                        adapter->alloc_rx_buff_failed++;
 683                                        goto no_buffers;
 684                                }
 685                                ps_page->dma = dma_map_page(&pdev->dev,
 686                                                            ps_page->page,
 687                                                            0, PAGE_SIZE,
 688                                                            DMA_FROM_DEVICE);
 689                                if (dma_mapping_error(&pdev->dev,
 690                                                      ps_page->dma)) {
 691                                        dev_err(&adapter->pdev->dev,
 692                                                "Rx DMA page map failed\n");
 693                                        adapter->rx_dma_failed++;
 694                                        goto no_buffers;
 695                                }
 696                        }
 697                        /* Refresh the desc even if buffer_addrs
 698                         * didn't change because each write-back
 699                         * erases this info.
 700                         */
 701                        rx_desc->read.buffer_addr[j + 1] =
 702                            cpu_to_le64(ps_page->dma);
 703                }
 704
 705                skb = __netdev_alloc_skb_ip_align(netdev,
 706                                                  adapter->rx_ps_bsize0,
 707                                                  gfp);
 708
 709                if (!skb) {
 710                        adapter->alloc_rx_buff_failed++;
 711                        break;
 712                }
 713
 714                buffer_info->skb = skb;
 715                buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
 716                                                  adapter->rx_ps_bsize0,
 717                                                  DMA_FROM_DEVICE);
 718                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
 719                        dev_err(&pdev->dev, "Rx DMA map failed\n");
 720                        adapter->rx_dma_failed++;
 721                        /* cleanup skb */
 722                        dev_kfree_skb_any(skb);
 723                        buffer_info->skb = NULL;
 724                        break;
 725                }
 726
 727                rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 728
 729                if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
 730                        /* Force memory writes to complete before letting h/w
 731                         * know there are new descriptors to fetch.  (Only
 732                         * applicable for weak-ordered memory model archs,
 733                         * such as IA-64).
 734                         */
 735                        wmb();
 736                        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 737                                e1000e_update_rdt_wa(rx_ring, i << 1);
 738                        else
 739                                writel(i << 1, rx_ring->tail);
 740                }
 741
 742                i++;
 743                if (i == rx_ring->count)
 744                        i = 0;
 745                buffer_info = &rx_ring->buffer_info[i];
 746        }
 747
 748no_buffers:
 749        rx_ring->next_to_use = i;
 750}
 751
 752/**
 753 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 754 * @rx_ring: Rx descriptor ring
 755 * @cleaned_count: number of buffers to allocate this pass
 756 **/
 757
 758static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
 759                                         int cleaned_count, gfp_t gfp)
 760{
 761        struct e1000_adapter *adapter = rx_ring->adapter;
 762        struct net_device *netdev = adapter->netdev;
 763        struct pci_dev *pdev = adapter->pdev;
 764        union e1000_rx_desc_extended *rx_desc;
 765        struct e1000_buffer *buffer_info;
 766        struct sk_buff *skb;
 767        unsigned int i;
 768        unsigned int bufsz = 256 - 16 /* for skb_reserve */;
 769
 770        i = rx_ring->next_to_use;
 771        buffer_info = &rx_ring->buffer_info[i];
 772
 773        while (cleaned_count--) {
 774                skb = buffer_info->skb;
 775                if (skb) {
 776                        skb_trim(skb, 0);
 777                        goto check_page;
 778                }
 779
 780                skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
 781                if (unlikely(!skb)) {
 782                        /* Better luck next round */
 783                        adapter->alloc_rx_buff_failed++;
 784                        break;
 785                }
 786
 787                buffer_info->skb = skb;
 788check_page:
 789                /* allocate a new page if necessary */
 790                if (!buffer_info->page) {
 791                        buffer_info->page = alloc_page(gfp);
 792                        if (unlikely(!buffer_info->page)) {
 793                                adapter->alloc_rx_buff_failed++;
 794                                break;
 795                        }
 796                }
 797
 798                if (!buffer_info->dma)
 799                        buffer_info->dma = dma_map_page(&pdev->dev,
 800                                                        buffer_info->page, 0,
 801                                                        PAGE_SIZE,
 802                                                        DMA_FROM_DEVICE);
 803
 804                rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
 805                rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 806
 807                if (unlikely(++i == rx_ring->count))
 808                        i = 0;
 809                buffer_info = &rx_ring->buffer_info[i];
 810        }
 811
 812        if (likely(rx_ring->next_to_use != i)) {
 813                rx_ring->next_to_use = i;
 814                if (unlikely(i-- == 0))
 815                        i = (rx_ring->count - 1);
 816
 817                /* Force memory writes to complete before letting h/w
 818                 * know there are new descriptors to fetch.  (Only
 819                 * applicable for weak-ordered memory model archs,
 820                 * such as IA-64).
 821                 */
 822                wmb();
 823                if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 824                        e1000e_update_rdt_wa(rx_ring, i);
 825                else
 826                        writel(i, rx_ring->tail);
 827        }
 828}
 829
 830static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
 831                                 struct sk_buff *skb)
 832{
 833        if (netdev->features & NETIF_F_RXHASH)
 834                skb->rxhash = le32_to_cpu(rss);
 835}
 836
 837/**
 838 * e1000_clean_rx_irq - Send received data up the network stack
 839 * @rx_ring: Rx descriptor ring
 840 *
 841 * the return value indicates whether actual cleaning was done, there
 842 * is no guarantee that everything was cleaned
 843 **/
 844static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 845                               int work_to_do)
 846{
 847        struct e1000_adapter *adapter = rx_ring->adapter;
 848        struct net_device *netdev = adapter->netdev;
 849        struct pci_dev *pdev = adapter->pdev;
 850        struct e1000_hw *hw = &adapter->hw;
 851        union e1000_rx_desc_extended *rx_desc, *next_rxd;
 852        struct e1000_buffer *buffer_info, *next_buffer;
 853        u32 length, staterr;
 854        unsigned int i;
 855        int cleaned_count = 0;
 856        bool cleaned = false;
 857        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 858
 859        i = rx_ring->next_to_clean;
 860        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
 861        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 862        buffer_info = &rx_ring->buffer_info[i];
 863
 864        while (staterr & E1000_RXD_STAT_DD) {
 865                struct sk_buff *skb;
 866
 867                if (*work_done >= work_to_do)
 868                        break;
 869                (*work_done)++;
 870                rmb();  /* read descriptor and rx_buffer_info after status DD */
 871
 872                skb = buffer_info->skb;
 873                buffer_info->skb = NULL;
 874
 875                prefetch(skb->data - NET_IP_ALIGN);
 876
 877                i++;
 878                if (i == rx_ring->count)
 879                        i = 0;
 880                next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
 881                prefetch(next_rxd);
 882
 883                next_buffer = &rx_ring->buffer_info[i];
 884
 885                cleaned = true;
 886                cleaned_count++;
 887                dma_unmap_single(&pdev->dev,
 888                                 buffer_info->dma,
 889                                 adapter->rx_buffer_len,
 890                                 DMA_FROM_DEVICE);
 891                buffer_info->dma = 0;
 892
 893                length = le16_to_cpu(rx_desc->wb.upper.length);
 894
 895                /* !EOP means multiple descriptors were used to store a single
 896                 * packet, if that's the case we need to toss it.  In fact, we
 897                 * need to toss every packet with the EOP bit clear and the
 898                 * next frame that _does_ have the EOP bit set, as it is by
 899                 * definition only a frame fragment
 900                 */
 901                if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
 902                        adapter->flags2 |= FLAG2_IS_DISCARDING;
 903
 904                if (adapter->flags2 & FLAG2_IS_DISCARDING) {
 905                        /* All receives must fit into a single buffer */
 906                        e_dbg("Receive packet consumed multiple buffers\n");
 907                        /* recycle */
 908                        buffer_info->skb = skb;
 909                        if (staterr & E1000_RXD_STAT_EOP)
 910                                adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 911                        goto next_desc;
 912                }
 913
 914                if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
 915                             !(netdev->features & NETIF_F_RXALL))) {
 916                        /* recycle */
 917                        buffer_info->skb = skb;
 918                        goto next_desc;
 919                }
 920
 921                /* adjust length to remove Ethernet CRC */
 922                if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
 923                        /* If configured to store CRC, don't subtract FCS,
 924                         * but keep the FCS bytes out of the total_rx_bytes
 925                         * counter
 926                         */
 927                        if (netdev->features & NETIF_F_RXFCS)
 928                                total_rx_bytes -= 4;
 929                        else
 930                                length -= 4;
 931                }
 932
 933                total_rx_bytes += length;
 934                total_rx_packets++;
 935
 936                /* code added for copybreak, this should improve
 937                 * performance for small packets with large amounts
 938                 * of reassembly being done in the stack
 939                 */
 940                if (length < copybreak) {
 941                        struct sk_buff *new_skb =
 942                            netdev_alloc_skb_ip_align(netdev, length);
 943                        if (new_skb) {
 944                                skb_copy_to_linear_data_offset(new_skb,
 945                                                               -NET_IP_ALIGN,
 946                                                               (skb->data -
 947                                                                NET_IP_ALIGN),
 948                                                               (length +
 949                                                                NET_IP_ALIGN));
 950                                /* save the skb in buffer_info as good */
 951                                buffer_info->skb = skb;
 952                                skb = new_skb;
 953                        }
 954                        /* else just continue with the old one */
 955                }
 956                /* end copybreak code */
 957                skb_put(skb, length);
 958
 959                /* Receive Checksum Offload */
 960                e1000_rx_checksum(adapter, staterr, skb);
 961
 962                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
 963
 964                e1000_receive_skb(adapter, netdev, skb, staterr,
 965                                  rx_desc->wb.upper.vlan);
 966
 967next_desc:
 968                rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
 969
 970                /* return some buffers to hardware, one at a time is too slow */
 971                if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
 972                        adapter->alloc_rx_buf(rx_ring, cleaned_count,
 973                                              GFP_ATOMIC);
 974                        cleaned_count = 0;
 975                }
 976
 977                /* use prefetched values */
 978                rx_desc = next_rxd;
 979                buffer_info = next_buffer;
 980
 981                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 982        }
 983        rx_ring->next_to_clean = i;
 984
 985        cleaned_count = e1000_desc_unused(rx_ring);
 986        if (cleaned_count)
 987                adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
 988
 989        adapter->total_rx_bytes += total_rx_bytes;
 990        adapter->total_rx_packets += total_rx_packets;
 991        return cleaned;
 992}
 993
 994static void e1000_put_txbuf(struct e1000_ring *tx_ring,
 995                            struct e1000_buffer *buffer_info)
 996{
 997        struct e1000_adapter *adapter = tx_ring->adapter;
 998
 999        if (buffer_info->dma) {
1000                if (buffer_info->mapped_as_page)
1001                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1002                                       buffer_info->length, DMA_TO_DEVICE);
1003                else
1004                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1005                                         buffer_info->length, DMA_TO_DEVICE);
1006                buffer_info->dma = 0;
1007        }
1008        if (buffer_info->skb) {
1009                dev_kfree_skb_any(buffer_info->skb);
1010                buffer_info->skb = NULL;
1011        }
1012        buffer_info->time_stamp = 0;
1013}
1014
1015static void e1000_print_hw_hang(struct work_struct *work)
1016{
1017        struct e1000_adapter *adapter = container_of(work,
1018                                                     struct e1000_adapter,
1019                                                     print_hang_task);
1020        struct net_device *netdev = adapter->netdev;
1021        struct e1000_ring *tx_ring = adapter->tx_ring;
1022        unsigned int i = tx_ring->next_to_clean;
1023        unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
1024        struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
1025        struct e1000_hw *hw = &adapter->hw;
1026        u16 phy_status, phy_1000t_status, phy_ext_status;
1027        u16 pci_status;
1028
1029        if (test_bit(__E1000_DOWN, &adapter->state))
1030                return;
1031
1032        if (!adapter->tx_hang_recheck &&
1033            (adapter->flags2 & FLAG2_DMA_BURST)) {
1034                /* May be block on write-back, flush and detect again
1035                 * flush pending descriptor writebacks to memory
1036                 */
1037                ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1038                /* execute the writes immediately */
1039                e1e_flush();
1040                /* Due to rare timing issues, write to TIDV again to ensure
1041                 * the write is successful
1042                 */
1043                ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1044                /* execute the writes immediately */
1045                e1e_flush();
1046                adapter->tx_hang_recheck = true;
1047                return;
1048        }
1049        /* Real hang detected */
1050        adapter->tx_hang_recheck = false;
1051        netif_stop_queue(netdev);
1052
1053        e1e_rphy(hw, PHY_STATUS, &phy_status);
1054        e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
1055        e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
1056
1057        pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
1058
1059        /* detected Hardware unit hang */
1060        e_err("Detected Hardware Unit Hang:\n"
1061              "  TDH                  <%x>\n"
1062              "  TDT                  <%x>\n"
1063              "  next_to_use          <%x>\n"
1064              "  next_to_clean        <%x>\n"
1065              "buffer_info[next_to_clean]:\n"
1066              "  time_stamp           <%lx>\n"
1067              "  next_to_watch        <%x>\n"
1068              "  jiffies              <%lx>\n"
1069              "  next_to_watch.status <%x>\n"
1070              "MAC Status             <%x>\n"
1071              "PHY Status             <%x>\n"
1072              "PHY 1000BASE-T Status  <%x>\n"
1073              "PHY Extended Status    <%x>\n"
1074              "PCI Status             <%x>\n",
1075              readl(tx_ring->head),
1076              readl(tx_ring->tail),
1077              tx_ring->next_to_use,
1078              tx_ring->next_to_clean,
1079              tx_ring->buffer_info[eop].time_stamp,
1080              eop,
1081              jiffies,
1082              eop_desc->upper.fields.status,
1083              er32(STATUS),
1084              phy_status,
1085              phy_1000t_status,
1086              phy_ext_status,
1087              pci_status);
1088
1089        /* Suggest workaround for known h/w issue */
1090        if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1091                e_err("Try turning off Tx pause (flow control) via ethtool\n");
1092}
1093
1094/**
1095 * e1000_clean_tx_irq - Reclaim resources after transmit completes
1096 * @tx_ring: Tx descriptor ring
1097 *
1098 * the return value indicates whether actual cleaning was done, there
1099 * is no guarantee that everything was cleaned
1100 **/
1101static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1102{
1103        struct e1000_adapter *adapter = tx_ring->adapter;
1104        struct net_device *netdev = adapter->netdev;
1105        struct e1000_hw *hw = &adapter->hw;
1106        struct e1000_tx_desc *tx_desc, *eop_desc;
1107        struct e1000_buffer *buffer_info;
1108        unsigned int i, eop;
1109        unsigned int count = 0;
1110        unsigned int total_tx_bytes = 0, total_tx_packets = 0;
1111        unsigned int bytes_compl = 0, pkts_compl = 0;
1112
1113        i = tx_ring->next_to_clean;
1114        eop = tx_ring->buffer_info[i].next_to_watch;
1115        eop_desc = E1000_TX_DESC(*tx_ring, eop);
1116
1117        while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
1118               (count < tx_ring->count)) {
1119                bool cleaned = false;
1120                rmb(); /* read buffer_info after eop_desc */
1121                for (; !cleaned; count++) {
1122                        tx_desc = E1000_TX_DESC(*tx_ring, i);
1123                        buffer_info = &tx_ring->buffer_info[i];
1124                        cleaned = (i == eop);
1125
1126                        if (cleaned) {
1127                                total_tx_packets += buffer_info->segs;
1128                                total_tx_bytes += buffer_info->bytecount;
1129                                if (buffer_info->skb) {
1130                                        bytes_compl += buffer_info->skb->len;
1131                                        pkts_compl++;
1132                                }
1133                        }
1134
1135                        e1000_put_txbuf(tx_ring, buffer_info);
1136                        tx_desc->upper.data = 0;
1137
1138                        i++;
1139                        if (i == tx_ring->count)
1140                                i = 0;
1141                }
1142
1143                if (i == tx_ring->next_to_use)
1144                        break;
1145                eop = tx_ring->buffer_info[i].next_to_watch;
1146                eop_desc = E1000_TX_DESC(*tx_ring, eop);
1147        }
1148
1149        tx_ring->next_to_clean = i;
1150
1151        netdev_completed_queue(netdev, pkts_compl, bytes_compl);
1152
1153#define TX_WAKE_THRESHOLD 32
1154        if (count && netif_carrier_ok(netdev) &&
1155            e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
1156                /* Make sure that anybody stopping the queue after this
1157                 * sees the new next_to_clean.
1158                 */
1159                smp_mb();
1160
1161                if (netif_queue_stopped(netdev) &&
1162                    !(test_bit(__E1000_DOWN, &adapter->state))) {
1163                        netif_wake_queue(netdev);
1164                        ++adapter->restart_queue;
1165                }
1166        }
1167
1168        if (adapter->detect_tx_hung) {
1169                /* Detect a transmit hang in hardware, this serializes the
1170                 * check with the clearing of time_stamp and movement of i
1171                 */
1172                adapter->detect_tx_hung = false;
1173                if (tx_ring->buffer_info[i].time_stamp &&
1174                    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
1175                               + (adapter->tx_timeout_factor * HZ)) &&
1176                    !(er32(STATUS) & E1000_STATUS_TXOFF))
1177                        schedule_work(&adapter->print_hang_task);
1178                else
1179                        adapter->tx_hang_recheck = false;
1180        }
1181        adapter->total_tx_bytes += total_tx_bytes;
1182        adapter->total_tx_packets += total_tx_packets;
1183        return count < tx_ring->count;
1184}
1185
1186/**
1187 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
1188 * @rx_ring: Rx descriptor ring
1189 *
1190 * the return value indicates whether actual cleaning was done, there
1191 * is no guarantee that everything was cleaned
1192 **/
1193static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1194                                  int work_to_do)
1195{
1196        struct e1000_adapter *adapter = rx_ring->adapter;
1197        struct e1000_hw *hw = &adapter->hw;
1198        union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
1199        struct net_device *netdev = adapter->netdev;
1200        struct pci_dev *pdev = adapter->pdev;
1201        struct e1000_buffer *buffer_info, *next_buffer;
1202        struct e1000_ps_page *ps_page;
1203        struct sk_buff *skb;
1204        unsigned int i, j;
1205        u32 length, staterr;
1206        int cleaned_count = 0;
1207        bool cleaned = false;
1208        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1209
1210        i = rx_ring->next_to_clean;
1211        rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
1212        staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1213        buffer_info = &rx_ring->buffer_info[i];
1214
1215        while (staterr & E1000_RXD_STAT_DD) {
1216                if (*work_done >= work_to_do)
1217                        break;
1218                (*work_done)++;
1219                skb = buffer_info->skb;
1220                rmb();  /* read descriptor and rx_buffer_info after status DD */
1221
1222                /* in the packet split case this is header only */
1223                prefetch(skb->data - NET_IP_ALIGN);
1224
1225                i++;
1226                if (i == rx_ring->count)
1227                        i = 0;
1228                next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
1229                prefetch(next_rxd);
1230
1231                next_buffer = &rx_ring->buffer_info[i];
1232
1233                cleaned = true;
1234                cleaned_count++;
1235                dma_unmap_single(&pdev->dev, buffer_info->dma,
1236                                 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
1237                buffer_info->dma = 0;
1238
1239                /* see !EOP comment in other Rx routine */
1240                if (!(staterr & E1000_RXD_STAT_EOP))
1241                        adapter->flags2 |= FLAG2_IS_DISCARDING;
1242
1243                if (adapter->flags2 & FLAG2_IS_DISCARDING) {
1244                        e_dbg("Packet Split buffers didn't pick up the full packet\n");
1245                        dev_kfree_skb_irq(skb);
1246                        if (staterr & E1000_RXD_STAT_EOP)
1247                                adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1248                        goto next_desc;
1249                }
1250
1251                if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1252                             !(netdev->features & NETIF_F_RXALL))) {
1253                        dev_kfree_skb_irq(skb);
1254                        goto next_desc;
1255                }
1256
1257                length = le16_to_cpu(rx_desc->wb.middle.length0);
1258
1259                if (!length) {
1260                        e_dbg("Last part of the packet spanning multiple descriptors\n");
1261                        dev_kfree_skb_irq(skb);
1262                        goto next_desc;
1263                }
1264
1265                /* Good Receive */
1266                skb_put(skb, length);
1267
1268                {
1269                        /* this looks ugly, but it seems compiler issues make
1270                         * it more efficient than reusing j
1271                         */
1272                        int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
1273
1274                        /* page alloc/put takes too long and effects small
1275                         * packet throughput, so unsplit small packets and
1276                         * save the alloc/put only valid in softirq (napi)
1277                         * context to call kmap_*
1278                         */
1279                        if (l1 && (l1 <= copybreak) &&
1280                            ((length + l1) <= adapter->rx_ps_bsize0)) {
1281                                u8 *vaddr;
1282
1283                                ps_page = &buffer_info->ps_pages[0];
1284
1285                                /* there is no documentation about how to call
1286                                 * kmap_atomic, so we can't hold the mapping
1287                                 * very long
1288                                 */
1289                                dma_sync_single_for_cpu(&pdev->dev,
1290                                                        ps_page->dma,
1291                                                        PAGE_SIZE,
1292                                                        DMA_FROM_DEVICE);
1293                                vaddr = kmap_atomic(ps_page->page);
1294                                memcpy(skb_tail_pointer(skb), vaddr, l1);
1295                                kunmap_atomic(vaddr);
1296                                dma_sync_single_for_device(&pdev->dev,
1297                                                           ps_page->dma,
1298                                                           PAGE_SIZE,
1299                                                           DMA_FROM_DEVICE);
1300
1301                                /* remove the CRC */
1302                                if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1303                                        if (!(netdev->features & NETIF_F_RXFCS))
1304                                                l1 -= 4;
1305                                }
1306
1307                                skb_put(skb, l1);
1308                                goto copydone;
1309                        } /* if */
1310                }
1311
1312                for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1313                        length = le16_to_cpu(rx_desc->wb.upper.length[j]);
1314                        if (!length)
1315                                break;
1316
1317                        ps_page = &buffer_info->ps_pages[j];
1318                        dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1319                                       DMA_FROM_DEVICE);
1320                        ps_page->dma = 0;
1321                        skb_fill_page_desc(skb, j, ps_page->page, 0, length);
1322                        ps_page->page = NULL;
1323                        skb->len += length;
1324                        skb->data_len += length;
1325                        skb->truesize += PAGE_SIZE;
1326                }
1327
1328                /* strip the ethernet crc, problem is we're using pages now so
1329                 * this whole operation can get a little cpu intensive
1330                 */
1331                if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1332                        if (!(netdev->features & NETIF_F_RXFCS))
1333                                pskb_trim(skb, skb->len - 4);
1334                }
1335
1336copydone:
1337                total_rx_bytes += skb->len;
1338                total_rx_packets++;
1339
1340                e1000_rx_checksum(adapter, staterr, skb);
1341
1342                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1343
1344                if (rx_desc->wb.upper.header_status &
1345                           cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1346                        adapter->rx_hdr_split++;
1347
1348                e1000_receive_skb(adapter, netdev, skb,
1349                                  staterr, rx_desc->wb.middle.vlan);
1350
1351next_desc:
1352                rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1353                buffer_info->skb = NULL;
1354
1355                /* return some buffers to hardware, one at a time is too slow */
1356                if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1357                        adapter->alloc_rx_buf(rx_ring, cleaned_count,
1358                                              GFP_ATOMIC);
1359                        cleaned_count = 0;
1360                }
1361
1362                /* use prefetched values */
1363                rx_desc = next_rxd;
1364                buffer_info = next_buffer;
1365
1366                staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1367        }
1368        rx_ring->next_to_clean = i;
1369
1370        cleaned_count = e1000_desc_unused(rx_ring);
1371        if (cleaned_count)
1372                adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1373
1374        adapter->total_rx_bytes += total_rx_bytes;
1375        adapter->total_rx_packets += total_rx_packets;
1376        return cleaned;
1377}
1378
1379/**
1380 * e1000_consume_page - helper function
1381 **/
1382static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1383                               u16 length)
1384{
1385        bi->page = NULL;
1386        skb->len += length;
1387        skb->data_len += length;
1388        skb->truesize += PAGE_SIZE;
1389}
1390
1391/**
1392 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1393 * @adapter: board private structure
1394 *
1395 * the return value indicates whether actual cleaning was done, there
1396 * is no guarantee that everything was cleaned
1397 **/
1398static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1399                                     int work_to_do)
1400{
1401        struct e1000_adapter *adapter = rx_ring->adapter;
1402        struct net_device *netdev = adapter->netdev;
1403        struct pci_dev *pdev = adapter->pdev;
1404        union e1000_rx_desc_extended *rx_desc, *next_rxd;
1405        struct e1000_buffer *buffer_info, *next_buffer;
1406        u32 length, staterr;
1407        unsigned int i;
1408        int cleaned_count = 0;
1409        bool cleaned = false;
1410        unsigned int total_rx_bytes=0, total_rx_packets=0;
1411
1412        i = rx_ring->next_to_clean;
1413        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
1414        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1415        buffer_info = &rx_ring->buffer_info[i];
1416
1417        while (staterr & E1000_RXD_STAT_DD) {
1418                struct sk_buff *skb;
1419
1420                if (*work_done >= work_to_do)
1421                        break;
1422                (*work_done)++;
1423                rmb();  /* read descriptor and rx_buffer_info after status DD */
1424
1425                skb = buffer_info->skb;
1426                buffer_info->skb = NULL;
1427
1428                ++i;
1429                if (i == rx_ring->count)
1430                        i = 0;
1431                next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
1432                prefetch(next_rxd);
1433
1434                next_buffer = &rx_ring->buffer_info[i];
1435
1436                cleaned = true;
1437                cleaned_count++;
1438                dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1439                               DMA_FROM_DEVICE);
1440                buffer_info->dma = 0;
1441
1442                length = le16_to_cpu(rx_desc->wb.upper.length);
1443
1444                /* errors is only valid for DD + EOP descriptors */
1445                if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
1446                             ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1447                              !(netdev->features & NETIF_F_RXALL)))) {
1448                        /* recycle both page and skb */
1449                        buffer_info->skb = skb;
1450                        /* an error means any chain goes out the window too */
1451                        if (rx_ring->rx_skb_top)
1452                                dev_kfree_skb_irq(rx_ring->rx_skb_top);
1453                        rx_ring->rx_skb_top = NULL;
1454                        goto next_desc;
1455                }
1456
1457#define rxtop (rx_ring->rx_skb_top)
1458                if (!(staterr & E1000_RXD_STAT_EOP)) {
1459                        /* this descriptor is only the beginning (or middle) */
1460                        if (!rxtop) {
1461                                /* this is the beginning of a chain */
1462                                rxtop = skb;
1463                                skb_fill_page_desc(rxtop, 0, buffer_info->page,
1464                                                   0, length);
1465                        } else {
1466                                /* this is the middle of a chain */
1467                                skb_fill_page_desc(rxtop,
1468                                    skb_shinfo(rxtop)->nr_frags,
1469                                    buffer_info->page, 0, length);
1470                                /* re-use the skb, only consumed the page */
1471                                buffer_info->skb = skb;
1472                        }
1473                        e1000_consume_page(buffer_info, rxtop, length);
1474                        goto next_desc;
1475                } else {
1476                        if (rxtop) {
1477                                /* end of the chain */
1478                                skb_fill_page_desc(rxtop,
1479                                    skb_shinfo(rxtop)->nr_frags,
1480                                    buffer_info->page, 0, length);
1481                                /* re-use the current skb, we only consumed the
1482                                 * page
1483                                 */
1484                                buffer_info->skb = skb;
1485                                skb = rxtop;
1486                                rxtop = NULL;
1487                                e1000_consume_page(buffer_info, skb, length);
1488                        } else {
1489                                /* no chain, got EOP, this buf is the packet
1490                                 * copybreak to save the put_page/alloc_page
1491                                 */
1492                                if (length <= copybreak &&
1493                                    skb_tailroom(skb) >= length) {
1494                                        u8 *vaddr;
1495                                        vaddr = kmap_atomic(buffer_info->page);
1496                                        memcpy(skb_tail_pointer(skb), vaddr,
1497                                               length);
1498                                        kunmap_atomic(vaddr);
1499                                        /* re-use the page, so don't erase
1500                                         * buffer_info->page
1501                                         */
1502                                        skb_put(skb, length);
1503                                } else {
1504                                        skb_fill_page_desc(skb, 0,
1505                                                           buffer_info->page, 0,
1506                                                           length);
1507                                        e1000_consume_page(buffer_info, skb,
1508                                                           length);
1509                                }
1510                        }
1511                }
1512
1513                /* Receive Checksum Offload */
1514                e1000_rx_checksum(adapter, staterr, skb);
1515
1516                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1517
1518                /* probably a little skewed due to removing CRC */
1519                total_rx_bytes += skb->len;
1520                total_rx_packets++;
1521
1522                /* eth type trans needs skb->data to point to something */
1523                if (!pskb_may_pull(skb, ETH_HLEN)) {
1524                        e_err("pskb_may_pull failed.\n");
1525                        dev_kfree_skb_irq(skb);
1526                        goto next_desc;
1527                }
1528
1529                e1000_receive_skb(adapter, netdev, skb, staterr,
1530                                  rx_desc->wb.upper.vlan);
1531
1532next_desc:
1533                rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
1534
1535                /* return some buffers to hardware, one at a time is too slow */
1536                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1537                        adapter->alloc_rx_buf(rx_ring, cleaned_count,
1538                                              GFP_ATOMIC);
1539                        cleaned_count = 0;
1540                }
1541
1542                /* use prefetched values */
1543                rx_desc = next_rxd;
1544                buffer_info = next_buffer;
1545
1546                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1547        }
1548        rx_ring->next_to_clean = i;
1549
1550        cleaned_count = e1000_desc_unused(rx_ring);
1551        if (cleaned_count)
1552                adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1553
1554        adapter->total_rx_bytes += total_rx_bytes;
1555        adapter->total_rx_packets += total_rx_packets;
1556        return cleaned;
1557}
1558
1559/**
1560 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1561 * @rx_ring: Rx descriptor ring
1562 **/
1563static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1564{
1565        struct e1000_adapter *adapter = rx_ring->adapter;
1566        struct e1000_buffer *buffer_info;
1567        struct e1000_ps_page *ps_page;
1568        struct pci_dev *pdev = adapter->pdev;
1569        unsigned int i, j;
1570
1571        /* Free all the Rx ring sk_buffs */
1572        for (i = 0; i < rx_ring->count; i++) {
1573                buffer_info = &rx_ring->buffer_info[i];
1574                if (buffer_info->dma) {
1575                        if (adapter->clean_rx == e1000_clean_rx_irq)
1576                                dma_unmap_single(&pdev->dev, buffer_info->dma,
1577                                                 adapter->rx_buffer_len,
1578                                                 DMA_FROM_DEVICE);
1579                        else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1580                                dma_unmap_page(&pdev->dev, buffer_info->dma,
1581                                               PAGE_SIZE,
1582                                               DMA_FROM_DEVICE);
1583                        else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1584                                dma_unmap_single(&pdev->dev, buffer_info->dma,
1585                                                 adapter->rx_ps_bsize0,
1586                                                 DMA_FROM_DEVICE);
1587                        buffer_info->dma = 0;
1588                }
1589
1590                if (buffer_info->page) {
1591                        put_page(buffer_info->page);
1592                        buffer_info->page = NULL;
1593                }
1594
1595                if (buffer_info->skb) {
1596                        dev_kfree_skb(buffer_info->skb);
1597                        buffer_info->skb = NULL;
1598                }
1599
1600                for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1601                        ps_page = &buffer_info->ps_pages[j];
1602                        if (!ps_page->page)
1603                                break;
1604                        dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1605                                       DMA_FROM_DEVICE);
1606                        ps_page->dma = 0;
1607                        put_page(ps_page->page);
1608                        ps_page->page = NULL;
1609                }
1610        }
1611
1612        /* there also may be some cached data from a chained receive */
1613        if (rx_ring->rx_skb_top) {
1614                dev_kfree_skb(rx_ring->rx_skb_top);
1615                rx_ring->rx_skb_top = NULL;
1616        }
1617
1618        /* Zero out the descriptor ring */
1619        memset(rx_ring->desc, 0, rx_ring->size);
1620
1621        rx_ring->next_to_clean = 0;
1622        rx_ring->next_to_use = 0;
1623        adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1624
1625        writel(0, rx_ring->head);
1626        if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
1627                e1000e_update_rdt_wa(rx_ring, 0);
1628        else
1629                writel(0, rx_ring->tail);
1630}
1631
1632static void e1000e_downshift_workaround(struct work_struct *work)
1633{
1634        struct e1000_adapter *adapter = container_of(work,
1635                                        struct e1000_adapter, downshift_task);
1636
1637        if (test_bit(__E1000_DOWN, &adapter->state))
1638                return;
1639
1640        e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1641}
1642
1643/**
1644 * e1000_intr_msi - Interrupt Handler
1645 * @irq: interrupt number
1646 * @data: pointer to a network interface device structure
1647 **/
1648static irqreturn_t e1000_intr_msi(int irq, void *data)
1649{
1650        struct net_device *netdev = data;
1651        struct e1000_adapter *adapter = netdev_priv(netdev);
1652        struct e1000_hw *hw = &adapter->hw;
1653        u32 icr = er32(ICR);
1654
1655        /* read ICR disables interrupts using IAM */
1656        if (icr & E1000_ICR_LSC) {
1657                hw->mac.get_link_status = true;
1658                /* ICH8 workaround-- Call gig speed drop workaround on cable
1659                 * disconnect (LSC) before accessing any PHY registers
1660                 */
1661                if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1662                    (!(er32(STATUS) & E1000_STATUS_LU)))
1663                        schedule_work(&adapter->downshift_task);
1664
1665                /* 80003ES2LAN workaround-- For packet buffer work-around on
1666                 * link down event; disable receives here in the ISR and reset
1667                 * adapter in watchdog
1668                 */
1669                if (netif_carrier_ok(netdev) &&
1670                    adapter->flags & FLAG_RX_NEEDS_RESTART) {
1671                        /* disable receives */
1672                        u32 rctl = er32(RCTL);
1673                        ew32(RCTL, rctl & ~E1000_RCTL_EN);
1674                        adapter->flags |= FLAG_RX_RESTART_NOW;
1675                }
1676                /* guard against interrupt when we're going down */
1677                if (!test_bit(__E1000_DOWN, &adapter->state))
1678                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
1679        }
1680
1681        /* Reset on uncorrectable ECC error */
1682        if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1683                u32 pbeccsts = er32(PBECCSTS);
1684
1685                adapter->corr_errors +=
1686                    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1687                adapter->uncorr_errors +=
1688                    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1689                    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1690
1691                /* Do the reset outside of interrupt context */
1692                schedule_work(&adapter->reset_task);
1693
1694                /* return immediately since reset is imminent */
1695                return IRQ_HANDLED;
1696        }
1697
1698        if (napi_schedule_prep(&adapter->napi)) {
1699                adapter->total_tx_bytes = 0;
1700                adapter->total_tx_packets = 0;
1701                adapter->total_rx_bytes = 0;
1702                adapter->total_rx_packets = 0;
1703                __napi_schedule(&adapter->napi);
1704        }
1705
1706        return IRQ_HANDLED;
1707}
1708
1709/**
1710 * e1000_intr - Interrupt Handler
1711 * @irq: interrupt number
1712 * @data: pointer to a network interface device structure
1713 **/
1714static irqreturn_t e1000_intr(int irq, void *data)
1715{
1716        struct net_device *netdev = data;
1717        struct e1000_adapter *adapter = netdev_priv(netdev);
1718        struct e1000_hw *hw = &adapter->hw;
1719        u32 rctl, icr = er32(ICR);
1720
1721        if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1722                return IRQ_NONE;  /* Not our interrupt */
1723
1724        /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1725         * not set, then the adapter didn't send an interrupt
1726         */
1727        if (!(icr & E1000_ICR_INT_ASSERTED))
1728                return IRQ_NONE;
1729
1730        /* Interrupt Auto-Mask...upon reading ICR,
1731         * interrupts are masked.  No need for the
1732         * IMC write
1733         */
1734
1735        if (icr & E1000_ICR_LSC) {
1736                hw->mac.get_link_status = true;
1737                /* ICH8 workaround-- Call gig speed drop workaround on cable
1738                 * disconnect (LSC) before accessing any PHY registers
1739                 */
1740                if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1741                    (!(er32(STATUS) & E1000_STATUS_LU)))
1742                        schedule_work(&adapter->downshift_task);
1743
1744                /* 80003ES2LAN workaround--
1745                 * For packet buffer work-around on link down event;
1746                 * disable receives here in the ISR and
1747                 * reset adapter in watchdog
1748                 */
1749                if (netif_carrier_ok(netdev) &&
1750                    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1751                        /* disable receives */
1752                        rctl = er32(RCTL);
1753                        ew32(RCTL, rctl & ~E1000_RCTL_EN);
1754                        adapter->flags |= FLAG_RX_RESTART_NOW;
1755                }
1756                /* guard against interrupt when we're going down */
1757                if (!test_bit(__E1000_DOWN, &adapter->state))
1758                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
1759        }
1760
1761        /* Reset on uncorrectable ECC error */
1762        if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
1763                u32 pbeccsts = er32(PBECCSTS);
1764
1765                adapter->corr_errors +=
1766                    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
1767                adapter->uncorr_errors +=
1768                    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
1769                    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
1770
1771                /* Do the reset outside of interrupt context */
1772                schedule_work(&adapter->reset_task);
1773
1774                /* return immediately since reset is imminent */
1775                return IRQ_HANDLED;
1776        }
1777
1778        if (napi_schedule_prep(&adapter->napi)) {
1779                adapter->total_tx_bytes = 0;
1780                adapter->total_tx_packets = 0;
1781                adapter->total_rx_bytes = 0;
1782                adapter->total_rx_packets = 0;
1783                __napi_schedule(&adapter->napi);
1784        }
1785
1786        return IRQ_HANDLED;
1787}
1788
1789static irqreturn_t e1000_msix_other(int irq, void *data)
1790{
1791        struct net_device *netdev = data;
1792        struct e1000_adapter *adapter = netdev_priv(netdev);
1793        struct e1000_hw *hw = &adapter->hw;
1794        u32 icr = er32(ICR);
1795
1796        if (!(icr & E1000_ICR_INT_ASSERTED)) {
1797                if (!test_bit(__E1000_DOWN, &adapter->state))
1798                        ew32(IMS, E1000_IMS_OTHER);
1799                return IRQ_NONE;
1800        }
1801
1802        if (icr & adapter->eiac_mask)
1803                ew32(ICS, (icr & adapter->eiac_mask));
1804
1805        if (icr & E1000_ICR_OTHER) {
1806                if (!(icr & E1000_ICR_LSC))
1807                        goto no_link_interrupt;
1808                hw->mac.get_link_status = true;
1809                /* guard against interrupt when we're going down */
1810                if (!test_bit(__E1000_DOWN, &adapter->state))
1811                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
1812        }
1813
1814no_link_interrupt:
1815        if (!test_bit(__E1000_DOWN, &adapter->state))
1816                ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
1817
1818        return IRQ_HANDLED;
1819}
1820
1821
1822static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1823{
1824        struct net_device *netdev = data;
1825        struct e1000_adapter *adapter = netdev_priv(netdev);
1826        struct e1000_hw *hw = &adapter->hw;
1827        struct e1000_ring *tx_ring = adapter->tx_ring;
1828
1829
1830        adapter->total_tx_bytes = 0;
1831        adapter->total_tx_packets = 0;
1832
1833        if (!e1000_clean_tx_irq(tx_ring))
1834                /* Ring was not completely cleaned, so fire another interrupt */
1835                ew32(ICS, tx_ring->ims_val);
1836
1837        return IRQ_HANDLED;
1838}
1839
1840static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1841{
1842        struct net_device *netdev = data;
1843        struct e1000_adapter *adapter = netdev_priv(netdev);
1844        struct e1000_ring *rx_ring = adapter->rx_ring;
1845
1846        /* Write the ITR value calculated at the end of the
1847         * previous interrupt.
1848         */
1849        if (rx_ring->set_itr) {
1850                writel(1000000000 / (rx_ring->itr_val * 256),
1851                       rx_ring->itr_register);
1852                rx_ring->set_itr = 0;
1853        }
1854
1855        if (napi_schedule_prep(&adapter->napi)) {
1856                adapter->total_rx_bytes = 0;
1857                adapter->total_rx_packets = 0;
1858                __napi_schedule(&adapter->napi);
1859        }
1860        return IRQ_HANDLED;
1861}
1862
1863/**
1864 * e1000_configure_msix - Configure MSI-X hardware
1865 *
1866 * e1000_configure_msix sets up the hardware to properly
1867 * generate MSI-X interrupts.
1868 **/
1869static void e1000_configure_msix(struct e1000_adapter *adapter)
1870{
1871        struct e1000_hw *hw = &adapter->hw;
1872        struct e1000_ring *rx_ring = adapter->rx_ring;
1873        struct e1000_ring *tx_ring = adapter->tx_ring;
1874        int vector = 0;
1875        u32 ctrl_ext, ivar = 0;
1876
1877        adapter->eiac_mask = 0;
1878
1879        /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1880        if (hw->mac.type == e1000_82574) {
1881                u32 rfctl = er32(RFCTL);
1882                rfctl |= E1000_RFCTL_ACK_DIS;
1883                ew32(RFCTL, rfctl);
1884        }
1885
1886#define E1000_IVAR_INT_ALLOC_VALID      0x8
1887        /* Configure Rx vector */
1888        rx_ring->ims_val = E1000_IMS_RXQ0;
1889        adapter->eiac_mask |= rx_ring->ims_val;
1890        if (rx_ring->itr_val)
1891                writel(1000000000 / (rx_ring->itr_val * 256),
1892                       rx_ring->itr_register);
1893        else
1894                writel(1, rx_ring->itr_register);
1895        ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1896
1897        /* Configure Tx vector */
1898        tx_ring->ims_val = E1000_IMS_TXQ0;
1899        vector++;
1900        if (tx_ring->itr_val)
1901                writel(1000000000 / (tx_ring->itr_val * 256),
1902                       tx_ring->itr_register);
1903        else
1904                writel(1, tx_ring->itr_register);
1905        adapter->eiac_mask |= tx_ring->ims_val;
1906        ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1907
1908        /* set vector for Other Causes, e.g. link changes */
1909        vector++;
1910        ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1911        if (rx_ring->itr_val)
1912                writel(1000000000 / (rx_ring->itr_val * 256),
1913                       hw->hw_addr + E1000_EITR_82574(vector));
1914        else
1915                writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1916
1917        /* Cause Tx interrupts on every write back */
1918        ivar |= (1 << 31);
1919
1920        ew32(IVAR, ivar);
1921
1922        /* enable MSI-X PBA support */
1923        ctrl_ext = er32(CTRL_EXT);
1924        ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
1925
1926        /* Auto-Mask Other interrupts upon ICR read */
1927#define E1000_EIAC_MASK_82574   0x01F00000
1928        ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
1929        ctrl_ext |= E1000_CTRL_EXT_EIAME;
1930        ew32(CTRL_EXT, ctrl_ext);
1931        e1e_flush();
1932}
1933
1934void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1935{
1936        if (adapter->msix_entries) {
1937                pci_disable_msix(adapter->pdev);
1938                kfree(adapter->msix_entries);
1939                adapter->msix_entries = NULL;
1940        } else if (adapter->flags & FLAG_MSI_ENABLED) {
1941                pci_disable_msi(adapter->pdev);
1942                adapter->flags &= ~FLAG_MSI_ENABLED;
1943        }
1944}
1945
1946/**
1947 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
1948 *
1949 * Attempt to configure interrupts using the best available
1950 * capabilities of the hardware and kernel.
1951 **/
1952void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1953{
1954        int err;
1955        int i;
1956
1957        switch (adapter->int_mode) {
1958        case E1000E_INT_MODE_MSIX:
1959                if (adapter->flags & FLAG_HAS_MSIX) {
1960                        adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
1961                        adapter->msix_entries = kcalloc(adapter->num_vectors,
1962                                                      sizeof(struct msix_entry),
1963                                                      GFP_KERNEL);
1964                        if (adapter->msix_entries) {
1965                                for (i = 0; i < adapter->num_vectors; i++)
1966                                        adapter->msix_entries[i].entry = i;
1967
1968                                err = pci_enable_msix(adapter->pdev,
1969                                                      adapter->msix_entries,
1970                                                      adapter->num_vectors);
1971                                if (err == 0)
1972                                        return;
1973                        }
1974                        /* MSI-X failed, so fall through and try MSI */
1975                        e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
1976                        e1000e_reset_interrupt_capability(adapter);
1977                }
1978                adapter->int_mode = E1000E_INT_MODE_MSI;
1979                /* Fall through */
1980        case E1000E_INT_MODE_MSI:
1981                if (!pci_enable_msi(adapter->pdev)) {
1982                        adapter->flags |= FLAG_MSI_ENABLED;
1983                } else {
1984                        adapter->int_mode = E1000E_INT_MODE_LEGACY;
1985                        e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
1986                }
1987                /* Fall through */
1988        case E1000E_INT_MODE_LEGACY:
1989                /* Don't do anything; this is the system default */
1990                break;
1991        }
1992
1993        /* store the number of vectors being used */
1994        adapter->num_vectors = 1;
1995}
1996
1997/**
1998 * e1000_request_msix - Initialize MSI-X interrupts
1999 *
2000 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2001 * kernel.
2002 **/
2003static int e1000_request_msix(struct e1000_adapter *adapter)
2004{
2005        struct net_device *netdev = adapter->netdev;
2006        int err = 0, vector = 0;
2007
2008        if (strlen(netdev->name) < (IFNAMSIZ - 5))
2009                snprintf(adapter->rx_ring->name,
2010                         sizeof(adapter->rx_ring->name) - 1,
2011                         "%s-rx-0", netdev->name);
2012        else
2013                memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
2014        err = request_irq(adapter->msix_entries[vector].vector,
2015                          e1000_intr_msix_rx, 0, adapter->rx_ring->name,
2016                          netdev);
2017        if (err)
2018                return err;
2019        adapter->rx_ring->itr_register = adapter->hw.hw_addr +
2020            E1000_EITR_82574(vector);
2021        adapter->rx_ring->itr_val = adapter->itr;
2022        vector++;
2023
2024        if (strlen(netdev->name) < (IFNAMSIZ - 5))
2025                snprintf(adapter->tx_ring->name,
2026                         sizeof(adapter->tx_ring->name) - 1,
2027                         "%s-tx-0", netdev->name);
2028        else
2029                memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2030        err = request_irq(adapter->msix_entries[vector].vector,
2031                          e1000_intr_msix_tx, 0, adapter->tx_ring->name,
2032                          netdev);
2033        if (err)
2034                return err;
2035        adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2036            E1000_EITR_82574(vector);
2037        adapter->tx_ring->itr_val = adapter->itr;
2038        vector++;
2039
2040        err = request_irq(adapter->msix_entries[vector].vector,
2041                          e1000_msix_other, 0, netdev->name, netdev);
2042        if (err)
2043                return err;
2044
2045        e1000_configure_msix(adapter);
2046
2047        return 0;
2048}
2049
2050/**
2051 * e1000_request_irq - initialize interrupts
2052 *
2053 * Attempts to configure interrupts using the best available
2054 * capabilities of the hardware and kernel.
2055 **/
2056static int e1000_request_irq(struct e1000_adapter *adapter)
2057{
2058        struct net_device *netdev = adapter->netdev;
2059        int err;
2060
2061        if (adapter->msix_entries) {
2062                err = e1000_request_msix(adapter);
2063                if (!err)
2064                        return err;
2065                /* fall back to MSI */
2066                e1000e_reset_interrupt_capability(adapter);
2067                adapter->int_mode = E1000E_INT_MODE_MSI;
2068                e1000e_set_interrupt_capability(adapter);
2069        }
2070        if (adapter->flags & FLAG_MSI_ENABLED) {
2071                err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
2072                                  netdev->name, netdev);
2073                if (!err)
2074                        return err;
2075
2076                /* fall back to legacy interrupt */
2077                e1000e_reset_interrupt_capability(adapter);
2078                adapter->int_mode = E1000E_INT_MODE_LEGACY;
2079        }
2080
2081        err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
2082                          netdev->name, netdev);
2083        if (err)
2084                e_err("Unable to allocate interrupt, Error: %d\n", err);
2085
2086        return err;
2087}
2088
2089static void e1000_free_irq(struct e1000_adapter *adapter)
2090{
2091        struct net_device *netdev = adapter->netdev;
2092
2093        if (adapter->msix_entries) {
2094                int vector = 0;
2095
2096                free_irq(adapter->msix_entries[vector].vector, netdev);
2097                vector++;
2098
2099                free_irq(adapter->msix_entries[vector].vector, netdev);
2100                vector++;
2101
2102                /* Other Causes interrupt vector */
2103                free_irq(adapter->msix_entries[vector].vector, netdev);
2104                return;
2105        }
2106
2107        free_irq(adapter->pdev->irq, netdev);
2108}
2109
2110/**
2111 * e1000_irq_disable - Mask off interrupt generation on the NIC
2112 **/
2113static void e1000_irq_disable(struct e1000_adapter *adapter)
2114{
2115        struct e1000_hw *hw = &adapter->hw;
2116
2117        ew32(IMC, ~0);
2118        if (adapter->msix_entries)
2119                ew32(EIAC_82574, 0);
2120        e1e_flush();
2121
2122        if (adapter->msix_entries) {
2123                int i;
2124                for (i = 0; i < adapter->num_vectors; i++)
2125                        synchronize_irq(adapter->msix_entries[i].vector);
2126        } else {
2127                synchronize_irq(adapter->pdev->irq);
2128        }
2129}
2130
2131/**
2132 * e1000_irq_enable - Enable default interrupt generation settings
2133 **/
2134static void e1000_irq_enable(struct e1000_adapter *adapter)
2135{
2136        struct e1000_hw *hw = &adapter->hw;
2137
2138        if (adapter->msix_entries) {
2139                ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2140                ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
2141        } else if (hw->mac.type == e1000_pch_lpt) {
2142                ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
2143        } else {
2144                ew32(IMS, IMS_ENABLE_MASK);
2145        }
2146        e1e_flush();
2147}
2148
2149/**
2150 * e1000e_get_hw_control - get control of the h/w from f/w
2151 * @adapter: address of board private structure
2152 *
2153 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2154 * For ASF and Pass Through versions of f/w this means that
2155 * the driver is loaded. For AMT version (only with 82573)
2156 * of the f/w this means that the network i/f is open.
2157 **/
2158void e1000e_get_hw_control(struct e1000_adapter *adapter)
2159{
2160        struct e1000_hw *hw = &adapter->hw;
2161        u32 ctrl_ext;
2162        u32 swsm;
2163
2164        /* Let firmware know the driver has taken over */
2165        if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2166                swsm = er32(SWSM);
2167                ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2168        } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2169                ctrl_ext = er32(CTRL_EXT);
2170                ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2171        }
2172}
2173
2174/**
2175 * e1000e_release_hw_control - release control of the h/w to f/w
2176 * @adapter: address of board private structure
2177 *
2178 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2179 * For ASF and Pass Through versions of f/w this means that the
2180 * driver is no longer loaded. For AMT version (only with 82573) i
2181 * of the f/w this means that the network i/f is closed.
2182 *
2183 **/
2184void e1000e_release_hw_control(struct e1000_adapter *adapter)
2185{
2186        struct e1000_hw *hw = &adapter->hw;
2187        u32 ctrl_ext;
2188        u32 swsm;
2189
2190        /* Let firmware taken over control of h/w */
2191        if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2192                swsm = er32(SWSM);
2193                ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2194        } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2195                ctrl_ext = er32(CTRL_EXT);
2196                ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2197        }
2198}
2199
2200/**
2201 * e1000_alloc_ring_dma - allocate memory for a ring structure
2202 **/
2203static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2204                                struct e1000_ring *ring)
2205{
2206        struct pci_dev *pdev = adapter->pdev;
2207
2208        ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2209                                        GFP_KERNEL);
2210        if (!ring->desc)
2211                return -ENOMEM;
2212
2213        return 0;
2214}
2215
2216/**
2217 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2218 * @tx_ring: Tx descriptor ring
2219 *
2220 * Return 0 on success, negative on failure
2221 **/
2222int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2223{
2224        struct e1000_adapter *adapter = tx_ring->adapter;
2225        int err = -ENOMEM, size;
2226
2227        size = sizeof(struct e1000_buffer) * tx_ring->count;
2228        tx_ring->buffer_info = vzalloc(size);
2229        if (!tx_ring->buffer_info)
2230                goto err;
2231
2232        /* round up to nearest 4K */
2233        tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2234        tx_ring->size = ALIGN(tx_ring->size, 4096);
2235
2236        err = e1000_alloc_ring_dma(adapter, tx_ring);
2237        if (err)
2238                goto err;
2239
2240        tx_ring->next_to_use = 0;
2241        tx_ring->next_to_clean = 0;
2242
2243        return 0;
2244err:
2245        vfree(tx_ring->buffer_info);
2246        e_err("Unable to allocate memory for the transmit descriptor ring\n");
2247        return err;
2248}
2249
2250/**
2251 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2252 * @rx_ring: Rx descriptor ring
2253 *
2254 * Returns 0 on success, negative on failure
2255 **/
2256int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2257{
2258        struct e1000_adapter *adapter = rx_ring->adapter;
2259        struct e1000_buffer *buffer_info;
2260        int i, size, desc_len, err = -ENOMEM;
2261
2262        size = sizeof(struct e1000_buffer) * rx_ring->count;
2263        rx_ring->buffer_info = vzalloc(size);
2264        if (!rx_ring->buffer_info)
2265                goto err;
2266
2267        for (i = 0; i < rx_ring->count; i++) {
2268                buffer_info = &rx_ring->buffer_info[i];
2269                buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2270                                                sizeof(struct e1000_ps_page),
2271                                                GFP_KERNEL);
2272                if (!buffer_info->ps_pages)
2273                        goto err_pages;
2274        }
2275
2276        desc_len = sizeof(union e1000_rx_desc_packet_split);
2277
2278        /* Round up to nearest 4K */
2279        rx_ring->size = rx_ring->count * desc_len;
2280        rx_ring->size = ALIGN(rx_ring->size, 4096);
2281
2282        err = e1000_alloc_ring_dma(adapter, rx_ring);
2283        if (err)
2284                goto err_pages;
2285
2286        rx_ring->next_to_clean = 0;
2287        rx_ring->next_to_use = 0;
2288        rx_ring->rx_skb_top = NULL;
2289
2290        return 0;
2291
2292err_pages:
2293        for (i = 0; i < rx_ring->count; i++) {
2294                buffer_info = &rx_ring->buffer_info[i];
2295                kfree(buffer_info->ps_pages);
2296        }
2297err:
2298        vfree(rx_ring->buffer_info);
2299        e_err("Unable to allocate memory for the receive descriptor ring\n");
2300        return err;
2301}
2302
2303/**
2304 * e1000_clean_tx_ring - Free Tx Buffers
2305 * @tx_ring: Tx descriptor ring
2306 **/
2307static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2308{
2309        struct e1000_adapter *adapter = tx_ring->adapter;
2310        struct e1000_buffer *buffer_info;
2311        unsigned long size;
2312        unsigned int i;
2313
2314        for (i = 0; i < tx_ring->count; i++) {
2315                buffer_info = &tx_ring->buffer_info[i];
2316                e1000_put_txbuf(tx_ring, buffer_info);
2317        }
2318
2319        netdev_reset_queue(adapter->netdev);
2320        size = sizeof(struct e1000_buffer) * tx_ring->count;
2321        memset(tx_ring->buffer_info, 0, size);
2322
2323        memset(tx_ring->desc, 0, tx_ring->size);
2324
2325        tx_ring->next_to_use = 0;
2326        tx_ring->next_to_clean = 0;
2327
2328        writel(0, tx_ring->head);
2329        if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2330                e1000e_update_tdt_wa(tx_ring, 0);
2331        else
2332                writel(0, tx_ring->tail);
2333}
2334
2335/**
2336 * e1000e_free_tx_resources - Free Tx Resources per Queue
2337 * @tx_ring: Tx descriptor ring
2338 *
2339 * Free all transmit software resources
2340 **/
2341void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2342{
2343        struct e1000_adapter *adapter = tx_ring->adapter;
2344        struct pci_dev *pdev = adapter->pdev;
2345
2346        e1000_clean_tx_ring(tx_ring);
2347
2348        vfree(tx_ring->buffer_info);
2349        tx_ring->buffer_info = NULL;
2350
2351        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2352                          tx_ring->dma);
2353        tx_ring->desc = NULL;
2354}
2355
2356/**
2357 * e1000e_free_rx_resources - Free Rx Resources
2358 * @rx_ring: Rx descriptor ring
2359 *
2360 * Free all receive software resources
2361 **/
2362void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2363{
2364        struct e1000_adapter *adapter = rx_ring->adapter;
2365        struct pci_dev *pdev = adapter->pdev;
2366        int i;
2367
2368        e1000_clean_rx_ring(rx_ring);
2369
2370        for (i = 0; i < rx_ring->count; i++)
2371                kfree(rx_ring->buffer_info[i].ps_pages);
2372
2373        vfree(rx_ring->buffer_info);
2374        rx_ring->buffer_info = NULL;
2375
2376        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2377                          rx_ring->dma);
2378        rx_ring->desc = NULL;
2379}
2380
2381/**
2382 * e1000_update_itr - update the dynamic ITR value based on statistics
2383 * @adapter: pointer to adapter
2384 * @itr_setting: current adapter->itr
2385 * @packets: the number of packets during this measurement interval
2386 * @bytes: the number of bytes during this measurement interval
2387 *
2388 *      Stores a new ITR value based on packets and byte
2389 *      counts during the last interrupt.  The advantage of per interrupt
2390 *      computation is faster updates and more accurate ITR for the current
2391 *      traffic pattern.  Constants in this function were computed
2392 *      based on theoretical maximum wire speed and thresholds were set based
2393 *      on testing data as well as attempting to minimize response time
2394 *      while increasing bulk throughput.  This functionality is controlled
2395 *      by the InterruptThrottleRate module parameter.
2396 **/
2397static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2398                                     u16 itr_setting, int packets,
2399                                     int bytes)
2400{
2401        unsigned int retval = itr_setting;
2402
2403        if (packets == 0)
2404                return itr_setting;
2405
2406        switch (itr_setting) {
2407        case lowest_latency:
2408                /* handle TSO and jumbo frames */
2409                if (bytes/packets > 8000)
2410                        retval = bulk_latency;
2411                else if ((packets < 5) && (bytes > 512))
2412                        retval = low_latency;
2413                break;
2414        case low_latency:  /* 50 usec aka 20000 ints/s */
2415                if (bytes > 10000) {
2416                        /* this if handles the TSO accounting */
2417                        if (bytes/packets > 8000)
2418                                retval = bulk_latency;
2419                        else if ((packets < 10) || ((bytes/packets) > 1200))
2420                                retval = bulk_latency;
2421                        else if ((packets > 35))
2422                                retval = lowest_latency;
2423                } else if (bytes/packets > 2000) {
2424                        retval = bulk_latency;
2425                } else if (packets <= 2 && bytes < 512) {
2426                        retval = lowest_latency;
2427                }
2428                break;
2429        case bulk_latency: /* 250 usec aka 4000 ints/s */
2430                if (bytes > 25000) {
2431                        if (packets > 35)
2432                                retval = low_latency;
2433                } else if (bytes < 6000) {
2434                        retval = low_latency;
2435                }
2436                break;
2437        }
2438
2439        return retval;
2440}
2441
2442static void e1000_set_itr(struct e1000_adapter *adapter)
2443{
2444        struct e1000_hw *hw = &adapter->hw;
2445        u16 current_itr;
2446        u32 new_itr = adapter->itr;
2447
2448        /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2449        if (adapter->link_speed != SPEED_1000) {
2450                current_itr = 0;
2451                new_itr = 4000;
2452                goto set_itr_now;
2453        }
2454
2455        if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2456                new_itr = 0;
2457                goto set_itr_now;
2458        }
2459
2460        adapter->tx_itr = e1000_update_itr(adapter,
2461                                    adapter->tx_itr,
2462                                    adapter->total_tx_packets,
2463                                    adapter->total_tx_bytes);
2464        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2465        if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2466                adapter->tx_itr = low_latency;
2467
2468        adapter->rx_itr = e1000_update_itr(adapter,
2469                                    adapter->rx_itr,
2470                                    adapter->total_rx_packets,
2471                                    adapter->total_rx_bytes);
2472        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2473        if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2474                adapter->rx_itr = low_latency;
2475
2476        current_itr = max(adapter->rx_itr, adapter->tx_itr);
2477
2478        switch (current_itr) {
2479        /* counts and packets in update_itr are dependent on these numbers */
2480        case lowest_latency:
2481                new_itr = 70000;
2482                break;
2483        case low_latency:
2484                new_itr = 20000; /* aka hwitr = ~200 */
2485                break;
2486        case bulk_latency:
2487                new_itr = 4000;
2488                break;
2489        default:
2490                break;
2491        }
2492
2493set_itr_now:
2494        if (new_itr != adapter->itr) {
2495                /* this attempts to bias the interrupt rate towards Bulk
2496                 * by adding intermediate steps when interrupt rate is
2497                 * increasing
2498                 */
2499                new_itr = new_itr > adapter->itr ?
2500                             min(adapter->itr + (new_itr >> 2), new_itr) :
2501                             new_itr;
2502                adapter->itr = new_itr;
2503                adapter->rx_ring->itr_val = new_itr;
2504                if (adapter->msix_entries)
2505                        adapter->rx_ring->set_itr = 1;
2506                else
2507                        if (new_itr)
2508                                ew32(ITR, 1000000000 / (new_itr * 256));
2509                        else
2510                                ew32(ITR, 0);
2511        }
2512}
2513
2514/**
2515 * e1000e_write_itr - write the ITR value to the appropriate registers
2516 * @adapter: address of board private structure
2517 * @itr: new ITR value to program
2518 *
2519 * e1000e_write_itr determines if the adapter is in MSI-X mode
2520 * and, if so, writes the EITR registers with the ITR value.
2521 * Otherwise, it writes the ITR value into the ITR register.
2522 **/
2523void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2524{
2525        struct e1000_hw *hw = &adapter->hw;
2526        u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
2527
2528        if (adapter->msix_entries) {
2529                int vector;
2530
2531                for (vector = 0; vector < adapter->num_vectors; vector++)
2532                        writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2533        } else {
2534                ew32(ITR, new_itr);
2535        }
2536}
2537
2538/**
2539 * e1000_alloc_queues - Allocate memory for all rings
2540 * @adapter: board private structure to initialize
2541 **/
2542static int e1000_alloc_queues(struct e1000_adapter *adapter)
2543{
2544        int size = sizeof(struct e1000_ring);
2545
2546        adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2547        if (!adapter->tx_ring)
2548                goto err;
2549        adapter->tx_ring->count = adapter->tx_ring_count;
2550        adapter->tx_ring->adapter = adapter;
2551
2552        adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2553        if (!adapter->rx_ring)
2554                goto err;
2555        adapter->rx_ring->count = adapter->rx_ring_count;
2556        adapter->rx_ring->adapter = adapter;
2557
2558        return 0;
2559err:
2560        e_err("Unable to allocate memory for queues\n");
2561        kfree(adapter->rx_ring);
2562        kfree(adapter->tx_ring);
2563        return -ENOMEM;
2564}
2565
2566/**
2567 * e1000e_poll - NAPI Rx polling callback
2568 * @napi: struct associated with this polling callback
2569 * @weight: number of packets driver is allowed to process this poll
2570 **/
2571static int e1000e_poll(struct napi_struct *napi, int weight)
2572{
2573        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2574                                                     napi);
2575        struct e1000_hw *hw = &adapter->hw;
2576        struct net_device *poll_dev = adapter->netdev;
2577        int tx_cleaned = 1, work_done = 0;
2578
2579        adapter = netdev_priv(poll_dev);
2580
2581        if (!adapter->msix_entries ||
2582            (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2583                tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2584
2585        adapter->clean_rx(adapter->rx_ring, &work_done, weight);
2586
2587        if (!tx_cleaned)
2588                work_done = weight;
2589
2590        /* If weight not fully consumed, exit the polling mode */
2591        if (work_done < weight) {
2592                if (adapter->itr_setting & 3)
2593                        e1000_set_itr(adapter);
2594                napi_complete(napi);
2595                if (!test_bit(__E1000_DOWN, &adapter->state)) {
2596                        if (adapter->msix_entries)
2597                                ew32(IMS, adapter->rx_ring->ims_val);
2598                        else
2599                                e1000_irq_enable(adapter);
2600                }
2601        }
2602
2603        return work_done;
2604}
2605
2606static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2607{
2608        struct e1000_adapter *adapter = netdev_priv(netdev);
2609        struct e1000_hw *hw = &adapter->hw;
2610        u32 vfta, index;
2611
2612        /* don't update vlan cookie if already programmed */
2613        if ((adapter->hw.mng_cookie.status &
2614             E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2615            (vid == adapter->mng_vlan_id))
2616                return 0;
2617
2618        /* add VID to filter table */
2619        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2620                index = (vid >> 5) & 0x7F;
2621                vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2622                vfta |= (1 << (vid & 0x1F));
2623                hw->mac.ops.write_vfta(hw, index, vfta);
2624        }
2625
2626        set_bit(vid, adapter->active_vlans);
2627
2628        return 0;
2629}
2630
2631static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2632{
2633        struct e1000_adapter *adapter = netdev_priv(netdev);
2634        struct e1000_hw *hw = &adapter->hw;
2635        u32 vfta, index;
2636
2637        if ((adapter->hw.mng_cookie.status &
2638             E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2639            (vid == adapter->mng_vlan_id)) {
2640                /* release control to f/w */
2641                e1000e_release_hw_control(adapter);
2642                return 0;
2643        }
2644
2645        /* remove VID from filter table */
2646        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2647                index = (vid >> 5) & 0x7F;
2648                vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2649                vfta &= ~(1 << (vid & 0x1F));
2650                hw->mac.ops.write_vfta(hw, index, vfta);
2651        }
2652
2653        clear_bit(vid, adapter->active_vlans);
2654
2655        return 0;
2656}
2657
2658/**
2659 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2660 * @adapter: board private structure to initialize
2661 **/
2662static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2663{
2664        struct net_device *netdev = adapter->netdev;
2665        struct e1000_hw *hw = &adapter->hw;
2666        u32 rctl;
2667
2668        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2669                /* disable VLAN receive filtering */
2670                rctl = er32(RCTL);
2671                rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2672                ew32(RCTL, rctl);
2673
2674                if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2675                        e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2676                        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2677                }
2678        }
2679}
2680
2681/**
2682 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2683 * @adapter: board private structure to initialize
2684 **/
2685static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2686{
2687        struct e1000_hw *hw = &adapter->hw;
2688        u32 rctl;
2689
2690        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2691                /* enable VLAN receive filtering */
2692                rctl = er32(RCTL);
2693                rctl |= E1000_RCTL_VFE;
2694                rctl &= ~E1000_RCTL_CFIEN;
2695                ew32(RCTL, rctl);
2696        }
2697}
2698
2699/**
2700 * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
2701 * @adapter: board private structure to initialize
2702 **/
2703static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2704{
2705        struct e1000_hw *hw = &adapter->hw;
2706        u32 ctrl;
2707
2708        /* disable VLAN tag insert/strip */
2709        ctrl = er32(CTRL);
2710        ctrl &= ~E1000_CTRL_VME;
2711        ew32(CTRL, ctrl);
2712}
2713
2714/**
2715 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2716 * @adapter: board private structure to initialize
2717 **/
2718static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2719{
2720        struct e1000_hw *hw = &adapter->hw;
2721        u32 ctrl;
2722
2723        /* enable VLAN tag insert/strip */
2724        ctrl = er32(CTRL);
2725        ctrl |= E1000_CTRL_VME;
2726        ew32(CTRL, ctrl);
2727}
2728
2729static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2730{
2731        struct net_device *netdev = adapter->netdev;
2732        u16 vid = adapter->hw.mng_cookie.vlan_id;
2733        u16 old_vid = adapter->mng_vlan_id;
2734
2735        if (adapter->hw.mng_cookie.status &
2736            E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2737                e1000_vlan_rx_add_vid(netdev, vid);
2738                adapter->mng_vlan_id = vid;
2739        }
2740
2741        if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2742                e1000_vlan_rx_kill_vid(netdev, old_vid);
2743}
2744
2745static void e1000_restore_vlan(struct e1000_adapter *adapter)
2746{
2747        u16 vid;
2748
2749        e1000_vlan_rx_add_vid(adapter->netdev, 0);
2750
2751        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2752                e1000_vlan_rx_add_vid(adapter->netdev, vid);
2753}
2754
2755static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2756{
2757        struct e1000_hw *hw = &adapter->hw;
2758        u32 manc, manc2h, mdef, i, j;
2759
2760        if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2761                return;
2762
2763        manc = er32(MANC);
2764
2765        /* enable receiving management packets to the host. this will probably
2766         * generate destination unreachable messages from the host OS, but
2767         * the packets will be handled on SMBUS
2768         */
2769        manc |= E1000_MANC_EN_MNG2HOST;
2770        manc2h = er32(MANC2H);
2771
2772        switch (hw->mac.type) {
2773        default:
2774                manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2775                break;
2776        case e1000_82574:
2777        case e1000_82583:
2778                /* Check if IPMI pass-through decision filter already exists;
2779                 * if so, enable it.
2780                 */
2781                for (i = 0, j = 0; i < 8; i++) {
2782                        mdef = er32(MDEF(i));
2783
2784                        /* Ignore filters with anything other than IPMI ports */
2785                        if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2786                                continue;
2787
2788                        /* Enable this decision filter in MANC2H */
2789                        if (mdef)
2790                                manc2h |= (1 << i);
2791
2792                        j |= mdef;
2793                }
2794
2795                if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2796                        break;
2797
2798                /* Create new decision filter in an empty filter */
2799                for (i = 0, j = 0; i < 8; i++)
2800                        if (er32(MDEF(i)) == 0) {
2801                                ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2802                                               E1000_MDEF_PORT_664));
2803                                manc2h |= (1 << 1);
2804                                j++;
2805                                break;
2806                        }
2807
2808                if (!j)
2809                        e_warn("Unable to create IPMI pass-through filter\n");
2810                break;
2811        }
2812
2813        ew32(MANC2H, manc2h);
2814        ew32(MANC, manc);
2815}
2816
2817/**
2818 * e1000_configure_tx - Configure Transmit Unit after Reset
2819 * @adapter: board private structure
2820 *
2821 * Configure the Tx unit of the MAC after a reset.
2822 **/
2823static void e1000_configure_tx(struct e1000_adapter *adapter)
2824{
2825        struct e1000_hw *hw = &adapter->hw;
2826        struct e1000_ring *tx_ring = adapter->tx_ring;
2827        u64 tdba;
2828        u32 tdlen, tarc;
2829
2830        /* Setup the HW Tx Head and Tail descriptor pointers */
2831        tdba = tx_ring->dma;
2832        tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2833        ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2834        ew32(TDBAH(0), (tdba >> 32));
2835        ew32(TDLEN(0), tdlen);
2836        ew32(TDH(0), 0);
2837        ew32(TDT(0), 0);
2838        tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2839        tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2840
2841        /* Set the Tx Interrupt Delay register */
2842        ew32(TIDV, adapter->tx_int_delay);
2843        /* Tx irq moderation */
2844        ew32(TADV, adapter->tx_abs_int_delay);
2845
2846        if (adapter->flags2 & FLAG2_DMA_BURST) {
2847                u32 txdctl = er32(TXDCTL(0));
2848                txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2849                            E1000_TXDCTL_WTHRESH);
2850                /* set up some performance related parameters to encourage the
2851                 * hardware to use the bus more efficiently in bursts, depends
2852                 * on the tx_int_delay to be enabled,
2853                 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
2854                 * hthresh = 1 ==> prefetch when one or more available
2855                 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2856                 * BEWARE: this seems to work but should be considered first if
2857                 * there are Tx hangs or other Tx related bugs
2858                 */
2859                txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2860                ew32(TXDCTL(0), txdctl);
2861        }
2862        /* erratum work around: set txdctl the same for both queues */
2863        ew32(TXDCTL(1), er32(TXDCTL(0)));
2864
2865        if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2866                tarc = er32(TARC(0));
2867                /* set the speed mode bit, we'll clear it if we're not at
2868                 * gigabit link later
2869                 */
2870#define SPEED_MODE_BIT (1 << 21)
2871                tarc |= SPEED_MODE_BIT;
2872                ew32(TARC(0), tarc);
2873        }
2874
2875        /* errata: program both queues to unweighted RR */
2876        if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2877                tarc = er32(TARC(0));
2878                tarc |= 1;
2879                ew32(TARC(0), tarc);
2880                tarc = er32(TARC(1));
2881                tarc |= 1;
2882                ew32(TARC(1), tarc);
2883        }
2884
2885        /* Setup Transmit Descriptor Settings for eop descriptor */
2886        adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2887
2888        /* only set IDE if we are delaying interrupts using the timers */
2889        if (adapter->tx_int_delay)
2890                adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2891
2892        /* enable Report Status bit */
2893        adapter->txd_cmd |= E1000_TXD_CMD_RS;
2894
2895        hw->mac.ops.config_collision_dist(hw);
2896}
2897
2898/**
2899 * e1000_setup_rctl - configure the receive control registers
2900 * @adapter: Board private structure
2901 **/
2902#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2903                           (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2904static void e1000_setup_rctl(struct e1000_adapter *adapter)
2905{
2906        struct e1000_hw *hw = &adapter->hw;
2907        u32 rctl, rfctl;
2908        u32 pages = 0;
2909
2910        /* Workaround Si errata on PCHx - configure jumbo frame flow */
2911        if (hw->mac.type >= e1000_pch2lan) {
2912                s32 ret_val;
2913
2914                if (adapter->netdev->mtu > ETH_DATA_LEN)
2915                        ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2916                else
2917                        ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2918
2919                if (ret_val)
2920                        e_dbg("failed to enable jumbo frame workaround mode\n");
2921        }
2922
2923        /* Program MC offset vector base */
2924        rctl = er32(RCTL);
2925        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2926        rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2927                E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2928                (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2929
2930        /* Do not Store bad packets */
2931        rctl &= ~E1000_RCTL_SBP;
2932
2933        /* Enable Long Packet receive */
2934        if (adapter->netdev->mtu <= ETH_DATA_LEN)
2935                rctl &= ~E1000_RCTL_LPE;
2936        else
2937                rctl |= E1000_RCTL_LPE;
2938
2939        /* Some systems expect that the CRC is included in SMBUS traffic. The
2940         * hardware strips the CRC before sending to both SMBUS (BMC) and to
2941         * host memory when this is enabled
2942         */
2943        if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2944                rctl |= E1000_RCTL_SECRC;
2945
2946        /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2947        if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2948                u16 phy_data;
2949
2950                e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2951                phy_data &= 0xfff8;
2952                phy_data |= (1 << 2);
2953                e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2954
2955                e1e_rphy(hw, 22, &phy_data);
2956                phy_data &= 0x0fff;
2957                phy_data |= (1 << 14);
2958                e1e_wphy(hw, 0x10, 0x2823);
2959                e1e_wphy(hw, 0x11, 0x0003);
2960                e1e_wphy(hw, 22, phy_data);
2961        }
2962
2963        /* Setup buffer sizes */
2964        rctl &= ~E1000_RCTL_SZ_4096;
2965        rctl |= E1000_RCTL_BSEX;
2966        switch (adapter->rx_buffer_len) {
2967        case 2048:
2968        default:
2969                rctl |= E1000_RCTL_SZ_2048;
2970                rctl &= ~E1000_RCTL_BSEX;
2971                break;
2972        case 4096:
2973                rctl |= E1000_RCTL_SZ_4096;
2974                break;
2975        case 8192:
2976                rctl |= E1000_RCTL_SZ_8192;
2977                break;
2978        case 16384:
2979                rctl |= E1000_RCTL_SZ_16384;
2980                break;
2981        }
2982
2983        /* Enable Extended Status in all Receive Descriptors */
2984        rfctl = er32(RFCTL);
2985        rfctl |= E1000_RFCTL_EXTEN;
2986        ew32(RFCTL, rfctl);
2987
2988        /* 82571 and greater support packet-split where the protocol
2989         * header is placed in skb->data and the packet data is
2990         * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2991         * In the case of a non-split, skb->data is linearly filled,
2992         * followed by the page buffers.  Therefore, skb->data is
2993         * sized to hold the largest protocol header.
2994         *
2995         * allocations using alloc_page take too long for regular MTU
2996         * so only enable packet split for jumbo frames
2997         *
2998         * Using pages when the page size is greater than 16k wastes
2999         * a lot of memory, since we allocate 3 pages at all times
3000         * per packet.
3001         */
3002        pages = PAGE_USE_COUNT(adapter->netdev->mtu);
3003        if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
3004                adapter->rx_ps_pages = pages;
3005        else
3006                adapter->rx_ps_pages = 0;
3007
3008        if (adapter->rx_ps_pages) {
3009                u32 psrctl = 0;
3010
3011                /* Enable Packet split descriptors */
3012                rctl |= E1000_RCTL_DTYP_PS;
3013
3014                psrctl |= adapter->rx_ps_bsize0 >>
3015                        E1000_PSRCTL_BSIZE0_SHIFT;
3016
3017                switch (adapter->rx_ps_pages) {
3018                case 3:
3019                        psrctl |= PAGE_SIZE <<
3020                                E1000_PSRCTL_BSIZE3_SHIFT;
3021                case 2:
3022                        psrctl |= PAGE_SIZE <<
3023                                E1000_PSRCTL_BSIZE2_SHIFT;
3024                case 1:
3025                        psrctl |= PAGE_SIZE >>
3026                                E1000_PSRCTL_BSIZE1_SHIFT;
3027                        break;
3028                }
3029
3030                ew32(PSRCTL, psrctl);
3031        }
3032
3033        /* This is useful for sniffing bad packets. */
3034        if (adapter->netdev->features & NETIF_F_RXALL) {
3035                /* UPE and MPE will be handled by normal PROMISC logic
3036                 * in e1000e_set_rx_mode
3037                 */
3038                rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3039                         E1000_RCTL_BAM | /* RX All Bcast Pkts */
3040                         E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3041
3042                rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3043                          E1000_RCTL_DPF | /* Allow filtered pause */
3044                          E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3045                /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3046                 * and that breaks VLANs.
3047                 */
3048        }
3049
3050        ew32(RCTL, rctl);
3051        /* just started the receive unit, no need to restart */
3052        adapter->flags &= ~FLAG_RX_RESTART_NOW;
3053}
3054
3055/**
3056 * e1000_configure_rx - Configure Receive Unit after Reset
3057 * @adapter: board private structure
3058 *
3059 * Configure the Rx unit of the MAC after a reset.
3060 **/
3061static void e1000_configure_rx(struct e1000_adapter *adapter)
3062{
3063        struct e1000_hw *hw = &adapter->hw;
3064        struct e1000_ring *rx_ring = adapter->rx_ring;
3065        u64 rdba;
3066        u32 rdlen, rctl, rxcsum, ctrl_ext;
3067
3068        if (adapter->rx_ps_pages) {
3069                /* this is a 32 byte descriptor */
3070                rdlen = rx_ring->count *
3071                    sizeof(union e1000_rx_desc_packet_split);
3072                adapter->clean_rx = e1000_clean_rx_irq_ps;
3073                adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3074        } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3075                rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3076                adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3077                adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3078        } else {
3079                rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3080                adapter->clean_rx = e1000_clean_rx_irq;
3081                adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3082        }
3083
3084        /* disable receives while setting up the descriptors */
3085        rctl = er32(RCTL);
3086        if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3087                ew32(RCTL, rctl & ~E1000_RCTL_EN);
3088        e1e_flush();
3089        usleep_range(10000, 20000);
3090
3091        if (adapter->flags2 & FLAG2_DMA_BURST) {
3092                /* set the writeback threshold (only takes effect if the RDTR
3093                 * is set). set GRAN=1 and write back up to 0x4 worth, and
3094                 * enable prefetching of 0x20 Rx descriptors
3095                 * granularity = 01
3096                 * wthresh = 04,
3097                 * hthresh = 04,
3098                 * pthresh = 0x20
3099                 */
3100                ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3101                ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3102
3103                /* override the delay timers for enabling bursting, only if
3104                 * the value was not set by the user via module options
3105                 */
3106                if (adapter->rx_int_delay == DEFAULT_RDTR)
3107                        adapter->rx_int_delay = BURST_RDTR;
3108                if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3109                        adapter->rx_abs_int_delay = BURST_RADV;
3110        }
3111
3112        /* set the Receive Delay Timer Register */
3113        ew32(RDTR, adapter->rx_int_delay);
3114
3115        /* irq moderation */
3116        ew32(RADV, adapter->rx_abs_int_delay);
3117        if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3118                e1000e_write_itr(adapter, adapter->itr);
3119
3120        ctrl_ext = er32(CTRL_EXT);
3121        /* Auto-Mask interrupts upon ICR access */
3122        ctrl_ext |= E1000_CTRL_EXT_IAME;
3123        ew32(IAM, 0xffffffff);
3124        ew32(CTRL_EXT, ctrl_ext);
3125        e1e_flush();
3126
3127        /* Setup the HW Rx Head and Tail Descriptor Pointers and
3128         * the Base and Length of the Rx Descriptor Ring
3129         */
3130        rdba = rx_ring->dma;
3131        ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3132        ew32(RDBAH(0), (rdba >> 32));
3133        ew32(RDLEN(0), rdlen);
3134        ew32(RDH(0), 0);
3135        ew32(RDT(0), 0);
3136        rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3137        rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3138
3139        /* Enable Receive Checksum Offload for TCP and UDP */
3140        rxcsum = er32(RXCSUM);
3141        if (adapter->netdev->features & NETIF_F_RXCSUM)
3142                rxcsum |= E1000_RXCSUM_TUOFL;
3143        else
3144                rxcsum &= ~E1000_RXCSUM_TUOFL;
3145        ew32(RXCSUM, rxcsum);
3146
3147        if (adapter->hw.mac.type == e1000_pch2lan) {
3148                /* With jumbo frames, excessive C-state transition
3149                 * latencies result in dropped transactions.
3150                 */
3151                if (adapter->netdev->mtu > ETH_DATA_LEN) {
3152                        u32 rxdctl = er32(RXDCTL(0));
3153                        ew32(RXDCTL(0), rxdctl | 0x3);
3154                        pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
3155                } else {
3156                        pm_qos_update_request(&adapter->netdev->pm_qos_req,
3157                                              PM_QOS_DEFAULT_VALUE);
3158                }
3159        }
3160
3161        /* Enable Receives */
3162        ew32(RCTL, rctl);
3163}
3164
3165/**
3166 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3167 * @netdev: network interface device structure
3168 *
3169 * Writes multicast address list to the MTA hash table.
3170 * Returns: -ENOMEM on failure
3171 *                0 on no addresses written
3172 *                X on writing X addresses to MTA
3173 */
3174static int e1000e_write_mc_addr_list(struct net_device *netdev)
3175{
3176        struct e1000_adapter *adapter = netdev_priv(netdev);
3177        struct e1000_hw *hw = &adapter->hw;
3178        struct netdev_hw_addr *ha;
3179        u8 *mta_list;
3180        int i;
3181
3182        if (netdev_mc_empty(netdev)) {
3183                /* nothing to program, so clear mc list */
3184                hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3185                return 0;
3186        }
3187
3188        mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3189        if (!mta_list)
3190                return -ENOMEM;
3191
3192        /* update_mc_addr_list expects a packed array of only addresses. */
3193        i = 0;
3194        netdev_for_each_mc_addr(ha, netdev)
3195                memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3196
3197        hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3198        kfree(mta_list);
3199
3200        return netdev_mc_count(netdev);
3201}
3202
3203/**
3204 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3205 * @netdev: network interface device structure
3206 *
3207 * Writes unicast address list to the RAR table.
3208 * Returns: -ENOMEM on failure/insufficient address space
3209 *                0 on no addresses written
3210 *                X on writing X addresses to the RAR table
3211 **/
3212static int e1000e_write_uc_addr_list(struct net_device *netdev)
3213{
3214        struct e1000_adapter *adapter = netdev_priv(netdev);
3215        struct e1000_hw *hw = &adapter->hw;
3216        unsigned int rar_entries = hw->mac.rar_entry_count;
3217        int count = 0;
3218
3219        /* save a rar entry for our hardware address */
3220        rar_entries--;
3221
3222        /* save a rar entry for the LAA workaround */
3223        if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3224                rar_entries--;
3225
3226        /* return ENOMEM indicating insufficient memory for addresses */
3227        if (netdev_uc_count(netdev) > rar_entries)
3228                return -ENOMEM;
3229
3230        if (!netdev_uc_empty(netdev) && rar_entries) {
3231                struct netdev_hw_addr *ha;
3232
3233                /* write the addresses in reverse order to avoid write
3234                 * combining
3235                 */
3236                netdev_for_each_uc_addr(ha, netdev) {
3237                        if (!rar_entries)
3238                                break;
3239                        hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3240                        count++;
3241                }
3242        }
3243
3244        /* zero out the remaining RAR entries not used above */
3245        for (; rar_entries > 0; rar_entries--) {
3246                ew32(RAH(rar_entries), 0);
3247                ew32(RAL(rar_entries), 0);
3248        }
3249        e1e_flush();
3250
3251        return count;
3252}
3253
3254/**
3255 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3256 * @netdev: network interface device structure
3257 *
3258 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3259 * address list or the network interface flags are updated.  This routine is
3260 * responsible for configuring the hardware for proper unicast, multicast,
3261 * promiscuous mode, and all-multi behavior.
3262 **/
3263static void e1000e_set_rx_mode(struct net_device *netdev)
3264{
3265        struct e1000_adapter *adapter = netdev_priv(netdev);
3266        struct e1000_hw *hw = &adapter->hw;
3267        u32 rctl;
3268
3269        /* Check for Promiscuous and All Multicast modes */
3270        rctl = er32(RCTL);
3271
3272        /* clear the affected bits */
3273        rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3274
3275        if (netdev->flags & IFF_PROMISC) {
3276                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3277                /* Do not hardware filter VLANs in promisc mode */
3278                e1000e_vlan_filter_disable(adapter);
3279        } else {
3280                int count;
3281
3282                if (netdev->flags & IFF_ALLMULTI) {
3283                        rctl |= E1000_RCTL_MPE;
3284                } else {
3285                        /* Write addresses to the MTA, if the attempt fails
3286                         * then we should just turn on promiscuous mode so
3287                         * that we can at least receive multicast traffic
3288                         */
3289                        count = e1000e_write_mc_addr_list(netdev);
3290                        if (count < 0)
3291                                rctl |= E1000_RCTL_MPE;
3292                }
3293                e1000e_vlan_filter_enable(adapter);
3294                /* Write addresses to available RAR registers, if there is not
3295                 * sufficient space to store all the addresses then enable
3296                 * unicast promiscuous mode
3297                 */
3298                count = e1000e_write_uc_addr_list(netdev);
3299                if (count < 0)
3300                        rctl |= E1000_RCTL_UPE;
3301        }
3302
3303        ew32(RCTL, rctl);
3304
3305        if (netdev->features & NETIF_F_HW_VLAN_RX)
3306                e1000e_vlan_strip_enable(adapter);
3307        else
3308                e1000e_vlan_strip_disable(adapter);
3309}
3310
3311static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3312{
3313        struct e1000_hw *hw = &adapter->hw;
3314        u32 mrqc, rxcsum;
3315        int i;
3316        static const u32 rsskey[10] = {
3317                0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
3318                0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
3319        };
3320
3321        /* Fill out hash function seed */
3322        for (i = 0; i < 10; i++)
3323                ew32(RSSRK(i), rsskey[i]);
3324
3325        /* Direct all traffic to queue 0 */
3326        for (i = 0; i < 32; i++)
3327                ew32(RETA(i), 0);
3328
3329        /* Disable raw packet checksumming so that RSS hash is placed in
3330         * descriptor on writeback.
3331         */
3332        rxcsum = er32(RXCSUM);
3333        rxcsum |= E1000_RXCSUM_PCSD;
3334
3335        ew32(RXCSUM, rxcsum);
3336
3337        mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3338                E1000_MRQC_RSS_FIELD_IPV4_TCP |
3339                E1000_MRQC_RSS_FIELD_IPV6 |
3340                E1000_MRQC_RSS_FIELD_IPV6_TCP |
3341                E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3342
3343        ew32(MRQC, mrqc);
3344}
3345
3346/**
3347 * e1000_configure - configure the hardware for Rx and Tx
3348 * @adapter: private board structure
3349 **/
3350static void e1000_configure(struct e1000_adapter *adapter)
3351{
3352        struct e1000_ring *rx_ring = adapter->rx_ring;
3353
3354        e1000e_set_rx_mode(adapter->netdev);
3355
3356        e1000_restore_vlan(adapter);
3357        e1000_init_manageability_pt(adapter);
3358
3359        e1000_configure_tx(adapter);
3360
3361        if (adapter->netdev->features & NETIF_F_RXHASH)
3362                e1000e_setup_rss_hash(adapter);
3363        e1000_setup_rctl(adapter);
3364        e1000_configure_rx(adapter);
3365        adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3366}
3367
3368/**
3369 * e1000e_power_up_phy - restore link in case the phy was powered down
3370 * @adapter: address of board private structure
3371 *
3372 * The phy may be powered down to save power and turn off link when the
3373 * driver is unloaded and wake on lan is not enabled (among others)
3374 * *** this routine MUST be followed by a call to e1000e_reset ***
3375 **/
3376void e1000e_power_up_phy(struct e1000_adapter *adapter)
3377{
3378        if (adapter->hw.phy.ops.power_up)
3379                adapter->hw.phy.ops.power_up(&adapter->hw);
3380
3381        adapter->hw.mac.ops.setup_link(&adapter->hw);
3382}
3383
3384/**
3385 * e1000_power_down_phy - Power down the PHY
3386 *
3387 * Power down the PHY so no link is implied when interface is down.
3388 * The PHY cannot be powered down if management or WoL is active.
3389 */
3390static void e1000_power_down_phy(struct e1000_adapter *adapter)
3391{
3392        /* WoL is enabled */
3393        if (adapter->wol)
3394                return;
3395
3396        if (adapter->hw.phy.ops.power_down)
3397                adapter->hw.phy.ops.power_down(&adapter->hw);
3398}
3399
3400/**
3401 * e1000e_reset - bring the hardware into a known good state
3402 *
3403 * This function boots the hardware and enables some settings that
3404 * require a configuration cycle of the hardware - those cannot be
3405 * set/changed during runtime. After reset the device needs to be
3406 * properly configured for Rx, Tx etc.
3407 */
3408void e1000e_reset(struct e1000_adapter *adapter)
3409{
3410        struct e1000_mac_info *mac = &adapter->hw.mac;
3411        struct e1000_fc_info *fc = &adapter->hw.fc;
3412        struct e1000_hw *hw = &adapter->hw;
3413        u32 tx_space, min_tx_space, min_rx_space;
3414        u32 pba = adapter->pba;
3415        u16 hwm;
3416
3417        /* reset Packet Buffer Allocation to default */
3418        ew32(PBA, pba);
3419
3420        if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3421                /* To maintain wire speed transmits, the Tx FIFO should be
3422                 * large enough to accommodate two full transmit packets,
3423                 * rounded up to the next 1KB and expressed in KB.  Likewise,
3424                 * the Rx FIFO should be large enough to accommodate at least
3425                 * one full receive packet and is similarly rounded up and
3426                 * expressed in KB.
3427                 */
3428                pba = er32(PBA);
3429                /* upper 16 bits has Tx packet buffer allocation size in KB */
3430                tx_space = pba >> 16;
3431                /* lower 16 bits has Rx packet buffer allocation size in KB */
3432                pba &= 0xffff;
3433                /* the Tx fifo also stores 16 bytes of information about the Tx
3434                 * but don't include ethernet FCS because hardware appends it
3435                 */
3436                min_tx_space = (adapter->max_frame_size +
3437                                sizeof(struct e1000_tx_desc) -
3438                                ETH_FCS_LEN) * 2;
3439                min_tx_space = ALIGN(min_tx_space, 1024);
3440                min_tx_space >>= 10;
3441                /* software strips receive CRC, so leave room for it */
3442                min_rx_space = adapter->max_frame_size;
3443                min_rx_space = ALIGN(min_rx_space, 1024);
3444                min_rx_space >>= 10;
3445
3446                /* If current Tx allocation is less than the min Tx FIFO size,
3447                 * and the min Tx FIFO size is less than the current Rx FIFO
3448                 * allocation, take space away from current Rx allocation
3449                 */
3450                if ((tx_space < min_tx_space) &&
3451                    ((min_tx_space - tx_space) < pba)) {
3452                        pba -= min_tx_space - tx_space;
3453
3454                        /* if short on Rx space, Rx wins and must trump Tx
3455                         * adjustment
3456                         */
3457                        if (pba < min_rx_space)
3458                                pba = min_rx_space;
3459                }
3460
3461                ew32(PBA, pba);
3462        }
3463
3464        /* flow control settings
3465         *
3466         * The high water mark must be low enough to fit one full frame
3467         * (or the size used for early receive) above it in the Rx FIFO.
3468         * Set it to the lower of:
3469         * - 90% of the Rx FIFO size, and
3470         * - the full Rx FIFO size minus one full frame
3471         */
3472        if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3473                fc->pause_time = 0xFFFF;
3474        else
3475                fc->pause_time = E1000_FC_PAUSE_TIME;
3476        fc->send_xon = true;
3477        fc->current_mode = fc->requested_mode;
3478
3479        switch (hw->mac.type) {
3480        case e1000_ich9lan:
3481        case e1000_ich10lan:
3482                if (adapter->netdev->mtu > ETH_DATA_LEN) {
3483                        pba = 14;
3484                        ew32(PBA, pba);
3485                        fc->high_water = 0x2800;
3486                        fc->low_water = fc->high_water - 8;
3487                        break;
3488                }
3489                /* fall-through */
3490        default:
3491                hwm = min(((pba << 10) * 9 / 10),
3492                          ((pba << 10) - adapter->max_frame_size));
3493
3494                fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3495                fc->low_water = fc->high_water - 8;
3496                break;
3497        case e1000_pchlan:
3498                /* Workaround PCH LOM adapter hangs with certain network
3499                 * loads.  If hangs persist, try disabling Tx flow control.
3500                 */
3501                if (adapter->netdev->mtu > ETH_DATA_LEN) {
3502                        fc->high_water = 0x3500;
3503                        fc->low_water  = 0x1500;
3504                } else {
3505                        fc->high_water = 0x5000;
3506                        fc->low_water  = 0x3000;
3507                }
3508                fc->refresh_time = 0x1000;
3509                break;
3510        case e1000_pch2lan:
3511        case e1000_pch_lpt:
3512                fc->high_water = 0x05C20;
3513                fc->low_water = 0x05048;
3514                fc->pause_time = 0x0650;
3515                fc->refresh_time = 0x0400;
3516                if (adapter->netdev->mtu > ETH_DATA_LEN) {
3517                        pba = 14;
3518                        ew32(PBA, pba);
3519                }
3520                break;
3521        }
3522
3523        /* Alignment of Tx data is on an arbitrary byte boundary with the
3524         * maximum size per Tx descriptor limited only to the transmit
3525         * allocation of the packet buffer minus 96 bytes with an upper
3526         * limit of 24KB due to receive synchronization limitations.
3527         */
3528        adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3529                                       24 << 10);
3530
3531        /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
3532         * fit in receive buffer.
3533         */
3534        if (adapter->itr_setting & 0x3) {
3535                if ((adapter->max_frame_size * 2) > (pba << 10)) {
3536                        if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3537                                dev_info(&adapter->pdev->dev,
3538                                        "Interrupt Throttle Rate turned off\n");
3539                                adapter->flags2 |= FLAG2_DISABLE_AIM;
3540                                e1000e_write_itr(adapter, 0);
3541                        }
3542                } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3543                        dev_info(&adapter->pdev->dev,
3544                                 "Interrupt Throttle Rate turned on\n");
3545                        adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3546                        adapter->itr = 20000;
3547                        e1000e_write_itr(adapter, adapter->itr);
3548                }
3549        }
3550
3551        /* Allow time for pending master requests to run */
3552        mac->ops.reset_hw(hw);
3553
3554        /* For parts with AMT enabled, let the firmware know
3555         * that the network interface is in control
3556         */
3557        if (adapter->flags & FLAG_HAS_AMT)
3558                e1000e_get_hw_control(adapter);
3559
3560        ew32(WUC, 0);
3561
3562        if (mac->ops.init_hw(hw))
3563                e_err("Hardware Error\n");
3564
3565        e1000_update_mng_vlan(adapter);
3566
3567        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3568        ew32(VET, ETH_P_8021Q);
3569
3570        e1000e_reset_adaptive(hw);
3571
3572        if (!netif_running(adapter->netdev) &&
3573            !test_bit(__E1000_TESTING, &adapter->state)) {
3574                e1000_power_down_phy(adapter);
3575                return;
3576        }
3577
3578        e1000_get_phy_info(hw);
3579
3580        if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3581            !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3582                u16 phy_data = 0;
3583                /* speed up time to link by disabling smart power down, ignore
3584                 * the return value of this function because there is nothing
3585                 * different we would do if it failed
3586                 */
3587                e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3588                phy_data &= ~IGP02E1000_PM_SPD;
3589                e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3590        }
3591}
3592
3593int e1000e_up(struct e1000_adapter *adapter)
3594{
3595        struct e1000_hw *hw = &adapter->hw;
3596
3597        /* hardware has been reset, we need to reload some things */
3598        e1000_configure(adapter);
3599
3600        clear_bit(__E1000_DOWN, &adapter->state);
3601
3602        if (adapter->msix_entries)
3603                e1000_configure_msix(adapter);
3604        e1000_irq_enable(adapter);
3605
3606        netif_start_queue(adapter->netdev);
3607
3608        /* fire a link change interrupt to start the watchdog */
3609        if (adapter->msix_entries)
3610                ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3611        else
3612                ew32(ICS, E1000_ICS_LSC);
3613
3614        return 0;
3615}
3616
3617static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3618{
3619        struct e1000_hw *hw = &adapter->hw;
3620
3621        if (!(adapter->flags2 & FLAG2_DMA_BURST))
3622                return;
3623
3624        /* flush pending descriptor writebacks to memory */
3625        ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3626        ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3627
3628        /* execute the writes immediately */
3629        e1e_flush();
3630
3631        /* due to rare timing issues, write to TIDV/RDTR again to ensure the
3632         * write is successful
3633         */
3634        ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3635        ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3636
3637        /* execute the writes immediately */
3638        e1e_flush();
3639}
3640
3641static void e1000e_update_stats(struct e1000_adapter *adapter);
3642
3643void e1000e_down(struct e1000_adapter *adapter)
3644{
3645        struct net_device *netdev = adapter->netdev;
3646        struct e1000_hw *hw = &adapter->hw;
3647        u32 tctl, rctl;
3648
3649        /* signal that we're down so the interrupt handler does not
3650         * reschedule our watchdog timer
3651         */
3652        set_bit(__E1000_DOWN, &adapter->state);
3653
3654        /* disable receives in the hardware */
3655        rctl = er32(RCTL);
3656        if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3657                ew32(RCTL, rctl & ~E1000_RCTL_EN);
3658        /* flush and sleep below */
3659
3660        netif_stop_queue(netdev);
3661
3662        /* disable transmits in the hardware */
3663        tctl = er32(TCTL);
3664        tctl &= ~E1000_TCTL_EN;
3665        ew32(TCTL, tctl);
3666
3667        /* flush both disables and wait for them to finish */
3668        e1e_flush();
3669        usleep_range(10000, 20000);
3670
3671        e1000_irq_disable(adapter);
3672
3673        del_timer_sync(&adapter->watchdog_timer);
3674        del_timer_sync(&adapter->phy_info_timer);
3675
3676        netif_carrier_off(netdev);
3677
3678        spin_lock(&adapter->stats64_lock);
3679        e1000e_update_stats(adapter);
3680        spin_unlock(&adapter->stats64_lock);
3681
3682        e1000e_flush_descriptors(adapter);
3683        e1000_clean_tx_ring(adapter->tx_ring);
3684        e1000_clean_rx_ring(adapter->rx_ring);
3685
3686        adapter->link_speed = 0;
3687        adapter->link_duplex = 0;
3688
3689        if (!pci_channel_offline(adapter->pdev))
3690                e1000e_reset(adapter);
3691
3692        /* TODO: for power management, we could drop the link and
3693         * pci_disable_device here.
3694         */
3695}
3696
3697void e1000e_reinit_locked(struct e1000_adapter *adapter)
3698{
3699        might_sleep();
3700        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3701                usleep_range(1000, 2000);
3702        e1000e_down(adapter);
3703        e1000e_up(adapter);
3704        clear_bit(__E1000_RESETTING, &adapter->state);
3705}
3706
3707/**
3708 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
3709 * @adapter: board private structure to initialize
3710 *
3711 * e1000_sw_init initializes the Adapter private data structure.
3712 * Fields are initialized based on PCI device information and
3713 * OS network device settings (MTU size).
3714 **/
3715static int e1000_sw_init(struct e1000_adapter *adapter)
3716{
3717        struct net_device *netdev = adapter->netdev;
3718
3719        adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
3720        adapter->rx_ps_bsize0 = 128;
3721        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3722        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3723        adapter->tx_ring_count = E1000_DEFAULT_TXD;
3724        adapter->rx_ring_count = E1000_DEFAULT_RXD;
3725
3726        spin_lock_init(&adapter->stats64_lock);
3727
3728        e1000e_set_interrupt_capability(adapter);
3729
3730        if (e1000_alloc_queues(adapter))
3731                return -ENOMEM;
3732
3733        /* Explicitly disable IRQ since the NIC can be in any state. */
3734        e1000_irq_disable(adapter);
3735
3736        set_bit(__E1000_DOWN, &adapter->state);
3737        return 0;
3738}
3739
3740/**
3741 * e1000_intr_msi_test - Interrupt Handler
3742 * @irq: interrupt number
3743 * @data: pointer to a network interface device structure
3744 **/
3745static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3746{
3747        struct net_device *netdev = data;
3748        struct e1000_adapter *adapter = netdev_priv(netdev);
3749        struct e1000_hw *hw = &adapter->hw;
3750        u32 icr = er32(ICR);
3751
3752        e_dbg("icr is %08X\n", icr);
3753        if (icr & E1000_ICR_RXSEQ) {
3754                adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3755                /* Force memory writes to complete before acknowledging the
3756                 * interrupt is handled.
3757                 */
3758                wmb();
3759        }
3760
3761        return IRQ_HANDLED;
3762}
3763
3764/**
3765 * e1000_test_msi_interrupt - Returns 0 for successful test
3766 * @adapter: board private struct
3767 *
3768 * code flow taken from tg3.c
3769 **/
3770static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3771{
3772        struct net_device *netdev = adapter->netdev;
3773        struct e1000_hw *hw = &adapter->hw;
3774        int err;
3775
3776        /* poll_enable hasn't been called yet, so don't need disable */
3777        /* clear any pending events */
3778        er32(ICR);
3779
3780        /* free the real vector and request a test handler */
3781        e1000_free_irq(adapter);
3782        e1000e_reset_interrupt_capability(adapter);
3783
3784        /* Assume that the test fails, if it succeeds then the test
3785         * MSI irq handler will unset this flag
3786         */
3787        adapter->flags |= FLAG_MSI_TEST_FAILED;
3788
3789        err = pci_enable_msi(adapter->pdev);
3790        if (err)
3791                goto msi_test_failed;
3792
3793        err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
3794                          netdev->name, netdev);
3795        if (err) {
3796                pci_disable_msi(adapter->pdev);
3797                goto msi_test_failed;
3798        }
3799
3800        /* Force memory writes to complete before enabling and firing an
3801         * interrupt.
3802         */
3803        wmb();
3804
3805        e1000_irq_enable(adapter);
3806
3807        /* fire an unusual interrupt on the test handler */
3808        ew32(ICS, E1000_ICS_RXSEQ);
3809        e1e_flush();
3810        msleep(100);
3811
3812        e1000_irq_disable(adapter);
3813
3814        rmb();                  /* read flags after interrupt has been fired */
3815
3816        if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3817                adapter->int_mode = E1000E_INT_MODE_LEGACY;
3818                e_info("MSI interrupt test failed, using legacy interrupt.\n");
3819        } else {
3820                e_dbg("MSI interrupt test succeeded!\n");
3821        }
3822
3823        free_irq(adapter->pdev->irq, netdev);
3824        pci_disable_msi(adapter->pdev);
3825
3826msi_test_failed:
3827        e1000e_set_interrupt_capability(adapter);
3828        return e1000_request_irq(adapter);
3829}
3830
3831/**
3832 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3833 * @adapter: board private struct
3834 *
3835 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3836 **/
3837static int e1000_test_msi(struct e1000_adapter *adapter)
3838{
3839        int err;
3840        u16 pci_cmd;
3841
3842        if (!(adapter->flags & FLAG_MSI_ENABLED))
3843                return 0;
3844
3845        /* disable SERR in case the MSI write causes a master abort */
3846        pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3847        if (pci_cmd & PCI_COMMAND_SERR)
3848                pci_write_config_word(adapter->pdev, PCI_COMMAND,
3849                                      pci_cmd & ~PCI_COMMAND_SERR);
3850
3851        err = e1000_test_msi_interrupt(adapter);
3852
3853        /* re-enable SERR */
3854        if (pci_cmd & PCI_COMMAND_SERR) {
3855                pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3856                pci_cmd |= PCI_COMMAND_SERR;
3857                pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3858        }
3859
3860        return err;
3861}
3862
3863/**
3864 * e1000_open - Called when a network interface is made active
3865 * @netdev: network interface device structure
3866 *
3867 * Returns 0 on success, negative value on failure
3868 *
3869 * The open entry point is called when a network interface is made
3870 * active by the system (IFF_UP).  At this point all resources needed
3871 * for transmit and receive operations are allocated, the interrupt
3872 * handler is registered with the OS, the watchdog timer is started,
3873 * and the stack is notified that the interface is ready.
3874 **/
3875static int e1000_open(struct net_device *netdev)
3876{
3877        struct e1000_adapter *adapter = netdev_priv(netdev);
3878        struct e1000_hw *hw = &adapter->hw;
3879        struct pci_dev *pdev = adapter->pdev;
3880        int err;
3881
3882        /* disallow open during test */
3883        if (test_bit(__E1000_TESTING, &adapter->state))
3884                return -EBUSY;
3885
3886        pm_runtime_get_sync(&pdev->dev);
3887
3888        netif_carrier_off(netdev);
3889
3890        /* allocate transmit descriptors */
3891        err = e1000e_setup_tx_resources(adapter->tx_ring);
3892        if (err)
3893                goto err_setup_tx;
3894
3895        /* allocate receive descriptors */
3896        err = e1000e_setup_rx_resources(adapter->rx_ring);
3897        if (err)
3898                goto err_setup_rx;
3899
3900        /* If AMT is enabled, let the firmware know that the network
3901         * interface is now open and reset the part to a known state.
3902         */
3903        if (adapter->flags & FLAG_HAS_AMT) {
3904                e1000e_get_hw_control(adapter);
3905                e1000e_reset(adapter);
3906        }
3907
3908        e1000e_power_up_phy(adapter);
3909
3910        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3911        if ((adapter->hw.mng_cookie.status &
3912             E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3913                e1000_update_mng_vlan(adapter);
3914
3915        /* DMA latency requirement to workaround jumbo issue */
3916        if (adapter->hw.mac.type == e1000_pch2lan)
3917                pm_qos_add_request(&adapter->netdev->pm_qos_req,
3918                                   PM_QOS_CPU_DMA_LATENCY,
3919                                   PM_QOS_DEFAULT_VALUE);
3920
3921        /* before we allocate an interrupt, we must be ready to handle it.
3922         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3923         * as soon as we call pci_request_irq, so we have to setup our
3924         * clean_rx handler before we do so.
3925         */
3926        e1000_configure(adapter);
3927
3928        err = e1000_request_irq(adapter);
3929        if (err)
3930                goto err_req_irq;
3931
3932        /* Work around PCIe errata with MSI interrupts causing some chipsets to
3933         * ignore e1000e MSI messages, which means we need to test our MSI
3934         * interrupt now
3935         */
3936        if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3937                err = e1000_test_msi(adapter);
3938                if (err) {
3939                        e_err("Interrupt allocation failed\n");
3940                        goto err_req_irq;
3941                }
3942        }
3943
3944        /* From here on the code is the same as e1000e_up() */
3945        clear_bit(__E1000_DOWN, &adapter->state);
3946
3947        napi_enable(&adapter->napi);
3948
3949        e1000_irq_enable(adapter);
3950
3951        adapter->tx_hang_recheck = false;
3952        netif_start_queue(netdev);
3953
3954        adapter->idle_check = true;
3955        pm_runtime_put(&pdev->dev);
3956
3957        /* fire a link status change interrupt to start the watchdog */
3958        if (adapter->msix_entries)
3959                ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3960        else
3961                ew32(ICS, E1000_ICS_LSC);
3962
3963        return 0;
3964
3965err_req_irq:
3966        e1000e_release_hw_control(adapter);
3967        e1000_power_down_phy(adapter);
3968        e1000e_free_rx_resources(adapter->rx_ring);
3969err_setup_rx:
3970        e1000e_free_tx_resources(adapter->tx_ring);
3971err_setup_tx:
3972        e1000e_reset(adapter);
3973        pm_runtime_put_sync(&pdev->dev);
3974
3975        return err;
3976}
3977
3978/**
3979 * e1000_close - Disables a network interface
3980 * @netdev: network interface device structure
3981 *
3982 * Returns 0, this is not allowed to fail
3983 *
3984 * The close entry point is called when an interface is de-activated
3985 * by the OS.  The hardware is still under the drivers control, but
3986 * needs to be disabled.  A global MAC reset is issued to stop the
3987 * hardware, and all transmit and receive resources are freed.
3988 **/
3989static int e1000_close(struct net_device *netdev)
3990{
3991        struct e1000_adapter *adapter = netdev_priv(netdev);
3992        struct pci_dev *pdev = adapter->pdev;
3993        int count = E1000_CHECK_RESET_COUNT;
3994
3995        while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
3996                usleep_range(10000, 20000);
3997
3998        WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3999
4000        pm_runtime_get_sync(&pdev->dev);
4001
4002        napi_disable(&adapter->napi);
4003
4004        if (!test_bit(__E1000_DOWN, &adapter->state)) {
4005                e1000e_down(adapter);
4006                e1000_free_irq(adapter);
4007        }
4008        e1000_power_down_phy(adapter);
4009
4010        e1000e_free_tx_resources(adapter->tx_ring);
4011        e1000e_free_rx_resources(adapter->rx_ring);
4012
4013        /* kill manageability vlan ID if supported, but not if a vlan with
4014         * the same ID is registered on the host OS (let 8021q kill it)
4015         */
4016        if (adapter->hw.mng_cookie.status &
4017            E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4018                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4019
4020        /* If AMT is enabled, let the firmware know that the network
4021         * interface is now closed
4022         */
4023        if ((adapter->flags & FLAG_HAS_AMT) &&
4024            !test_bit(__E1000_TESTING, &adapter->state))
4025                e1000e_release_hw_control(adapter);
4026
4027        if (adapter->hw.mac.type == e1000_pch2lan)
4028                pm_qos_remove_request(&adapter->netdev->pm_qos_req);
4029
4030        pm_runtime_put_sync(&pdev->dev);
4031
4032        return 0;
4033}
4034/**
4035 * e1000_set_mac - Change the Ethernet Address of the NIC
4036 * @netdev: network interface device structure
4037 * @p: pointer to an address structure
4038 *
4039 * Returns 0 on success, negative on failure
4040 **/
4041static int e1000_set_mac(struct net_device *netdev, void *p)
4042{
4043        struct e1000_adapter *adapter = netdev_priv(netdev);
4044        struct e1000_hw *hw = &adapter->hw;
4045        struct sockaddr *addr = p;
4046
4047        if (!is_valid_ether_addr(addr->sa_data))
4048                return -EADDRNOTAVAIL;
4049
4050        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4051        memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4052
4053        hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4054
4055        if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4056                /* activate the work around */
4057                e1000e_set_laa_state_82571(&adapter->hw, 1);
4058
4059                /* Hold a copy of the LAA in RAR[14] This is done so that
4060                 * between the time RAR[0] gets clobbered  and the time it
4061                 * gets fixed (in e1000_watchdog), the actual LAA is in one
4062                 * of the RARs and no incoming packets directed to this port
4063                 * are dropped. Eventually the LAA will be in RAR[0] and
4064                 * RAR[14]
4065                 */
4066                hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4067                                    adapter->hw.mac.rar_entry_count - 1);
4068        }
4069
4070        return 0;
4071}
4072
4073/**
4074 * e1000e_update_phy_task - work thread to update phy
4075 * @work: pointer to our work struct
4076 *
4077 * this worker thread exists because we must acquire a
4078 * semaphore to read the phy, which we could msleep while
4079 * waiting for it, and we can't msleep in a timer.
4080 **/
4081static void e1000e_update_phy_task(struct work_struct *work)
4082{
4083        struct e1000_adapter *adapter = container_of(work,
4084                                        struct e1000_adapter, update_phy_task);
4085
4086        if (test_bit(__E1000_DOWN, &adapter->state))
4087                return;
4088
4089        e1000_get_phy_info(&adapter->hw);
4090}
4091
4092/**
4093 * e1000_update_phy_info - timre call-back to update PHY info
4094 * @data: pointer to adapter cast into an unsigned long
4095 *
4096 * Need to wait a few seconds after link up to get diagnostic information from
4097 * the phy
4098 **/
4099static void e1000_update_phy_info(unsigned long data)
4100{
4101        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4102
4103        if (test_bit(__E1000_DOWN, &adapter->state))
4104                return;
4105
4106        schedule_work(&adapter->update_phy_task);
4107}
4108
4109/**
4110 * e1000e_update_phy_stats - Update the PHY statistics counters
4111 * @adapter: board private structure
4112 *
4113 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4114 **/
4115static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4116{
4117        struct e1000_hw *hw = &adapter->hw;
4118        s32 ret_val;
4119        u16 phy_data;
4120
4121        ret_val = hw->phy.ops.acquire(hw);
4122        if (ret_val)
4123                return;
4124
4125        /* A page set is expensive so check if already on desired page.
4126         * If not, set to the page with the PHY status registers.
4127         */
4128        hw->phy.addr = 1;
4129        ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4130                                           &phy_data);
4131        if (ret_val)
4132                goto release;
4133        if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4134                ret_val = hw->phy.ops.set_page(hw,
4135                                               HV_STATS_PAGE << IGP_PAGE_SHIFT);
4136                if (ret_val)
4137                        goto release;
4138        }
4139
4140        /* Single Collision Count */
4141        hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4142        ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4143        if (!ret_val)
4144                adapter->stats.scc += phy_data;
4145
4146        /* Excessive Collision Count */
4147        hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4148        ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4149        if (!ret_val)
4150                adapter->stats.ecol += phy_data;
4151
4152        /* Multiple Collision Count */
4153        hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4154        ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4155        if (!ret_val)
4156                adapter->stats.mcc += phy_data;
4157
4158        /* Late Collision Count */
4159        hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4160        ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4161        if (!ret_val)
4162                adapter->stats.latecol += phy_data;
4163
4164        /* Collision Count - also used for adaptive IFS */
4165        hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4166        ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4167        if (!ret_val)
4168                hw->mac.collision_delta = phy_data;
4169
4170        /* Defer Count */
4171        hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4172        ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4173        if (!ret_val)
4174                adapter->stats.dc += phy_data;
4175
4176        /* Transmit with no CRS */
4177        hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4178        ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4179        if (!ret_val)
4180                adapter->stats.tncrs += phy_data;
4181
4182release:
4183        hw->phy.ops.release(hw);
4184}
4185
4186/**
4187 * e1000e_update_stats - Update the board statistics counters
4188 * @adapter: board private structure
4189 **/
4190static void e1000e_update_stats(struct e1000_adapter *adapter)
4191{
4192        struct net_device *netdev = adapter->netdev;
4193        struct e1000_hw *hw = &adapter->hw;
4194        struct pci_dev *pdev = adapter->pdev;
4195
4196        /* Prevent stats update while adapter is being reset, or if the pci
4197         * connection is down.
4198         */
4199        if (adapter->link_speed == 0)
4200                return;
4201        if (pci_channel_offline(pdev))
4202                return;
4203
4204        adapter->stats.crcerrs += er32(CRCERRS);
4205        adapter->stats.gprc += er32(GPRC);
4206        adapter->stats.gorc += er32(GORCL);
4207        er32(GORCH); /* Clear gorc */
4208        adapter->stats.bprc += er32(BPRC);
4209        adapter->stats.mprc += er32(MPRC);
4210        adapter->stats.roc += er32(ROC);
4211
4212        adapter->stats.mpc += er32(MPC);
4213
4214        /* Half-duplex statistics */
4215        if (adapter->link_duplex == HALF_DUPLEX) {
4216                if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4217                        e1000e_update_phy_stats(adapter);
4218                } else {
4219                        adapter->stats.scc += er32(SCC);
4220                        adapter->stats.ecol += er32(ECOL);
4221                        adapter->stats.mcc += er32(MCC);
4222                        adapter->stats.latecol += er32(LATECOL);
4223                        adapter->stats.dc += er32(DC);
4224
4225                        hw->mac.collision_delta = er32(COLC);
4226
4227                        if ((hw->mac.type != e1000_82574) &&
4228                            (hw->mac.type != e1000_82583))
4229                                adapter->stats.tncrs += er32(TNCRS);
4230                }
4231                adapter->stats.colc += hw->mac.collision_delta;
4232        }
4233
4234        adapter->stats.xonrxc += er32(XONRXC);
4235        adapter->stats.xontxc += er32(XONTXC);
4236        adapter->stats.xoffrxc += er32(XOFFRXC);
4237        adapter->stats.xofftxc += er32(XOFFTXC);
4238        adapter->stats.gptc += er32(GPTC);
4239        adapter->stats.gotc += er32(GOTCL);
4240        er32(GOTCH); /* Clear gotc */
4241        adapter->stats.rnbc += er32(RNBC);
4242        adapter->stats.ruc += er32(RUC);
4243
4244        adapter->stats.mptc += er32(MPTC);
4245        adapter->stats.bptc += er32(BPTC);
4246
4247        /* used for adaptive IFS */
4248
4249        hw->mac.tx_packet_delta = er32(TPT);
4250        adapter->stats.tpt += hw->mac.tx_packet_delta;
4251
4252        adapter->stats.algnerrc += er32(ALGNERRC);
4253        adapter->stats.rxerrc += er32(RXERRC);
4254        adapter->stats.cexterr += er32(CEXTERR);
4255        adapter->stats.tsctc += er32(TSCTC);
4256        adapter->stats.tsctfc += er32(TSCTFC);
4257
4258        /* Fill out the OS statistics structure */
4259        netdev->stats.multicast = adapter->stats.mprc;
4260        netdev->stats.collisions = adapter->stats.colc;
4261
4262        /* Rx Errors */
4263
4264        /* RLEC on some newer hardware can be incorrect so build
4265         * our own version based on RUC and ROC
4266         */
4267        netdev->stats.rx_errors = adapter->stats.rxerrc +
4268                adapter->stats.crcerrs + adapter->stats.algnerrc +
4269                adapter->stats.ruc + adapter->stats.roc +
4270                adapter->stats.cexterr;
4271        netdev->stats.rx_length_errors = adapter->stats.ruc +
4272                                              adapter->stats.roc;
4273        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4274        netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4275        netdev->stats.rx_missed_errors = adapter->stats.mpc;
4276
4277        /* Tx Errors */
4278        netdev->stats.tx_errors = adapter->stats.ecol +
4279                                       adapter->stats.latecol;
4280        netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4281        netdev->stats.tx_window_errors = adapter->stats.latecol;
4282        netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
4283
4284        /* Tx Dropped needs to be maintained elsewhere */
4285
4286        /* Management Stats */
4287        adapter->stats.mgptc += er32(MGTPTC);
4288        adapter->stats.mgprc += er32(MGTPRC);
4289        adapter->stats.mgpdc += er32(MGTPDC);
4290
4291        /* Correctable ECC Errors */
4292        if (hw->mac.type == e1000_pch_lpt) {
4293                u32 pbeccsts = er32(PBECCSTS);
4294                adapter->corr_errors +=
4295                    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4296                adapter->uncorr_errors +=
4297                    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
4298                    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
4299        }
4300}
4301
4302/**
4303 * e1000_phy_read_status - Update the PHY register status snapshot
4304 * @adapter: board private structure
4305 **/
4306static void e1000_phy_read_status(struct e1000_adapter *adapter)
4307{
4308        struct e1000_hw *hw = &adapter->hw;
4309        struct e1000_phy_regs *phy = &adapter->phy_regs;
4310
4311        if ((er32(STATUS) & E1000_STATUS_LU) &&
4312            (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4313                int ret_val;
4314
4315                ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4316                ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4317                ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
4318                ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
4319                ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
4320                ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
4321                ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
4322                ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
4323                if (ret_val)
4324                        e_warn("Error reading PHY register\n");
4325        } else {
4326                /* Do not read PHY registers if link is not up
4327                 * Set values to typical power-on defaults
4328                 */
4329                phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4330                phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4331                             BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4332                             BMSR_ERCAP);
4333                phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4334                                  ADVERTISE_ALL | ADVERTISE_CSMA);
4335                phy->lpa = 0;
4336                phy->expansion = EXPANSION_ENABLENPAGE;
4337                phy->ctrl1000 = ADVERTISE_1000FULL;
4338                phy->stat1000 = 0;
4339                phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4340        }
4341}
4342
4343static void e1000_print_link_info(struct e1000_adapter *adapter)
4344{
4345        struct e1000_hw *hw = &adapter->hw;
4346        u32 ctrl = er32(CTRL);
4347
4348        /* Link status message must follow this format for user tools */
4349        printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4350                adapter->netdev->name,
4351                adapter->link_speed,
4352                adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4353                (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4354                (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4355                (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
4356}
4357
4358static bool e1000e_has_link(struct e1000_adapter *adapter)
4359{
4360        struct e1000_hw *hw = &adapter->hw;
4361        bool link_active = false;
4362        s32 ret_val = 0;
4363
4364        /* get_link_status is set on LSC (link status) interrupt or
4365         * Rx sequence error interrupt.  get_link_status will stay
4366         * false until the check_for_link establishes link
4367         * for copper adapters ONLY
4368         */
4369        switch (hw->phy.media_type) {
4370        case e1000_media_type_copper:
4371                if (hw->mac.get_link_status) {
4372                        ret_val = hw->mac.ops.check_for_link(hw);
4373                        link_active = !hw->mac.get_link_status;
4374                } else {
4375                        link_active = true;
4376                }
4377                break;
4378        case e1000_media_type_fiber:
4379                ret_val = hw->mac.ops.check_for_link(hw);
4380                link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4381                break;
4382        case e1000_media_type_internal_serdes:
4383                ret_val = hw->mac.ops.check_for_link(hw);
4384                link_active = adapter->hw.mac.serdes_has_link;
4385                break;
4386        default:
4387        case e1000_media_type_unknown:
4388                break;
4389        }
4390
4391        if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4392            (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4393                /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4394                e_info("Gigabit has been disabled, downgrading speed\n");
4395        }
4396
4397        return link_active;
4398}
4399
4400static void e1000e_enable_receives(struct e1000_adapter *adapter)
4401{
4402        /* make sure the receive unit is started */
4403        if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4404            (adapter->flags & FLAG_RX_RESTART_NOW)) {
4405                struct e1000_hw *hw = &adapter->hw;
4406                u32 rctl = er32(RCTL);
4407                ew32(RCTL, rctl | E1000_RCTL_EN);
4408                adapter->flags &= ~FLAG_RX_RESTART_NOW;
4409        }
4410}
4411
4412static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4413{
4414        struct e1000_hw *hw = &adapter->hw;
4415
4416        /* With 82574 controllers, PHY needs to be checked periodically
4417         * for hung state and reset, if two calls return true
4418         */
4419        if (e1000_check_phy_82574(hw))
4420                adapter->phy_hang_count++;
4421        else
4422                adapter->phy_hang_count = 0;
4423
4424        if (adapter->phy_hang_count > 1) {
4425                adapter->phy_hang_count = 0;
4426                schedule_work(&adapter->reset_task);
4427        }
4428}
4429
4430/**
4431 * e1000_watchdog - Timer Call-back
4432 * @data: pointer to adapter cast into an unsigned long
4433 **/
4434static void e1000_watchdog(unsigned long data)
4435{
4436        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4437
4438        /* Do the rest outside of interrupt context */
4439        schedule_work(&adapter->watchdog_task);
4440
4441        /* TODO: make this use queue_delayed_work() */
4442}
4443
4444static void e1000_watchdog_task(struct work_struct *work)
4445{
4446        struct e1000_adapter *adapter = container_of(work,
4447                                        struct e1000_adapter, watchdog_task);
4448        struct net_device *netdev = adapter->netdev;
4449        struct e1000_mac_info *mac = &adapter->hw.mac;
4450        struct e1000_phy_info *phy = &adapter->hw.phy;
4451        struct e1000_ring *tx_ring = adapter->tx_ring;
4452        struct e1000_hw *hw = &adapter->hw;
4453        u32 link, tctl;
4454
4455        if (test_bit(__E1000_DOWN, &adapter->state))
4456                return;
4457
4458        link = e1000e_has_link(adapter);
4459        if ((netif_carrier_ok(netdev)) && link) {
4460                /* Cancel scheduled suspend requests. */
4461                pm_runtime_resume(netdev->dev.parent);
4462
4463                e1000e_enable_receives(adapter);
4464                goto link_up;
4465        }
4466
4467        if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4468            (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4469                e1000_update_mng_vlan(adapter);
4470
4471        if (link) {
4472                if (!netif_carrier_ok(netdev)) {
4473                        bool txb2b = true;
4474
4475                        /* Cancel scheduled suspend requests. */
4476                        pm_runtime_resume(netdev->dev.parent);
4477
4478                        /* update snapshot of PHY registers on LSC */
4479                        e1000_phy_read_status(adapter);
4480                        mac->ops.get_link_up_info(&adapter->hw,
4481                                                   &adapter->link_speed,
4482                                                   &adapter->link_duplex);
4483                        e1000_print_link_info(adapter);
4484                        /* On supported PHYs, check for duplex mismatch only
4485                         * if link has autonegotiated at 10/100 half
4486                         */
4487                        if ((hw->phy.type == e1000_phy_igp_3 ||
4488                             hw->phy.type == e1000_phy_bm) &&
4489                            (hw->mac.autoneg == true) &&
4490                            (adapter->link_speed == SPEED_10 ||
4491                             adapter->link_speed == SPEED_100) &&
4492                            (adapter->link_duplex == HALF_DUPLEX)) {
4493                                u16 autoneg_exp;
4494
4495                                e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
4496
4497                                if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
4498                                        e_info("Autonegotiated half duplex but link partner cannot autoneg.  Try forcing full duplex if link gets many collisions.\n");
4499                        }
4500
4501                        /* adjust timeout factor according to speed/duplex */
4502                        adapter->tx_timeout_factor = 1;
4503                        switch (adapter->link_speed) {
4504                        case SPEED_10:
4505                                txb2b = false;
4506                                adapter->tx_timeout_factor = 16;
4507                                break;
4508                        case SPEED_100:
4509                                txb2b = false;
4510                                adapter->tx_timeout_factor = 10;
4511                                break;
4512                        }
4513
4514                        /* workaround: re-program speed mode bit after
4515                         * link-up event
4516                         */
4517                        if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4518                            !txb2b) {
4519                                u32 tarc0;
4520                                tarc0 = er32(TARC(0));
4521                                tarc0 &= ~SPEED_MODE_BIT;
4522                                ew32(TARC(0), tarc0);
4523                        }
4524
4525                        /* disable TSO for pcie and 10/100 speeds, to avoid
4526                         * some hardware issues
4527                         */
4528                        if (!(adapter->flags & FLAG_TSO_FORCE)) {
4529                                switch (adapter->link_speed) {
4530                                case SPEED_10:
4531                                case SPEED_100:
4532                                        e_info("10/100 speed: disabling TSO\n");
4533                                        netdev->features &= ~NETIF_F_TSO;
4534                                        netdev->features &= ~NETIF_F_TSO6;
4535                                        break;
4536                                case SPEED_1000:
4537                                        netdev->features |= NETIF_F_TSO;
4538                                        netdev->features |= NETIF_F_TSO6;
4539                                        break;
4540                                default:
4541                                        /* oops */
4542                                        break;
4543                                }
4544                        }
4545
4546                        /* enable transmits in the hardware, need to do this
4547                         * after setting TARC(0)
4548                         */
4549                        tctl = er32(TCTL);
4550                        tctl |= E1000_TCTL_EN;
4551                        ew32(TCTL, tctl);
4552
4553                        /* Perform any post-link-up configuration before
4554                         * reporting link up.
4555                         */
4556                        if (phy->ops.cfg_on_link_up)
4557                                phy->ops.cfg_on_link_up(hw);
4558
4559                        netif_carrier_on(netdev);
4560
4561                        if (!test_bit(__E1000_DOWN, &adapter->state))
4562                                mod_timer(&adapter->phy_info_timer,
4563                                          round_jiffies(jiffies + 2 * HZ));
4564                }
4565        } else {
4566                if (netif_carrier_ok(netdev)) {
4567                        adapter->link_speed = 0;
4568                        adapter->link_duplex = 0;
4569                        /* Link status message must follow this format */
4570                        printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
4571                               adapter->netdev->name);
4572                        netif_carrier_off(netdev);
4573                        if (!test_bit(__E1000_DOWN, &adapter->state))
4574                                mod_timer(&adapter->phy_info_timer,
4575                                          round_jiffies(jiffies + 2 * HZ));
4576
4577                        if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4578                                schedule_work(&adapter->reset_task);
4579                        else
4580                                pm_schedule_suspend(netdev->dev.parent,
4581                                                        LINK_TIMEOUT);
4582                }
4583        }
4584
4585link_up:
4586        spin_lock(&adapter->stats64_lock);
4587        e1000e_update_stats(adapter);
4588
4589        mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4590        adapter->tpt_old = adapter->stats.tpt;
4591        mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4592        adapter->colc_old = adapter->stats.colc;
4593
4594        adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4595        adapter->gorc_old = adapter->stats.gorc;
4596        adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4597        adapter->gotc_old = adapter->stats.gotc;
4598        spin_unlock(&adapter->stats64_lock);
4599
4600        e1000e_update_adaptive(&adapter->hw);
4601
4602        if (!netif_carrier_ok(netdev) &&
4603            (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4604                /* We've lost link, so the controller stops DMA,
4605                 * but we've got queued Tx work that's never going
4606                 * to get done, so reset controller to flush Tx.
4607                 * (Do the reset outside of interrupt context).
4608                 */
4609                schedule_work(&adapter->reset_task);
4610                /* return immediately since reset is imminent */
4611                return;
4612        }
4613
4614        /* Simple mode for Interrupt Throttle Rate (ITR) */
4615        if (adapter->itr_setting == 4) {
4616                /* Symmetric Tx/Rx gets a reduced ITR=2000;
4617                 * Total asymmetrical Tx or Rx gets ITR=8000;
4618                 * everyone else is between 2000-8000.
4619                 */
4620                u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4621                u32 dif = (adapter->gotc > adapter->gorc ?
4622                            adapter->gotc - adapter->gorc :
4623                            adapter->gorc - adapter->gotc) / 10000;
4624                u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4625
4626                e1000e_write_itr(adapter, itr);
4627        }
4628
4629        /* Cause software interrupt to ensure Rx ring is cleaned */
4630        if (adapter->msix_entries)
4631                ew32(ICS, adapter->rx_ring->ims_val);
4632        else
4633                ew32(ICS, E1000_ICS_RXDMT0);
4634
4635        /* flush pending descriptors to memory before detecting Tx hang */
4636        e1000e_flush_descriptors(adapter);
4637
4638        /* Force detection of hung controller every watchdog period */
4639        adapter->detect_tx_hung = true;
4640
4641        /* With 82571 controllers, LAA may be overwritten due to controller
4642         * reset from the other port. Set the appropriate LAA in RAR[0]
4643         */
4644        if (e1000e_get_laa_state_82571(hw))
4645                hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
4646
4647        if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
4648                e1000e_check_82574_phy_workaround(adapter);
4649
4650        /* Reset the timer */
4651        if (!test_bit(__E1000_DOWN, &adapter->state))
4652                mod_timer(&adapter->watchdog_timer,
4653                          round_jiffies(jiffies + 2 * HZ));
4654}
4655
4656#define E1000_TX_FLAGS_CSUM             0x00000001
4657#define E1000_TX_FLAGS_VLAN             0x00000002
4658#define E1000_TX_FLAGS_TSO              0x00000004
4659#define E1000_TX_FLAGS_IPV4             0x00000008
4660#define E1000_TX_FLAGS_NO_FCS           0x00000010
4661#define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
4662#define E1000_TX_FLAGS_VLAN_SHIFT       16
4663
4664static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4665{
4666        struct e1000_context_desc *context_desc;
4667        struct e1000_buffer *buffer_info;
4668        unsigned int i;
4669        u32 cmd_length = 0;
4670        u16 ipcse = 0, mss;
4671        u8 ipcss, ipcso, tucss, tucso, hdr_len;
4672
4673        if (!skb_is_gso(skb))
4674                return 0;
4675
4676        if (skb_header_cloned(skb)) {
4677                int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4678
4679                if (err)
4680                        return err;
4681        }
4682
4683        hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4684        mss = skb_shinfo(skb)->gso_size;
4685        if (skb->protocol == htons(ETH_P_IP)) {
4686                struct iphdr *iph = ip_hdr(skb);
4687                iph->tot_len = 0;
4688                iph->check = 0;
4689                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
4690                                                         0, IPPROTO_TCP, 0);
4691                cmd_length = E1000_TXD_CMD_IP;
4692                ipcse = skb_transport_offset(skb) - 1;
4693        } else if (skb_is_gso_v6(skb)) {
4694                ipv6_hdr(skb)->payload_len = 0;
4695                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4696                                                       &ipv6_hdr(skb)->daddr,
4697                                                       0, IPPROTO_TCP, 0);
4698                ipcse = 0;
4699        }
4700        ipcss = skb_network_offset(skb);
4701        ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4702        tucss = skb_transport_offset(skb);
4703        tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4704
4705        cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4706                       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
4707
4708        i = tx_ring->next_to_use;
4709        context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4710        buffer_info = &tx_ring->buffer_info[i];
4711
4712        context_desc->lower_setup.ip_fields.ipcss  = ipcss;
4713        context_desc->lower_setup.ip_fields.ipcso  = ipcso;
4714        context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
4715        context_desc->upper_setup.tcp_fields.tucss = tucss;
4716        context_desc->upper_setup.tcp_fields.tucso = tucso;
4717        context_desc->upper_setup.tcp_fields.tucse = 0;
4718        context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
4719        context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4720        context_desc->cmd_and_length = cpu_to_le32(cmd_length);
4721
4722        buffer_info->time_stamp = jiffies;
4723        buffer_info->next_to_watch = i;
4724
4725        i++;
4726        if (i == tx_ring->count)
4727                i = 0;
4728        tx_ring->next_to_use = i;
4729
4730        return 1;
4731}
4732
4733static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
4734{
4735        struct e1000_adapter *adapter = tx_ring->adapter;
4736        struct e1000_context_desc *context_desc;
4737        struct e1000_buffer *buffer_info;
4738        unsigned int i;
4739        u8 css;
4740        u32 cmd_len = E1000_TXD_CMD_DEXT;
4741        __be16 protocol;
4742
4743        if (skb->ip_summed != CHECKSUM_PARTIAL)
4744                return 0;
4745
4746        if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
4747                protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
4748        else
4749                protocol = skb->protocol;
4750
4751        switch (protocol) {
4752        case cpu_to_be16(ETH_P_IP):
4753                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4754                        cmd_len |= E1000_TXD_CMD_TCP;
4755                break;
4756        case cpu_to_be16(ETH_P_IPV6):
4757                /* XXX not handling all IPV6 headers */
4758                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4759                        cmd_len |= E1000_TXD_CMD_TCP;
4760                break;
4761        default:
4762                if (unlikely(net_ratelimit()))
4763                        e_warn("checksum_partial proto=%x!\n",
4764                               be16_to_cpu(protocol));
4765                break;
4766        }
4767
4768        css = skb_checksum_start_offset(skb);
4769
4770        i = tx_ring->next_to_use;
4771        buffer_info = &tx_ring->buffer_info[i];
4772        context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4773
4774        context_desc->lower_setup.ip_config = 0;
4775        context_desc->upper_setup.tcp_fields.tucss = css;
4776        context_desc->upper_setup.tcp_fields.tucso =
4777                                css + skb->csum_offset;
4778        context_desc->upper_setup.tcp_fields.tucse = 0;
4779        context_desc->tcp_seg_setup.data = 0;
4780        context_desc->cmd_and_length = cpu_to_le32(cmd_len);
4781
4782        buffer_info->time_stamp = jiffies;
4783        buffer_info->next_to_watch = i;
4784
4785        i++;
4786        if (i == tx_ring->count)
4787                i = 0;
4788        tx_ring->next_to_use = i;
4789
4790        return 1;
4791}
4792
4793static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
4794                        unsigned int first, unsigned int max_per_txd,
4795                        unsigned int nr_frags)
4796{
4797        struct e1000_adapter *adapter = tx_ring->adapter;
4798        struct pci_dev *pdev = adapter->pdev;
4799        struct e1000_buffer *buffer_info;
4800        unsigned int len = skb_headlen(skb);
4801        unsigned int offset = 0, size, count = 0, i;
4802        unsigned int f, bytecount, segs;
4803
4804        i = tx_ring->next_to_use;
4805
4806        while (len) {
4807                buffer_info = &tx_ring->buffer_info[i];
4808                size = min(len, max_per_txd);
4809
4810                buffer_info->length = size;
4811                buffer_info->time_stamp = jiffies;
4812                buffer_info->next_to_watch = i;
4813                buffer_info->dma = dma_map_single(&pdev->dev,
4814                                                  skb->data + offset,
4815                                                  size, DMA_TO_DEVICE);
4816                buffer_info->mapped_as_page = false;
4817                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4818                        goto dma_error;
4819
4820                len -= size;
4821                offset += size;
4822                count++;
4823
4824                if (len) {
4825                        i++;
4826                        if (i == tx_ring->count)
4827                                i = 0;
4828                }
4829        }
4830
4831        for (f = 0; f < nr_frags; f++) {
4832                const struct skb_frag_struct *frag;
4833
4834                frag = &skb_shinfo(skb)->frags[f];
4835                len = skb_frag_size(frag);
4836                offset = 0;
4837
4838                while (len) {
4839                        i++;
4840                        if (i == tx_ring->count)
4841                                i = 0;
4842
4843                        buffer_info = &tx_ring->buffer_info[i];
4844                        size = min(len, max_per_txd);
4845
4846                        buffer_info->length = size;
4847                        buffer_info->time_stamp = jiffies;
4848                        buffer_info->next_to_watch = i;
4849                        buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
4850                                                offset, size, DMA_TO_DEVICE);
4851                        buffer_info->mapped_as_page = true;
4852                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4853                                goto dma_error;
4854
4855                        len -= size;
4856                        offset += size;
4857                        count++;
4858                }
4859        }
4860
4861        segs = skb_shinfo(skb)->gso_segs ? : 1;
4862        /* multiply data chunks by size of headers */
4863        bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4864
4865        tx_ring->buffer_info[i].skb = skb;
4866        tx_ring->buffer_info[i].segs = segs;
4867        tx_ring->buffer_info[i].bytecount = bytecount;
4868        tx_ring->buffer_info[first].next_to_watch = i;
4869
4870        return count;
4871
4872dma_error:
4873        dev_err(&pdev->dev, "Tx DMA map failed\n");
4874        buffer_info->dma = 0;
4875        if (count)
4876                count--;
4877
4878        while (count--) {
4879                if (i == 0)
4880                        i += tx_ring->count;
4881                i--;
4882                buffer_info = &tx_ring->buffer_info[i];
4883                e1000_put_txbuf(tx_ring, buffer_info);
4884        }
4885
4886        return 0;
4887}
4888
4889static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
4890{
4891        struct e1000_adapter *adapter = tx_ring->adapter;
4892        struct e1000_tx_desc *tx_desc = NULL;
4893        struct e1000_buffer *buffer_info;
4894        u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
4895        unsigned int i;
4896
4897        if (tx_flags & E1000_TX_FLAGS_TSO) {
4898                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
4899                             E1000_TXD_CMD_TSE;
4900                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4901
4902                if (tx_flags & E1000_TX_FLAGS_IPV4)
4903                        txd_upper |= E1000_TXD_POPTS_IXSM << 8;
4904        }
4905
4906        if (tx_flags & E1000_TX_FLAGS_CSUM) {
4907                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
4908                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4909        }
4910
4911        if (tx_flags & E1000_TX_FLAGS_VLAN) {
4912                txd_lower |= E1000_TXD_CMD_VLE;
4913                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
4914        }
4915
4916        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
4917                txd_lower &= ~(E1000_TXD_CMD_IFCS);
4918
4919        i = tx_ring->next_to_use;
4920
4921        do {
4922                buffer_info = &tx_ring->buffer_info[i];
4923                tx_desc = E1000_TX_DESC(*tx_ring, i);
4924                tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4925                tx_desc->lower.data =
4926                        cpu_to_le32(txd_lower | buffer_info->length);
4927                tx_desc->upper.data = cpu_to_le32(txd_upper);
4928
4929                i++;
4930                if (i == tx_ring->count)
4931                        i = 0;
4932        } while (--count > 0);
4933
4934        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4935
4936        /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
4937        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
4938                tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
4939
4940        /* Force memory writes to complete before letting h/w
4941         * know there are new descriptors to fetch.  (Only
4942         * applicable for weak-ordered memory model archs,
4943         * such as IA-64).
4944         */
4945        wmb();
4946
4947        tx_ring->next_to_use = i;
4948
4949        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
4950                e1000e_update_tdt_wa(tx_ring, i);
4951        else
4952                writel(i, tx_ring->tail);
4953
4954        /* we need this if more than one processor can write to our tail
4955         * at a time, it synchronizes IO on IA64/Altix systems
4956         */
4957        mmiowb();
4958}
4959
4960#define MINIMUM_DHCP_PACKET_SIZE 282
4961static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4962                                    struct sk_buff *skb)
4963{
4964        struct e1000_hw *hw =  &adapter->hw;
4965        u16 length, offset;
4966
4967        if (vlan_tx_tag_present(skb)) {
4968                if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
4969                    (adapter->hw.mng_cookie.status &
4970                        E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
4971                        return 0;
4972        }
4973
4974        if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
4975                return 0;
4976
4977        if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
4978                return 0;
4979
4980        {
4981                const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
4982                struct udphdr *udp;
4983
4984                if (ip->protocol != IPPROTO_UDP)
4985                        return 0;
4986
4987                udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
4988                if (ntohs(udp->dest) != 67)
4989                        return 0;
4990
4991                offset = (u8 *)udp + 8 - skb->data;
4992                length = skb->len - offset;
4993                return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
4994        }
4995
4996        return 0;
4997}
4998
4999static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5000{
5001        struct e1000_adapter *adapter = tx_ring->adapter;
5002
5003        netif_stop_queue(adapter->netdev);
5004        /* Herbert's original patch had:
5005         *  smp_mb__after_netif_stop_queue();
5006         * but since that doesn't exist yet, just open code it.
5007         */
5008        smp_mb();
5009
5010        /* We need to check again in a case another CPU has just
5011         * made room available.
5012         */
5013        if (e1000_desc_unused(tx_ring) < size)
5014                return -EBUSY;
5015
5016        /* A reprieve! */
5017        netif_start_queue(adapter->netdev);
5018        ++adapter->restart_queue;
5019        return 0;
5020}
5021
5022static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5023{
5024        BUG_ON(size > tx_ring->count);
5025
5026        if (e1000_desc_unused(tx_ring) >= size)
5027                return 0;
5028        return __e1000_maybe_stop_tx(tx_ring, size);
5029}
5030
5031static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5032                                    struct net_device *netdev)
5033{
5034        struct e1000_adapter *adapter = netdev_priv(netdev);
5035        struct e1000_ring *tx_ring = adapter->tx_ring;
5036        unsigned int first;
5037        unsigned int tx_flags = 0;
5038        unsigned int len = skb_headlen(skb);
5039        unsigned int nr_frags;
5040        unsigned int mss;
5041        int count = 0;
5042        int tso;
5043        unsigned int f;
5044
5045        if (test_bit(__E1000_DOWN, &adapter->state)) {
5046                dev_kfree_skb_any(skb);
5047                return NETDEV_TX_OK;
5048        }
5049
5050        if (skb->len <= 0) {
5051                dev_kfree_skb_any(skb);
5052                return NETDEV_TX_OK;
5053        }
5054
5055        /* The minimum packet size with TCTL.PSP set is 17 bytes so
5056         * pad skb in order to meet this minimum size requirement
5057         */
5058        if (unlikely(skb->len < 17)) {
5059                if (skb_pad(skb, 17 - skb->len))
5060                        return NETDEV_TX_OK;
5061                skb->len = 17;
5062                skb_set_tail_pointer(skb, 17);
5063        }
5064
5065        mss = skb_shinfo(skb)->gso_size;
5066        if (mss) {
5067                u8 hdr_len;
5068
5069                /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
5070                 * points to just header, pull a few bytes of payload from
5071                 * frags into skb->data
5072                 */
5073                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5074                /* we do this workaround for ES2LAN, but it is un-necessary,
5075                 * avoiding it could save a lot of cycles
5076                 */
5077                if (skb->data_len && (hdr_len == len)) {
5078                        unsigned int pull_size;
5079
5080                        pull_size = min_t(unsigned int, 4, skb->data_len);
5081                        if (!__pskb_pull_tail(skb, pull_size)) {
5082                                e_err("__pskb_pull_tail failed.\n");
5083                                dev_kfree_skb_any(skb);
5084                                return NETDEV_TX_OK;
5085                        }
5086                        len = skb_headlen(skb);
5087                }
5088        }
5089
5090        /* reserve a descriptor for the offload context */
5091        if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5092                count++;
5093        count++;
5094
5095        count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5096
5097        nr_frags = skb_shinfo(skb)->nr_frags;
5098        for (f = 0; f < nr_frags; f++)
5099                count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5100                                      adapter->tx_fifo_limit);
5101
5102        if (adapter->hw.mac.tx_pkt_filtering)
5103                e1000_transfer_dhcp_info(adapter, skb);
5104
5105        /* need: count + 2 desc gap to keep tail from touching
5106         * head, otherwise try next time
5107         */
5108        if (e1000_maybe_stop_tx(tx_ring, count + 2))
5109                return NETDEV_TX_BUSY;
5110
5111        if (vlan_tx_tag_present(skb)) {
5112                tx_flags |= E1000_TX_FLAGS_VLAN;
5113                tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5114        }
5115
5116        first = tx_ring->next_to_use;
5117
5118        tso = e1000_tso(tx_ring, skb);
5119        if (tso < 0) {
5120                dev_kfree_skb_any(skb);
5121                return NETDEV_TX_OK;
5122        }
5123
5124        if (tso)
5125                tx_flags |= E1000_TX_FLAGS_TSO;
5126        else if (e1000_tx_csum(tx_ring, skb))
5127                tx_flags |= E1000_TX_FLAGS_CSUM;
5128
5129        /* Old method was to assume IPv4 packet by default if TSO was enabled.
5130         * 82571 hardware supports TSO capabilities for IPv6 as well...
5131         * no longer assume, we must.
5132         */
5133        if (skb->protocol == htons(ETH_P_IP))
5134                tx_flags |= E1000_TX_FLAGS_IPV4;
5135
5136        if (unlikely(skb->no_fcs))
5137                tx_flags |= E1000_TX_FLAGS_NO_FCS;
5138
5139        /* if count is 0 then mapping error has occurred */
5140        count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5141                             nr_frags);
5142        if (count) {
5143                skb_tx_timestamp(skb);
5144
5145                netdev_sent_queue(netdev, skb->len);
5146                e1000_tx_queue(tx_ring, tx_flags, count);
5147                /* Make sure there is space in the ring for the next send. */
5148                e1000_maybe_stop_tx(tx_ring,
5149                                    (MAX_SKB_FRAGS *
5150                                     DIV_ROUND_UP(PAGE_SIZE,
5151                                                  adapter->tx_fifo_limit) + 2));
5152        } else {
5153                dev_kfree_skb_any(skb);
5154                tx_ring->buffer_info[first].time_stamp = 0;
5155                tx_ring->next_to_use = first;
5156        }
5157
5158        return NETDEV_TX_OK;
5159}
5160
5161/**
5162 * e1000_tx_timeout - Respond to a Tx Hang
5163 * @netdev: network interface device structure
5164 **/
5165static void e1000_tx_timeout(struct net_device *netdev)
5166{
5167        struct e1000_adapter *adapter = netdev_priv(netdev);
5168
5169        /* Do the reset outside of interrupt context */
5170        adapter->tx_timeout_count++;
5171        schedule_work(&adapter->reset_task);
5172}
5173
5174static void e1000_reset_task(struct work_struct *work)
5175{
5176        struct e1000_adapter *adapter;
5177        adapter = container_of(work, struct e1000_adapter, reset_task);
5178
5179        /* don't run the task if already down */
5180        if (test_bit(__E1000_DOWN, &adapter->state))
5181                return;
5182
5183        if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
5184              (adapter->flags & FLAG_RX_RESTART_NOW))) {
5185                e1000e_dump(adapter);
5186                e_err("Reset adapter\n");
5187        }
5188        e1000e_reinit_locked(adapter);
5189}
5190
5191/**
5192 * e1000_get_stats64 - Get System Network Statistics
5193 * @netdev: network interface device structure
5194 * @stats: rtnl_link_stats64 pointer
5195 *
5196 * Returns the address of the device statistics structure.
5197 **/
5198struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5199                                             struct rtnl_link_stats64 *stats)
5200{
5201        struct e1000_adapter *adapter = netdev_priv(netdev);
5202
5203        memset(stats, 0, sizeof(struct rtnl_link_stats64));
5204        spin_lock(&adapter->stats64_lock);
5205        e1000e_update_stats(adapter);
5206        /* Fill out the OS statistics structure */
5207        stats->rx_bytes = adapter->stats.gorc;
5208        stats->rx_packets = adapter->stats.gprc;
5209        stats->tx_bytes = adapter->stats.gotc;
5210        stats->tx_packets = adapter->stats.gptc;
5211        stats->multicast = adapter->stats.mprc;
5212        stats->collisions = adapter->stats.colc;
5213
5214        /* Rx Errors */
5215
5216        /* RLEC on some newer hardware can be incorrect so build
5217         * our own version based on RUC and ROC
5218         */
5219        stats->rx_errors = adapter->stats.rxerrc +
5220                adapter->stats.crcerrs + adapter->stats.algnerrc +
5221                adapter->stats.ruc + adapter->stats.roc +
5222                adapter->stats.cexterr;
5223        stats->rx_length_errors = adapter->stats.ruc +
5224                                              adapter->stats.roc;
5225        stats->rx_crc_errors = adapter->stats.crcerrs;
5226        stats->rx_frame_errors = adapter->stats.algnerrc;
5227        stats->rx_missed_errors = adapter->stats.mpc;
5228
5229        /* Tx Errors */
5230        stats->tx_errors = adapter->stats.ecol +
5231                                       adapter->stats.latecol;
5232        stats->tx_aborted_errors = adapter->stats.ecol;
5233        stats->tx_window_errors = adapter->stats.latecol;
5234        stats->tx_carrier_errors = adapter->stats.tncrs;
5235
5236        /* Tx Dropped needs to be maintained elsewhere */
5237
5238        spin_unlock(&adapter->stats64_lock);
5239        return stats;
5240}
5241
5242/**
5243 * e1000_change_mtu - Change the Maximum Transfer Unit
5244 * @netdev: network interface device structure
5245 * @new_mtu: new value for maximum frame size
5246 *
5247 * Returns 0 on success, negative on failure
5248 **/
5249static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5250{
5251        struct e1000_adapter *adapter = netdev_priv(netdev);
5252        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5253
5254        /* Jumbo frame support */
5255        if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5256            !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5257                e_err("Jumbo Frames not supported.\n");
5258                return -EINVAL;
5259        }
5260
5261        /* Supported frame sizes */
5262        if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5263            (max_frame > adapter->max_hw_frame_size)) {
5264                e_err("Unsupported MTU setting\n");
5265                return -EINVAL;
5266        }
5267
5268        /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
5269        if ((adapter->hw.mac.type >= e1000_pch2lan) &&
5270            !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5271            (new_mtu > ETH_DATA_LEN)) {
5272                e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
5273                return -EINVAL;
5274        }
5275
5276        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5277                usleep_range(1000, 2000);
5278        /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
5279        adapter->max_frame_size = max_frame;
5280        e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5281        netdev->mtu = new_mtu;
5282        if (netif_running(netdev))
5283                e1000e_down(adapter);
5284
5285        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5286         * means we reserve 2 more, this pushes us to allocate from the next
5287         * larger slab size.
5288         * i.e. RXBUFFER_2048 --> size-4096 slab
5289         * However with the new *_jumbo_rx* routines, jumbo receives will use
5290         * fragmented skbs
5291         */
5292
5293        if (max_frame <= 2048)
5294                adapter->rx_buffer_len = 2048;
5295        else
5296                adapter->rx_buffer_len = 4096;
5297
5298        /* adjust allocation if LPE protects us, and we aren't using SBP */
5299        if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5300             (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5301                adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5302                                         + ETH_FCS_LEN;
5303
5304        if (netif_running(netdev))
5305                e1000e_up(adapter);
5306        else
5307                e1000e_reset(adapter);
5308
5309        clear_bit(__E1000_RESETTING, &adapter->state);
5310
5311        return 0;
5312}
5313
5314static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5315                           int cmd)
5316{
5317        struct e1000_adapter *adapter = netdev_priv(netdev);
5318        struct mii_ioctl_data *data = if_mii(ifr);
5319
5320        if (adapter->hw.phy.media_type != e1000_media_type_copper)
5321                return -EOPNOTSUPP;
5322
5323        switch (cmd) {
5324        case SIOCGMIIPHY:
5325                data->phy_id = adapter->hw.phy.addr;
5326                break;
5327        case SIOCGMIIREG:
5328                e1000_phy_read_status(adapter);
5329
5330                switch (data->reg_num & 0x1F) {
5331                case MII_BMCR:
5332                        data->val_out = adapter->phy_regs.bmcr;
5333                        break;
5334                case MII_BMSR:
5335                        data->val_out = adapter->phy_regs.bmsr;
5336                        break;
5337                case MII_PHYSID1:
5338                        data->val_out = (adapter->hw.phy.id >> 16);
5339                        break;
5340                case MII_PHYSID2:
5341                        data->val_out = (adapter->hw.phy.id & 0xFFFF);
5342                        break;
5343                case MII_ADVERTISE:
5344                        data->val_out = adapter->phy_regs.advertise;
5345                        break;
5346                case MII_LPA:
5347                        data->val_out = adapter->phy_regs.lpa;
5348                        break;
5349                case MII_EXPANSION:
5350                        data->val_out = adapter->phy_regs.expansion;
5351                        break;
5352                case MII_CTRL1000:
5353                        data->val_out = adapter->phy_regs.ctrl1000;
5354                        break;
5355                case MII_STAT1000:
5356                        data->val_out = adapter->phy_regs.stat1000;
5357                        break;
5358                case MII_ESTATUS:
5359                        data->val_out = adapter->phy_regs.estatus;
5360                        break;
5361                default:
5362                        return -EIO;
5363                }
5364                break;
5365        case SIOCSMIIREG:
5366        default:
5367                return -EOPNOTSUPP;
5368        }
5369        return 0;
5370}
5371
5372static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5373{
5374        switch (cmd) {
5375        case SIOCGMIIPHY:
5376        case SIOCGMIIREG:
5377        case SIOCSMIIREG:
5378                return e1000_mii_ioctl(netdev, ifr, cmd);
5379        default:
5380                return -EOPNOTSUPP;
5381        }
5382}
5383
5384static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5385{
5386        struct e1000_hw *hw = &adapter->hw;
5387        u32 i, mac_reg;
5388        u16 phy_reg, wuc_enable;
5389        int retval = 0;
5390
5391        /* copy MAC RARs to PHY RARs */
5392        e1000_copy_rx_addrs_to_phy_ich8lan(hw);
5393
5394        retval = hw->phy.ops.acquire(hw);
5395        if (retval) {
5396                e_err("Could not acquire PHY\n");
5397                return retval;
5398        }
5399
5400        /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
5401        retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5402        if (retval)
5403                goto release;
5404
5405        /* copy MAC MTA to PHY MTA - only needed for pchlan */
5406        for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5407                mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5408                hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5409                                           (u16)(mac_reg & 0xFFFF));
5410                hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5411                                           (u16)((mac_reg >> 16) & 0xFFFF));
5412        }
5413
5414        /* configure PHY Rx Control register */
5415        hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
5416        mac_reg = er32(RCTL);
5417        if (mac_reg & E1000_RCTL_UPE)
5418                phy_reg |= BM_RCTL_UPE;
5419        if (mac_reg & E1000_RCTL_MPE)
5420                phy_reg |= BM_RCTL_MPE;
5421        phy_reg &= ~(BM_RCTL_MO_MASK);
5422        if (mac_reg & E1000_RCTL_MO_3)
5423                phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5424                                << BM_RCTL_MO_SHIFT);
5425        if (mac_reg & E1000_RCTL_BAM)
5426                phy_reg |= BM_RCTL_BAM;
5427        if (mac_reg & E1000_RCTL_PMCF)
5428                phy_reg |= BM_RCTL_PMCF;
5429        mac_reg = er32(CTRL);
5430        if (mac_reg & E1000_CTRL_RFCE)
5431                phy_reg |= BM_RCTL_RFCE;
5432        hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
5433
5434        /* enable PHY wakeup in MAC register */
5435        ew32(WUFC, wufc);
5436        ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5437
5438        /* configure and enable PHY wakeup in PHY registers */
5439        hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
5440        hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5441
5442        /* activate PHY wakeup */
5443        wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5444        retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5445        if (retval)
5446                e_err("Could not set PHY Host Wakeup bit\n");
5447release:
5448        hw->phy.ops.release(hw);
5449
5450        return retval;
5451}
5452
5453static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5454                            bool runtime)
5455{
5456        struct net_device *netdev = pci_get_drvdata(pdev);
5457        struct e1000_adapter *adapter = netdev_priv(netdev);
5458        struct e1000_hw *hw = &adapter->hw;
5459        u32 ctrl, ctrl_ext, rctl, status;
5460        /* Runtime suspend should only enable wakeup for link changes */
5461        u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5462        int retval = 0;
5463
5464        netif_device_detach(netdev);
5465
5466        if (netif_running(netdev)) {
5467                int count = E1000_CHECK_RESET_COUNT;
5468
5469                while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
5470                        usleep_range(10000, 20000);
5471
5472                WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5473                e1000e_down(adapter);
5474                e1000_free_irq(adapter);
5475        }
5476        e1000e_reset_interrupt_capability(adapter);
5477
5478        retval = pci_save_state(pdev);
5479        if (retval)
5480                return retval;
5481
5482        status = er32(STATUS);
5483        if (status & E1000_STATUS_LU)
5484                wufc &= ~E1000_WUFC_LNKC;
5485
5486        if (wufc) {
5487                e1000_setup_rctl(adapter);
5488                e1000e_set_rx_mode(netdev);
5489
5490                /* turn on all-multi mode if wake on multicast is enabled */
5491                if (wufc & E1000_WUFC_MC) {
5492                        rctl = er32(RCTL);
5493                        rctl |= E1000_RCTL_MPE;
5494                        ew32(RCTL, rctl);
5495                }
5496
5497                ctrl = er32(CTRL);
5498                /* advertise wake from D3Cold */
5499                #define E1000_CTRL_ADVD3WUC 0x00100000
5500                /* phy power management enable */
5501                #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5502                ctrl |= E1000_CTRL_ADVD3WUC;
5503                if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5504                        ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
5505                ew32(CTRL, ctrl);
5506
5507                if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5508                    adapter->hw.phy.media_type ==
5509                    e1000_media_type_internal_serdes) {
5510                        /* keep the laser running in D3 */
5511                        ctrl_ext = er32(CTRL_EXT);
5512                        ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
5513                        ew32(CTRL_EXT, ctrl_ext);
5514                }
5515
5516                if (adapter->flags & FLAG_IS_ICH)
5517                        e1000_suspend_workarounds_ich8lan(&adapter->hw);
5518
5519                /* Allow time for pending master requests to run */
5520                e1000e_disable_pcie_master(&adapter->hw);
5521
5522                if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5523                        /* enable wakeup by the PHY */
5524                        retval = e1000_init_phy_wakeup(adapter, wufc);
5525                        if (retval)
5526                                return retval;
5527                } else {
5528                        /* enable wakeup by the MAC */
5529                        ew32(WUFC, wufc);
5530                        ew32(WUC, E1000_WUC_PME_EN);
5531                }
5532        } else {
5533                ew32(WUC, 0);
5534                ew32(WUFC, 0);
5535        }
5536
5537        *enable_wake = !!wufc;
5538
5539        /* make sure adapter isn't asleep if manageability is enabled */
5540        if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
5541            (hw->mac.ops.check_mng_mode(hw)))
5542                *enable_wake = true;
5543
5544        if (adapter->hw.phy.type == e1000_phy_igp_3)
5545                e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5546
5547        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
5548         * would have already happened in close and is redundant.
5549         */
5550        e1000e_release_hw_control(adapter);
5551
5552        pci_disable_device(pdev);
5553
5554        return 0;
5555}
5556
5557static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5558{
5559        if (sleep && wake) {
5560                pci_prepare_to_sleep(pdev);
5561                return;
5562        }
5563
5564        pci_wake_from_d3(pdev, wake);
5565        pci_set_power_state(pdev, PCI_D3hot);
5566}
5567
5568static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
5569                                    bool wake)
5570{
5571        struct net_device *netdev = pci_get_drvdata(pdev);
5572        struct e1000_adapter *adapter = netdev_priv(netdev);
5573
5574        /* The pci-e switch on some quad port adapters will report a
5575         * correctable error when the MAC transitions from D0 to D3.  To
5576         * prevent this we need to mask off the correctable errors on the
5577         * downstream port of the pci-e switch.
5578         */
5579        if (adapter->flags & FLAG_IS_QUAD_PORT) {
5580                struct pci_dev *us_dev = pdev->bus->self;
5581                u16 devctl;
5582
5583                pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
5584                pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
5585                                           (devctl & ~PCI_EXP_DEVCTL_CERE));
5586
5587                e1000_power_off(pdev, sleep, wake);
5588
5589                pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
5590        } else {
5591                e1000_power_off(pdev, sleep, wake);
5592        }
5593}
5594
5595#ifdef CONFIG_PCIEASPM
5596static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5597{
5598        pci_disable_link_state_locked(pdev, state);
5599}
5600#else
5601static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5602{
5603        /* Both device and parent should have the same ASPM setting.
5604         * Disable ASPM in downstream component first and then upstream.
5605         */
5606        pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
5607
5608        if (pdev->bus->self)
5609                pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
5610                                           state);
5611}
5612#endif
5613static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5614{
5615        dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5616                 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5617                 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
5618
5619        __e1000e_disable_aspm(pdev, state);
5620}
5621
5622#ifdef CONFIG_PM
5623static bool e1000e_pm_ready(struct e1000_adapter *adapter)
5624{
5625        return !!adapter->tx_ring->buffer_info;
5626}
5627
5628static int __e1000_resume(struct pci_dev *pdev)
5629{
5630        struct net_device *netdev = pci_get_drvdata(pdev);
5631        struct e1000_adapter *adapter = netdev_priv(netdev);
5632        struct e1000_hw *hw = &adapter->hw;
5633        u16 aspm_disable_flag = 0;
5634        u32 err;
5635
5636        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5637                aspm_disable_flag = PCIE_LINK_STATE_L0S;
5638        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5639                aspm_disable_flag |= PCIE_LINK_STATE_L1;
5640        if (aspm_disable_flag)
5641                e1000e_disable_aspm(pdev, aspm_disable_flag);
5642
5643        pci_set_power_state(pdev, PCI_D0);
5644        pci_restore_state(pdev);
5645        pci_save_state(pdev);
5646
5647        e1000e_set_interrupt_capability(adapter);
5648        if (netif_running(netdev)) {
5649                err = e1000_request_irq(adapter);
5650                if (err)
5651                        return err;
5652        }
5653
5654        if (hw->mac.type >= e1000_pch2lan)
5655                e1000_resume_workarounds_pchlan(&adapter->hw);
5656
5657        e1000e_power_up_phy(adapter);
5658
5659        /* report the system wakeup cause from S3/S4 */
5660        if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5661                u16 phy_data;
5662
5663                e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
5664                if (phy_data) {
5665                        e_info("PHY Wakeup cause - %s\n",
5666                                phy_data & E1000_WUS_EX ? "Unicast Packet" :
5667                                phy_data & E1000_WUS_MC ? "Multicast Packet" :
5668                                phy_data & E1000_WUS_BC ? "Broadcast Packet" :
5669                                phy_data & E1000_WUS_MAG ? "Magic Packet" :
5670                                phy_data & E1000_WUS_LNKC ?
5671                                "Link Status Change" : "other");
5672                }
5673                e1e_wphy(&adapter->hw, BM_WUS, ~0);
5674        } else {
5675                u32 wus = er32(WUS);
5676                if (wus) {
5677                        e_info("MAC Wakeup cause - %s\n",
5678                                wus & E1000_WUS_EX ? "Unicast Packet" :
5679                                wus & E1000_WUS_MC ? "Multicast Packet" :
5680                                wus & E1000_WUS_BC ? "Broadcast Packet" :
5681                                wus & E1000_WUS_MAG ? "Magic Packet" :
5682                                wus & E1000_WUS_LNKC ? "Link Status Change" :
5683                                "other");
5684                }
5685                ew32(WUS, ~0);
5686        }
5687
5688        e1000e_reset(adapter);
5689
5690        e1000_init_manageability_pt(adapter);
5691
5692        if (netif_running(netdev))
5693                e1000e_up(adapter);
5694
5695        netif_device_attach(netdev);
5696
5697        /* If the controller has AMT, do not set DRV_LOAD until the interface
5698         * is up.  For all other cases, let the f/w know that the h/w is now
5699         * under the control of the driver.
5700         */
5701        if (!(adapter->flags & FLAG_HAS_AMT))
5702                e1000e_get_hw_control(adapter);
5703
5704        return 0;
5705}
5706
5707#ifdef CONFIG_PM_SLEEP
5708static int e1000_suspend(struct device *dev)
5709{
5710        struct pci_dev *pdev = to_pci_dev(dev);
5711        int retval;
5712        bool wake;
5713
5714        retval = __e1000_shutdown(pdev, &wake, false);
5715        if (!retval)
5716                e1000_complete_shutdown(pdev, true, wake);
5717
5718        return retval;
5719}
5720
5721static int e1000_resume(struct device *dev)
5722{
5723        struct pci_dev *pdev = to_pci_dev(dev);
5724        struct net_device *netdev = pci_get_drvdata(pdev);
5725        struct e1000_adapter *adapter = netdev_priv(netdev);
5726
5727        if (e1000e_pm_ready(adapter))
5728                adapter->idle_check = true;
5729
5730        return __e1000_resume(pdev);
5731}
5732#endif /* CONFIG_PM_SLEEP */
5733
5734#ifdef CONFIG_PM_RUNTIME
5735static int e1000_runtime_suspend(struct device *dev)
5736{
5737        struct pci_dev *pdev = to_pci_dev(dev);
5738        struct net_device *netdev = pci_get_drvdata(pdev);
5739        struct e1000_adapter *adapter = netdev_priv(netdev);
5740
5741        if (e1000e_pm_ready(adapter)) {
5742                bool wake;
5743
5744                __e1000_shutdown(pdev, &wake, true);
5745        }
5746
5747        return 0;
5748}
5749
5750static int e1000_idle(struct device *dev)
5751{
5752        struct pci_dev *pdev = to_pci_dev(dev);
5753        struct net_device *netdev = pci_get_drvdata(pdev);
5754        struct e1000_adapter *adapter = netdev_priv(netdev);
5755
5756        if (!e1000e_pm_ready(adapter))
5757                return 0;
5758
5759        if (adapter->idle_check) {
5760                adapter->idle_check = false;
5761                if (!e1000e_has_link(adapter))
5762                        pm_schedule_suspend(dev, MSEC_PER_SEC);
5763        }
5764
5765        return -EBUSY;
5766}
5767
5768static int e1000_runtime_resume(struct device *dev)
5769{
5770        struct pci_dev *pdev = to_pci_dev(dev);
5771        struct net_device *netdev = pci_get_drvdata(pdev);
5772        struct e1000_adapter *adapter = netdev_priv(netdev);
5773
5774        if (!e1000e_pm_ready(adapter))
5775                return 0;
5776
5777        adapter->idle_check = !dev->power.runtime_auto;
5778        return __e1000_resume(pdev);
5779}
5780#endif /* CONFIG_PM_RUNTIME */
5781#endif /* CONFIG_PM */
5782
5783static void e1000_shutdown(struct pci_dev *pdev)
5784{
5785        bool wake = false;
5786
5787        __e1000_shutdown(pdev, &wake, false);
5788
5789        if (system_state == SYSTEM_POWER_OFF)
5790                e1000_complete_shutdown(pdev, false, wake);
5791}
5792
5793#ifdef CONFIG_NET_POLL_CONTROLLER
5794
5795static irqreturn_t e1000_intr_msix(int irq, void *data)
5796{
5797        struct net_device *netdev = data;
5798        struct e1000_adapter *adapter = netdev_priv(netdev);
5799
5800        if (adapter->msix_entries) {
5801                int vector, msix_irq;
5802
5803                vector = 0;
5804                msix_irq = adapter->msix_entries[vector].vector;
5805                disable_irq(msix_irq);
5806                e1000_intr_msix_rx(msix_irq, netdev);
5807                enable_irq(msix_irq);
5808
5809                vector++;
5810                msix_irq = adapter->msix_entries[vector].vector;
5811                disable_irq(msix_irq);
5812                e1000_intr_msix_tx(msix_irq, netdev);
5813                enable_irq(msix_irq);
5814
5815                vector++;
5816                msix_irq = adapter->msix_entries[vector].vector;
5817                disable_irq(msix_irq);
5818                e1000_msix_other(msix_irq, netdev);
5819                enable_irq(msix_irq);
5820        }
5821
5822        return IRQ_HANDLED;
5823}
5824
5825/**
5826 * e1000_netpoll
5827 * @netdev: network interface device structure
5828 *
5829 * Polling 'interrupt' - used by things like netconsole to send skbs
5830 * without having to re-enable interrupts. It's not called while
5831 * the interrupt routine is executing.
5832 */
5833static void e1000_netpoll(struct net_device *netdev)
5834{
5835        struct e1000_adapter *adapter = netdev_priv(netdev);
5836
5837        switch (adapter->int_mode) {
5838        case E1000E_INT_MODE_MSIX:
5839                e1000_intr_msix(adapter->pdev->irq, netdev);
5840                break;
5841        case E1000E_INT_MODE_MSI:
5842                disable_irq(adapter->pdev->irq);
5843                e1000_intr_msi(adapter->pdev->irq, netdev);
5844                enable_irq(adapter->pdev->irq);
5845                break;
5846        default: /* E1000E_INT_MODE_LEGACY */
5847                disable_irq(adapter->pdev->irq);
5848                e1000_intr(adapter->pdev->irq, netdev);
5849                enable_irq(adapter->pdev->irq);
5850                break;
5851        }
5852}
5853#endif
5854
5855/**
5856 * e1000_io_error_detected - called when PCI error is detected
5857 * @pdev: Pointer to PCI device
5858 * @state: The current pci connection state
5859 *
5860 * This function is called after a PCI bus error affecting
5861 * this device has been detected.
5862 */
5863static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5864                                                pci_channel_state_t state)
5865{
5866        struct net_device *netdev = pci_get_drvdata(pdev);
5867        struct e1000_adapter *adapter = netdev_priv(netdev);
5868
5869        netif_device_detach(netdev);
5870
5871        if (state == pci_channel_io_perm_failure)
5872                return PCI_ERS_RESULT_DISCONNECT;
5873
5874        if (netif_running(netdev))
5875                e1000e_down(adapter);
5876        pci_disable_device(pdev);
5877
5878        /* Request a slot slot reset. */
5879        return PCI_ERS_RESULT_NEED_RESET;
5880}
5881
5882/**
5883 * e1000_io_slot_reset - called after the pci bus has been reset.
5884 * @pdev: Pointer to PCI device
5885 *
5886 * Restart the card from scratch, as if from a cold-boot. Implementation
5887 * resembles the first-half of the e1000_resume routine.
5888 */
5889static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5890{
5891        struct net_device *netdev = pci_get_drvdata(pdev);
5892        struct e1000_adapter *adapter = netdev_priv(netdev);
5893        struct e1000_hw *hw = &adapter->hw;
5894        u16 aspm_disable_flag = 0;
5895        int err;
5896        pci_ers_result_t result;
5897
5898        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5899                aspm_disable_flag = PCIE_LINK_STATE_L0S;
5900        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5901                aspm_disable_flag |= PCIE_LINK_STATE_L1;
5902        if (aspm_disable_flag)
5903                e1000e_disable_aspm(pdev, aspm_disable_flag);
5904
5905        err = pci_enable_device_mem(pdev);
5906        if (err) {
5907                dev_err(&pdev->dev,
5908                        "Cannot re-enable PCI device after reset.\n");
5909                result = PCI_ERS_RESULT_DISCONNECT;
5910        } else {
5911                pci_set_master(pdev);
5912                pdev->state_saved = true;
5913                pci_restore_state(pdev);
5914
5915                pci_enable_wake(pdev, PCI_D3hot, 0);
5916                pci_enable_wake(pdev, PCI_D3cold, 0);
5917
5918                e1000e_reset(adapter);
5919                ew32(WUS, ~0);
5920                result = PCI_ERS_RESULT_RECOVERED;
5921        }
5922
5923        pci_cleanup_aer_uncorrect_error_status(pdev);
5924
5925        return result;
5926}
5927
5928/**
5929 * e1000_io_resume - called when traffic can start flowing again.
5930 * @pdev: Pointer to PCI device
5931 *
5932 * This callback is called when the error recovery driver tells us that
5933 * its OK to resume normal operation. Implementation resembles the
5934 * second-half of the e1000_resume routine.
5935 */
5936static void e1000_io_resume(struct pci_dev *pdev)
5937{
5938        struct net_device *netdev = pci_get_drvdata(pdev);
5939        struct e1000_adapter *adapter = netdev_priv(netdev);
5940
5941        e1000_init_manageability_pt(adapter);
5942
5943        if (netif_running(netdev)) {
5944                if (e1000e_up(adapter)) {
5945                        dev_err(&pdev->dev,
5946                                "can't bring device back up after reset\n");
5947                        return;
5948                }
5949        }
5950
5951        netif_device_attach(netdev);
5952
5953        /* If the controller has AMT, do not set DRV_LOAD until the interface
5954         * is up.  For all other cases, let the f/w know that the h/w is now
5955         * under the control of the driver.
5956         */
5957        if (!(adapter->flags & FLAG_HAS_AMT))
5958                e1000e_get_hw_control(adapter);
5959
5960}
5961
5962static void e1000_print_device_info(struct e1000_adapter *adapter)
5963{
5964        struct e1000_hw *hw = &adapter->hw;
5965        struct net_device *netdev = adapter->netdev;
5966        u32 ret_val;
5967        u8 pba_str[E1000_PBANUM_LENGTH];
5968
5969        /* print bus type/speed/width info */
5970        e_info("(PCI Express:2.5GT/s:%s) %pM\n",
5971               /* bus width */
5972               ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5973                "Width x1"),
5974               /* MAC address */
5975               netdev->dev_addr);
5976        e_info("Intel(R) PRO/%s Network Connection\n",
5977               (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5978        ret_val = e1000_read_pba_string_generic(hw, pba_str,
5979                                                E1000_PBANUM_LENGTH);
5980        if (ret_val)
5981                strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
5982        e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5983               hw->mac.type, hw->phy.type, pba_str);
5984}
5985
5986static void e1000_eeprom_checks(struct e1000_adapter *adapter)
5987{
5988        struct e1000_hw *hw = &adapter->hw;
5989        int ret_val;
5990        u16 buf = 0;
5991
5992        if (hw->mac.type != e1000_82573)
5993                return;
5994
5995        ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
5996        le16_to_cpus(&buf);
5997        if (!ret_val && (!(buf & (1 << 0)))) {
5998                /* Deep Smart Power Down (DSPD) */
5999                dev_warn(&adapter->pdev->dev,
6000                         "Warning: detected DSPD enabled in EEPROM\n");
6001        }
6002}
6003
6004static int e1000_set_features(struct net_device *netdev,
6005                              netdev_features_t features)
6006{
6007        struct e1000_adapter *adapter = netdev_priv(netdev);
6008        netdev_features_t changed = features ^ netdev->features;
6009
6010        if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
6011                adapter->flags |= FLAG_TSO_FORCE;
6012
6013        if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
6014                         NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
6015                         NETIF_F_RXALL)))
6016                return 0;
6017
6018        if (changed & NETIF_F_RXFCS) {
6019                if (features & NETIF_F_RXFCS) {
6020                        adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6021                } else {
6022                        /* We need to take it back to defaults, which might mean
6023                         * stripping is still disabled at the adapter level.
6024                         */
6025                        if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
6026                                adapter->flags2 |= FLAG2_CRC_STRIPPING;
6027                        else
6028                                adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6029                }
6030        }
6031
6032        netdev->features = features;
6033
6034        if (netif_running(netdev))
6035                e1000e_reinit_locked(adapter);
6036        else
6037                e1000e_reset(adapter);
6038
6039        return 0;
6040}
6041
6042static const struct net_device_ops e1000e_netdev_ops = {
6043        .ndo_open               = e1000_open,
6044        .ndo_stop               = e1000_close,
6045        .ndo_start_xmit         = e1000_xmit_frame,
6046        .ndo_get_stats64        = e1000e_get_stats64,
6047        .ndo_set_rx_mode        = e1000e_set_rx_mode,
6048        .ndo_set_mac_address    = e1000_set_mac,
6049        .ndo_change_mtu         = e1000_change_mtu,
6050        .ndo_do_ioctl           = e1000_ioctl,
6051        .ndo_tx_timeout         = e1000_tx_timeout,
6052        .ndo_validate_addr      = eth_validate_addr,
6053
6054        .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
6055        .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
6056#ifdef CONFIG_NET_POLL_CONTROLLER
6057        .ndo_poll_controller    = e1000_netpoll,
6058#endif
6059        .ndo_set_features = e1000_set_features,
6060};
6061
6062/**
6063 * e1000_probe - Device Initialization Routine
6064 * @pdev: PCI device information struct
6065 * @ent: entry in e1000_pci_tbl
6066 *
6067 * Returns 0 on success, negative on failure
6068 *
6069 * e1000_probe initializes an adapter identified by a pci_dev structure.
6070 * The OS initialization, configuring of the adapter private structure,
6071 * and a hardware reset occur.
6072 **/
6073static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6074{
6075        struct net_device *netdev;
6076        struct e1000_adapter *adapter;
6077        struct e1000_hw *hw;
6078        const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
6079        resource_size_t mmio_start, mmio_len;
6080        resource_size_t flash_start, flash_len;
6081        static int cards_found;
6082        u16 aspm_disable_flag = 0;
6083        int i, err, pci_using_dac;
6084        u16 eeprom_data = 0;
6085        u16 eeprom_apme_mask = E1000_EEPROM_APME;
6086
6087        if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
6088                aspm_disable_flag = PCIE_LINK_STATE_L0S;
6089        if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
6090                aspm_disable_flag |= PCIE_LINK_STATE_L1;
6091        if (aspm_disable_flag)
6092                e1000e_disable_aspm(pdev, aspm_disable_flag);
6093
6094        err = pci_enable_device_mem(pdev);
6095        if (err)
6096                return err;
6097
6098        pci_using_dac = 0;
6099        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6100        if (!err) {
6101                err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
6102                if (!err)
6103                        pci_using_dac = 1;
6104        } else {
6105                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6106                if (err) {
6107                        err = dma_set_coherent_mask(&pdev->dev,
6108                                                    DMA_BIT_MASK(32));
6109                        if (err) {
6110                                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
6111                                goto err_dma;
6112                        }
6113                }
6114        }
6115
6116        err = pci_request_selected_regions_exclusive(pdev,
6117                                          pci_select_bars(pdev, IORESOURCE_MEM),
6118                                          e1000e_driver_name);
6119        if (err)
6120                goto err_pci_reg;
6121
6122        /* AER (Advanced Error Reporting) hooks */
6123        pci_enable_pcie_error_reporting(pdev);
6124
6125        pci_set_master(pdev);
6126        /* PCI config space info */
6127        err = pci_save_state(pdev);
6128        if (err)
6129                goto err_alloc_etherdev;
6130
6131        err = -ENOMEM;
6132        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
6133        if (!netdev)
6134                goto err_alloc_etherdev;
6135
6136        SET_NETDEV_DEV(netdev, &pdev->dev);
6137
6138        netdev->irq = pdev->irq;
6139
6140        pci_set_drvdata(pdev, netdev);
6141        adapter = netdev_priv(netdev);
6142        hw = &adapter->hw;
6143        adapter->netdev = netdev;
6144        adapter->pdev = pdev;
6145        adapter->ei = ei;
6146        adapter->pba = ei->pba;
6147        adapter->flags = ei->flags;
6148        adapter->flags2 = ei->flags2;
6149        adapter->hw.adapter = adapter;
6150        adapter->hw.mac.type = ei->mac;
6151        adapter->max_hw_frame_size = ei->max_hw_frame_size;
6152        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6153
6154        mmio_start = pci_resource_start(pdev, 0);
6155        mmio_len = pci_resource_len(pdev, 0);
6156
6157        err = -EIO;
6158        adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
6159        if (!adapter->hw.hw_addr)
6160                goto err_ioremap;
6161
6162        if ((adapter->flags & FLAG_HAS_FLASH) &&
6163            (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6164                flash_start = pci_resource_start(pdev, 1);
6165                flash_len = pci_resource_len(pdev, 1);
6166                adapter->hw.flash_address = ioremap(flash_start, flash_len);
6167                if (!adapter->hw.flash_address)
6168                        goto err_flashmap;
6169        }
6170
6171        /* construct the net_device struct */
6172        netdev->netdev_ops              = &e1000e_netdev_ops;
6173        e1000e_set_ethtool_ops(netdev);
6174        netdev->watchdog_timeo          = 5 * HZ;
6175        netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6176        strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6177
6178        netdev->mem_start = mmio_start;
6179        netdev->mem_end = mmio_start + mmio_len;
6180
6181        adapter->bd_number = cards_found++;
6182
6183        e1000e_check_options(adapter);
6184
6185        /* setup adapter struct */
6186        err = e1000_sw_init(adapter);
6187        if (err)
6188                goto err_sw_init;
6189
6190        memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6191        memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6192        memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6193
6194        err = ei->get_variants(adapter);
6195        if (err)
6196                goto err_hw_init;
6197
6198        if ((adapter->flags & FLAG_IS_ICH) &&
6199            (adapter->flags & FLAG_READ_ONLY_NVM))
6200                e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6201
6202        hw->mac.ops.get_bus_info(&adapter->hw);
6203
6204        adapter->hw.phy.autoneg_wait_to_complete = 0;
6205
6206        /* Copper options */
6207        if (adapter->hw.phy.media_type == e1000_media_type_copper) {
6208                adapter->hw.phy.mdix = AUTO_ALL_MODES;
6209                adapter->hw.phy.disable_polarity_correction = 0;
6210                adapter->hw.phy.ms_type = e1000_ms_hw_default;
6211        }
6212
6213        if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6214                dev_info(&pdev->dev,
6215                         "PHY reset is blocked due to SOL/IDER session.\n");
6216
6217        /* Set initial default active device features */
6218        netdev->features = (NETIF_F_SG |
6219                            NETIF_F_HW_VLAN_RX |
6220                            NETIF_F_HW_VLAN_TX |
6221                            NETIF_F_TSO |
6222                            NETIF_F_TSO6 |
6223                            NETIF_F_RXHASH |
6224                            NETIF_F_RXCSUM |
6225                            NETIF_F_HW_CSUM);
6226
6227        /* Set user-changeable features (subset of all device features) */
6228        netdev->hw_features = netdev->features;
6229        netdev->hw_features |= NETIF_F_RXFCS;
6230        netdev->priv_flags |= IFF_SUPP_NOFCS;
6231        netdev->hw_features |= NETIF_F_RXALL;
6232
6233        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
6234                netdev->features |= NETIF_F_HW_VLAN_FILTER;
6235
6236        netdev->vlan_features |= (NETIF_F_SG |
6237                                  NETIF_F_TSO |
6238                                  NETIF_F_TSO6 |
6239                                  NETIF_F_HW_CSUM);
6240
6241        netdev->priv_flags |= IFF_UNICAST_FLT;
6242
6243        if (pci_using_dac) {
6244                netdev->features |= NETIF_F_HIGHDMA;
6245                netdev->vlan_features |= NETIF_F_HIGHDMA;
6246        }
6247
6248        if (e1000e_enable_mng_pass_thru(&adapter->hw))
6249                adapter->flags |= FLAG_MNG_PT_ENABLED;
6250
6251        /* before reading the NVM, reset the controller to
6252         * put the device in a known good starting state
6253         */
6254        adapter->hw.mac.ops.reset_hw(&adapter->hw);
6255
6256        /* systems with ASPM and others may see the checksum fail on the first
6257         * attempt. Let's give it a few tries
6258         */
6259        for (i = 0;; i++) {
6260                if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6261                        break;
6262                if (i == 2) {
6263                        dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6264                        err = -EIO;
6265                        goto err_eeprom;
6266                }
6267        }
6268
6269        e1000_eeprom_checks(adapter);
6270
6271        /* copy the MAC address */
6272        if (e1000e_read_mac_addr(&adapter->hw))
6273                dev_err(&pdev->dev,
6274                        "NVM Read Error while reading MAC address\n");
6275
6276        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6277        memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
6278
6279        if (!is_valid_ether_addr(netdev->perm_addr)) {
6280                dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
6281                        netdev->perm_addr);
6282                err = -EIO;
6283                goto err_eeprom;
6284        }
6285
6286        init_timer(&adapter->watchdog_timer);
6287        adapter->watchdog_timer.function = e1000_watchdog;
6288        adapter->watchdog_timer.data = (unsigned long) adapter;
6289
6290        init_timer(&adapter->phy_info_timer);
6291        adapter->phy_info_timer.function = e1000_update_phy_info;
6292        adapter->phy_info_timer.data = (unsigned long) adapter;
6293
6294        INIT_WORK(&adapter->reset_task, e1000_reset_task);
6295        INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
6296        INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6297        INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
6298        INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
6299
6300        /* Initialize link parameters. User can change them with ethtool */
6301        adapter->hw.mac.autoneg = 1;
6302        adapter->fc_autoneg = true;
6303        adapter->hw.fc.requested_mode = e1000_fc_default;
6304        adapter->hw.fc.current_mode = e1000_fc_default;
6305        adapter->hw.phy.autoneg_advertised = 0x2f;
6306
6307        /* ring size defaults */
6308        adapter->rx_ring->count = E1000_DEFAULT_RXD;
6309        adapter->tx_ring->count = E1000_DEFAULT_TXD;
6310
6311        /* Initial Wake on LAN setting - If APM wake is enabled in
6312         * the EEPROM, enable the ACPI Magic Packet filter
6313         */
6314        if (adapter->flags & FLAG_APME_IN_WUC) {
6315                /* APME bit in EEPROM is mapped to WUC.APME */
6316                eeprom_data = er32(WUC);
6317                eeprom_apme_mask = E1000_WUC_APME;
6318                if ((hw->mac.type > e1000_ich10lan) &&
6319                    (eeprom_data & E1000_WUC_PHY_WAKE))
6320                        adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
6321        } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6322                if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6323                    (adapter->hw.bus.func == 1))
6324                        e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
6325                                       1, &eeprom_data);
6326                else
6327                        e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
6328                                       1, &eeprom_data);
6329        }
6330
6331        /* fetch WoL from EEPROM */
6332        if (eeprom_data & eeprom_apme_mask)
6333                adapter->eeprom_wol |= E1000_WUFC_MAG;
6334
6335        /* now that we have the eeprom settings, apply the special cases
6336         * where the eeprom may be wrong or the board simply won't support
6337         * wake on lan on a particular port
6338         */
6339        if (!(adapter->flags & FLAG_HAS_WOL))
6340                adapter->eeprom_wol = 0;
6341
6342        /* initialize the wol settings based on the eeprom settings */
6343        adapter->wol = adapter->eeprom_wol;
6344        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
6345
6346        /* save off EEPROM version number */
6347        e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
6348
6349        /* reset the hardware with the new settings */
6350        e1000e_reset(adapter);
6351
6352        /* If the controller has AMT, do not set DRV_LOAD until the interface
6353         * is up.  For all other cases, let the f/w know that the h/w is now
6354         * under the control of the driver.
6355         */
6356        if (!(adapter->flags & FLAG_HAS_AMT))
6357                e1000e_get_hw_control(adapter);
6358
6359        strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
6360        err = register_netdev(netdev);
6361        if (err)
6362                goto err_register;
6363
6364        /* carrier off reporting is important to ethtool even BEFORE open */
6365        netif_carrier_off(netdev);
6366
6367        e1000_print_device_info(adapter);
6368
6369        if (pci_dev_run_wake(pdev))
6370                pm_runtime_put_noidle(&pdev->dev);
6371
6372        return 0;
6373
6374err_register:
6375        if (!(adapter->flags & FLAG_HAS_AMT))
6376                e1000e_release_hw_control(adapter);
6377err_eeprom:
6378        if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
6379                e1000_phy_hw_reset(&adapter->hw);
6380err_hw_init:
6381        kfree(adapter->tx_ring);
6382        kfree(adapter->rx_ring);
6383err_sw_init:
6384        if (adapter->hw.flash_address)
6385                iounmap(adapter->hw.flash_address);
6386        e1000e_reset_interrupt_capability(adapter);
6387err_flashmap:
6388        iounmap(adapter->hw.hw_addr);
6389err_ioremap:
6390        free_netdev(netdev);
6391err_alloc_etherdev:
6392        pci_release_selected_regions(pdev,
6393                                     pci_select_bars(pdev, IORESOURCE_MEM));
6394err_pci_reg:
6395err_dma:
6396        pci_disable_device(pdev);
6397        return err;
6398}
6399
6400/**
6401 * e1000_remove - Device Removal Routine
6402 * @pdev: PCI device information struct
6403 *
6404 * e1000_remove is called by the PCI subsystem to alert the driver
6405 * that it should release a PCI device.  The could be caused by a
6406 * Hot-Plug event, or because the driver is going to be removed from
6407 * memory.
6408 **/
6409static void e1000_remove(struct pci_dev *pdev)
6410{
6411        struct net_device *netdev = pci_get_drvdata(pdev);
6412        struct e1000_adapter *adapter = netdev_priv(netdev);
6413        bool down = test_bit(__E1000_DOWN, &adapter->state);
6414
6415        /* The timers may be rescheduled, so explicitly disable them
6416         * from being rescheduled.
6417         */
6418        if (!down)
6419                set_bit(__E1000_DOWN, &adapter->state);
6420        del_timer_sync(&adapter->watchdog_timer);
6421        del_timer_sync(&adapter->phy_info_timer);
6422
6423        cancel_work_sync(&adapter->reset_task);
6424        cancel_work_sync(&adapter->watchdog_task);
6425        cancel_work_sync(&adapter->downshift_task);
6426        cancel_work_sync(&adapter->update_phy_task);
6427        cancel_work_sync(&adapter->print_hang_task);
6428
6429        if (!(netdev->flags & IFF_UP))
6430                e1000_power_down_phy(adapter);
6431
6432        /* Don't lie to e1000_close() down the road. */
6433        if (!down)
6434                clear_bit(__E1000_DOWN, &adapter->state);
6435        unregister_netdev(netdev);
6436
6437        if (pci_dev_run_wake(pdev))
6438                pm_runtime_get_noresume(&pdev->dev);
6439
6440        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
6441         * would have already happened in close and is redundant.
6442         */
6443        e1000e_release_hw_control(adapter);
6444
6445        e1000e_reset_interrupt_capability(adapter);
6446        kfree(adapter->tx_ring);
6447        kfree(adapter->rx_ring);
6448
6449        iounmap(adapter->hw.hw_addr);
6450        if (adapter->hw.flash_address)
6451                iounmap(adapter->hw.flash_address);
6452        pci_release_selected_regions(pdev,
6453                                     pci_select_bars(pdev, IORESOURCE_MEM));
6454
6455        free_netdev(netdev);
6456
6457        /* AER disable */
6458        pci_disable_pcie_error_reporting(pdev);
6459
6460        pci_disable_device(pdev);
6461}
6462
6463/* PCI Error Recovery (ERS) */
6464static const struct pci_error_handlers e1000_err_handler = {
6465        .error_detected = e1000_io_error_detected,
6466        .slot_reset = e1000_io_slot_reset,
6467        .resume = e1000_io_resume,
6468};
6469
6470static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6471        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
6472        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
6473        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
6474        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
6475        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
6476        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
6477        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
6478        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
6479        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
6480
6481        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
6482        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
6483        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
6484        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
6485
6486        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
6487        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
6488        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
6489
6490        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
6491        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
6492        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
6493
6494        { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
6495          board_80003es2lan },
6496        { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
6497          board_80003es2lan },
6498        { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
6499          board_80003es2lan },
6500        { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
6501          board_80003es2lan },
6502
6503        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
6504        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
6505        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
6506        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
6507        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
6508        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
6509        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
6510        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
6511
6512        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
6513        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
6514        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
6515        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
6516        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
6517        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
6518        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
6519        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
6520        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
6521
6522        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
6523        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
6524        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
6525
6526        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
6527        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
6528        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
6529
6530        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
6531        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
6532        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
6533        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
6534
6535        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6536        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6537
6538        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
6539        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
6540        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
6541        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
6542
6543        { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
6544};
6545MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6546
6547#ifdef CONFIG_PM
6548static const struct dev_pm_ops e1000_pm_ops = {
6549        SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6550        SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
6551                                e1000_runtime_resume, e1000_idle)
6552};
6553#endif
6554
6555/* PCI Device API Driver */
6556static struct pci_driver e1000_driver = {
6557        .name     = e1000e_driver_name,
6558        .id_table = e1000_pci_tbl,
6559        .probe    = e1000_probe,
6560        .remove   = e1000_remove,
6561#ifdef CONFIG_PM
6562        .driver   = {
6563                .pm = &e1000_pm_ops,
6564        },
6565#endif
6566        .shutdown = e1000_shutdown,
6567        .err_handler = &e1000_err_handler
6568};
6569
6570/**
6571 * e1000_init_module - Driver Registration Routine
6572 *
6573 * e1000_init_module is the first routine called when the driver is
6574 * loaded. All it does is register with the PCI subsystem.
6575 **/
6576static int __init e1000_init_module(void)
6577{
6578        int ret;
6579        pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6580                e1000e_driver_version);
6581        pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
6582        ret = pci_register_driver(&e1000_driver);
6583
6584        return ret;
6585}
6586module_init(e1000_init_module);
6587
6588/**
6589 * e1000_exit_module - Driver Exit Cleanup Routine
6590 *
6591 * e1000_exit_module is called just before the driver is removed
6592 * from memory.
6593 **/
6594static void __exit e1000_exit_module(void)
6595{
6596        pci_unregister_driver(&e1000_driver);
6597}
6598module_exit(e1000_exit_module);
6599
6600
6601MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
6602MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
6603MODULE_LICENSE("GPL");
6604MODULE_VERSION(DRV_VERSION);
6605
6606/* netdev.c */
6607