linux/drivers/net/ethernet/intel/e1000e/netdev.c
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"

#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
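/*
 * The debug value presumably seeds adapter->msg_enable through the usual
 * netif_msg_init(debug, DEFAULT_MSG_ENABLE) pattern at probe time (the
 * call site is outside this excerpt), so e.g. "modprobe e1000e debug=16"
 * would enable all message classes.
 */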

static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);

static const struct e1000_info *e1000_info_tbl[] = {
        [board_82571]           = &e1000_82571_info,
        [board_82572]           = &e1000_82572_info,
        [board_82573]           = &e1000_82573_info,
        [board_82574]           = &e1000_82574_info,
        [board_82583]           = &e1000_82583_info,
        [board_80003es2lan]     = &e1000_es2_info,
        [board_ich8lan]         = &e1000_ich8_info,
        [board_ich9lan]         = &e1000_ich9_info,
        [board_ich10lan]        = &e1000_ich10_info,
        [board_pchlan]          = &e1000_pch_info,
        [board_pch2lan]         = &e1000_pch2_info,
        [board_pch_lpt]         = &e1000_pch_lpt_info,
};

struct e1000_reg_info {
        u32 ofs;
        char *name;
};

#define E1000_RDFH      0x02410 /* Rx Data FIFO Head - RW */
#define E1000_RDFT      0x02418 /* Rx Data FIFO Tail - RW */
#define E1000_RDFHS     0x02420 /* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS     0x02428 /* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC     0x02430 /* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH      0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT      0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS     0x03420 /* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS     0x03428 /* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC     0x03430 /* Tx Data FIFO Packet Count - RW */
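/*
 * The Rx/Tx data FIFO registers above are diagnostic-only and are
 * apparently defined locally, rather than in the shared register header,
 * because they are used solely by the register-dump code below.
 */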

static const struct e1000_reg_info e1000_reg_info_tbl[] = {

        /* General Registers */
        {E1000_CTRL, "CTRL"},
        {E1000_STATUS, "STATUS"},
        {E1000_CTRL_EXT, "CTRL_EXT"},

        /* Interrupt Registers */
        {E1000_ICR, "ICR"},

        /* Rx Registers */
        {E1000_RCTL, "RCTL"},
        {E1000_RDLEN(0), "RDLEN"},
        {E1000_RDH(0), "RDH"},
        {E1000_RDT(0), "RDT"},
        {E1000_RDTR, "RDTR"},
        {E1000_RXDCTL(0), "RXDCTL"},
        {E1000_ERT, "ERT"},
        {E1000_RDBAL(0), "RDBAL"},
        {E1000_RDBAH(0), "RDBAH"},
        {E1000_RDFH, "RDFH"},
        {E1000_RDFT, "RDFT"},
        {E1000_RDFHS, "RDFHS"},
        {E1000_RDFTS, "RDFTS"},
        {E1000_RDFPC, "RDFPC"},

        /* Tx Registers */
        {E1000_TCTL, "TCTL"},
        {E1000_TDBAL(0), "TDBAL"},
        {E1000_TDBAH(0), "TDBAH"},
        {E1000_TDLEN(0), "TDLEN"},
        {E1000_TDH(0), "TDH"},
        {E1000_TDT(0), "TDT"},
        {E1000_TIDV, "TIDV"},
        {E1000_TXDCTL(0), "TXDCTL"},
        {E1000_TADV, "TADV"},
        {E1000_TARC(0), "TARC"},
        {E1000_TDFH, "TDFH"},
        {E1000_TDFT, "TDFT"},
        {E1000_TDFHS, "TDFHS"},
        {E1000_TDFTS, "TDFTS"},
        {E1000_TDFPC, "TDFPC"},

        /* List Terminator */
        {0, NULL}
};

/*
 * e1000_regdump - register printout routine
 */
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
        int n = 0;
        char rname[16];
        u32 regs[8];

        switch (reginfo->ofs) {
        case E1000_RXDCTL(0):
                for (n = 0; n < 2; n++)
                        regs[n] = __er32(hw, E1000_RXDCTL(n));
                break;
        case E1000_TXDCTL(0):
                for (n = 0; n < 2; n++)
                        regs[n] = __er32(hw, E1000_TXDCTL(n));
                break;
        case E1000_TARC(0):
                for (n = 0; n < 2; n++)
                        regs[n] = __er32(hw, E1000_TARC(n));
                break;
        default:
                pr_info("%-15s %08x\n",
                        reginfo->name, __er32(hw, reginfo->ofs));
                return;
        }

        snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
        pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}

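/*
 * e1000e_dump_ps_pages - hex-dump the packet-split pages of one Rx buffer
 * @adapter: board private structure
 * @bi: buffer whose ps_pages are dumped
 */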
static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
                                 struct e1000_buffer *bi)
{
        int i;
        struct e1000_ps_page *ps_page;

        for (i = 0; i < adapter->rx_ps_pages; i++) {
                ps_page = &bi->ps_pages[i];

                if (ps_page->page) {
                        pr_info("packet dump for ps_page %d:\n", i);
                        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
                                       16, 1, page_address(ps_page->page),
                                       PAGE_SIZE, true);
                }
        }
}

/*
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 */
static void e1000e_dump(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_reg_info *reginfo;
        struct e1000_ring *tx_ring = adapter->tx_ring;
        struct e1000_tx_desc *tx_desc;
        struct my_u0 {
                __le64 a;
                __le64 b;
        } *u0;
        struct e1000_buffer *buffer_info;
        struct e1000_ring *rx_ring = adapter->rx_ring;
        union e1000_rx_desc_packet_split *rx_desc_ps;
        union e1000_rx_desc_extended *rx_desc;
        struct my_u1 {
                __le64 a;
                __le64 b;
                __le64 c;
                __le64 d;
        } *u1;
        u32 staterr;
        int i = 0;

        if (!netif_msg_hw(adapter))
                return;

        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
                pr_info("Device Name     state            trans_start      last_rx\n");
                pr_info("%-15s %016lX %016lX %016lX\n",
                        netdev->name, netdev->state, netdev->trans_start,
                        netdev->last_rx);
        }

        /* Print Registers */
        dev_info(&adapter->pdev->dev, "Register Dump\n");
        pr_info(" Register Name   Value\n");
        for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
             reginfo->name; reginfo++) {
                e1000_regdump(hw, reginfo);
        }

        /* Print Tx Ring Summary */
        if (!netdev || !netif_running(netdev))
                return;

        dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
        pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
        buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
        pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
                0, tx_ring->next_to_use, tx_ring->next_to_clean,
                (unsigned long long)buffer_info->dma,
                buffer_info->length,
                buffer_info->next_to_watch,
                (unsigned long long)buffer_info->time_stamp);

        /* Print Tx Ring */
        if (!netif_msg_tx_done(adapter))
                goto rx_ring_summary;

        dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

        /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
         *
         * Legacy Transmit Descriptor
         *   +--------------------------------------------------------------+
         * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
         *   +--------------------------------------------------------------+
         * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
         *   +--------------------------------------------------------------+
         *   63       48 47        36 35    32 31     24 23    16 15        0
         *
         * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
         *   63      48 47    40 39       32 31             16 15    8 7      0
         *   +----------------------------------------------------------------+
         * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
         *   +----------------------------------------------------------------+
         * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
         *   +----------------------------------------------------------------+
         *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
         *
         * Extended Data Descriptor (DTYP=0x1)
         *   +----------------------------------------------------------------+
         * 0 |                     Buffer Address [63:0]                      |
         *   +----------------------------------------------------------------+
         * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
         *   +----------------------------------------------------------------+
         *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
         */
        pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
        pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
        pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
        for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
                const char *next_desc;
                tx_desc = E1000_TX_DESC(*tx_ring, i);
                buffer_info = &tx_ring->buffer_info[i];
                u0 = (struct my_u0 *)tx_desc;
                if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
                        next_desc = " NTC/U";
                else if (i == tx_ring->next_to_use)
                        next_desc = " NTU";
                else if (i == tx_ring->next_to_clean)
                        next_desc = " NTC";
                else
                        next_desc = "";
                pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
                        (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
                         ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
                        i,
                        (unsigned long long)le64_to_cpu(u0->a),
                        (unsigned long long)le64_to_cpu(u0->b),
                        (unsigned long long)buffer_info->dma,
                        buffer_info->length, buffer_info->next_to_watch,
                        (unsigned long long)buffer_info->time_stamp,
                        buffer_info->skb, next_desc);

                if (netif_msg_pktdata(adapter) && buffer_info->skb)
                        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
                                       16, 1, buffer_info->skb->data,
                                       buffer_info->skb->len, true);
        }

        /* Print Rx Ring Summary */
rx_ring_summary:
        dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
        pr_info("Queue [NTU] [NTC]\n");
        pr_info(" %5d %5X %5X\n",
                0, rx_ring->next_to_use, rx_ring->next_to_clean);

        /* Print Rx Ring */
        if (!netif_msg_rx_status(adapter))
                return;

        dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
        switch (adapter->rx_ps_pages) {
        case 1:
        case 2:
        case 3:
                /* [Extended] Packet Split Receive Descriptor Format
                 *
                 *    +-----------------------------------------------------+
                 *  0 |                Buffer Address 0 [63:0]              |
                 *    +-----------------------------------------------------+
                 *  8 |                Buffer Address 1 [63:0]              |
                 *    +-----------------------------------------------------+
                 * 16 |                Buffer Address 2 [63:0]              |
                 *    +-----------------------------------------------------+
                 * 24 |                Buffer Address 3 [63:0]              |
                 *    +-----------------------------------------------------+
                 */
                pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
                /* [Extended] Receive Descriptor (Write-Back) Format
                 *
                 *   63       48 47    32 31     13 12    8 7    4 3        0
                 *   +------------------------------------------------------+
                 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
                 *   | Checksum | Ident  |         | Queue |      |  Type   |
                 *   +------------------------------------------------------+
                 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
                 *   +------------------------------------------------------+
                 *   63       48 47    32 31            20 19               0
                 */
                pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
                for (i = 0; i < rx_ring->count; i++) {
                        const char *next_desc;
                        buffer_info = &rx_ring->buffer_info[i];
                        rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
                        u1 = (struct my_u1 *)rx_desc_ps;
                        staterr =
                            le32_to_cpu(rx_desc_ps->wb.middle.status_error);

                        if (i == rx_ring->next_to_use)
                                next_desc = " NTU";
                        else if (i == rx_ring->next_to_clean)
                                next_desc = " NTC";
                        else
                                next_desc = "";

                        if (staterr & E1000_RXD_STAT_DD) {
                                /* Descriptor Done */
                                pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
                                        "RWB", i,
                                        (unsigned long long)le64_to_cpu(u1->a),
                                        (unsigned long long)le64_to_cpu(u1->b),
                                        (unsigned long long)le64_to_cpu(u1->c),
                                        (unsigned long long)le64_to_cpu(u1->d),
                                        buffer_info->skb, next_desc);
                        } else {
                                pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
                                        "R  ", i,
                                        (unsigned long long)le64_to_cpu(u1->a),
                                        (unsigned long long)le64_to_cpu(u1->b),
                                        (unsigned long long)le64_to_cpu(u1->c),
                                        (unsigned long long)le64_to_cpu(u1->d),
                                        (unsigned long long)buffer_info->dma,
                                        buffer_info->skb, next_desc);

                                if (netif_msg_pktdata(adapter))
                                        e1000e_dump_ps_pages(adapter,
                                                             buffer_info);
                        }
                }
                break;
        default:
        case 0:
                /* Extended Receive Descriptor (Read) Format
                 *
                 *   +-----------------------------------------------------+
                 * 0 |                Buffer Address [63:0]                |
                 *   +-----------------------------------------------------+
                 * 8 |                      Reserved                       |
                 *   +-----------------------------------------------------+
                 */
                pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
                /* Extended Receive Descriptor (Write-Back) Format
                 *
                 *   63       48 47    32 31    24 23            4 3        0
                 *   +------------------------------------------------------+
                 *   |     RSS Hash      |        |               |         |
                 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
                 *   | Packet   | IP     |        |               |  Type   |
                 *   | Checksum | Ident  |        |               |         |
                 *   +------------------------------------------------------+
                 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
                 *   +------------------------------------------------------+
                 *   63       48 47    32 31            20 19               0
                 */
                pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");

                for (i = 0; i < rx_ring->count; i++) {
                        const char *next_desc;

                        buffer_info = &rx_ring->buffer_info[i];
                        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
                        u1 = (struct my_u1 *)rx_desc;
                        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

                        if (i == rx_ring->next_to_use)
                                next_desc = " NTU";
                        else if (i == rx_ring->next_to_clean)
                                next_desc = " NTC";
                        else
                                next_desc = "";

                        if (staterr & E1000_RXD_STAT_DD) {
                                /* Descriptor Done */
                                pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
                                        "RWB", i,
                                        (unsigned long long)le64_to_cpu(u1->a),
                                        (unsigned long long)le64_to_cpu(u1->b),
                                        buffer_info->skb, next_desc);
                        } else {
                                pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
                                        "R  ", i,
                                        (unsigned long long)le64_to_cpu(u1->a),
                                        (unsigned long long)le64_to_cpu(u1->b),
                                        (unsigned long long)buffer_info->dma,
                                        buffer_info->skb, next_desc);

                                if (netif_msg_pktdata(adapter) &&
                                    buffer_info->skb)
                                        print_hex_dump(KERN_INFO, "",
                                                       DUMP_PREFIX_ADDRESS, 16,
                                                       1,
                                                       buffer_info->skb->data,
                                                       adapter->rx_buffer_len,
                                                       true);
                        }
                }
        }
}

/**
 * e1000_desc_unused - calculate the number of unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
        if (ring->next_to_clean > ring->next_to_use)
                return ring->next_to_clean - ring->next_to_use - 1;

        return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
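/*
 * Worked example: with count = 256, next_to_clean = 5 and next_to_use = 10,
 * 256 + 5 - 10 - 1 = 250 descriptors are unused; one slot is always left
 * empty so a full ring is distinguishable from an empty one.
 */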

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
 * @skb: pointer to sk_buff to be indicated to stack
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
                              struct net_device *netdev, struct sk_buff *skb,
                              u8 status, __le16 vlan)
{
        u16 tag = le16_to_cpu(vlan);
        skb->protocol = eth_type_trans(skb, netdev);

        if (status & E1000_RXD_STAT_VP)
                __vlan_hwaccel_put_tag(skb, tag);

        napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                              struct sk_buff *skb)
{
        u16 status = (u16)status_err;
        u8 errors = (u8)(status_err >> 24);

        skb_checksum_none_assert(skb);

        /* Rx checksum disabled */
        if (!(adapter->netdev->features & NETIF_F_RXCSUM))
                return;

        /* Ignore Checksum bit is set */
        if (status & E1000_RXD_STAT_IXSM)
                return;

        /* TCP/UDP checksum error bit or IP checksum error bit is set */
        if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
        }

        /* TCP/UDP Checksum has not been calculated */
        if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
                return;

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_good++;
}

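/*
 * The two helpers below implement the PCIm-to-PCI arbiter workaround
 * (FLAG2_PCIM2PCI_ARBITER_WA): write the tail register and read it back;
 * if the Manageability Engine (ME) firmware clobbered the write, disable
 * the ring and schedule a full reset.
 */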
static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
        struct e1000_adapter *adapter = rx_ring->adapter;
        struct e1000_hw *hw = &adapter->hw;
        s32 ret_val = __ew32_prepare(hw);

        writel(i, rx_ring->tail);

        if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
                u32 rctl = er32(RCTL);
                ew32(RCTL, rctl & ~E1000_RCTL_EN);
                e_err("ME firmware caused invalid RDT - resetting\n");
                schedule_work(&adapter->reset_task);
        }
}

static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
        struct e1000_adapter *adapter = tx_ring->adapter;
        struct e1000_hw *hw = &adapter->hw;
        s32 ret_val = __ew32_prepare(hw);

        writel(i, tx_ring->tail);

        if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
                u32 tctl = er32(TCTL);
                ew32(TCTL, tctl & ~E1000_TCTL_EN);
                e_err("ME firmware caused invalid TDT - resetting\n");
                schedule_work(&adapter->reset_task);
        }
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags for the allocations
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
                                   int cleaned_count, gfp_t gfp)
{
        struct e1000_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union e1000_rx_desc_extended *rx_desc;
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        unsigned int bufsz = adapter->rx_buffer_len;

        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];

        while (cleaned_count--) {
                skb = buffer_info->skb;
                if (skb) {
                        skb_trim(skb, 0);
                        goto map_skb;
                }

                skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
                if (!skb) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        break;
                }

                buffer_info->skb = skb;
map_skb:
                buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                                  adapter->rx_buffer_len,
                                                  DMA_FROM_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "Rx DMA map failed\n");
                        adapter->rx_dma_failed++;
                        break;
                }

                rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
                rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

                if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
                        /*
                         * Force memory writes to complete before letting h/w
                         * know there are new descriptors to fetch.  (Only
                         * applicable for weak-ordered memory model archs,
                         * such as IA-64).
                         */
                        wmb();
                        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
                                e1000e_update_rdt_wa(rx_ring, i);
                        else
                                writel(i, rx_ring->tail);
                }
                i++;
                if (i == rx_ring->count)
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }

        rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags for the allocations
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
                                      int cleaned_count, gfp_t gfp)
{
        struct e1000_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union e1000_rx_desc_packet_split *rx_desc;
        struct e1000_buffer *buffer_info;
        struct e1000_ps_page *ps_page;
        struct sk_buff *skb;
        unsigned int i, j;

        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];

        while (cleaned_count--) {
                rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

                for (j = 0; j < PS_PAGE_BUFFERS; j++) {
                        ps_page = &buffer_info->ps_pages[j];
                        if (j >= adapter->rx_ps_pages) {
                                /* all unused desc entries get hw null ptr */
                                rx_desc->read.buffer_addr[j + 1] =
                                    ~cpu_to_le64(0);
                                continue;
                        }
                        if (!ps_page->page) {
                                ps_page->page = alloc_page(gfp);
                                if (!ps_page->page) {
                                        adapter->alloc_rx_buff_failed++;
                                        goto no_buffers;
                                }
                                ps_page->dma = dma_map_page(&pdev->dev,
                                                            ps_page->page,
                                                            0, PAGE_SIZE,
                                                            DMA_FROM_DEVICE);
                                if (dma_mapping_error(&pdev->dev,
                                                      ps_page->dma)) {
                                        dev_err(&adapter->pdev->dev,
                                                "Rx DMA page map failed\n");
                                        adapter->rx_dma_failed++;
                                        goto no_buffers;
                                }
                        }
                        /*
                         * Refresh the desc even if buffer_addrs
                         * didn't change because each write-back
                         * erases this info.
                         */
                        rx_desc->read.buffer_addr[j + 1] =
                            cpu_to_le64(ps_page->dma);
                }

                skb = __netdev_alloc_skb_ip_align(netdev,
                                                  adapter->rx_ps_bsize0,
                                                  gfp);

                if (!skb) {
                        adapter->alloc_rx_buff_failed++;
                        break;
                }

                buffer_info->skb = skb;
                buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                                  adapter->rx_ps_bsize0,
                                                  DMA_FROM_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "Rx DMA map failed\n");
                        adapter->rx_dma_failed++;
                        /* cleanup skb */
                        dev_kfree_skb_any(skb);
                        buffer_info->skb = NULL;
                        break;
                }

                rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

                if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
                        /*
                         * Force memory writes to complete before letting h/w
                         * know there are new descriptors to fetch.  (Only
                         * applicable for weak-ordered memory model archs,
                         * such as IA-64).
                         */
                        wmb();
                        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
                                e1000e_update_rdt_wa(rx_ring, i << 1);
                        else
                                writel(i << 1, rx_ring->tail);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }

no_buffers:
        rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags for the allocations
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
                                         int cleaned_count, gfp_t gfp)
{
        struct e1000_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union e1000_rx_desc_extended *rx_desc;
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        unsigned int bufsz = 256 - 16 /* for skb_reserve */;

        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];

        while (cleaned_count--) {
                skb = buffer_info->skb;
                if (skb) {
                        skb_trim(skb, 0);
                        goto check_page;
                }

                skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
                if (unlikely(!skb)) {
                        /* Better luck next round */
                        adapter->alloc_rx_buff_failed++;
                        break;
                }

                buffer_info->skb = skb;
check_page:
                /* allocate a new page if necessary */
                if (!buffer_info->page) {
                        buffer_info->page = alloc_page(gfp);
                        if (unlikely(!buffer_info->page)) {
                                adapter->alloc_rx_buff_failed++;
                                break;
                        }
                }

                if (!buffer_info->dma)
                        buffer_info->dma = dma_map_page(&pdev->dev,
                                                        buffer_info->page, 0,
                                                        PAGE_SIZE,
                                                        DMA_FROM_DEVICE);

                rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
                rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

                if (unlikely(++i == rx_ring->count))
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }

        if (likely(rx_ring->next_to_use != i)) {
                rx_ring->next_to_use = i;
                if (unlikely(i-- == 0))
                        i = (rx_ring->count - 1);

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64). */
                wmb();
                if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
                        e1000e_update_rdt_wa(rx_ring, i);
                else
                        writel(i, rx_ring->tail);
        }
}

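/*
 * e1000_rx_hash - record the RSS hash from the Rx descriptor in the skb
 * when the netdev has NETIF_F_RXHASH enabled
 */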
static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
                                 struct sk_buff *skb)
{
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = le32_to_cpu(rss);
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 * @work_done: output; incremented for each descriptor cleaned
 * @work_to_do: NAPI budget; maximum number of descriptors to clean
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                               int work_to_do)
{
        struct e1000_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_hw *hw = &adapter->hw;
        union e1000_rx_desc_extended *rx_desc, *next_rxd;
        struct e1000_buffer *buffer_info, *next_buffer;
        u32 length, staterr;
        unsigned int i;
        int cleaned_count = 0;
        bool cleaned = false;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;

        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        buffer_info = &rx_ring->buffer_info[i];

        while (staterr & E1000_RXD_STAT_DD) {
                struct sk_buff *skb;

                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
                rmb();  /* read descriptor and rx_buffer_info after status DD */

                skb = buffer_info->skb;
                buffer_info->skb = NULL;

                prefetch(skb->data - NET_IP_ALIGN);

                i++;
                if (i == rx_ring->count)
                        i = 0;
                next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
                prefetch(next_rxd);

                next_buffer = &rx_ring->buffer_info[i];

                cleaned = true;
                cleaned_count++;
                dma_unmap_single(&pdev->dev,
                                 buffer_info->dma,
                                 adapter->rx_buffer_len,
                                 DMA_FROM_DEVICE);
                buffer_info->dma = 0;

                length = le16_to_cpu(rx_desc->wb.upper.length);

                /*
                 * !EOP means multiple descriptors were used to store a single
                 * packet; if that's the case we need to toss it.  In fact, we
                 * need to toss every packet with the EOP bit clear and the
                 * next frame that _does_ have the EOP bit set, as it is by
                 * definition only a frame fragment.
                 */
                if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
                        adapter->flags2 |= FLAG2_IS_DISCARDING;

                if (adapter->flags2 & FLAG2_IS_DISCARDING) {
                        /* All receives must fit into a single buffer */
                        e_dbg("Receive packet consumed multiple buffers\n");
                        /* recycle */
                        buffer_info->skb = skb;
                        if (staterr & E1000_RXD_STAT_EOP)
                                adapter->flags2 &= ~FLAG2_IS_DISCARDING;
                        goto next_desc;
                }

                if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
                             !(netdev->features & NETIF_F_RXALL))) {
                        /* recycle */
                        buffer_info->skb = skb;
                        goto next_desc;
                }

                /* adjust length to remove Ethernet CRC */
                if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
                        /* If configured to store CRC, don't subtract FCS,
                         * but keep the FCS bytes out of the total_rx_bytes
                         * counter
                         */
                        if (netdev->features & NETIF_F_RXFCS)
                                total_rx_bytes -= 4;
                        else
                                length -= 4;
                }

                total_rx_bytes += length;
                total_rx_packets++;

                /*
                 * Code added for copybreak: this should improve performance
                 * for small packets when large amounts of reassembly are
                 * being done in the stack.
                 */
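                /*
                 * copybreak is a module parameter defined elsewhere in this
                 * driver; packets shorter than it are copied into a freshly
                 * allocated skb below so the full-sized DMA buffer can be
                 * recycled in place.
                 */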
                if (length < copybreak) {
                        struct sk_buff *new_skb =
                            netdev_alloc_skb_ip_align(netdev, length);
                        if (new_skb) {
                                skb_copy_to_linear_data_offset(new_skb,
                                                               -NET_IP_ALIGN,
                                                               (skb->data -
                                                                NET_IP_ALIGN),
                                                               (length +
                                                                NET_IP_ALIGN));
                                /* save the skb in buffer_info as good */
                                buffer_info->skb = skb;
                                skb = new_skb;
                        }
                        /* else just continue with the old one */
                }
                /* end copybreak code */
                skb_put(skb, length);

                /* Receive Checksum Offload */
                e1000_rx_checksum(adapter, staterr, skb);

                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

                e1000_receive_skb(adapter, netdev, skb, staterr,
                                  rx_desc->wb.upper.vlan);

next_desc:
                rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
                        adapter->alloc_rx_buf(rx_ring, cleaned_count,
                                              GFP_ATOMIC);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                buffer_info = next_buffer;

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
        rx_ring->next_to_clean = i;

        cleaned_count = e1000_desc_unused(rx_ring);
        if (cleaned_count)
                adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
        return cleaned;
}

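/*
 * e1000_put_txbuf - unmap (page or single mapping, as recorded at map time)
 * and free one Tx buffer, clearing its bookkeeping fields
 */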
static void e1000_put_txbuf(struct e1000_ring *tx_ring,
                            struct e1000_buffer *buffer_info)
{
        struct e1000_adapter *adapter = tx_ring->adapter;

        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
                        dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
                                       buffer_info->length, DMA_TO_DEVICE);
                else
                        dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
                                         buffer_info->length, DMA_TO_DEVICE);
                buffer_info->dma = 0;
        }
        if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
}

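/*
 * e1000_print_hw_hang - runs from the print_hang_task workqueue; first
 * flushes pending descriptor writebacks to rule out a false positive,
 * then dumps ring, MAC, PHY and PCI state for a real Tx hang
 */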
static void e1000_print_hw_hang(struct work_struct *work)
{
        struct e1000_adapter *adapter = container_of(work,
                                                     struct e1000_adapter,
                                                     print_hang_task);
        struct net_device *netdev = adapter->netdev;
        struct e1000_ring *tx_ring = adapter->tx_ring;
        unsigned int i = tx_ring->next_to_clean;
        unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
        struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
        struct e1000_hw *hw = &adapter->hw;
        u16 phy_status, phy_1000t_status, phy_ext_status;
        u16 pci_status;

        if (test_bit(__E1000_DOWN, &adapter->state))
                return;

        if (!adapter->tx_hang_recheck &&
            (adapter->flags2 & FLAG2_DMA_BURST)) {
                /*
                 * The hang may just be a descriptor write-back that has not
                 * yet reached memory; flush pending writebacks and detect
                 * again on the next pass.
                 */
                ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
                /* execute the writes immediately */
                e1e_flush();
                /*
                 * Due to rare timing issues, write to TIDV again to ensure
                 * the write is successful
                 */
                ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
                /* execute the writes immediately */
                e1e_flush();
                adapter->tx_hang_recheck = true;
                return;
        }
        /* Real hang detected */
        adapter->tx_hang_recheck = false;
        netif_stop_queue(netdev);

        e1e_rphy(hw, PHY_STATUS, &phy_status);
        e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
        e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

        pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

        /* detected Hardware unit hang */
        e_err("Detected Hardware Unit Hang:\n"
              "  TDH                  <%x>\n"
              "  TDT                  <%x>\n"
              "  next_to_use          <%x>\n"
              "  next_to_clean        <%x>\n"
              "buffer_info[next_to_clean]:\n"
              "  time_stamp           <%lx>\n"
              "  next_to_watch        <%x>\n"
              "  jiffies              <%lx>\n"
              "  next_to_watch.status <%x>\n"
              "MAC Status             <%x>\n"
              "PHY Status             <%x>\n"
              "PHY 1000BASE-T Status  <%x>\n"
              "PHY Extended Status    <%x>\n"
              "PCI Status             <%x>\n",
              readl(tx_ring->head),
              readl(tx_ring->tail),
              tx_ring->next_to_use,
              tx_ring->next_to_clean,
              tx_ring->buffer_info[eop].time_stamp,
              eop,
              jiffies,
              eop_desc->upper.fields.status,
              er32(STATUS),
              phy_status,
              phy_1000t_status,
              phy_ext_status,
              pci_status);

        /* Suggest workaround for known h/w issue */
        if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
                e_err("Try turning off Tx pause (flow control) via ethtool\n");
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
        struct e1000_adapter *adapter = tx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_tx_desc *tx_desc, *eop_desc;
        struct e1000_buffer *buffer_info;
        unsigned int i, eop;
        unsigned int count = 0;
        unsigned int total_tx_bytes = 0, total_tx_packets = 0;
        unsigned int bytes_compl = 0, pkts_compl = 0;

        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = E1000_TX_DESC(*tx_ring, eop);

        while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
               (count < tx_ring->count)) {
                bool cleaned = false;
                rmb(); /* read buffer_info after eop_desc */
                for (; !cleaned; count++) {
                        tx_desc = E1000_TX_DESC(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];
                        cleaned = (i == eop);

                        if (cleaned) {
                                total_tx_packets += buffer_info->segs;
                                total_tx_bytes += buffer_info->bytecount;
                                if (buffer_info->skb) {
                                        bytes_compl += buffer_info->skb->len;
                                        pkts_compl++;
                                }
                        }

                        e1000_put_txbuf(tx_ring, buffer_info);
                        tx_desc->upper.data = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

                if (i == tx_ring->next_to_use)
                        break;
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

        netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
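        /*
         * Wake the stopped queue only once a reasonable number of
         * descriptors is free again; waking per-descriptor would thrash
         * the queue.  The value 32 appears to be an empirically chosen
         * threshold.
         */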
        if (count && netif_carrier_ok(netdev) &&
            e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();

                if (netif_queue_stopped(netdev) &&
                    !(test_bit(__E1000_DOWN, &adapter->state))) {
                        netif_wake_queue(netdev);
                        ++adapter->restart_queue;
                }
        }

        if (adapter->detect_tx_hung) {
                /*
                 * Detect a transmit hang in hardware; this serializes the
                 * check with the clearing of time_stamp and movement of i.
                 */
                adapter->detect_tx_hung = false;
                if (tx_ring->buffer_info[i].time_stamp &&
                    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
                               + (adapter->tx_timeout_factor * HZ)) &&
                    !(er32(STATUS) & E1000_STATUS_TXOFF))
                        schedule_work(&adapter->print_hang_task);
                else
                        adapter->tx_hang_recheck = false;
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
        return count < tx_ring->count;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 * @work_done: output; incremented for each descriptor cleaned
 * @work_to_do: NAPI budget; maximum number of descriptors to clean
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
                                  int work_to_do)
{
        struct e1000_adapter *adapter = rx_ring->adapter;
        struct e1000_hw *hw = &adapter->hw;
        union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_buffer *buffer_info, *next_buffer;
        struct e1000_ps_page *ps_page;
        struct sk_buff *skb;
        unsigned int i, j;
        u32 length, staterr;
        int cleaned_count = 0;
        bool cleaned = false;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;

        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
        buffer_info = &rx_ring->buffer_info[i];

        while (staterr & E1000_RXD_STAT_DD) {
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
                skb = buffer_info->skb;
                rmb();  /* read descriptor and rx_buffer_info after status DD */

                /* in the packet split case this is header only */
                prefetch(skb->data - NET_IP_ALIGN);

                i++;
                if (i == rx_ring->count)
                        i = 0;
                next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
                prefetch(next_rxd);

                next_buffer = &rx_ring->buffer_info[i];

                cleaned = true;
                cleaned_count++;
                dma_unmap_single(&pdev->dev, buffer_info->dma,
                                 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
                buffer_info->dma = 0;

                /* see !EOP comment in other Rx routine */
                if (!(staterr & E1000_RXD_STAT_EOP))
                        adapter->flags2 |= FLAG2_IS_DISCARDING;

                if (adapter->flags2 & FLAG2_IS_DISCARDING) {
                        e_dbg("Packet Split buffers didn't pick up the full packet\n");
                        dev_kfree_skb_irq(skb);
                        if (staterr & E1000_RXD_STAT_EOP)
                                adapter->flags2 &= ~FLAG2_IS_DISCARDING;
                        goto next_desc;
                }

                if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
                             !(netdev->features & NETIF_F_RXALL))) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                length = le16_to_cpu(rx_desc->wb.middle.length0);

                if (!length) {
                        e_dbg("Last part of the packet spanning multiple descriptors\n");
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                /* Good Receive */
                skb_put(skb, length);

                {
1273                        /*
1274                         * this looks ugly, but compiler quirks appear to
1275                         * make it more efficient than reusing j
1276                         */
1277                        int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
1278
1279                        /*
1280                         * page alloc/put takes too long and affects small
1281                         * packet throughput, so unsplit small packets and
1282                         * save the alloc/put - it's only valid in softirq
1283                         * (napi) context to call kmap_*
1284                         */
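                        /*
                         * copybreak is the driver's copy-versus-map
                         * threshold (a module parameter elsewhere in this
                         * driver): payloads no larger than it are memcpy'd
                         * into the header buffer so the page stays with
                         * the ring
                         */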
1285                        if (l1 && (l1 <= copybreak) &&
1286                            ((length + l1) <= adapter->rx_ps_bsize0)) {
1287                                u8 *vaddr;
1288
1289                                ps_page = &buffer_info->ps_pages[0];
1290
1291                                /*
1292                                 * the rules for how long a kmap_atomic
1293                                 * mapping may be held are not well
1294                                 * documented, so keep it short
1295                                 */
1296                                dma_sync_single_for_cpu(&pdev->dev,
1297                                                        ps_page->dma,
1298                                                        PAGE_SIZE,
1299                                                        DMA_FROM_DEVICE);
1300                                vaddr = kmap_atomic(ps_page->page);
1301                                memcpy(skb_tail_pointer(skb), vaddr, l1);
1302                                kunmap_atomic(vaddr);
1303                                dma_sync_single_for_device(&pdev->dev,
1304                                                           ps_page->dma,
1305                                                           PAGE_SIZE,
1306                                                           DMA_FROM_DEVICE);
1307
1308                                /* remove the CRC */
1309                                if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1310                                        if (!(netdev->features & NETIF_F_RXFCS))
1311                                                l1 -= 4;
1312                                }
1313
1314                                skb_put(skb, l1);
1315                                goto copydone;
1316                        } /* if */
1317                }
1318
1319                for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1320                        length = le16_to_cpu(rx_desc->wb.upper.length[j]);
1321                        if (!length)
1322                                break;
1323
1324                        ps_page = &buffer_info->ps_pages[j];
1325                        dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1326                                       DMA_FROM_DEVICE);
1327                        ps_page->dma = 0;
1328                        skb_fill_page_desc(skb, j, ps_page->page, 0, length);
1329                        ps_page->page = NULL;
1330                        skb->len += length;
1331                        skb->data_len += length;
1332                        skb->truesize += PAGE_SIZE;
1333                }
1334
1335                /* strip the ethernet CRC; since the data is in pages now,
1336                 * this whole operation can get a little cpu intensive
1337                 */
1338                if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
1339                        if (!(netdev->features & NETIF_F_RXFCS))
1340                                pskb_trim(skb, skb->len - 4);
1341                }
1342
1343copydone:
1344                total_rx_bytes += skb->len;
1345                total_rx_packets++;
1346
1347                e1000_rx_checksum(adapter, staterr, skb);
1348
1349                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1350
1351                if (rx_desc->wb.upper.header_status &
1352                           cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1353                        adapter->rx_hdr_split++;
1354
1355                e1000_receive_skb(adapter, netdev, skb,
1356                                  staterr, rx_desc->wb.middle.vlan);
1357
1358next_desc:
1359                rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
1360                buffer_info->skb = NULL;
1361
1362                /* return some buffers to hardware, one at a time is too slow */
1363                if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
1364                        adapter->alloc_rx_buf(rx_ring, cleaned_count,
1365                                              GFP_ATOMIC);
1366                        cleaned_count = 0;
1367                }
1368
1369                /* use prefetched values */
1370                rx_desc = next_rxd;
1371                buffer_info = next_buffer;
1372
1373                staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
1374        }
1375        rx_ring->next_to_clean = i;
1376
1377        cleaned_count = e1000_desc_unused(rx_ring);
1378        if (cleaned_count)
1379                adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1380
1381        adapter->total_rx_bytes += total_rx_bytes;
1382        adapter->total_rx_packets += total_rx_packets;
1383        return cleaned;
1384}
1385
1386/**
1387 * e1000_consume_page - attach a receive page to an skb and update counters
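 * @bi: buffer whose page is being handed off to @skb
 * @skb: socket buffer receiving the page
 * @length: number of bytes the page contributes to the packet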
1388 **/
1389static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1390                               u16 length)
1391{
1392        bi->page = NULL;
1393        skb->len += length;
1394        skb->data_len += length;
1395        skb->truesize += PAGE_SIZE;
1396}
1397
1398/**
1399 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
1400 * @rx_ring: Rx descriptor ring
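 * @work_done: incremented by the number of descriptors processed
 * @work_to_do: upper bound on descriptors to process (the NAPI budget)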
1401 *
1402 * The return value indicates whether actual cleaning was done; there
1403 * is no guarantee that everything was cleaned.
1404 **/
1405static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1406                                     int work_to_do)
1407{
1408        struct e1000_adapter *adapter = rx_ring->adapter;
1409        struct net_device *netdev = adapter->netdev;
1410        struct pci_dev *pdev = adapter->pdev;
1411        union e1000_rx_desc_extended *rx_desc, *next_rxd;
1412        struct e1000_buffer *buffer_info, *next_buffer;
1413        u32 length, staterr;
1414        unsigned int i;
1415        int cleaned_count = 0;
1416        bool cleaned = false;
1417        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1418
1419        i = rx_ring->next_to_clean;
1420        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
1421        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1422        buffer_info = &rx_ring->buffer_info[i];
1423
1424        while (staterr & E1000_RXD_STAT_DD) {
1425                struct sk_buff *skb;
1426
1427                if (*work_done >= work_to_do)
1428                        break;
1429                (*work_done)++;
1430                rmb();  /* read descriptor and rx_buffer_info after status DD */
1431
1432                skb = buffer_info->skb;
1433                buffer_info->skb = NULL;
1434
1435                ++i;
1436                if (i == rx_ring->count)
1437                        i = 0;
1438                next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
1439                prefetch(next_rxd);
1440
1441                next_buffer = &rx_ring->buffer_info[i];
1442
1443                cleaned = true;
1444                cleaned_count++;
1445                dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
1446                               DMA_FROM_DEVICE);
1447                buffer_info->dma = 0;
1448
1449                length = le16_to_cpu(rx_desc->wb.upper.length);
1450
1451                /* the errors field is only valid in DD + EOP descriptors */
1452                if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
1453                             ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
1454                              !(netdev->features & NETIF_F_RXALL)))) {
1455                        /* recycle both page and skb */
1456                        buffer_info->skb = skb;
1457                        /* an error means any chain goes out the window too */
1458                        if (rx_ring->rx_skb_top)
1459                                dev_kfree_skb_irq(rx_ring->rx_skb_top);
1460                        rx_ring->rx_skb_top = NULL;
1461                        goto next_desc;
1462                }
1463
1464#define rxtop (rx_ring->rx_skb_top)
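/* rxtop: the per-ring skb accumulating a multi-descriptor (chained) frame */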
1465                if (!(staterr & E1000_RXD_STAT_EOP)) {
1466                        /* this descriptor is only the beginning (or middle) */
1467                        if (!rxtop) {
1468                                /* this is the beginning of a chain */
1469                                rxtop = skb;
1470                                skb_fill_page_desc(rxtop, 0, buffer_info->page,
1471                                                   0, length);
1472                        } else {
1473                                /* this is the middle of a chain */
1474                                skb_fill_page_desc(rxtop,
1475                                    skb_shinfo(rxtop)->nr_frags,
1476                                    buffer_info->page, 0, length);
1477                                /* re-use the skb, only consumed the page */
1478                                buffer_info->skb = skb;
1479                        }
1480                        e1000_consume_page(buffer_info, rxtop, length);
1481                        goto next_desc;
1482                } else {
1483                        if (rxtop) {
1484                                /* end of the chain */
1485                                skb_fill_page_desc(rxtop,
1486                                    skb_shinfo(rxtop)->nr_frags,
1487                                    buffer_info->page, 0, length);
1488                                /* re-use the current skb; we only consumed
1489                                 * the page */
1490                                buffer_info->skb = skb;
1491                                skb = rxtop;
1492                                rxtop = NULL;
1493                                e1000_consume_page(buffer_info, skb, length);
1494                        } else {
1495                                /* no chain, got EOP: this buf is the packet;
1496                                 * copybreak to save the put_page/alloc_page */
1497                                if (length <= copybreak &&
1498                                    skb_tailroom(skb) >= length) {
1499                                        u8 *vaddr;
1500                                        vaddr = kmap_atomic(buffer_info->page);
1501                                        memcpy(skb_tail_pointer(skb), vaddr,
1502                                               length);
1503                                        kunmap_atomic(vaddr);
1504                                        /* re-use the page, so don't erase
1505                                         * buffer_info->page */
1506                                        skb_put(skb, length);
1507                                } else {
1508                                        skb_fill_page_desc(skb, 0,
1509                                                           buffer_info->page, 0,
1510                                                           length);
1511                                        e1000_consume_page(buffer_info, skb,
1512                                                           length);
1513                                }
1514                        }
1515                }
1516
1517                /* Receive Checksum Offload */
1518                e1000_rx_checksum(adapter, staterr, skb);
1519
1520                e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1521
1522                /* probably a little skewed due to removing CRC */
1523                total_rx_bytes += skb->len;
1524                total_rx_packets++;
1525
1526                /* eth type trans needs skb->data to point to something */
1527                if (!pskb_may_pull(skb, ETH_HLEN)) {
1528                        e_err("pskb_may_pull failed.\n");
1529                        dev_kfree_skb_irq(skb);
1530                        goto next_desc;
1531                }
1532
1533                e1000_receive_skb(adapter, netdev, skb, staterr,
1534                                  rx_desc->wb.upper.vlan);
1535
1536next_desc:
1537                rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
1538
1539                /* return some buffers to hardware, one at a time is too slow */
1540                if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
1541                        adapter->alloc_rx_buf(rx_ring, cleaned_count,
1542                                              GFP_ATOMIC);
1543                        cleaned_count = 0;
1544                }
1545
1546                /* use prefetched values */
1547                rx_desc = next_rxd;
1548                buffer_info = next_buffer;
1549
1550                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1551        }
1552        rx_ring->next_to_clean = i;
1553
1554        cleaned_count = e1000_desc_unused(rx_ring);
1555        if (cleaned_count)
1556                adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);
1557
1558        adapter->total_rx_bytes += total_rx_bytes;
1559        adapter->total_rx_packets += total_rx_packets;
1560        return cleaned;
1561}
1562
1563/**
1564 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1565 * @rx_ring: Rx descriptor ring
1566 **/
1567static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1568{
1569        struct e1000_adapter *adapter = rx_ring->adapter;
1570        struct e1000_buffer *buffer_info;
1571        struct e1000_ps_page *ps_page;
1572        struct pci_dev *pdev = adapter->pdev;
1573        unsigned int i, j;
1574
1575        /* Free all the Rx ring sk_buffs */
1576        for (i = 0; i < rx_ring->count; i++) {
1577                buffer_info = &rx_ring->buffer_info[i];
1578                if (buffer_info->dma) {
1579                        if (adapter->clean_rx == e1000_clean_rx_irq)
1580                                dma_unmap_single(&pdev->dev, buffer_info->dma,
1581                                                 adapter->rx_buffer_len,
1582                                                 DMA_FROM_DEVICE);
1583                        else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1584                                dma_unmap_page(&pdev->dev, buffer_info->dma,
1585                                               PAGE_SIZE,
1586                                               DMA_FROM_DEVICE);
1587                        else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1588                                dma_unmap_single(&pdev->dev, buffer_info->dma,
1589                                                 adapter->rx_ps_bsize0,
1590                                                 DMA_FROM_DEVICE);
1591                        buffer_info->dma = 0;
1592                }
1593
1594                if (buffer_info->page) {
1595                        put_page(buffer_info->page);
1596                        buffer_info->page = NULL;
1597                }
1598
1599                if (buffer_info->skb) {
1600                        dev_kfree_skb(buffer_info->skb);
1601                        buffer_info->skb = NULL;
1602                }
1603
1604                for (j = 0; j < PS_PAGE_BUFFERS; j++) {
1605                        ps_page = &buffer_info->ps_pages[j];
1606                        if (!ps_page->page)
1607                                break;
1608                        dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1609                                       DMA_FROM_DEVICE);
1610                        ps_page->dma = 0;
1611                        put_page(ps_page->page);
1612                        ps_page->page = NULL;
1613                }
1614        }
1615
1616        /* there may also be some cached data from a chained receive */
1617        if (rx_ring->rx_skb_top) {
1618                dev_kfree_skb(rx_ring->rx_skb_top);
1619                rx_ring->rx_skb_top = NULL;
1620        }
1621
1622        /* Zero out the descriptor ring */
1623        memset(rx_ring->desc, 0, rx_ring->size);
1624
1625        rx_ring->next_to_clean = 0;
1626        rx_ring->next_to_use = 0;
1627        adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1628
1629        writel(0, rx_ring->head);
1630        if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
1631                e1000e_update_rdt_wa(rx_ring, 0);
1632        else
1633                writel(0, rx_ring->tail);
1634}
1635
1636static void e1000e_downshift_workaround(struct work_struct *work)
1637{
1638        struct e1000_adapter *adapter = container_of(work,
1639                                        struct e1000_adapter, downshift_task);
1640
1641        if (test_bit(__E1000_DOWN, &adapter->state))
1642                return;
1643
1644        e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1645}
1646
1647/**
1648 * e1000_intr_msi - Interrupt Handler
1649 * @irq: interrupt number
1650 * @data: pointer to a network interface device structure
1651 **/
1652static irqreturn_t e1000_intr_msi(int irq, void *data)
1653{
1654        struct net_device *netdev = data;
1655        struct e1000_adapter *adapter = netdev_priv(netdev);
1656        struct e1000_hw *hw = &adapter->hw;
1657        u32 icr = er32(ICR);
1658
1659        /*
1660         * reading ICR disables interrupts using IAM
1661         */
1662
1663        if (icr & E1000_ICR_LSC) {
1664                hw->mac.get_link_status = true;
1665                /*
1666                 * ICH8 workaround-- Call gig speed drop workaround on cable
1667                 * disconnect (LSC) before accessing any PHY registers
1668                 */
1669                if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1670                    (!(er32(STATUS) & E1000_STATUS_LU)))
1671                        schedule_work(&adapter->downshift_task);
1672
1673                /*
1674                 * 80003ES2LAN workaround-- the packet buffer work-around on
1675                 * a link down event requires disabling receives here in the
1676                 * ISR and resetting the adapter in the watchdog
1677                 */
1678                if (netif_carrier_ok(netdev) &&
1679                    adapter->flags & FLAG_RX_NEEDS_RESTART) {
1680                        /* disable receives */
1681                        u32 rctl = er32(RCTL);
1682                        ew32(RCTL, rctl & ~E1000_RCTL_EN);
1683                        adapter->flags |= FLAG_RX_RESTART_NOW;
1684                }
1685                /* guard against interrupt when we're going down */
1686                if (!test_bit(__E1000_DOWN, &adapter->state))
1687                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
1688        }
1689
1690        if (napi_schedule_prep(&adapter->napi)) {
1691                adapter->total_tx_bytes = 0;
1692                adapter->total_tx_packets = 0;
1693                adapter->total_rx_bytes = 0;
1694                adapter->total_rx_packets = 0;
1695                __napi_schedule(&adapter->napi);
1696        }
1697
1698        return IRQ_HANDLED;
1699}
1700
1701/**
1702 * e1000_intr - Interrupt Handler
1703 * @irq: interrupt number
1704 * @data: pointer to a network interface device structure
1705 **/
1706static irqreturn_t e1000_intr(int irq, void *data)
1707{
1708        struct net_device *netdev = data;
1709        struct e1000_adapter *adapter = netdev_priv(netdev);
1710        struct e1000_hw *hw = &adapter->hw;
1711        u32 rctl, icr = er32(ICR);
1712
1713        if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1714                return IRQ_NONE;  /* Not our interrupt */
1715
1716        /*
1717         * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1718         * not set, then the adapter didn't send an interrupt
1719         */
1720        if (!(icr & E1000_ICR_INT_ASSERTED))
1721                return IRQ_NONE;
1722
1723        /*
1724         * Interrupt Auto-Mask...upon reading ICR,
1725         * interrupts are masked.  No need for the
1726         * IMC write
1727         */
1728
1729        if (icr & E1000_ICR_LSC) {
1730                hw->mac.get_link_status = true;
1731                /*
1732                 * ICH8 workaround-- Call gig speed drop workaround on cable
1733                 * disconnect (LSC) before accessing any PHY registers
1734                 */
1735                if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1736                    (!(er32(STATUS) & E1000_STATUS_LU)))
1737                        schedule_work(&adapter->downshift_task);
1738
1739                /*
1740                 * 80003ES2LAN workaround--
1741                 * the packet buffer work-around on link down events
1742                 * requires disabling receives here in the ISR and
1743                 * resetting the adapter in the watchdog
1744                 */
1745                if (netif_carrier_ok(netdev) &&
1746                    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
1747                        /* disable receives */
1748                        rctl = er32(RCTL);
1749                        ew32(RCTL, rctl & ~E1000_RCTL_EN);
1750                        adapter->flags |= FLAG_RX_RESTART_NOW;
1751                }
1752                /* guard against interrupt when we're going down */
1753                if (!test_bit(__E1000_DOWN, &adapter->state))
1754                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
1755        }
1756
1757        if (napi_schedule_prep(&adapter->napi)) {
1758                adapter->total_tx_bytes = 0;
1759                adapter->total_tx_packets = 0;
1760                adapter->total_rx_bytes = 0;
1761                adapter->total_rx_packets = 0;
1762                __napi_schedule(&adapter->napi);
1763        }
1764
1765        return IRQ_HANDLED;
1766}
1767
1768static irqreturn_t e1000_msix_other(int irq, void *data)
1769{
1770        struct net_device *netdev = data;
1771        struct e1000_adapter *adapter = netdev_priv(netdev);
1772        struct e1000_hw *hw = &adapter->hw;
1773        u32 icr = er32(ICR);
1774
1775        if (!(icr & E1000_ICR_INT_ASSERTED)) {
1776                if (!test_bit(__E1000_DOWN, &adapter->state))
1777                        ew32(IMS, E1000_IMS_OTHER);
1778                return IRQ_NONE;
1779        }
1780
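        /* hand the auto-cleared queue causes back to hardware via ICS so
         * the corresponding Rx/Tx vectors still fire
         */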
1781        if (icr & adapter->eiac_mask)
1782                ew32(ICS, (icr & adapter->eiac_mask));
1783
1784        if (icr & E1000_ICR_OTHER) {
1785                if (!(icr & E1000_ICR_LSC))
1786                        goto no_link_interrupt;
1787                hw->mac.get_link_status = true;
1788                /* guard against interrupt when we're going down */
1789                if (!test_bit(__E1000_DOWN, &adapter->state))
1790                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
1791        }
1792
1793no_link_interrupt:
1794        if (!test_bit(__E1000_DOWN, &adapter->state))
1795                ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
1796
1797        return IRQ_HANDLED;
1798}
1799
1801static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1802{
1803        struct net_device *netdev = data;
1804        struct e1000_adapter *adapter = netdev_priv(netdev);
1805        struct e1000_hw *hw = &adapter->hw;
1806        struct e1000_ring *tx_ring = adapter->tx_ring;
1807
1809        adapter->total_tx_bytes = 0;
1810        adapter->total_tx_packets = 0;
1811
1812        if (!e1000_clean_tx_irq(tx_ring))
1813                /* Ring was not completely cleaned, so fire another interrupt */
1814                ew32(ICS, tx_ring->ims_val);
1815
1816        return IRQ_HANDLED;
1817}
1818
1819static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1820{
1821        struct net_device *netdev = data;
1822        struct e1000_adapter *adapter = netdev_priv(netdev);
1823        struct e1000_ring *rx_ring = adapter->rx_ring;
1824
1825        /* Write the ITR value calculated at the end of the
1826         * previous interrupt.
1827         */
1828        if (rx_ring->set_itr) {
1829                writel(1000000000 / (rx_ring->itr_val * 256),
1830                       rx_ring->itr_register);
1831                rx_ring->set_itr = 0;
1832        }
1833
1834        if (napi_schedule_prep(&adapter->napi)) {
1835                adapter->total_rx_bytes = 0;
1836                adapter->total_rx_packets = 0;
1837                __napi_schedule(&adapter->napi);
1838        }
1839        return IRQ_HANDLED;
1840}
1841
1842/**
1843 * e1000_configure_msix - Configure MSI-X hardware
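 * @adapter: board private structure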
1844 *
1845 * e1000_configure_msix sets up the hardware to properly
1846 * generate MSI-X interrupts.
1847 **/
1848static void e1000_configure_msix(struct e1000_adapter *adapter)
1849{
1850        struct e1000_hw *hw = &adapter->hw;
1851        struct e1000_ring *rx_ring = adapter->rx_ring;
1852        struct e1000_ring *tx_ring = adapter->tx_ring;
1853        int vector = 0;
1854        u32 ctrl_ext, ivar = 0;
1855
1856        adapter->eiac_mask = 0;
1857
1858        /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1859        if (hw->mac.type == e1000_82574) {
1860                u32 rfctl = er32(RFCTL);
1861                rfctl |= E1000_RFCTL_ACK_DIS;
1862                ew32(RFCTL, rfctl);
1863        }
1864
1865#define E1000_IVAR_INT_ALLOC_VALID      0x8
1866        /* Configure Rx vector */
1867        rx_ring->ims_val = E1000_IMS_RXQ0;
1868        adapter->eiac_mask |= rx_ring->ims_val;
1869        if (rx_ring->itr_val)
1870                writel(1000000000 / (rx_ring->itr_val * 256),
1871                       rx_ring->itr_register);
1872        else
1873                writel(1, rx_ring->itr_register);
1874        ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1875
1876        /* Configure Tx vector */
1877        tx_ring->ims_val = E1000_IMS_TXQ0;
1878        vector++;
1879        if (tx_ring->itr_val)
1880                writel(1000000000 / (tx_ring->itr_val * 256),
1881                       tx_ring->itr_register);
1882        else
1883                writel(1, tx_ring->itr_register);
1884        adapter->eiac_mask |= tx_ring->ims_val;
1885        ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1886
1887        /* set vector for Other Causes, e.g. link changes */
1888        vector++;
1889        ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1890        if (rx_ring->itr_val)
1891                writel(1000000000 / (rx_ring->itr_val * 256),
1892                       hw->hw_addr + E1000_EITR_82574(vector));
1893        else
1894                writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1895
1896        /* Cause Tx interrupts on every write back */
1897        ivar |= (1 << 31);
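        /*
         * With vectors 0 (Rx), 1 (Tx) and 2 (Other) as assigned above, the
         * fields combine to ivar = 0x8 | (0x9 << 8) | (0xa << 16) | (1 << 31)
         * = 0x800a0908: a vector number plus INT_ALLOC_VALID per byte, and
         * bit 31 requesting a Tx interrupt on every write back.
         */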
1898
1899        ew32(IVAR, ivar);
1900
1901        /* enable MSI-X PBA support */
1902        ctrl_ext = er32(CTRL_EXT);
1903        ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
1904
1905        /* Auto-Mask Other interrupts upon ICR read */
1906#define E1000_EIAC_MASK_82574   0x01F00000
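/* bits 20 through 24: the 82574's RxQ0, RxQ1, TxQ0, TxQ1 and Other causes */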
1907        ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
1908        ctrl_ext |= E1000_CTRL_EXT_EIAME;
1909        ew32(CTRL_EXT, ctrl_ext);
1910        e1e_flush();
1911}
1912
1913void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1914{
1915        if (adapter->msix_entries) {
1916                pci_disable_msix(adapter->pdev);
1917                kfree(adapter->msix_entries);
1918                adapter->msix_entries = NULL;
1919        } else if (adapter->flags & FLAG_MSI_ENABLED) {
1920                pci_disable_msi(adapter->pdev);
1921                adapter->flags &= ~FLAG_MSI_ENABLED;
1922        }
1923}
1924
1925/**
1926 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
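 * @adapter: board private structure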
1927 *
1928 * Attempt to configure interrupts using the best available
1929 * capabilities of the hardware and kernel.
1930 **/
1931void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1932{
1933        int err;
1934        int i;
1935
1936        switch (adapter->int_mode) {
1937        case E1000E_INT_MODE_MSIX:
1938                if (adapter->flags & FLAG_HAS_MSIX) {
1939                        adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
1940                        adapter->msix_entries = kcalloc(adapter->num_vectors,
1941                                                      sizeof(struct msix_entry),
1942                                                      GFP_KERNEL);
1943                        if (adapter->msix_entries) {
1944                                for (i = 0; i < adapter->num_vectors; i++)
1945                                        adapter->msix_entries[i].entry = i;
1946
1947                                err = pci_enable_msix(adapter->pdev,
1948                                                      adapter->msix_entries,
1949                                                      adapter->num_vectors);
1950                                if (err == 0)
1951                                        return;
1952                        }
1953                        /* MSI-X failed, so fall through and try MSI */
1954                        e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
1955                        e1000e_reset_interrupt_capability(adapter);
1956                }
1957                adapter->int_mode = E1000E_INT_MODE_MSI;
1958                /* Fall through */
1959        case E1000E_INT_MODE_MSI:
1960                if (!pci_enable_msi(adapter->pdev)) {
1961                        adapter->flags |= FLAG_MSI_ENABLED;
1962                } else {
1963                        adapter->int_mode = E1000E_INT_MODE_LEGACY;
1964                        e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
1965                }
1966                /* Fall through */
1967        case E1000E_INT_MODE_LEGACY:
1968                /* Don't do anything; this is the system default */
1969                break;
1970        }
1971
1972        /* store the number of vectors being used */
1973        adapter->num_vectors = 1;
1974}
1975
1976/**
1977 * e1000_request_msix - Initialize MSI-X interrupts
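 * @adapter: board private structure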
1978 *
1979 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
1980 * kernel.
1981 **/
1982static int e1000_request_msix(struct e1000_adapter *adapter)
1983{
1984        struct net_device *netdev = adapter->netdev;
1985        int err = 0, vector = 0;
1986
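        /* the "-rx-0"/"-tx-0" suffixes below are 5 characters, hence the
         * IFNAMSIZ - 5 headroom check before appending them
         */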
1987        if (strlen(netdev->name) < (IFNAMSIZ - 5))
1988                snprintf(adapter->rx_ring->name,
1989                         sizeof(adapter->rx_ring->name) - 1,
1990                         "%s-rx-0", netdev->name);
1991        else
1992                memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1993        err = request_irq(adapter->msix_entries[vector].vector,
1994                          e1000_intr_msix_rx, 0, adapter->rx_ring->name,
1995                          netdev);
1996        if (err)
1997                return err;
1998        adapter->rx_ring->itr_register = adapter->hw.hw_addr +
1999            E1000_EITR_82574(vector);
2000        adapter->rx_ring->itr_val = adapter->itr;
2001        vector++;
2002
2003        if (strlen(netdev->name) < (IFNAMSIZ - 5))
2004                snprintf(adapter->tx_ring->name,
2005                         sizeof(adapter->tx_ring->name) - 1,
2006                         "%s-tx-0", netdev->name);
2007        else
2008                memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2009        err = request_irq(adapter->msix_entries[vector].vector,
2010                          e1000_intr_msix_tx, 0, adapter->tx_ring->name,
2011                          netdev);
2012        if (err)
2013                return err;
2014        adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2015            E1000_EITR_82574(vector);
2016        adapter->tx_ring->itr_val = adapter->itr;
2017        vector++;
2018
2019        err = request_irq(adapter->msix_entries[vector].vector,
2020                          e1000_msix_other, 0, netdev->name, netdev);
2021        if (err)
2022                return err;
2023
2024        e1000_configure_msix(adapter);
2025
2026        return 0;
2027}
2028
2029/**
2030 * e1000_request_irq - initialize interrupts
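 * @adapter: board private structure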
2031 *
2032 * Attempts to configure interrupts using the best available
2033 * capabilities of the hardware and kernel.
2034 **/
2035static int e1000_request_irq(struct e1000_adapter *adapter)
2036{
2037        struct net_device *netdev = adapter->netdev;
2038        int err;
2039
2040        if (adapter->msix_entries) {
2041                err = e1000_request_msix(adapter);
2042                if (!err)
2043                        return err;
2044                /* fall back to MSI */
2045                e1000e_reset_interrupt_capability(adapter);
2046                adapter->int_mode = E1000E_INT_MODE_MSI;
2047                e1000e_set_interrupt_capability(adapter);
2048        }
2049        if (adapter->flags & FLAG_MSI_ENABLED) {
2050                err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
2051                                  netdev->name, netdev);
2052                if (!err)
2053                        return err;
2054
2055                /* fall back to legacy interrupt */
2056                e1000e_reset_interrupt_capability(adapter);
2057                adapter->int_mode = E1000E_INT_MODE_LEGACY;
2058        }
2059
2060        err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
2061                          netdev->name, netdev);
2062        if (err)
2063                e_err("Unable to allocate interrupt, Error: %d\n", err);
2064
2065        return err;
2066}
2067
2068static void e1000_free_irq(struct e1000_adapter *adapter)
2069{
2070        struct net_device *netdev = adapter->netdev;
2071
2072        if (adapter->msix_entries) {
2073                int vector = 0;
2074
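                /* vectors were requested in Rx, Tx, Other order by
                 * e1000_request_msix(), so free them in the same order
                 */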
2075                free_irq(adapter->msix_entries[vector].vector, netdev);
2076                vector++;
2077
2078                free_irq(adapter->msix_entries[vector].vector, netdev);
2079                vector++;
2080
2081                /* Other Causes interrupt vector */
2082                free_irq(adapter->msix_entries[vector].vector, netdev);
2083                return;
2084        }
2085
2086        free_irq(adapter->pdev->irq, netdev);
2087}
2088
2089/**
2090 * e1000_irq_disable - Mask off interrupt generation on the NIC
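 * @adapter: board private structure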
2091 **/
2092static void e1000_irq_disable(struct e1000_adapter *adapter)
2093{
2094        struct e1000_hw *hw = &adapter->hw;
2095
2096        ew32(IMC, ~0);
2097        if (adapter->msix_entries)
2098                ew32(EIAC_82574, 0);
2099        e1e_flush();
2100
2101        if (adapter->msix_entries) {
2102                int i;
2103                for (i = 0; i < adapter->num_vectors; i++)
2104                        synchronize_irq(adapter->msix_entries[i].vector);
2105        } else {
2106                synchronize_irq(adapter->pdev->irq);
2107        }
2108}
2109
2110/**
2111 * e1000_irq_enable - Enable default interrupt generation settings
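 * @adapter: board private structure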
2112 **/
2113static void e1000_irq_enable(struct e1000_adapter *adapter)
2114{
2115        struct e1000_hw *hw = &adapter->hw;
2116
2117        if (adapter->msix_entries) {
2118                ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2119                ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
2120        } else {
2121                ew32(IMS, IMS_ENABLE_MASK);
2122        }
2123        e1e_flush();
2124}
2125
2126/**
2127 * e1000e_get_hw_control - get control of the h/w from f/w
2128 * @adapter: address of board private structure
2129 *
2130 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2131 * For ASF and Pass Through versions of f/w this means that
2132 * the driver is loaded. For AMT version (only with 82573)
2133 * of the f/w this means that the network i/f is open.
2134 **/
2135void e1000e_get_hw_control(struct e1000_adapter *adapter)
2136{
2137        struct e1000_hw *hw = &adapter->hw;
2138        u32 ctrl_ext;
2139        u32 swsm;
2140
2141        /* Let firmware know the driver has taken over */
2142        if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2143                swsm = er32(SWSM);
2144                ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2145        } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2146                ctrl_ext = er32(CTRL_EXT);
2147                ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2148        }
2149}
2150
2151/**
2152 * e1000e_release_hw_control - release control of the h/w to f/w
2153 * @adapter: address of board private structure
2154 *
2155 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2156 * For ASF and Pass Through versions of f/w this means that the
2157 * driver is no longer loaded. For AMT version (only with 82573)
2158 * of the f/w this means that the network i/f is closed.
2159 *
2160 **/
2161void e1000e_release_hw_control(struct e1000_adapter *adapter)
2162{
2163        struct e1000_hw *hw = &adapter->hw;
2164        u32 ctrl_ext;
2165        u32 swsm;
2166
2167        /* Let firmware take over control of h/w */
2168        if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2169                swsm = er32(SWSM);
2170                ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2171        } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2172                ctrl_ext = er32(CTRL_EXT);
2173                ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2174        }
2175}
2176
2177/**
2178 * e1000_alloc_ring_dma - allocate memory for a ring structure
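 * @adapter: board private structure
 * @ring: ring to allocate descriptor memory for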
2179 **/
2180static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2181                                struct e1000_ring *ring)
2182{
2183        struct pci_dev *pdev = adapter->pdev;
2184
2185        ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2186                                        GFP_KERNEL);
2187        if (!ring->desc)
2188                return -ENOMEM;
2189
2190        return 0;
2191}
2192
2193/**
2194 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2195 * @tx_ring: Tx descriptor ring
2196 *
2197 * Return 0 on success, negative on failure
2198 **/
2199int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2200{
2201        struct e1000_adapter *adapter = tx_ring->adapter;
2202        int err = -ENOMEM, size;
2203
2204        size = sizeof(struct e1000_buffer) * tx_ring->count;
2205        tx_ring->buffer_info = vzalloc(size);
2206        if (!tx_ring->buffer_info)
2207                goto err;
2208
2209        /* round up to nearest 4K */
2210        tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2211        tx_ring->size = ALIGN(tx_ring->size, 4096);
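        /*
         * Worked example (assuming the usual 256-descriptor default and
         * 16-byte legacy Tx descriptors): 256 * 16 = 4096 exactly, so
         * ALIGN() is a no-op; other counts are padded up to the next
         * 4 KiB boundary.
         */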
2212
2213        err = e1000_alloc_ring_dma(adapter, tx_ring);
2214        if (err)
2215                goto err;
2216
2217        tx_ring->next_to_use = 0;
2218        tx_ring->next_to_clean = 0;
2219
2220        return 0;
2221err:
2222        vfree(tx_ring->buffer_info);
2223        e_err("Unable to allocate memory for the transmit descriptor ring\n");
2224        return err;
2225}
2226
2227/**
2228 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2229 * @rx_ring: Rx descriptor ring
2230 *
2231 * Returns 0 on success, negative on failure
2232 **/
2233int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2234{
2235        struct e1000_adapter *adapter = rx_ring->adapter;
2236        struct e1000_buffer *buffer_info;
2237        int i, size, desc_len, err = -ENOMEM;
2238
2239        size = sizeof(struct e1000_buffer) * rx_ring->count;
2240        rx_ring->buffer_info = vzalloc(size);
2241        if (!rx_ring->buffer_info)
2242                goto err;
2243
2244        for (i = 0; i < rx_ring->count; i++) {
2245                buffer_info = &rx_ring->buffer_info[i];
2246                buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2247                                                sizeof(struct e1000_ps_page),
2248                                                GFP_KERNEL);
2249                if (!buffer_info->ps_pages)
2250                        goto err_pages;
2251        }
2252
2253        desc_len = sizeof(union e1000_rx_desc_packet_split);
2254
2255        /* Round up to nearest 4K */
2256        rx_ring->size = rx_ring->count * desc_len;
2257        rx_ring->size = ALIGN(rx_ring->size, 4096);
2258
2259        err = e1000_alloc_ring_dma(adapter, rx_ring);
2260        if (err)
2261                goto err_pages;
2262
2263        rx_ring->next_to_clean = 0;
2264        rx_ring->next_to_use = 0;
2265        rx_ring->rx_skb_top = NULL;
2266
2267        return 0;
2268
2269err_pages:
2270        for (i = 0; i < rx_ring->count; i++) {
2271                buffer_info = &rx_ring->buffer_info[i];
2272                kfree(buffer_info->ps_pages);
2273        }
2274err:
2275        vfree(rx_ring->buffer_info);
2276        e_err("Unable to allocate memory for the receive descriptor ring\n");
2277        return err;
2278}
2279
2280/**
2281 * e1000_clean_tx_ring - Free Tx Buffers
2282 * @tx_ring: Tx descriptor ring
2283 **/
2284static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2285{
2286        struct e1000_adapter *adapter = tx_ring->adapter;
2287        struct e1000_buffer *buffer_info;
2288        unsigned long size;
2289        unsigned int i;
2290
2291        for (i = 0; i < tx_ring->count; i++) {
2292                buffer_info = &tx_ring->buffer_info[i];
2293                e1000_put_txbuf(tx_ring, buffer_info);
2294        }
2295
2296        netdev_reset_queue(adapter->netdev);
2297        size = sizeof(struct e1000_buffer) * tx_ring->count;
2298        memset(tx_ring->buffer_info, 0, size);
2299
2300        memset(tx_ring->desc, 0, tx_ring->size);
2301
2302        tx_ring->next_to_use = 0;
2303        tx_ring->next_to_clean = 0;
2304
2305        writel(0, tx_ring->head);
2306        if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2307                e1000e_update_tdt_wa(tx_ring, 0);
2308        else
2309                writel(0, tx_ring->tail);
2310}
2311
2312/**
2313 * e1000e_free_tx_resources - Free Tx Resources per Queue
2314 * @tx_ring: Tx descriptor ring
2315 *
2316 * Free all transmit software resources
2317 **/
2318void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2319{
2320        struct e1000_adapter *adapter = tx_ring->adapter;
2321        struct pci_dev *pdev = adapter->pdev;
2322
2323        e1000_clean_tx_ring(tx_ring);
2324
2325        vfree(tx_ring->buffer_info);
2326        tx_ring->buffer_info = NULL;
2327
2328        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2329                          tx_ring->dma);
2330        tx_ring->desc = NULL;
2331}
2332
2333/**
2334 * e1000e_free_rx_resources - Free Rx Resources
2335 * @rx_ring: Rx descriptor ring
2336 *
2337 * Free all receive software resources
2338 **/
2339void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2340{
2341        struct e1000_adapter *adapter = rx_ring->adapter;
2342        struct pci_dev *pdev = adapter->pdev;
2343        int i;
2344
2345        e1000_clean_rx_ring(rx_ring);
2346
2347        for (i = 0; i < rx_ring->count; i++)
2348                kfree(rx_ring->buffer_info[i].ps_pages);
2349
2350        vfree(rx_ring->buffer_info);
2351        rx_ring->buffer_info = NULL;
2352
2353        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2354                          rx_ring->dma);
2355        rx_ring->desc = NULL;
2356}
2357
2358/**
2359 * e1000_update_itr - update the dynamic ITR value based on statistics
2360 * @adapter: pointer to adapter
2361 * @itr_setting: current adapter->itr
2362 * @packets: the number of packets during this measurement interval
2363 * @bytes: the number of bytes during this measurement interval
2364 *
2365 *      Stores a new ITR value based on packets and byte
2366 *      counts during the last interrupt.  The advantage of per interrupt
2367 *      computation is faster updates and more accurate ITR for the current
2368 *      traffic pattern.  Constants in this function were computed
2369 *      based on theoretical maximum wire speed and thresholds were set based
2370 *      on testing data as well as attempting to minimize response time
2371 *      while increasing bulk throughput.  This functionality is controlled
2372 *      by the InterruptThrottleRate module parameter.
2373 **/
2374static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2375                                     u16 itr_setting, int packets,
2376                                     int bytes)
2377{
2378        unsigned int retval = itr_setting;
2379
2380        if (packets == 0)
2381                return itr_setting;
2382
2383        switch (itr_setting) {
2384        case lowest_latency:
2385                /* handle TSO and jumbo frames */
2386                if (bytes/packets > 8000)
2387                        retval = bulk_latency;
2388                else if ((packets < 5) && (bytes > 512))
2389                        retval = low_latency;
2390                break;
2391        case low_latency:  /* 50 usec aka 20000 ints/s */
2392                if (bytes > 10000) {
2393                        /* this if handles the TSO accounting */
2394                        if (bytes/packets > 8000)
2395                                retval = bulk_latency;
2396                        else if ((packets < 10) || ((bytes/packets) > 1200))
2397                                retval = bulk_latency;
2398                        else if (packets > 35)
2399                                retval = lowest_latency;
2400                } else if (bytes/packets > 2000) {
2401                        retval = bulk_latency;
2402                } else if (packets <= 2 && bytes < 512) {
2403                        retval = lowest_latency;
2404                }
2405                break;
2406        case bulk_latency: /* 250 usec aka 4000 ints/s */
2407                if (bytes > 25000) {
2408                        if (packets > 35)
2409                                retval = low_latency;
2410                } else if (bytes < 6000) {
2411                        retval = low_latency;
2412                }
2413                break;
2414        }
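        /*
         * Worked example: at low_latency, an interval that moved 30000 bytes
         * in 3 packets (10000 bytes/packet, i.e. TSO or jumbo traffic) takes
         * the bytes/packets > 8000 branch above and steps down to
         * bulk_latency.
         */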
2415
2416        return retval;
2417}
2418
2419static void e1000_set_itr(struct e1000_adapter *adapter)
2420{
2421        struct e1000_hw *hw = &adapter->hw;
2422        u16 current_itr;
2423        u32 new_itr = adapter->itr;
2424
2425        /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2426        if (adapter->link_speed != SPEED_1000) {
2427                current_itr = 0;
2428                new_itr = 4000;
2429                goto set_itr_now;
2430        }
2431
2432        if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2433                new_itr = 0;
2434                goto set_itr_now;
2435        }
2436
2437        adapter->tx_itr = e1000_update_itr(adapter,
2438                                    adapter->tx_itr,
2439                                    adapter->total_tx_packets,
2440                                    adapter->total_tx_bytes);
2441        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2442        if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2443                adapter->tx_itr = low_latency;
2444
2445        adapter->rx_itr = e1000_update_itr(adapter,
2446                                    adapter->rx_itr,
2447                                    adapter->total_rx_packets,
2448                                    adapter->total_rx_bytes);
2449        /* conservative mode (itr 3) eliminates the lowest_latency setting */
2450        if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2451                adapter->rx_itr = low_latency;
2452
2453        current_itr = max(adapter->rx_itr, adapter->tx_itr);
2454
2455        switch (current_itr) {
2456        /* counts and packets in update_itr are dependent on these numbers */
2457        case lowest_latency:
2458                new_itr = 70000;
2459                break;
2460        case low_latency:
2461                new_itr = 20000; /* aka hwitr = ~200 */
2462                break;
2463        case bulk_latency:
2464                new_itr = 4000;
2465                break;
2466        default:
2467                break;
2468        }
2469
2470set_itr_now:
2471        if (new_itr != adapter->itr) {
2472                /*
2473                 * this attempts to bias the interrupt rate towards Bulk
2474                 * by adding intermediate steps when interrupt rate is
2475                 * increasing
2476                 */
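                /*
                 * e.g. moving from itr = 4000 toward a target of 20000
                 * programs min(4000 + (20000 >> 2), 20000) = 9000 on this
                 * pass, converging over several intervals rather than
                 * jumping straight to the higher rate
                 */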
2477                new_itr = new_itr > adapter->itr ?
2478                             min(adapter->itr + (new_itr >> 2), new_itr) :
2479                             new_itr;
2480                adapter->itr = new_itr;
2481                adapter->rx_ring->itr_val = new_itr;
2482                if (adapter->msix_entries)
2483                        adapter->rx_ring->set_itr = 1;
2484                else
2485                        if (new_itr)
2486                                ew32(ITR, 1000000000 / (new_itr * 256));
2487                        else
2488                                ew32(ITR, 0);
2489        }
2490}
2491
2492/**
2493 * e1000e_write_itr - write the ITR value to the appropriate registers
2494 * @adapter: address of board private structure
2495 * @itr: new ITR value to program
2496 *
2497 * e1000e_write_itr determines if the adapter is in MSI-X mode
2498 * and, if so, writes the EITR registers with the ITR value.
2499 * Otherwise, it writes the ITR value into the ITR register.
2500 **/
2501void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2502{
2503        struct e1000_hw *hw = &adapter->hw;
2504        u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
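        /*
         * This assumes the 82574-style ITR/EITR registers count in 256 ns
         * units: itr = 20000 ints/sec programs 10^9 / (20000 * 256) = 195,
         * i.e. roughly 195 * 256 ns ~= 50 us between interrupts.
         */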
2505
2506        if (adapter->msix_entries) {
2507                int vector;
2508
2509                for (vector = 0; vector < adapter->num_vectors; vector++)
2510                        writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2511        } else {
2512                ew32(ITR, new_itr);
2513        }
2514}
2515
2516/**
2517 * e1000_alloc_queues - Allocate memory for all rings
2518 * @adapter: board private structure to initialize
2519 **/
2520static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
2521{
2522        int size = sizeof(struct e1000_ring);
2523
2524        adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2525        if (!adapter->tx_ring)
2526                goto err;
2527        adapter->tx_ring->count = adapter->tx_ring_count;
2528        adapter->tx_ring->adapter = adapter;
2529
2530        adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2531        if (!adapter->rx_ring)
2532                goto err;
2533        adapter->rx_ring->count = adapter->rx_ring_count;
2534        adapter->rx_ring->adapter = adapter;
2535
2536        return 0;
2537err:
2538        e_err("Unable to allocate memory for queues\n");
2539        kfree(adapter->rx_ring);
2540        kfree(adapter->tx_ring);
2541        return -ENOMEM;
2542}
2543
2544/**
2545 * e1000e_poll - NAPI Rx polling callback
2546 * @napi: struct associated with this polling callback
2547 * @weight: number of packets driver is allowed to process this poll
2548 **/
2549static int e1000e_poll(struct napi_struct *napi, int weight)
2550{
2551        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2552                                                     napi);
2553        struct e1000_hw *hw = &adapter->hw;
2554        struct net_device *poll_dev = adapter->netdev;
2555        int tx_cleaned = 1, work_done = 0;
2556
2557        adapter = netdev_priv(poll_dev);
2558
2559        if (!adapter->msix_entries ||
2560            (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2561                tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2562
2563        adapter->clean_rx(adapter->rx_ring, &work_done, weight);
2564
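        /* if Tx work remains, claim the full budget so NAPI polls again */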
2565        if (!tx_cleaned)
2566                work_done = weight;
2567
2568        /* If weight not fully consumed, exit the polling mode */
2569        if (work_done < weight) {
2570                if (adapter->itr_setting & 3)
2571                        e1000_set_itr(adapter);
2572                napi_complete(napi);
2573                if (!test_bit(__E1000_DOWN, &adapter->state)) {
2574                        if (adapter->msix_entries)
2575                                ew32(IMS, adapter->rx_ring->ims_val);
2576                        else
2577                                e1000_irq_enable(adapter);
2578                }
2579        }
2580
2581        return work_done;
2582}
2583
2584static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2585{
2586        struct e1000_adapter *adapter = netdev_priv(netdev);
2587        struct e1000_hw *hw = &adapter->hw;
2588        u32 vfta, index;
2589
2590        /* don't update vlan cookie if already programmed */
2591        if ((adapter->hw.mng_cookie.status &
2592             E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2593            (vid == adapter->mng_vlan_id))
2594                return 0;
2595
2596        /* add VID to filter table */
2597        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2598                index = (vid >> 5) & 0x7F;
2599                vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2600                vfta |= (1 << (vid & 0x1F));
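                /*
                 * Example: vid 100 selects VFTA register index 100 >> 5 = 3
                 * and bit 100 & 0x1f = 4 within it; the 4096 possible VIDs
                 * thus map onto 128 32-bit filter registers.
                 */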
2601                hw->mac.ops.write_vfta(hw, index, vfta);
2602        }
2603
2604        set_bit(vid, adapter->active_vlans);
2605
2606        return 0;
2607}
2608
2609static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2610{
2611        struct e1000_adapter *adapter = netdev_priv(netdev);
2612        struct e1000_hw *hw = &adapter->hw;
2613        u32 vfta, index;
2614
2615        if ((adapter->hw.mng_cookie.status &
2616             E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2617            (vid == adapter->mng_vlan_id)) {
2618                /* release control to f/w */
2619                e1000e_release_hw_control(adapter);
2620                return 0;
2621        }
2622
2623        /* remove VID from filter table */
2624        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2625                index = (vid >> 5) & 0x7F;
2626                vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2627                vfta &= ~(1 << (vid & 0x1F));
2628                hw->mac.ops.write_vfta(hw, index, vfta);
2629        }
2630
2631        clear_bit(vid, adapter->active_vlans);
2632
2633        return 0;
2634}
2635
2636/**
2637 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2638 * @adapter: board private structure to initialize
2639 **/
2640static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2641{
2642        struct net_device *netdev = adapter->netdev;
2643        struct e1000_hw *hw = &adapter->hw;
2644        u32 rctl;
2645
2646        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2647                /* disable VLAN receive filtering */
2648                rctl = er32(RCTL);
2649                rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2650                ew32(RCTL, rctl);
2651
2652                if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2653                        e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2654                        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2655                }
2656        }
2657}
2658
2659/**
2660 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2661 * @adapter: board private structure
2662 **/
2663static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2664{
2665        struct e1000_hw *hw = &adapter->hw;
2666        u32 rctl;
2667
2668        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2669                /* enable VLAN receive filtering */
2670                rctl = er32(RCTL);
2671                rctl |= E1000_RCTL_VFE;
2672                rctl &= ~E1000_RCTL_CFIEN;
2673                ew32(RCTL, rctl);
2674        }
2675}
2676
2677/**
2678 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2679 * @adapter: board private structure
2680 **/
2681static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2682{
2683        struct e1000_hw *hw = &adapter->hw;
2684        u32 ctrl;
2685
2686        /* disable VLAN tag insert/strip */
2687        ctrl = er32(CTRL);
2688        ctrl &= ~E1000_CTRL_VME;
2689        ew32(CTRL, ctrl);
2690}
2691
2692/**
2693 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2694 * @adapter: board private structure
2695 **/
2696static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2697{
2698        struct e1000_hw *hw = &adapter->hw;
2699        u32 ctrl;
2700
2701        /* enable VLAN tag insert/strip */
2702        ctrl = er32(CTRL);
2703        ctrl |= E1000_CTRL_VME;
2704        ew32(CTRL, ctrl);
2705}
2706
2707static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2708{
2709        struct net_device *netdev = adapter->netdev;
2710        u16 vid = adapter->hw.mng_cookie.vlan_id;
2711        u16 old_vid = adapter->mng_vlan_id;
2712
2713        if (adapter->hw.mng_cookie.status &
2714            E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2715                e1000_vlan_rx_add_vid(netdev, vid);
2716                adapter->mng_vlan_id = vid;
2717        }
2718
2719        if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2720                e1000_vlan_rx_kill_vid(netdev, old_vid);
2721}
2722
2723static void e1000_restore_vlan(struct e1000_adapter *adapter)
2724{
2725        u16 vid;
2726
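            /* VID 0 lets priority-tagged (802.1p) frames through the filter */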
2727        e1000_vlan_rx_add_vid(adapter->netdev, 0);
2728
2729        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2730                e1000_vlan_rx_add_vid(adapter->netdev, vid);
2731}
2732
2733static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2734{
2735        struct e1000_hw *hw = &adapter->hw;
2736        u32 manc, manc2h, mdef, i, j;
2737
2738        if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2739                return;
2740
2741        manc = er32(MANC);
2742
2743        /*
2744         * Enable receiving management packets to the host.  This will
2745         * probably generate destination unreachable messages from the host
2746         * OS, but the packets will be handled on SMBus.
2747         */
2748        manc |= E1000_MANC_EN_MNG2HOST;
2749        manc2h = er32(MANC2H);
2750
2751        switch (hw->mac.type) {
2752        default:
2753                manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2754                break;
2755        case e1000_82574:
2756        case e1000_82583:
2757                /*
2758                 * Check if IPMI pass-through decision filter already exists;
2759                 * if so, enable it.
2760                 */
2761                for (i = 0, j = 0; i < 8; i++) {
2762                        mdef = er32(MDEF(i));
2763
2764                        /* Ignore filters with anything other than IPMI ports */
2765                        if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2766                                continue;
2767
2768                        /* Enable this decision filter in MANC2H */
2769                        if (mdef)
2770                                manc2h |= (1 << i);
2771
2772                        j |= mdef;
2773                }
2774
2775                if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2776                        break;
2777
2778                /* Create new decision filter in an empty filter */
2779                for (i = 0, j = 0; i < 8; i++)
2780                        if (er32(MDEF(i)) == 0) {
2781                                ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2782                                               E1000_MDEF_PORT_664));
2783                                manc2h |= (1 << i); /* enable the filter just written */
2784                                j++;
2785                                break;
2786                        }
2787
2788                if (!j)
2789                        e_warn("Unable to create IPMI pass-through filter\n");
2790                break;
2791        }
2792
2793        ew32(MANC2H, manc2h);
2794        ew32(MANC, manc);
2795}
2796
2797/**
2798 * e1000_configure_tx - Configure Transmit Unit after Reset
2799 * @adapter: board private structure
2800 *
2801 * Configure the Tx unit of the MAC after a reset.
2802 **/
2803static void e1000_configure_tx(struct e1000_adapter *adapter)
2804{
2805        struct e1000_hw *hw = &adapter->hw;
2806        struct e1000_ring *tx_ring = adapter->tx_ring;
2807        u64 tdba;
2808        u32 tdlen, tarc;
2809
2810        /* Setup the HW Tx Head and Tail descriptor pointers */
2811        tdba = tx_ring->dma;
2812        tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2813        ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2814        ew32(TDBAH(0), (tdba >> 32));
2815        ew32(TDLEN(0), tdlen);
2816        ew32(TDH(0), 0);
2817        ew32(TDT(0), 0);
2818        tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2819        tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2820
2821        /* Set the Tx Interrupt Delay register */
2822        ew32(TIDV, adapter->tx_int_delay);
2823        /* Tx irq moderation */
2824        ew32(TADV, adapter->tx_abs_int_delay);
2825
2826        if (adapter->flags2 & FLAG2_DMA_BURST) {
2827                u32 txdctl = er32(TXDCTL(0));
2828                txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2829                            E1000_TXDCTL_WTHRESH);
2830                /*
2831                 * Set up some performance-related parameters to encourage
2832                 * the hardware to use the bus more efficiently in bursts;
2833                 * this depends on tx_int_delay being enabled.
2834                 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
2835                 * hthresh = 1 ==> prefetch when one or more descriptors are available
2836                 * pthresh = 0x1f ==> prefetch if 31 or fewer descriptors are cached
2837                 * BEWARE: this seems to work but should be suspected first if
2838                 * there are Tx hangs or other Tx related bugs
2839                 */
2840                txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2841                ew32(TXDCTL(0), txdctl);
2842        }
2843        /* erratum work around: set txdctl the same for both queues */
2844        ew32(TXDCTL(1), er32(TXDCTL(0)));
2845
2846        if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2847                tarc = er32(TARC(0));
2848                /*
2849                 * set the speed mode bit, we'll clear it if we're not at
2850                 * gigabit link later
2851                 */
2852#define SPEED_MODE_BIT (1 << 21)
2853                tarc |= SPEED_MODE_BIT;
2854                ew32(TARC(0), tarc);
2855        }
2856
2857        /* errata: program both queues to unweighted RR */
2858        if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2859                tarc = er32(TARC(0));
2860                tarc |= 1;
2861                ew32(TARC(0), tarc);
2862                tarc = er32(TARC(1));
2863                tarc |= 1;
2864                ew32(TARC(1), tarc);
2865        }
2866
2867        /* Setup Transmit Descriptor Settings for eop descriptor */
2868        adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2869
2870        /* only set IDE if we are delaying interrupts using the timers */
2871        if (adapter->tx_int_delay)
2872                adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2873
2874        /* enable Report Status bit */
2875        adapter->txd_cmd |= E1000_TXD_CMD_RS;
2876
2877        hw->mac.ops.config_collision_dist(hw);
2878}
2879
2880/**
2881 * e1000_setup_rctl - configure the receive control registers
2882 * @adapter: Board private structure
2883 **/
2884#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2885                           (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
2886static void e1000_setup_rctl(struct e1000_adapter *adapter)
2887{
2888        struct e1000_hw *hw = &adapter->hw;
2889        u32 rctl, rfctl;
2890        u32 pages = 0;
2891
2892        /* Workaround Si errata on PCHx - configure jumbo frame flow */
2893        if (hw->mac.type >= e1000_pch2lan) {
2894                s32 ret_val;
2895
2896                if (adapter->netdev->mtu > ETH_DATA_LEN)
2897                        ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2898                else
2899                        ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2900
2901                if (ret_val)
2902                        e_dbg("failed to enable jumbo frame workaround mode\n");
2903        }
2904
2905        /* Program MC offset vector base */
2906        rctl = er32(RCTL);
2907        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2908        rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2909                E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2910                (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2911
2912        /* Do not store bad packets */
2913        rctl &= ~E1000_RCTL_SBP;
2914
2915        /* Enable Long Packet receive */
2916        if (adapter->netdev->mtu <= ETH_DATA_LEN)
2917                rctl &= ~E1000_RCTL_LPE;
2918        else
2919                rctl |= E1000_RCTL_LPE;
2920
2921        /* Some systems expect that the CRC is included in SMBUS traffic. The
2922         * hardware strips the CRC before sending to both SMBUS (BMC) and to
2923         * host memory when this is enabled
2924         */
2925        if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2926                rctl |= E1000_RCTL_SECRC;
2927
2928        /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2929        if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2930                u16 phy_data;
2931
2932                e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2933                phy_data &= 0xfff8;
2934                phy_data |= (1 << 2);
2935                e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2936
2937                e1e_rphy(hw, 22, &phy_data);
2938                phy_data &= 0x0fff;
2939                phy_data |= (1 << 14);
2940                e1e_wphy(hw, 0x10, 0x2823);
2941                e1e_wphy(hw, 0x11, 0x0003);
2942                e1e_wphy(hw, 22, phy_data);
2943        }
2944
2945        /* Setup buffer sizes */
2946        rctl &= ~E1000_RCTL_SZ_4096;
2947        rctl |= E1000_RCTL_BSEX;
2948        switch (adapter->rx_buffer_len) {
2949        case 2048:
2950        default:
2951                rctl |= E1000_RCTL_SZ_2048;
2952                rctl &= ~E1000_RCTL_BSEX;
2953                break;
2954        case 4096:
2955                rctl |= E1000_RCTL_SZ_4096;
2956                break;
2957        case 8192:
2958                rctl |= E1000_RCTL_SZ_8192;
2959                break;
2960        case 16384:
2961                rctl |= E1000_RCTL_SZ_16384;
2962                break;
2963        }
2964
2965        /* Enable Extended Status in all Receive Descriptors */
2966        rfctl = er32(RFCTL);
2967        rfctl |= E1000_RFCTL_EXTEN;
2968        ew32(RFCTL, rfctl);
2969
2970        /*
2971         * 82571 and greater support packet-split where the protocol
2972         * header is placed in skb->data and the packet data is
2973         * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2974         * In the case of a non-split, skb->data is linearly filled,
2975         * followed by the page buffers.  Therefore, skb->data is
2976         * sized to hold the largest protocol header.
2977         *
2978         * allocations using alloc_page take too long for regular MTU
2979         * so only enable packet split for jumbo frames
2980         *
2981         * Using pages when the page size is greater than 16k wastes
2982         * a lot of memory, since we allocate 3 pages at all times
2983         * per packet.
2984         */
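            /*
             * E.g. with 4 KiB pages, a 9000-byte jumbo MTU needs
             * PAGE_USE_COUNT(9000) = (9000 >> 12) + 1 = 3 pages, so packet
             * split stays enabled for jumbo frames.
             */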
2985        pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2986        if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2987                adapter->rx_ps_pages = pages;
2988        else
2989                adapter->rx_ps_pages = 0;
2990
2991        if (adapter->rx_ps_pages) {
2992                u32 psrctl = 0;
2993
2994                /* Enable Packet split descriptors */
2995                rctl |= E1000_RCTL_DTYP_PS;
2996
2997                psrctl |= adapter->rx_ps_bsize0 >>
2998                        E1000_PSRCTL_BSIZE0_SHIFT;
2999
3000                switch (adapter->rx_ps_pages) {
3001                case 3:
3002                        psrctl |= PAGE_SIZE <<
3003                                E1000_PSRCTL_BSIZE3_SHIFT;
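                            /* fall-through */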
3004                case 2:
3005                        psrctl |= PAGE_SIZE <<
3006                                E1000_PSRCTL_BSIZE2_SHIFT;
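                            /* fall-through */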
3007                case 1:
3008                        psrctl |= PAGE_SIZE >>
3009                                E1000_PSRCTL_BSIZE1_SHIFT;
3010                        break;
3011                }
3012
3013                ew32(PSRCTL, psrctl);
3014        }
3015
3016        /* This is useful for sniffing bad packets. */
3017        if (adapter->netdev->features & NETIF_F_RXALL) {
3018                /* UPE and MPE will be handled by normal PROMISC logic
3019                 * in e1000e_set_rx_mode */
3020                rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3021                         E1000_RCTL_BAM | /* RX All Bcast Pkts */
3022                         E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3023
3024                rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3025                          E1000_RCTL_DPF | /* Allow filtered pause */
3026                          E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3027                /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3028                 * and that breaks VLANs.
3029                 */
3030        }
3031
3032        ew32(RCTL, rctl);
3033        /* just started the receive unit, no need to restart */
3034        adapter->flags &= ~FLAG_RX_RESTART_NOW;
3035}
3036
3037/**
3038 * e1000_configure_rx - Configure Receive Unit after Reset
3039 * @adapter: board private structure
3040 *
3041 * Configure the Rx unit of the MAC after a reset.
3042 **/
3043static void e1000_configure_rx(struct e1000_adapter *adapter)
3044{
3045        struct e1000_hw *hw = &adapter->hw;
3046        struct e1000_ring *rx_ring = adapter->rx_ring;
3047        u64 rdba;
3048        u32 rdlen, rctl, rxcsum, ctrl_ext;
3049
3050        if (adapter->rx_ps_pages) {
3051                /* this is a 32 byte descriptor */
3052                rdlen = rx_ring->count *
3053                    sizeof(union e1000_rx_desc_packet_split);
3054                adapter->clean_rx = e1000_clean_rx_irq_ps;
3055                adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3056        } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3057                rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3058                adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3059                adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3060        } else {
3061                rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3062                adapter->clean_rx = e1000_clean_rx_irq;
3063                adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3064        }
3065
3066        /* disable receives while setting up the descriptors */
3067        rctl = er32(RCTL);
3068        if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3069                ew32(RCTL, rctl & ~E1000_RCTL_EN);
3070        e1e_flush();
3071        usleep_range(10000, 20000);
3072
3073        if (adapter->flags2 & FLAG2_DMA_BURST) {
3074                /*
3075                 * set the writeback threshold (only takes effect if the RDTR
3076                 * is set). set GRAN=1 and write back up to 0x4 worth, and
3077                 * enable prefetching of 0x20 Rx descriptors
3078                 * granularity = 01
3079                 * wthresh = 04,
3080                 * hthresh = 04,
3081                 * pthresh = 0x20
3082                 */
3083                ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3084                ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3085
3086                /*
3087                 * override the delay timers for enabling bursting, only if
3088                 * the value was not set by the user via module options
3089                 */
3090                if (adapter->rx_int_delay == DEFAULT_RDTR)
3091                        adapter->rx_int_delay = BURST_RDTR;
3092                if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3093                        adapter->rx_abs_int_delay = BURST_RADV;
3094        }
3095
3096        /* set the Receive Delay Timer Register */
3097        ew32(RDTR, adapter->rx_int_delay);
3098
3099        /* irq moderation */
3100        ew32(RADV, adapter->rx_abs_int_delay);
3101        if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3102                e1000e_write_itr(adapter, adapter->itr);
3103
3104        ctrl_ext = er32(CTRL_EXT);
3105        /* Auto-Mask interrupts upon ICR access */
3106        ctrl_ext |= E1000_CTRL_EXT_IAME;
3107        ew32(IAM, 0xffffffff);
3108        ew32(CTRL_EXT, ctrl_ext);
3109        e1e_flush();
3110
3111        /*
3112         * Setup the HW Rx Head and Tail Descriptor Pointers and
3113         * the Base and Length of the Rx Descriptor Ring
3114         */
3115        rdba = rx_ring->dma;
3116        ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3117        ew32(RDBAH(0), (rdba >> 32));
3118        ew32(RDLEN(0), rdlen);
3119        ew32(RDH(0), 0);
3120        ew32(RDT(0), 0);
3121        rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3122        rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3123
3124        /* Enable Receive Checksum Offload for TCP and UDP */
3125        rxcsum = er32(RXCSUM);
3126        if (adapter->netdev->features & NETIF_F_RXCSUM)
3127                rxcsum |= E1000_RXCSUM_TUOFL;
3128        else
3129                rxcsum &= ~E1000_RXCSUM_TUOFL;
3130        ew32(RXCSUM, rxcsum);
3131
3132        if (adapter->hw.mac.type == e1000_pch2lan) {
3133                /*
3134                 * With jumbo frames, excessive C-state transition
3135                 * latencies result in dropped transactions.
3136                 */
3137                if (adapter->netdev->mtu > ETH_DATA_LEN) {
3138                        u32 rxdctl = er32(RXDCTL(0));
3139                        ew32(RXDCTL(0), rxdctl | 0x3);
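                            /* bound CPU DMA latency to 55 usec while jumbos are in use */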
3140                        pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
3141                } else {
3142                        pm_qos_update_request(&adapter->netdev->pm_qos_req,
3143                                              PM_QOS_DEFAULT_VALUE);
3144                }
3145        }
3146
3147        /* Enable Receives */
3148        ew32(RCTL, rctl);
3149}
3150
3151/**
3152 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3153 * @netdev: network interface device structure
3154 *
3155 * Writes multicast address list to the MTA hash table.
3156 * Returns: -ENOMEM on failure
3157 *                0 on no addresses written
3158 *                X on writing X addresses to MTA
3159 */
3160static int e1000e_write_mc_addr_list(struct net_device *netdev)
3161{
3162        struct e1000_adapter *adapter = netdev_priv(netdev);
3163        struct e1000_hw *hw = &adapter->hw;
3164        struct netdev_hw_addr *ha;
3165        u8 *mta_list;
3166        int i;
3167
3168        if (netdev_mc_empty(netdev)) {
3169                /* nothing to program, so clear mc list */
3170                hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3171                return 0;
3172        }
3173
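            /* ndo_set_rx_mode may run in atomic context, hence GFP_ATOMIC */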
3174        mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3175        if (!mta_list)
3176                return -ENOMEM;
3177
3178        /* update_mc_addr_list expects a packed array of only addresses. */
3179        i = 0;
3180        netdev_for_each_mc_addr(ha, netdev)
3181                memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3182
3183        hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3184        kfree(mta_list);
3185
3186        return netdev_mc_count(netdev);
3187}
3188
3189/**
3190 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3191 * @netdev: network interface device structure
3192 *
3193 * Writes unicast address list to the RAR table.
3194 * Returns: -ENOMEM on failure/insufficient address space
3195 *                0 on no addresses written
3196 *                X on writing X addresses to the RAR table
3197 **/
3198static int e1000e_write_uc_addr_list(struct net_device *netdev)
3199{
3200        struct e1000_adapter *adapter = netdev_priv(netdev);
3201        struct e1000_hw *hw = &adapter->hw;
3202        unsigned int rar_entries = hw->mac.rar_entry_count;
3203        int count = 0;
3204
3205        /* save a rar entry for our hardware address */
3206        rar_entries--;
3207
3208        /* save a rar entry for the LAA workaround */
3209        if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3210                rar_entries--;
3211
3212        /* return -ENOMEM to indicate insufficient space for the addresses */
3213        if (netdev_uc_count(netdev) > rar_entries)
3214                return -ENOMEM;
3215
3216        if (!netdev_uc_empty(netdev) && rar_entries) {
3217                struct netdev_hw_addr *ha;
3218
3219                /*
3220                 * write the addresses in reverse order to avoid write
3221                 * combining
3222                 */
3223                netdev_for_each_uc_addr(ha, netdev) {
3224                        if (!rar_entries)
3225                                break;
3226                        hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3227                        count++;
3228                }
3229        }
3230
3231        /* zero out the remaining RAR entries not used above */
3232        for (; rar_entries > 0; rar_entries--) {
3233                ew32(RAH(rar_entries), 0);
3234                ew32(RAL(rar_entries), 0);
3235        }
3236        e1e_flush();
3237
3238        return count;
3239}
3240
3241/**
3242 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3243 * @netdev: network interface device structure
3244 *
3245 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3246 * address list or the network interface flags are updated.  This routine is
3247 * responsible for configuring the hardware for proper unicast, multicast,
3248 * promiscuous mode, and all-multi behavior.
3249 **/
3250static void e1000e_set_rx_mode(struct net_device *netdev)
3251{
3252        struct e1000_adapter *adapter = netdev_priv(netdev);
3253        struct e1000_hw *hw = &adapter->hw;
3254        u32 rctl;
3255
3256        /* Check for Promiscuous and All Multicast modes */
3257        rctl = er32(RCTL);
3258
3259        /* clear the affected bits */
3260        rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3261
3262        if (netdev->flags & IFF_PROMISC) {
3263                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3264                /* Do not hardware filter VLANs in promisc mode */
3265                e1000e_vlan_filter_disable(adapter);
3266        } else {
3267                int count;
3268
3269                if (netdev->flags & IFF_ALLMULTI) {
3270                        rctl |= E1000_RCTL_MPE;
3271                } else {
3272                        /*
3273                         * Write addresses to the MTA, if the attempt fails
3274                         * then we should just turn on promiscuous mode so
3275                         * that we can at least receive multicast traffic
3276                         */
3277                        count = e1000e_write_mc_addr_list(netdev);
3278                        if (count < 0)
3279                                rctl |= E1000_RCTL_MPE;
3280                }
3281                e1000e_vlan_filter_enable(adapter);
3282                /*
3283                 * Write addresses to available RAR registers, if there is not
3284                 * sufficient space to store all the addresses then enable
3285                 * unicast promiscuous mode
3286                 */
3287                count = e1000e_write_uc_addr_list(netdev);
3288                if (count < 0)
3289                        rctl |= E1000_RCTL_UPE;
3290        }
3291
3292        ew32(RCTL, rctl);
3293
3294        if (netdev->features & NETIF_F_HW_VLAN_RX)
3295                e1000e_vlan_strip_enable(adapter);
3296        else
3297                e1000e_vlan_strip_disable(adapter);
3298}
3299
3300static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3301{
3302        struct e1000_hw *hw = &adapter->hw;
3303        u32 mrqc, rxcsum;
3304        int i;
3305        static const u32 rsskey[10] = {
3306                0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
3307                0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
3308        };
3309
3310        /* Fill out hash function seed */
3311        for (i = 0; i < 10; i++)
3312                ew32(RSSRK(i), rsskey[i]);
3313
3314        /* Direct all traffic to queue 0 by zeroing the 128-entry redirection table */
3315        for (i = 0; i < 32; i++)
3316                ew32(RETA(i), 0);
3317
3318        /*
3319         * Disable raw packet checksumming so that RSS hash is placed in
3320         * descriptor on writeback.
3321         */
3322        rxcsum = er32(RXCSUM);
3323        rxcsum |= E1000_RXCSUM_PCSD;
3324
3325        ew32(RXCSUM, rxcsum);
3326
3327        mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3328                E1000_MRQC_RSS_FIELD_IPV4_TCP |
3329                E1000_MRQC_RSS_FIELD_IPV6 |
3330                E1000_MRQC_RSS_FIELD_IPV6_TCP |
3331                E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3332
3333        ew32(MRQC, mrqc);
3334}
3335
3336/**
3337 * e1000_configure - configure the hardware for Rx and Tx
3338 * @adapter: private board structure
3339 **/
3340static void e1000_configure(struct e1000_adapter *adapter)
3341{
3342        struct e1000_ring *rx_ring = adapter->rx_ring;
3343
3344        e1000e_set_rx_mode(adapter->netdev);
3345
3346        e1000_restore_vlan(adapter);
3347        e1000_init_manageability_pt(adapter);
3348
3349        e1000_configure_tx(adapter);
3350
3351        if (adapter->netdev->features & NETIF_F_RXHASH)
3352                e1000e_setup_rss_hash(adapter);
3353        e1000_setup_rctl(adapter);
3354        e1000_configure_rx(adapter);
3355        adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3356}
3357
3358/**
3359 * e1000e_power_up_phy - restore link in case the phy was powered down
3360 * @adapter: address of board private structure
3361 *
3362 * The phy may be powered down to save power and turn off link when the
3363 * driver is unloaded and wake on lan is not enabled (among others)
3364 * *** this routine MUST be followed by a call to e1000e_reset ***
3365 **/
3366void e1000e_power_up_phy(struct e1000_adapter *adapter)
3367{
3368        if (adapter->hw.phy.ops.power_up)
3369                adapter->hw.phy.ops.power_up(&adapter->hw);
3370
3371        adapter->hw.mac.ops.setup_link(&adapter->hw);
3372}
3373
3374/**
3375 * e1000_power_down_phy - Power down the PHY
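     * @adapter: board private structure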
3376 *
3377 * Power down the PHY so no link is implied when interface is down.
3378 * The PHY cannot be powered down if management or WoL is active.
3379 */
3380static void e1000_power_down_phy(struct e1000_adapter *adapter)
3381{
3382        /* WoL is enabled */
3383        if (adapter->wol)
3384                return;
3385
3386        if (adapter->hw.phy.ops.power_down)
3387                adapter->hw.phy.ops.power_down(&adapter->hw);
3388}
3389
3390/**
3391 * e1000e_reset - bring the hardware into a known good state
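     * @adapter: board private structure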
3392 *
3393 * This function boots the hardware and enables some settings that
3394 * require a configuration cycle of the hardware - those cannot be
3395 * set/changed during runtime. After reset the device needs to be
3396 * properly configured for Rx, Tx etc.
3397 */
3398void e1000e_reset(struct e1000_adapter *adapter)
3399{
3400        struct e1000_mac_info *mac = &adapter->hw.mac;
3401        struct e1000_fc_info *fc = &adapter->hw.fc;
3402        struct e1000_hw *hw = &adapter->hw;
3403        u32 tx_space, min_tx_space, min_rx_space;
3404        u32 pba = adapter->pba;
3405        u16 hwm;
3406
3407        /* reset Packet Buffer Allocation to default */
3408        ew32(PBA, pba);
3409
3410        if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3411                /*
3412                 * To maintain wire speed transmits, the Tx FIFO should be
3413                 * large enough to accommodate two full transmit packets,
3414                 * rounded up to the next 1KB and expressed in KB.  Likewise,
3415                 * the Rx FIFO should be large enough to accommodate at least
3416                 * one full receive packet and is similarly rounded up and
3417                 * expressed in KB.
3418                 */
3419                pba = er32(PBA);
3420                /* upper 16 bits hold the Tx packet buffer allocation size in KB */
3421                tx_space = pba >> 16;
3422                /* lower 16 bits hold the Rx packet buffer allocation size in KB */
3423                pba &= 0xffff;
3424                /*
3425                 * the Tx FIFO also stores 16 bytes of information about each
3426                 * packet, but don't count the Ethernet FCS since hardware appends it
3427                 */
3428                min_tx_space = (adapter->max_frame_size +
3429                                sizeof(struct e1000_tx_desc) -
3430                                ETH_FCS_LEN) * 2;
3431                min_tx_space = ALIGN(min_tx_space, 1024);
3432                min_tx_space >>= 10;
3433                /* software strips receive CRC, so leave room for it */
3434                min_rx_space = adapter->max_frame_size;
3435                min_rx_space = ALIGN(min_rx_space, 1024);
3436                min_rx_space >>= 10;
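                    /*
                     * E.g. a 9018-byte max frame gives min_tx_space =
                     * ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 (KB) and
                     * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 (KB).
                     */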
3437
3438                /*
3439                 * If current Tx allocation is less than the min Tx FIFO size,
3440                 * and the min Tx FIFO size is less than the current Rx FIFO
3441                 * allocation, take space away from current Rx allocation
3442                 */
3443                if ((tx_space < min_tx_space) &&
3444                    ((min_tx_space - tx_space) < pba)) {
3445                        pba -= min_tx_space - tx_space;
3446
3447                        /*
3448                         * if short on Rx space, Rx wins and must trump Tx
3449                         * adjustment
3450                         */
3451                        if (pba < min_rx_space)
3452                                pba = min_rx_space;
3453                }
3454
3455                ew32(PBA, pba);
3456        }
3457
3458        /*
3459         * flow control settings
3460         *
3461         * The high water mark must be low enough to fit one full frame
3462         * (or the size used for early receive) above it in the Rx FIFO.
3463         * Set it to the lower of:
3464         * - 90% of the Rx FIFO size, and
3465         * - the full Rx FIFO size minus one full frame
3466         */
3467        if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3468                fc->pause_time = 0xFFFF;
3469        else
3470                fc->pause_time = E1000_FC_PAUSE_TIME;
3471        fc->send_xon = true;
3472        fc->current_mode = fc->requested_mode;
3473
3474        switch (hw->mac.type) {
3475        case e1000_ich9lan:
3476        case e1000_ich10lan:
3477                if (adapter->netdev->mtu > ETH_DATA_LEN) {
3478                        pba = 14;
3479                        ew32(PBA, pba);
3480                        fc->high_water = 0x2800;
3481                        fc->low_water = fc->high_water - 8;
3482                        break;
3483                }
3484                /* fall-through */
3485        default:
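                    /*
                     * E.g. a 20 KB Rx PBA and a 1522-byte max frame give
                     * hwm = min(20480 * 9 / 10, 20480 - 1522) = 18432.
                     */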
3486                hwm = min(((pba << 10) * 9 / 10),
3487                          ((pba << 10) - adapter->max_frame_size));
3488
3489                fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3490                fc->low_water = fc->high_water - 8;
3491                break;
3492        case e1000_pchlan:
3493                /*
3494                 * Workaround PCH LOM adapter hangs with certain network
3495                 * loads.  If hangs persist, try disabling Tx flow control.
3496                 */
3497                if (adapter->netdev->mtu > ETH_DATA_LEN) {
3498                        fc->high_water = 0x3500;
3499                        fc->low_water  = 0x1500;
3500                } else {
3501                        fc->high_water = 0x5000;
3502                        fc->low_water  = 0x3000;
3503                }
3504                fc->refresh_time = 0x1000;
3505                break;
3506        case e1000_pch2lan:
3507        case e1000_pch_lpt:
3508                fc->high_water = 0x05C20;
3509                fc->low_water = 0x05048;
3510                fc->pause_time = 0x0650;
3511                fc->refresh_time = 0x0400;
3512                if (adapter->netdev->mtu > ETH_DATA_LEN) {
3513                        pba = 14;
3514                        ew32(PBA, pba);
3515                }
3516                break;
3517        }
3518
3519        /*
3520         * Alignment of Tx data is on an arbitrary byte boundary with the
3521         * maximum size per Tx descriptor limited only to the transmit
3522         * allocation of the packet buffer minus 96 bytes with an upper
3523         * limit of 24KB due to receive synchronization limitations.
3524         */
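            /*
             * E.g. a 20 KB Tx allocation yields
             * min((20 << 10) - 96, 24 << 10) = 20384 bytes.
             */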
3525        adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3526                                       24 << 10);
3527
3528        /*
3529         * Disable Adaptive Interrupt Moderation if 2 full packets cannot
3530         * fit in receive buffer.
3531         */
3532        if (adapter->itr_setting & 0x3) {
3533                if ((adapter->max_frame_size * 2) > (pba << 10)) {
3534                        if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3535                                dev_info(&adapter->pdev->dev,
3536                                        "Interrupt Throttle Rate turned off\n");
3537                                adapter->flags2 |= FLAG2_DISABLE_AIM;
3538                                e1000e_write_itr(adapter, 0);
3539                        }
3540                } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3541                        dev_info(&adapter->pdev->dev,
3542                                 "Interrupt Throttle Rate turned on\n");
3543                        adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3544                        adapter->itr = 20000;
3545                        e1000e_write_itr(adapter, adapter->itr);
3546                }
3547        }
3548
3549        /* Allow time for pending master requests to run */
3550        mac->ops.reset_hw(hw);
3551
3552        /*
3553         * For parts with AMT enabled, let the firmware know
3554         * that the network interface is in control
3555         */
3556        if (adapter->flags & FLAG_HAS_AMT)
3557                e1000e_get_hw_control(adapter);
3558
3559        ew32(WUC, 0);
3560
3561        if (mac->ops.init_hw(hw))
3562                e_err("Hardware Error\n");
3563
3564        e1000_update_mng_vlan(adapter);
3565
3566        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3567        ew32(VET, ETH_P_8021Q);
3568
3569        e1000e_reset_adaptive(hw);
3570
3571        if (!netif_running(adapter->netdev) &&
3572            !test_bit(__E1000_TESTING, &adapter->state)) {
3573                e1000_power_down_phy(adapter);
3574                return;
3575        }
3576
3577        e1000_get_phy_info(hw);
3578
3579        if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3580            !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3581                u16 phy_data = 0;
3582                /*
3583                 * speed up time to link by disabling smart power down, ignore
3584                 * the return value of this function because there is nothing
3585                 * different we would do if it failed
3586                 */
3587                e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3588                phy_data &= ~IGP02E1000_PM_SPD;
3589                e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3590        }
3591}
3592
3593int e1000e_up(struct e1000_adapter *adapter)
3594{
3595        struct e1000_hw *hw = &adapter->hw;
3596
3597        /* hardware has been reset, we need to reload some things */
3598        e1000_configure(adapter);
3599
3600        clear_bit(__E1000_DOWN, &adapter->state);
3601
3602        if (adapter->msix_entries)
3603                e1000_configure_msix(adapter);
3604        e1000_irq_enable(adapter);
3605
3606        netif_start_queue(adapter->netdev);
3607
3608        /* fire a link change interrupt to start the watchdog */
3609        if (adapter->msix_entries)
3610                ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3611        else
3612                ew32(ICS, E1000_ICS_LSC);
3613
3614        return 0;
3615}
3616
3617static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3618{
3619        struct e1000_hw *hw = &adapter->hw;
3620
3621        if (!(adapter->flags2 & FLAG2_DMA_BURST))
3622                return;
3623
3624        /* flush pending descriptor writebacks to memory */
3625        ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3626        ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3627
3628        /* execute the writes immediately */
3629        e1e_flush();
3630
3631        /*
3632         * due to rare timing issues, write to TIDV/RDTR again to ensure the
3633         * write is successful
3634         */
3635        ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3636        ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3637
3638        /* execute the writes immediately */
3639        e1e_flush();
3640}
3641
3642static void e1000e_update_stats(struct e1000_adapter *adapter);
3643
3644void e1000e_down(struct e1000_adapter *adapter)
3645{
3646        struct net_device *netdev = adapter->netdev;
3647        struct e1000_hw *hw = &adapter->hw;
3648        u32 tctl, rctl;
3649
3650        /*
3651         * signal that we're down so the interrupt handler does not
3652         * reschedule our watchdog timer
3653         */
3654        set_bit(__E1000_DOWN, &adapter->state);
3655
3656        /* disable receives in the hardware */
3657        rctl = er32(RCTL);
3658        if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3659                ew32(RCTL, rctl & ~E1000_RCTL_EN);
3660        /* flush and sleep below */
3661
3662        netif_stop_queue(netdev);
3663
3664        /* disable transmits in the hardware */
3665        tctl = er32(TCTL);
3666        tctl &= ~E1000_TCTL_EN;
3667        ew32(TCTL, tctl);
3668
3669        /* flush both disables and wait for them to finish */
3670        e1e_flush();
3671        usleep_range(10000, 20000);
3672
3673        e1000_irq_disable(adapter);
3674
3675        del_timer_sync(&adapter->watchdog_timer);
3676        del_timer_sync(&adapter->phy_info_timer);
3677
3678        netif_carrier_off(netdev);
3679
3680        spin_lock(&adapter->stats64_lock);
3681        e1000e_update_stats(adapter);
3682        spin_unlock(&adapter->stats64_lock);
3683
3684        e1000e_flush_descriptors(adapter);
3685        e1000_clean_tx_ring(adapter->tx_ring);
3686        e1000_clean_rx_ring(adapter->rx_ring);
3687
3688        adapter->link_speed = 0;
3689        adapter->link_duplex = 0;
3690
3691        if (!pci_channel_offline(adapter->pdev))
3692                e1000e_reset(adapter);
3693
3694        /*
3695         * TODO: for power management, we could drop the link and
3696         * pci_disable_device here.
3697         */
3698}
3699
3700void e1000e_reinit_locked(struct e1000_adapter *adapter)
3701{
3702        might_sleep();
3703        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3704                usleep_range(1000, 2000);
3705        e1000e_down(adapter);
3706        e1000e_up(adapter);
3707        clear_bit(__E1000_RESETTING, &adapter->state);
3708}
3709
3710/**
3711 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
3712 * @adapter: board private structure to initialize
3713 *
3714 * e1000_sw_init initializes the Adapter private data structure.
3715 * Fields are initialized based on PCI device information and
3716 * OS network device settings (MTU size).
3717 **/
3718static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3719{
3720        struct net_device *netdev = adapter->netdev;
3721
3722        adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
3723        adapter->rx_ps_bsize0 = 128;
3724        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
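            /* e.g. the default 1500-byte MTU gives a 1518-byte max frame */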
3725        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3726        adapter->tx_ring_count = E1000_DEFAULT_TXD;
3727        adapter->rx_ring_count = E1000_DEFAULT_RXD;
3728
3729        spin_lock_init(&adapter->stats64_lock);
3730
3731        e1000e_set_interrupt_capability(adapter);
3732
3733        if (e1000_alloc_queues(adapter))
3734                return -ENOMEM;
3735
3736        /* Explicitly disable IRQ since the NIC can be in any state. */
3737        e1000_irq_disable(adapter);
3738
3739        set_bit(__E1000_DOWN, &adapter->state);
3740        return 0;
3741}
3742
3743/**
3744 * e1000_intr_msi_test - Interrupt Handler
3745 * @irq: interrupt number
3746 * @data: pointer to a network interface device structure
3747 **/
3748static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3749{
3750        struct net_device *netdev = data;
3751        struct e1000_adapter *adapter = netdev_priv(netdev);
3752        struct e1000_hw *hw = &adapter->hw;
3753        u32 icr = er32(ICR);
3754
3755        e_dbg("icr is %08X\n", icr);
3756        if (icr & E1000_ICR_RXSEQ) {
3757                adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3758                /*
3759                 * Force memory writes to complete before acknowledging the
3760                 * interrupt is handled.
3761                 */
3762                wmb();
3763        }
3764
3765        return IRQ_HANDLED;
3766}
3767
3768/**
3769 * e1000_test_msi_interrupt - Returns 0 for successful test
3770 * @adapter: board private struct
3771 *
3772 * code flow taken from tg3.c
3773 **/
3774static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3775{
3776        struct net_device *netdev = adapter->netdev;
3777        struct e1000_hw *hw = &adapter->hw;
3778        int err;
3779
3780        /* poll_enable hasn't been called yet, so don't need disable */
3781        /* clear any pending events */
3782        er32(ICR);
3783
3784        /* free the real vector and request a test handler */
3785        e1000_free_irq(adapter);
3786        e1000e_reset_interrupt_capability(adapter);
3787
3788        /* Assume that the test fails; if it succeeds, the test
3789         * MSI irq handler will clear this flag */
3790        adapter->flags |= FLAG_MSI_TEST_FAILED;
3791
3792        err = pci_enable_msi(adapter->pdev);
3793        if (err)
3794                goto msi_test_failed;
3795
3796        err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
3797                          netdev->name, netdev);
3798        if (err) {
3799                pci_disable_msi(adapter->pdev);
3800                goto msi_test_failed;
3801        }
3802
3803        /*
3804         * Force memory writes to complete before enabling and firing an
3805         * interrupt.
3806         */
3807        wmb();
3808
3809        e1000_irq_enable(adapter);
3810
3811        /* fire an unusual interrupt on the test handler */
3812        ew32(ICS, E1000_ICS_RXSEQ);
3813        e1e_flush();
3814        msleep(100);
3815
3816        e1000_irq_disable(adapter);
3817
3818        rmb();                  /* read flags after interrupt has been fired */
3819
3820        if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3821                adapter->int_mode = E1000E_INT_MODE_LEGACY;
3822                e_info("MSI interrupt test failed, using legacy interrupt.\n");
3823        } else {
3824                e_dbg("MSI interrupt test succeeded!\n");
3825        }
3826
3827        free_irq(adapter->pdev->irq, netdev);
3828        pci_disable_msi(adapter->pdev);
3829
3830msi_test_failed:
3831        e1000e_set_interrupt_capability(adapter);
3832        return e1000_request_irq(adapter);
3833}
3834
3835/**
3836 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3837 * @adapter: board private struct
3838 *
3839 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3840 **/
3841static int e1000_test_msi(struct e1000_adapter *adapter)
3842{
3843        int err;
3844        u16 pci_cmd;
3845
3846        if (!(adapter->flags & FLAG_MSI_ENABLED))
3847                return 0;
3848
3849        /* disable SERR in case the MSI write causes a master abort */
3850        pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3851        if (pci_cmd & PCI_COMMAND_SERR)
3852                pci_write_config_word(adapter->pdev, PCI_COMMAND,
3853                                      pci_cmd & ~PCI_COMMAND_SERR);
3854
3855        err = e1000_test_msi_interrupt(adapter);
3856
3857        /* re-enable SERR */
3858        if (pci_cmd & PCI_COMMAND_SERR) {
3859                pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3860                pci_cmd |= PCI_COMMAND_SERR;
3861                pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3862        }
3863
3864        return err;
3865}
3866
3867/**
3868 * e1000_open - Called when a network interface is made active
3869 * @netdev: network interface device structure
3870 *
3871 * Returns 0 on success, negative value on failure
3872 *
3873 * The open entry point is called when a network interface is made
3874 * active by the system (IFF_UP).  At this point all resources needed
3875 * for transmit and receive operations are allocated, the interrupt
3876 * handler is registered with the OS, the watchdog timer is started,
3877 * and the stack is notified that the interface is ready.
3878 **/
3879static int e1000_open(struct net_device *netdev)
3880{
3881        struct e1000_adapter *adapter = netdev_priv(netdev);
3882        struct e1000_hw *hw = &adapter->hw;
3883        struct pci_dev *pdev = adapter->pdev;
3884        int err;
3885
3886        /* disallow open during test */
3887        if (test_bit(__E1000_TESTING, &adapter->state))
3888                return -EBUSY;
3889
3890        pm_runtime_get_sync(&pdev->dev);
3891
3892        netif_carrier_off(netdev);
3893
3894        /* allocate transmit descriptors */
3895        err = e1000e_setup_tx_resources(adapter->tx_ring);
3896        if (err)
3897                goto err_setup_tx;
3898
3899        /* allocate receive descriptors */
3900        err = e1000e_setup_rx_resources(adapter->rx_ring);
3901        if (err)
3902                goto err_setup_rx;
3903
3904        /*
3905         * If AMT is enabled, let the firmware know that the network
3906         * interface is now open and reset the part to a known state.
3907         */
3908        if (adapter->flags & FLAG_HAS_AMT) {
3909                e1000e_get_hw_control(adapter);
3910                e1000e_reset(adapter);
3911        }
3912
3913        e1000e_power_up_phy(adapter);
3914
3915        adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3916        if ((adapter->hw.mng_cookie.status &
3917             E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3918                e1000_update_mng_vlan(adapter);
3919
3920        /* DMA latency requirement to workaround jumbo issue */
3921        if (adapter->hw.mac.type == e1000_pch2lan)
3922                pm_qos_add_request(&adapter->netdev->pm_qos_req,
3923                                   PM_QOS_CPU_DMA_LATENCY,
3924                                   PM_QOS_DEFAULT_VALUE);
3925
3926        /*
3927         * before we allocate an interrupt, we must be ready to handle it.
3928         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3929         * as soon as we call pci_request_irq, so we have to setup our
3930         * clean_rx handler before we do so.
3931         */
3932        e1000_configure(adapter);
3933
3934        err = e1000_request_irq(adapter);
3935        if (err)
3936                goto err_req_irq;
3937
3938        /*
3939         * Work around PCIe errata with MSI interrupts causing some chipsets to
3940         * ignore e1000e MSI messages, which means we need to test our MSI
3941         * interrupt now
3942         */
3943        if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3944                err = e1000_test_msi(adapter);
3945                if (err) {
3946                        e_err("Interrupt allocation failed\n");
3947                        goto err_req_irq;
3948                }
3949        }
3950
3951        /* From here on the code is the same as e1000e_up() */
3952        clear_bit(__E1000_DOWN, &adapter->state);
3953
3954        napi_enable(&adapter->napi);
3955
3956        e1000_irq_enable(adapter);
3957
3958        adapter->tx_hang_recheck = false;
3959        netif_start_queue(netdev);
3960
3961        adapter->idle_check = true;
3962        pm_runtime_put(&pdev->dev);
3963
3964        /* fire a link status change interrupt to start the watchdog */
3965        if (adapter->msix_entries)
3966                ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3967        else
3968                ew32(ICS, E1000_ICS_LSC);
3969
3970        return 0;
3971
3972err_req_irq:
3973        e1000e_release_hw_control(adapter);
3974        e1000_power_down_phy(adapter);
3975        e1000e_free_rx_resources(adapter->rx_ring);
3976err_setup_rx:
3977        e1000e_free_tx_resources(adapter->tx_ring);
3978err_setup_tx:
3979        e1000e_reset(adapter);
3980        pm_runtime_put_sync(&pdev->dev);
3981
3982        return err;
3983}
3984
3985/**
3986 * e1000_close - Disables a network interface
3987 * @netdev: network interface device structure
3988 *
3989 * Returns 0, this is not allowed to fail
3990 *
3991 * The close entry point is called when an interface is de-activated
3992 * by the OS.  The hardware is still under the drivers control, but
3993 * needs to be disabled.  A global MAC reset is issued to stop the
3994 * hardware, and all transmit and receive resources are freed.
3995 **/
3996static int e1000_close(struct net_device *netdev)
3997{
3998        struct e1000_adapter *adapter = netdev_priv(netdev);
3999        struct pci_dev *pdev = adapter->pdev;
4000        int count = E1000_CHECK_RESET_COUNT;
4001
4002        while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
4003                usleep_range(10000, 20000);
4004
4005        WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
4006
4007        pm_runtime_get_sync(&pdev->dev);
4008
4009        napi_disable(&adapter->napi);
4010
4011        if (!test_bit(__E1000_DOWN, &adapter->state)) {
4012                e1000e_down(adapter);
4013                e1000_free_irq(adapter);
4014        }
4015        e1000_power_down_phy(adapter);
4016
4017        e1000e_free_tx_resources(adapter->tx_ring);
4018        e1000e_free_rx_resources(adapter->rx_ring);
4019
4020        /*
4021         * kill manageability vlan ID if supported, but not if a vlan with
4022         * the same ID is registered on the host OS (let 8021q kill it)
4023         */
4024        if (adapter->hw.mng_cookie.status &
4025            E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4026                e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4027
4028        /*
4029         * If AMT is enabled, let the firmware know that the network
4030         * interface is now closed
4031         */
4032        if ((adapter->flags & FLAG_HAS_AMT) &&
4033            !test_bit(__E1000_TESTING, &adapter->state))
4034                e1000e_release_hw_control(adapter);
4035
4036        if (adapter->hw.mac.type == e1000_pch2lan)
4037                pm_qos_remove_request(&adapter->netdev->pm_qos_req);
4038
4039        pm_runtime_put_sync(&pdev->dev);
4040
4041        return 0;
4042}

4043/**
4044 * e1000_set_mac - Change the Ethernet Address of the NIC
4045 * @netdev: network interface device structure
4046 * @p: pointer to an address structure
4047 *
4048 * Returns 0 on success, negative on failure
4049 **/
4050static int e1000_set_mac(struct net_device *netdev, void *p)
4051{
4052        struct e1000_adapter *adapter = netdev_priv(netdev);
4053        struct e1000_hw *hw = &adapter->hw;
4054        struct sockaddr *addr = p;
4055
4056        if (!is_valid_ether_addr(addr->sa_data))
4057                return -EADDRNOTAVAIL;
4058
4059        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4060        memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4061
4062        hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4063
4064        if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4065                /* activate the work around */
4066                e1000e_set_laa_state_82571(&adapter->hw, 1);
4067
4068                /*
4069                 * Hold a copy of the LAA in RAR[14].  This is done so that
4070                 * between the time RAR[0] gets clobbered and the time it
4071                 * gets fixed (in e1000_watchdog), the actual LAA is in one
4072                 * of the RARs and no incoming packets directed to this port
4073                 * are dropped.  Eventually the LAA will be in both RAR[0]
4074                 * and RAR[14].
4075                 */
4076                hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4077                                    adapter->hw.mac.rar_entry_count - 1);
4078        }
4079
4080        return 0;
4081}
4082
4083/**
4084 * e1000e_update_phy_task - work thread to update phy
4085 * @work: pointer to our work struct
4086 *
4087 * this worker thread exists because reading the phy requires
4088 * acquiring a semaphore, which may msleep while waiting for it,
4089 * and we can't msleep in a timer.
4090 **/
4091static void e1000e_update_phy_task(struct work_struct *work)
4092{
4093        struct e1000_adapter *adapter = container_of(work,
4094                                        struct e1000_adapter, update_phy_task);
4095
4096        if (test_bit(__E1000_DOWN, &adapter->state))
4097                return;
4098
4099        e1000_get_phy_info(&adapter->hw);
4100}
4101
4102/*
4103 * Need to wait a few seconds after link up to get diagnostic information from
4104 * the phy
4105 */
4106static void e1000_update_phy_info(unsigned long data)
4107{
4108        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4109
4110        if (test_bit(__E1000_DOWN, &adapter->state))
4111                return;
4112
4113        schedule_work(&adapter->update_phy_task);
4114}
4115
4116/**
4117 * e1000e_update_phy_stats - Update the PHY statistics counters
4118 * @adapter: board private structure
4119 *
4120 * Read/clear the upper 16-bit PHY registers and read/accumulate the lower ones
4121 **/
4122static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4123{
4124        struct e1000_hw *hw = &adapter->hw;
4125        s32 ret_val;
4126        u16 phy_data;
4127
4128        ret_val = hw->phy.ops.acquire(hw);
4129        if (ret_val)
4130                return;
4131
4132        /*
4133         * A page set is expensive so check if already on desired page.
4134         * If not, set to the page with the PHY status registers.
4135         */
4136        hw->phy.addr = 1;
4137        ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4138                                           &phy_data);
4139        if (ret_val)
4140                goto release;
4141        if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4142                ret_val = hw->phy.ops.set_page(hw,
4143                                               HV_STATS_PAGE << IGP_PAGE_SHIFT);
4144                if (ret_val)
4145                        goto release;
4146        }
4147
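            /*
             * Each statistic below is split across an upper/lower register
             * pair.  The upper half is read only to clear it (the value is
             * discarded); the lower 16 bits are read and accumulated.
             */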
4148        /* Single Collision Count */
4149        hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4150        ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4151        if (!ret_val)
4152                adapter->stats.scc += phy_data;
4153
4154        /* Excessive Collision Count */
4155        hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4156        ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4157        if (!ret_val)
4158                adapter->stats.ecol += phy_data;
4159
4160        /* Multiple Collision Count */
4161        hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4162        ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4163        if (!ret_val)
4164                adapter->stats.mcc += phy_data;
4165
4166        /* Late Collision Count */
4167        hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4168        ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4169        if (!ret_val)
4170                adapter->stats.latecol += phy_data;
4171
4172        /* Collision Count - also used for adaptive IFS */
4173        hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4174        ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4175        if (!ret_val)
4176                hw->mac.collision_delta = phy_data;
4177
4178        /* Defer Count */
4179        hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4180        ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4181        if (!ret_val)
4182                adapter->stats.dc += phy_data;
4183
4184        /* Transmit with no CRS */
4185        hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4186        ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4187        if (!ret_val)
4188                adapter->stats.tncrs += phy_data;
4189
4190release:
4191        hw->phy.ops.release(hw);
4192}
4193
4194/**
4195 * e1000e_update_stats - Update the board statistics counters
4196 * @adapter: board private structure
4197 **/
4198static void e1000e_update_stats(struct e1000_adapter *adapter)
4199{
4200        struct net_device *netdev = adapter->netdev;
4201        struct e1000_hw *hw = &adapter->hw;
4202        struct pci_dev *pdev = adapter->pdev;
4203
4204        /*
4205         * Prevent stats updates while the adapter is being reset, or if
4206         * the PCI connection is down.
4207         */
4208        if (adapter->link_speed == 0)
4209                return;
4210        if (pci_channel_offline(pdev))
4211                return;
4212
4213        adapter->stats.crcerrs += er32(CRCERRS);
4214        adapter->stats.gprc += er32(GPRC);
4215        adapter->stats.gorc += er32(GORCL);
4216        er32(GORCH); /* Clear gorc */
4217        adapter->stats.bprc += er32(BPRC);
4218        adapter->stats.mprc += er32(MPRC);
4219        adapter->stats.roc += er32(ROC);
4220
4221        adapter->stats.mpc += er32(MPC);
4222
4223        /* Half-duplex statistics */
4224        if (adapter->link_duplex == HALF_DUPLEX) {
4225                if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4226                        e1000e_update_phy_stats(adapter);
4227                } else {
4228                        adapter->stats.scc += er32(SCC);
4229                        adapter->stats.ecol += er32(ECOL);
4230                        adapter->stats.mcc += er32(MCC);
4231                        adapter->stats.latecol += er32(LATECOL);
4232                        adapter->stats.dc += er32(DC);
4233
4234                        hw->mac.collision_delta = er32(COLC);
4235
4236                        if ((hw->mac.type != e1000_82574) &&
4237                            (hw->mac.type != e1000_82583))
4238                                adapter->stats.tncrs += er32(TNCRS);
4239                }
4240                adapter->stats.colc += hw->mac.collision_delta;
4241        }
4242
4243        adapter->stats.xonrxc += er32(XONRXC);
4244        adapter->stats.xontxc += er32(XONTXC);
4245        adapter->stats.xoffrxc += er32(XOFFRXC);
4246        adapter->stats.xofftxc += er32(XOFFTXC);
4247        adapter->stats.gptc += er32(GPTC);
4248        adapter->stats.gotc += er32(GOTCL);
4249        er32(GOTCH); /* Clear gotc */
4250        adapter->stats.rnbc += er32(RNBC);
4251        adapter->stats.ruc += er32(RUC);
4252
4253        adapter->stats.mptc += er32(MPTC);
4254        adapter->stats.bptc += er32(BPTC);
4255
4256        /* used for adaptive IFS */
4257
4258        hw->mac.tx_packet_delta = er32(TPT);
4259        adapter->stats.tpt += hw->mac.tx_packet_delta;
4260
4261        adapter->stats.algnerrc += er32(ALGNERRC);
4262        adapter->stats.rxerrc += er32(RXERRC);
4263        adapter->stats.cexterr += er32(CEXTERR);
4264        adapter->stats.tsctc += er32(TSCTC);
4265        adapter->stats.tsctfc += er32(TSCTFC);
4266
4267        /* Fill out the OS statistics structure */
4268        netdev->stats.multicast = adapter->stats.mprc;
4269        netdev->stats.collisions = adapter->stats.colc;
4270
4271        /* Rx Errors */
4272
4273        /*
4274         * RLEC on some newer hardware can be incorrect, so build
4275         * our own version based on RUC and ROC
4276         */
4277        netdev->stats.rx_errors = adapter->stats.rxerrc +
4278                adapter->stats.crcerrs + adapter->stats.algnerrc +
4279                adapter->stats.ruc + adapter->stats.roc +
4280                adapter->stats.cexterr;
4281        netdev->stats.rx_length_errors = adapter->stats.ruc +
4282                                              adapter->stats.roc;
4283        netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4284        netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4285        netdev->stats.rx_missed_errors = adapter->stats.mpc;
4286
4287        /* Tx Errors */
4288        netdev->stats.tx_errors = adapter->stats.ecol +
4289                                       adapter->stats.latecol;
4290        netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4291        netdev->stats.tx_window_errors = adapter->stats.latecol;
4292        netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
4293
4294        /* Tx Dropped needs to be maintained elsewhere */
4295
4296        /* Management Stats */
4297        adapter->stats.mgptc += er32(MGTPTC);
4298        adapter->stats.mgprc += er32(MGTPRC);
4299        adapter->stats.mgpdc += er32(MGTPDC);
4300}
4301
4302/**
4303 * e1000_phy_read_status - Update the PHY register status snapshot
4304 * @adapter: board private structure
4305 **/
4306static void e1000_phy_read_status(struct e1000_adapter *adapter)
4307{
4308        struct e1000_hw *hw = &adapter->hw;
4309        struct e1000_phy_regs *phy = &adapter->phy_regs;
4310
4311        if ((er32(STATUS) & E1000_STATUS_LU) &&
4312            (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4313                int ret_val;
4314
4315                ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4316                ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4317                ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
4318                ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
4319                ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
4320                ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
4321                ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
4322                ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
4323                if (ret_val)
4324                        e_warn("Error reading PHY register\n");
4325        } else {
4326                /*
4327                 * Do not read PHY registers if link is not up
4328                 * Set values to typical power-on defaults
4329                 */
4330                phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4331                phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4332                             BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4333                             BMSR_ERCAP);
4334                phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4335                                  ADVERTISE_ALL | ADVERTISE_CSMA);
4336                phy->lpa = 0;
4337                phy->expansion = EXPANSION_ENABLENPAGE;
4338                phy->ctrl1000 = ADVERTISE_1000FULL;
4339                phy->stat1000 = 0;
4340                phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4341        }
4342}
4343
4344static void e1000_print_link_info(struct e1000_adapter *adapter)
4345{
4346        struct e1000_hw *hw = &adapter->hw;
4347        u32 ctrl = er32(CTRL);
4348
4349        /* Link status message must follow this format for user tools */
4350        printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4351                adapter->netdev->name,
4352                adapter->link_speed,
4353                adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4354                (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4355                (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4356                (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
4357}
4358
4359static bool e1000e_has_link(struct e1000_adapter *adapter)
4360{
4361        struct e1000_hw *hw = &adapter->hw;
4362        bool link_active = false;
4363        s32 ret_val = 0;
4364
4365        /*
4366         * get_link_status is set on LSC (link status) interrupt or
4367         * Rx sequence error interrupt.  get_link_status will stay
4368         * true until the check_for_link establishes link
4369         * for copper adapters ONLY
4370         */
4371        switch (hw->phy.media_type) {
4372        case e1000_media_type_copper:
4373                if (hw->mac.get_link_status) {
4374                        ret_val = hw->mac.ops.check_for_link(hw);
4375                        link_active = !hw->mac.get_link_status;
4376                } else {
4377                        link_active = true;
4378                }
4379                break;
4380        case e1000_media_type_fiber:
4381                ret_val = hw->mac.ops.check_for_link(hw);
4382                link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4383                break;
4384        case e1000_media_type_internal_serdes:
4385                ret_val = hw->mac.ops.check_for_link(hw);
4386                link_active = adapter->hw.mac.serdes_has_link;
4387                break;
4388        default:
4389        case e1000_media_type_unknown:
4390                break;
4391        }
4392
4393        if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4394            (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4395                /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4396                e_info("Gigabit has been disabled, downgrading speed\n");
4397        }
4398
4399        return link_active;
4400}
4401
4402static void e1000e_enable_receives(struct e1000_adapter *adapter)
4403{
4404        /* make sure the receive unit is started */
4405        if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4406            (adapter->flags & FLAG_RX_RESTART_NOW)) {
4407                struct e1000_hw *hw = &adapter->hw;
4408                u32 rctl = er32(RCTL);
4409                ew32(RCTL, rctl | E1000_RCTL_EN);
4410                adapter->flags &= ~FLAG_RX_RESTART_NOW;
4411        }
4412}
4413
4414static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4415{
4416        struct e1000_hw *hw = &adapter->hw;
4417
4418        /*
4419         * With 82574 controllers, the PHY needs to be checked periodically
4420         * for a hung state, and reset if two successive checks report a hang
4421         */
4422        if (e1000_check_phy_82574(hw))
4423                adapter->phy_hang_count++;
4424        else
4425                adapter->phy_hang_count = 0;
4426
4427        if (adapter->phy_hang_count > 1) {
4428                adapter->phy_hang_count = 0;
4429                schedule_work(&adapter->reset_task);
4430        }
4431}
4432
4433/**
4434 * e1000_watchdog - Timer Call-back
4435 * @data: pointer to adapter cast into an unsigned long
4436 **/
4437static void e1000_watchdog(unsigned long data)
4438{
4439        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4440
4441        /* Do the rest outside of interrupt context */
4442        schedule_work(&adapter->watchdog_task);
4443
4444        /* TODO: make this use queue_delayed_work() */
4445}
4446
4447static void e1000_watchdog_task(struct work_struct *work)
4448{
4449        struct e1000_adapter *adapter = container_of(work,
4450                                        struct e1000_adapter, watchdog_task);
4451        struct net_device *netdev = adapter->netdev;
4452        struct e1000_mac_info *mac = &adapter->hw.mac;
4453        struct e1000_phy_info *phy = &adapter->hw.phy;
4454        struct e1000_ring *tx_ring = adapter->tx_ring;
4455        struct e1000_hw *hw = &adapter->hw;
4456        u32 link, tctl;
4457
4458        if (test_bit(__E1000_DOWN, &adapter->state))
4459                return;
4460
4461        link = e1000e_has_link(adapter);
4462        if ((netif_carrier_ok(netdev)) && link) {
4463                /* Cancel scheduled suspend requests. */
4464                pm_runtime_resume(netdev->dev.parent);
4465
4466                e1000e_enable_receives(adapter);
4467                goto link_up;
4468        }
4469
4470        if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4471            (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4472                e1000_update_mng_vlan(adapter);
4473
4474        if (link) {
4475                if (!netif_carrier_ok(netdev)) {
4476                        bool txb2b = true;
4477
4478                        /* Cancel scheduled suspend requests. */
4479                        pm_runtime_resume(netdev->dev.parent);
4480
4481                        /* update snapshot of PHY registers on LSC */
4482                        e1000_phy_read_status(adapter);
4483                        mac->ops.get_link_up_info(&adapter->hw,
4484                                                   &adapter->link_speed,
4485                                                   &adapter->link_duplex);
4486                        e1000_print_link_info(adapter);
4487                        /*
4488                         * On supported PHYs, check for duplex mismatch only
4489                         * if link has autonegotiated at 10/100 half
4490                         */
4491                        if ((hw->phy.type == e1000_phy_igp_3 ||
4492                             hw->phy.type == e1000_phy_bm) &&
4493                            hw->mac.autoneg &&
4494                            (adapter->link_speed == SPEED_10 ||
4495                             adapter->link_speed == SPEED_100) &&
4496                            (adapter->link_duplex == HALF_DUPLEX)) {
4497                                u16 autoneg_exp;
4498
4499                                e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
4500
4501                                if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
4502                                        e_info("Autonegotiated half duplex but link partner cannot autoneg.  Try forcing full duplex if link gets many collisions.\n");
4503                        }
4504
4505                        /* adjust timeout factor according to speed/duplex */
4506                        adapter->tx_timeout_factor = 1;
4507                        switch (adapter->link_speed) {
4508                        case SPEED_10:
4509                                txb2b = false;
4510                                adapter->tx_timeout_factor = 16;
4511                                break;
4512                        case SPEED_100:
4513                                txb2b = false;
4514                                adapter->tx_timeout_factor = 10;
4515                                break;
4516                        }
4517
4518                        /*
4519                         * workaround: re-program speed mode bit after
4520                         * link-up event
4521                         */
4522                        if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4523                            !txb2b) {
4524                                u32 tarc0;
4525                                tarc0 = er32(TARC(0));
4526                                tarc0 &= ~SPEED_MODE_BIT;
4527                                ew32(TARC(0), tarc0);
4528                        }
4529
4530                        /*
4531                         * disable TSO for PCIe and 10/100 speeds to avoid
4532                         * some hardware issues
4533                         */
4534                        if (!(adapter->flags & FLAG_TSO_FORCE)) {
4535                                switch (adapter->link_speed) {
4536                                case SPEED_10:
4537                                case SPEED_100:
4538                                        e_info("10/100 speed: disabling TSO\n");
4539                                        netdev->features &= ~NETIF_F_TSO;
4540                                        netdev->features &= ~NETIF_F_TSO6;
4541                                        break;
4542                                case SPEED_1000:
4543                                        netdev->features |= NETIF_F_TSO;
4544                                        netdev->features |= NETIF_F_TSO6;
4545                                        break;
4546                                default:
4547                                        /* oops */
4548                                        break;
4549                                }
4550                        }
4551
4552                        /*
4553                         * enable transmits in the hardware, need to do this
4554                         * after setting TARC(0)
4555                         */
4556                        tctl = er32(TCTL);
4557                        tctl |= E1000_TCTL_EN;
4558                        ew32(TCTL, tctl);
4559
4560                        /*
4561                         * Perform any post-link-up configuration before
4562                         * reporting link up.
4563                         */
4564                        if (phy->ops.cfg_on_link_up)
4565                                phy->ops.cfg_on_link_up(hw);
4566
4567                        netif_carrier_on(netdev);
4568
4569                        if (!test_bit(__E1000_DOWN, &adapter->state))
4570                                mod_timer(&adapter->phy_info_timer,
4571                                          round_jiffies(jiffies + 2 * HZ));
4572                }
4573        } else {
4574                if (netif_carrier_ok(netdev)) {
4575                        adapter->link_speed = 0;
4576                        adapter->link_duplex = 0;
4577                        /* Link status message must follow this format */
4578                        printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
4579                               adapter->netdev->name);
4580                        netif_carrier_off(netdev);
4581                        if (!test_bit(__E1000_DOWN, &adapter->state))
4582                                mod_timer(&adapter->phy_info_timer,
4583                                          round_jiffies(jiffies + 2 * HZ));
4584
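                            /*
                             * With the link down, either reset the adapter
                             * (if the Rx unit needs a restart to recover) or
                             * arm a delayed runtime suspend.
                             */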
4585                        if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4586                                schedule_work(&adapter->reset_task);
4587                        else
4588                                pm_schedule_suspend(netdev->dev.parent,
4589                                                        LINK_TIMEOUT);
4590                }
4591        }
4592
4593link_up:
4594        spin_lock(&adapter->stats64_lock);
4595        e1000e_update_stats(adapter);
4596
4597        mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4598        adapter->tpt_old = adapter->stats.tpt;
4599        mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4600        adapter->colc_old = adapter->stats.colc;
4601
4602        adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4603        adapter->gorc_old = adapter->stats.gorc;
4604        adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4605        adapter->gotc_old = adapter->stats.gotc;
4606        spin_unlock(&adapter->stats64_lock);
4607
4608        e1000e_update_adaptive(&adapter->hw);
4609
4610        if (!netif_carrier_ok(netdev) &&
4611            (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4612                /*
4613                 * We've lost link, so the controller stops DMA,
4614                 * but we've got queued Tx work that's never going
4615                 * to get done, so reset controller to flush Tx.
4616                 * (Do the reset outside of interrupt context).
4617                 */
4618                schedule_work(&adapter->reset_task);
4619                /* return immediately since reset is imminent */
4620                return;
4621        }
4622
4623        /* Simple mode for Interrupt Throttle Rate (ITR) */
4624        if (adapter->itr_setting == 4) {
4625                /*
4626                 * Symmetric Tx/Rx gets a reduced ITR=2000;
4627                 * Total asymmetrical Tx or Rx gets ITR=8000;
4628                 * everyone else is between 2000-8000.
4629                 */
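                    /*
                     * Worked example: with gotc = 30000 and gorc = 10000
                     * bytes in the last interval, goc = 4 and dif = 2, so
                     * itr = 2 * 6000 / 4 + 2000 = 5000, part-way between
                     * the symmetric (2000) and fully asymmetric (8000)
                     * endpoints.
                     */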
4630                u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4631                u32 dif = (adapter->gotc > adapter->gorc ?
4632                            adapter->gotc - adapter->gorc :
4633                            adapter->gorc - adapter->gotc) / 10000;
4634                u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4635
4636                e1000e_write_itr(adapter, itr);
4637        }
4638
4639        /* Cause software interrupt to ensure Rx ring is cleaned */
4640        if (adapter->msix_entries)
4641                ew32(ICS, adapter->rx_ring->ims_val);
4642        else
4643                ew32(ICS, E1000_ICS_RXDMT0);
4644
4645        /* flush pending descriptors to memory before detecting Tx hang */
4646        e1000e_flush_descriptors(adapter);
4647
4648        /* Force detection of hung controller every watchdog period */
4649        adapter->detect_tx_hung = true;
4650
4651        /*
4652         * With 82571 controllers, LAA may be overwritten due to controller
4653         * reset from the other port. Set the appropriate LAA in RAR[0]
4654         */
4655        if (e1000e_get_laa_state_82571(hw))
4656                hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
4657
4658        if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
4659                e1000e_check_82574_phy_workaround(adapter);
4660
4661        /* Reset the timer */
4662        if (!test_bit(__E1000_DOWN, &adapter->state))
4663                mod_timer(&adapter->watchdog_timer,
4664                          round_jiffies(jiffies + 2 * HZ));
4665}
4666
4667#define E1000_TX_FLAGS_CSUM             0x00000001
4668#define E1000_TX_FLAGS_VLAN             0x00000002
4669#define E1000_TX_FLAGS_TSO              0x00000004
4670#define E1000_TX_FLAGS_IPV4             0x00000008
4671#define E1000_TX_FLAGS_NO_FCS           0x00000010
4672#define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
4673#define E1000_TX_FLAGS_VLAN_SHIFT       16
4674
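    /*
     * Set up a TSO context descriptor for the skb if needed.  Returns 1 when
     * a context descriptor was queued, 0 when the skb is not GSO, or a
     * negative errno if un-cloning the header fails.
     */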
4675static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4676{
4677        struct e1000_context_desc *context_desc;
4678        struct e1000_buffer *buffer_info;
4679        unsigned int i;
4680        u32 cmd_length = 0;
4681        u16 ipcse = 0, mss;
4682        u8 ipcss, ipcso, tucss, tucso, hdr_len;
4683
4684        if (!skb_is_gso(skb))
4685                return 0;
4686
4687        if (skb_header_cloned(skb)) {
4688                int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4689
4690                if (err)
4691                        return err;
4692        }
4693
4694        hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4695        mss = skb_shinfo(skb)->gso_size;
4696        if (skb->protocol == htons(ETH_P_IP)) {
4697                struct iphdr *iph = ip_hdr(skb);
4698                iph->tot_len = 0;
4699                iph->check = 0;
4700                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
4701                                                         0, IPPROTO_TCP, 0);
4702                cmd_length = E1000_TXD_CMD_IP;
4703                ipcse = skb_transport_offset(skb) - 1;
4704        } else if (skb_is_gso_v6(skb)) {
4705                ipv6_hdr(skb)->payload_len = 0;
4706                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4707                                                       &ipv6_hdr(skb)->daddr,
4708                                                       0, IPPROTO_TCP, 0);
4709                ipcse = 0;
4710        }
4711        ipcss = skb_network_offset(skb);
4712        ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4713        tucss = skb_transport_offset(skb);
4714        tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4715
4716        cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4717                       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
4718
4719        i = tx_ring->next_to_use;
4720        context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4721        buffer_info = &tx_ring->buffer_info[i];
4722
4723        context_desc->lower_setup.ip_fields.ipcss  = ipcss;
4724        context_desc->lower_setup.ip_fields.ipcso  = ipcso;
4725        context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
4726        context_desc->upper_setup.tcp_fields.tucss = tucss;
4727        context_desc->upper_setup.tcp_fields.tucso = tucso;
4728        context_desc->upper_setup.tcp_fields.tucse = 0;
4729        context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
4730        context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4731        context_desc->cmd_and_length = cpu_to_le32(cmd_length);
4732
4733        buffer_info->time_stamp = jiffies;
4734        buffer_info->next_to_watch = i;
4735
4736        i++;
4737        if (i == tx_ring->count)
4738                i = 0;
4739        tx_ring->next_to_use = i;
4740
4741        return 1;
4742}
4743
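    /*
     * Set up a checksum offload context descriptor for the skb.  Returns true
     * when a context descriptor was queued, false when no offload is needed.
     */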
4744static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
4745{
4746        struct e1000_adapter *adapter = tx_ring->adapter;
4747        struct e1000_context_desc *context_desc;
4748        struct e1000_buffer *buffer_info;
4749        unsigned int i;
4750        u8 css;
4751        u32 cmd_len = E1000_TXD_CMD_DEXT;
4752        __be16 protocol;
4753
4754        if (skb->ip_summed != CHECKSUM_PARTIAL)
4755                return 0;
4756
4757        if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
4758                protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
4759        else
4760                protocol = skb->protocol;
4761
4762        switch (protocol) {
4763        case cpu_to_be16(ETH_P_IP):
4764                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4765                        cmd_len |= E1000_TXD_CMD_TCP;
4766                break;
4767        case cpu_to_be16(ETH_P_IPV6):
4768                /* XXX not handling all IPv6 extension headers */
4769                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4770                        cmd_len |= E1000_TXD_CMD_TCP;
4771                break;
4772        default:
4773                if (unlikely(net_ratelimit()))
4774                        e_warn("checksum_partial proto=%x!\n",
4775                               be16_to_cpu(protocol));
4776                break;
4777        }
4778
4779        css = skb_checksum_start_offset(skb);
4780
4781        i = tx_ring->next_to_use;
4782        buffer_info = &tx_ring->buffer_info[i];
4783        context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4784
4785        context_desc->lower_setup.ip_config = 0;
4786        context_desc->upper_setup.tcp_fields.tucss = css;
4787        context_desc->upper_setup.tcp_fields.tucso =
4788                                css + skb->csum_offset;
4789        context_desc->upper_setup.tcp_fields.tucse = 0;
4790        context_desc->tcp_seg_setup.data = 0;
4791        context_desc->cmd_and_length = cpu_to_le32(cmd_len);
4792
4793        buffer_info->time_stamp = jiffies;
4794        buffer_info->next_to_watch = i;
4795
4796        i++;
4797        if (i == tx_ring->count)
4798                i = 0;
4799        tx_ring->next_to_use = i;
4800
4801        return 1;
4802}
4803
4804static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
4805                        unsigned int first, unsigned int max_per_txd,
4806                        unsigned int nr_frags)
4807{
4808        struct e1000_adapter *adapter = tx_ring->adapter;
4809        struct pci_dev *pdev = adapter->pdev;
4810        struct e1000_buffer *buffer_info;
4811        unsigned int len = skb_headlen(skb);
4812        unsigned int offset = 0, size, count = 0, i;
4813        unsigned int f, bytecount, segs;
4814
4815        i = tx_ring->next_to_use;
4816
4817        while (len) {
4818                buffer_info = &tx_ring->buffer_info[i];
4819                size = min(len, max_per_txd);
4820
4821                buffer_info->length = size;
4822                buffer_info->time_stamp = jiffies;
4823                buffer_info->next_to_watch = i;
4824                buffer_info->dma = dma_map_single(&pdev->dev,
4825                                                  skb->data + offset,
4826                                                  size, DMA_TO_DEVICE);
4827                buffer_info->mapped_as_page = false;
4828                if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4829                        goto dma_error;
4830
4831                len -= size;
4832                offset += size;
4833                count++;
4834
4835                if (len) {
4836                        i++;
4837                        if (i == tx_ring->count)
4838                                i = 0;
4839                }
4840        }
4841
4842        for (f = 0; f < nr_frags; f++) {
4843                const struct skb_frag_struct *frag;
4844
4845                frag = &skb_shinfo(skb)->frags[f];
4846                len = skb_frag_size(frag);
4847                offset = 0;
4848
4849                while (len) {
4850                        i++;
4851                        if (i == tx_ring->count)
4852                                i = 0;
4853
4854                        buffer_info = &tx_ring->buffer_info[i];
4855                        size = min(len, max_per_txd);
4856
4857                        buffer_info->length = size;
4858                        buffer_info->time_stamp = jiffies;
4859                        buffer_info->next_to_watch = i;
4860                        buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
4861                                                offset, size, DMA_TO_DEVICE);
4862                        buffer_info->mapped_as_page = true;
4863                        if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4864                                goto dma_error;
4865
4866                        len -= size;
4867                        offset += size;
4868                        count++;
4869                }
4870        }
4871
4872        segs = skb_shinfo(skb)->gso_segs ? : 1;
4873        /* every segment after the first carries its own copy of the headers */
4874        bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4875
4876        tx_ring->buffer_info[i].skb = skb;
4877        tx_ring->buffer_info[i].segs = segs;
4878        tx_ring->buffer_info[i].bytecount = bytecount;
4879        tx_ring->buffer_info[first].next_to_watch = i;
4880
4881        return count;
4882
4883dma_error:
4884        dev_err(&pdev->dev, "Tx DMA map failed\n");
4885        buffer_info->dma = 0;
4886        if (count)
4887                count--;
4888
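            /* walk the ring backwards, releasing everything mapped so far */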
4889        while (count--) {
4890                if (i == 0)
4891                        i += tx_ring->count;
4892                i--;
4893                buffer_info = &tx_ring->buffer_info[i];
4894                e1000_put_txbuf(tx_ring, buffer_info);
4895        }
4896
4897        return 0;
4898}
4899
4900static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
4901{
4902        struct e1000_adapter *adapter = tx_ring->adapter;
4903        struct e1000_tx_desc *tx_desc = NULL;
4904        struct e1000_buffer *buffer_info;
4905        u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
4906        unsigned int i;
4907
4908        if (tx_flags & E1000_TX_FLAGS_TSO) {
4909                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
4910                             E1000_TXD_CMD_TSE;
4911                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4912
4913                if (tx_flags & E1000_TX_FLAGS_IPV4)
4914                        txd_upper |= E1000_TXD_POPTS_IXSM << 8;
4915        }
4916
4917        if (tx_flags & E1000_TX_FLAGS_CSUM) {
4918                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
4919                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4920        }
4921
4922        if (tx_flags & E1000_TX_FLAGS_VLAN) {
4923                txd_lower |= E1000_TXD_CMD_VLE;
4924                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
4925        }
4926
4927        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
4928                txd_lower &= ~(E1000_TXD_CMD_IFCS);
4929
4930        i = tx_ring->next_to_use;
4931
4932        do {
4933                buffer_info = &tx_ring->buffer_info[i];
4934                tx_desc = E1000_TX_DESC(*tx_ring, i);
4935                tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4936                tx_desc->lower.data =
4937                        cpu_to_le32(txd_lower | buffer_info->length);
4938                tx_desc->upper.data = cpu_to_le32(txd_upper);
4939
4940                i++;
4941                if (i == tx_ring->count)
4942                        i = 0;
4943        } while (--count > 0);
4944
4945        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4946
4947        /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
4948        if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
4949                tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
4950
4951        /*
4952         * Force memory writes to complete before letting h/w
4953         * know there are new descriptors to fetch.  (Only
4954         * applicable for weak-ordered memory model archs,
4955         * such as IA-64).
4956         */
4957        wmb();
4958
4959        tx_ring->next_to_use = i;
4960
4961        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
4962                e1000e_update_tdt_wa(tx_ring, i);
4963        else
4964                writel(i, tx_ring->tail);
4965
4966        /*
4967         * we need this if more than one processor can write to our tail
4968         * at a time; it synchronizes I/O on IA64/Altix systems
4969         */
4970        mmiowb();
4971}
4972
4973#define MINIMUM_DHCP_PACKET_SIZE 282
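    /*
     * When Tx packet filtering is enabled, hand outgoing DHCP frames (UDP,
     * destination port 67) to the management firmware via
     * e1000e_mng_write_dhcp_info().  Only untagged frames or frames tagged
     * with the management VLAN cookie are considered.
     */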
4974static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4975                                    struct sk_buff *skb)
4976{
4977        struct e1000_hw *hw =  &adapter->hw;
4978        u16 length, offset;
4979
4980        if (vlan_tx_tag_present(skb)) {
4981                if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
4982                    (adapter->hw.mng_cookie.status &
4983                        E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
4984                        return 0;
4985        }
4986
4987        if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
4988                return 0;
4989
4990        if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
4991                return 0;
4992
4993        {
4994                const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
4995                struct udphdr *udp;
4996
4997                if (ip->protocol != IPPROTO_UDP)
4998                        return 0;
4999
5000                udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
5001                if (ntohs(udp->dest) != 67)
5002                        return 0;
5003
5004                offset = (u8 *)udp + 8 - skb->data;
5005                length = skb->len - offset;
5006                return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
5007        }
5008
5009        return 0;
5010}
5011
5012static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5013{
5014        struct e1000_adapter *adapter = tx_ring->adapter;
5015
5016        netif_stop_queue(adapter->netdev);
5017        /*
5018         * Herbert's original patch had:
5019         *  smp_mb__after_netif_stop_queue();
5020         * but since that doesn't exist yet, just open code it.
5021         */
5022        smp_mb();
5023
5024        /*
5025         * We need to check again in case another CPU has just
5026         * made room available.
5027         */
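            /*
             * The stop + barrier + recheck sequence pairs with the wake-up
             * check on the Tx completion path, so either we see the
             * descriptors it just freed or it sees the stopped queue.
             */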
5028        if (e1000_desc_unused(tx_ring) < size)
5029                return -EBUSY;
5030
5031        /* A reprieve! */
5032        netif_start_queue(adapter->netdev);
5033        ++adapter->restart_queue;
5034        return 0;
5035}
5036
5037static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5038{
5039        BUG_ON(size > tx_ring->count);
5040
5041        if (e1000_desc_unused(tx_ring) >= size)
5042                return 0;
5043        return __e1000_maybe_stop_tx(tx_ring, size);
5044}
5045
5046static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5047                                    struct net_device *netdev)
5048{
5049        struct e1000_adapter *adapter = netdev_priv(netdev);
5050        struct e1000_ring *tx_ring = adapter->tx_ring;
5051        unsigned int first;
5052        unsigned int tx_flags = 0;
5053        unsigned int len = skb_headlen(skb);
5054        unsigned int nr_frags;
5055        unsigned int mss;
5056        int count = 0;
5057        int tso;
5058        unsigned int f;
5059
5060        if (test_bit(__E1000_DOWN, &adapter->state)) {
5061                dev_kfree_skb_any(skb);
5062                return NETDEV_TX_OK;
5063        }
5064
5065        if (skb->len <= 0) {
5066                dev_kfree_skb_any(skb);
5067                return NETDEV_TX_OK;
5068        }
5069
5070        mss = skb_shinfo(skb)->gso_size;
5071        if (mss) {
5072                u8 hdr_len;
5073
5074                /*
5075                 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
5076                 * points to just header, pull a few bytes of payload from
5077                 * frags into skb->data
5078                 */
5079                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5080                /*
5081                 * we do this workaround for ES2LAN, but it is unnecessary;
5082                 * avoiding it could save a lot of cycles
5083                 */
5084                if (skb->data_len && (hdr_len == len)) {
5085                        unsigned int pull_size;
5086
5087                        pull_size = min_t(unsigned int, 4, skb->data_len);
5088                        if (!__pskb_pull_tail(skb, pull_size)) {
5089                                e_err("__pskb_pull_tail failed.\n");
5090                                dev_kfree_skb_any(skb);
5091                                return NETDEV_TX_OK;
5092                        }
5093                        len = skb_headlen(skb);
5094                }
5095        }
5096
5097        /* reserve a descriptor for the offload context */
5098        if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5099                count++;
5100        count++;
5101
5102        count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5103
5104        nr_frags = skb_shinfo(skb)->nr_frags;
5105        for (f = 0; f < nr_frags; f++)
5106                count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5107                                      adapter->tx_fifo_limit);
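            /*
             * The divisions above reflect that a single descriptor carries at
             * most tx_fifo_limit bytes, so the linear head and each frag may
             * need several descriptors.
             */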
5108
5109        if (adapter->hw.mac.tx_pkt_filtering)
5110                e1000_transfer_dhcp_info(adapter, skb);
5111
5112        /*
5113         * need: count + 2 desc gap to keep tail from touching
5114         * head, otherwise try next time
5115         */
5116        if (e1000_maybe_stop_tx(tx_ring, count + 2))
5117                return NETDEV_TX_BUSY;
5118
5119        if (vlan_tx_tag_present(skb)) {
5120                tx_flags |= E1000_TX_FLAGS_VLAN;
5121                tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5122        }
5123
5124        first = tx_ring->next_to_use;
5125
5126        tso = e1000_tso(tx_ring, skb);
5127        if (tso < 0) {
5128                dev_kfree_skb_any(skb);
5129                return NETDEV_TX_OK;
5130        }
5131
5132        if (tso)
5133                tx_flags |= E1000_TX_FLAGS_TSO;
5134        else if (e1000_tx_csum(tx_ring, skb))
5135                tx_flags |= E1000_TX_FLAGS_CSUM;
5136
5137        /*
5138         * The old method assumed an IPv4 packet by default if TSO was
5139         * enabled.  82571 hardware supports TSO for IPv6 as well, so we
5140         * can no longer assume and must check the protocol.
5141         */
5142        if (skb->protocol == htons(ETH_P_IP))
5143                tx_flags |= E1000_TX_FLAGS_IPV4;
5144
5145        if (unlikely(skb->no_fcs))
5146                tx_flags |= E1000_TX_FLAGS_NO_FCS;
5147
5148        /* if count is 0 then a mapping error has occurred */
5149        count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5150                             nr_frags);
5151        if (count) {
5152                skb_tx_timestamp(skb);
5153
5154                netdev_sent_queue(netdev, skb->len);
5155                e1000_tx_queue(tx_ring, tx_flags, count);
5156                /* Make sure there is space in the ring for the next send. */
5157                e1000_maybe_stop_tx(tx_ring,
5158                                    (MAX_SKB_FRAGS *
5159                                     DIV_ROUND_UP(PAGE_SIZE,
5160                                                  adapter->tx_fifo_limit) + 2));
5161        } else {
5162                dev_kfree_skb_any(skb);
5163                tx_ring->buffer_info[first].time_stamp = 0;
5164                tx_ring->next_to_use = first;
5165        }
5166
5167        return NETDEV_TX_OK;
5168}
5169
5170/**
5171 * e1000_tx_timeout - Respond to a Tx Hang
5172 * @netdev: network interface device structure
5173 **/
5174static void e1000_tx_timeout(struct net_device *netdev)
5175{
5176        struct e1000_adapter *adapter = netdev_priv(netdev);
5177
5178        /* Do the reset outside of interrupt context */
5179        adapter->tx_timeout_count++;
5180        schedule_work(&adapter->reset_task);
5181}
5182
5183static void e1000_reset_task(struct work_struct *work)
5184{
5185        struct e1000_adapter *adapter;
5186        adapter = container_of(work, struct e1000_adapter, reset_task);
5187
5188        /* don't run the task if already down */
5189        if (test_bit(__E1000_DOWN, &adapter->state))
5190                return;
5191
5192        if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
5193              (adapter->flags & FLAG_RX_RESTART_NOW))) {
5194                e1000e_dump(adapter);
5195                e_err("Reset adapter\n");
5196        }
5197        e1000e_reinit_locked(adapter);
5198}
5199
5200/**
5201 * e1000_get_stats64 - Get System Network Statistics
5202 * @netdev: network interface device structure
5203 * @stats: rtnl_link_stats64 pointer
5204 *
5205 * Returns the address of the device statistics structure.
5206 **/
5207struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5208                                             struct rtnl_link_stats64 *stats)
5209{
5210        struct e1000_adapter *adapter = netdev_priv(netdev);
5211
5212        memset(stats, 0, sizeof(struct rtnl_link_stats64));
5213        spin_lock(&adapter->stats64_lock);
5214        e1000e_update_stats(adapter);
5215        /* Fill out the OS statistics structure */
5216        stats->rx_bytes = adapter->stats.gorc;
5217        stats->rx_packets = adapter->stats.gprc;
5218        stats->tx_bytes = adapter->stats.gotc;
5219        stats->tx_packets = adapter->stats.gptc;
5220        stats->multicast = adapter->stats.mprc;
5221        stats->collisions = adapter->stats.colc;
5222
5223        /* Rx Errors */
5224
5225        /*
5226         * RLEC on some newer hardware can be incorrect, so build
5227         * our own version based on RUC and ROC
5228         */
5229        stats->rx_errors = adapter->stats.rxerrc +
5230                adapter->stats.crcerrs + adapter->stats.algnerrc +
5231                adapter->stats.ruc + adapter->stats.roc +
5232                adapter->stats.cexterr;
5233        stats->rx_length_errors = adapter->stats.ruc +
5234                                              adapter->stats.roc;
5235        stats->rx_crc_errors = adapter->stats.crcerrs;
5236        stats->rx_frame_errors = adapter->stats.algnerrc;
5237        stats->rx_missed_errors = adapter->stats.mpc;
5238
5239        /* Tx Errors */
5240        stats->tx_errors = adapter->stats.ecol +
5241                                       adapter->stats.latecol;
5242        stats->tx_aborted_errors = adapter->stats.ecol;
5243        stats->tx_window_errors = adapter->stats.latecol;
5244        stats->tx_carrier_errors = adapter->stats.tncrs;
5245
5246        /* Tx Dropped needs to be maintained elsewhere */
5247
5248        spin_unlock(&adapter->stats64_lock);
5249        return stats;
5250}
5251
5252/**
5253 * e1000_change_mtu - Change the Maximum Transfer Unit
5254 * @netdev: network interface device structure
5255 * @new_mtu: new value for maximum frame size
5256 *
5257 * Returns 0 on success, negative on failure
5258 **/
5259static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5260{
5261        struct e1000_adapter *adapter = netdev_priv(netdev);
5262        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5263
5264        /* Jumbo frame support */
5265        if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5266            !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5267                e_err("Jumbo Frames not supported.\n");
5268                return -EINVAL;
5269        }
5270
5271        /* Supported frame sizes */
5272        if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5273            (max_frame > adapter->max_hw_frame_size)) {
5274                e_err("Unsupported MTU setting\n");
5275                return -EINVAL;
5276        }
5277
5278        /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
5279        if ((adapter->hw.mac.type >= e1000_pch2lan) &&
5280            !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5281            (new_mtu > ETH_DATA_LEN)) {
5282                e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
5283                return -EINVAL;
5284        }
5285
5286        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5287                usleep_range(1000, 2000);
5288        /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
5289        adapter->max_frame_size = max_frame;
5290        e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5291        netdev->mtu = new_mtu;
5292        if (netif_running(netdev))
5293                e1000e_down(adapter);
5294
5295        /*
5296         * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5297         * means we reserve 2 more; this pushes us to allocate from the
5298         * next larger slab size,
5299         * e.g. RXBUFFER_2048 --> size-4096 slab.
5300         * However, with the new *_jumbo_rx* routines, jumbo receives
5301         * will use fragmented skbs.
5302         */
5303
5304        if (max_frame <= 2048)
5305                adapter->rx_buffer_len = 2048;
5306        else
5307                adapter->rx_buffer_len = 4096;
5308
5309        /* adjust allocation if LPE protects us, and we aren't using SBP */
5310        if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5311             (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5312                adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5313                                         + ETH_FCS_LEN;
5314
5315        if (netif_running(netdev))
5316                e1000e_up(adapter);
5317        else
5318                e1000e_reset(adapter);
5319
5320        clear_bit(__E1000_RESETTING, &adapter->state);
5321
5322        return 0;
5323}
5324
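    /*
     * MII ioctl handler: SIOCGMIIREG reports values from the PHY register
     * snapshot (refreshed by e1000_phy_read_status() on each call); register
     * writes via SIOCSMIIREG are not supported.
     */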
5325static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5326                           int cmd)
5327{
5328        struct e1000_adapter *adapter = netdev_priv(netdev);
5329        struct mii_ioctl_data *data = if_mii(ifr);
5330
5331        if (adapter->hw.phy.media_type != e1000_media_type_copper)
5332                return -EOPNOTSUPP;
5333
5334        switch (cmd) {
5335        case SIOCGMIIPHY:
5336                data->phy_id = adapter->hw.phy.addr;
5337                break;
5338        case SIOCGMIIREG:
5339                e1000_phy_read_status(adapter);
5340
5341                switch (data->reg_num & 0x1F) {
5342                case MII_BMCR:
5343                        data->val_out = adapter->phy_regs.bmcr;
5344                        break;
5345                case MII_BMSR:
5346                        data->val_out = adapter->phy_regs.bmsr;
5347                        break;
5348                case MII_PHYSID1:
5349                        data->val_out = (adapter->hw.phy.id >> 16);
5350                        break;
5351                case MII_PHYSID2:
5352                        data->val_out = (adapter->hw.phy.id & 0xFFFF);
5353                        break;
5354                case MII_ADVERTISE:
5355                        data->val_out = adapter->phy_regs.advertise;
5356                        break;
5357                case MII_LPA:
5358                        data->val_out = adapter->phy_regs.lpa;
5359                        break;
5360                case MII_EXPANSION:
5361                        data->val_out = adapter->phy_regs.expansion;
5362                        break;
5363                case MII_CTRL1000:
5364                        data->val_out = adapter->phy_regs.ctrl1000;
5365                        break;
5366                case MII_STAT1000:
5367                        data->val_out = adapter->phy_regs.stat1000;
5368                        break;
5369                case MII_ESTATUS:
5370                        data->val_out = adapter->phy_regs.estatus;
5371                        break;
5372                default:
5373                        return -EIO;
5374                }
5375                break;
5376        case SIOCSMIIREG:
5377        default:
5378                return -EOPNOTSUPP;
5379        }
5380        return 0;
5381}
5382
5383static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5384{
5385        switch (cmd) {
5386        case SIOCGMIIPHY:
5387        case SIOCGMIIREG:
5388        case SIOCSMIIREG:
5389                return e1000_mii_ioctl(netdev, ifr, cmd);
5390        default:
5391                return -EOPNOTSUPP;
5392        }
5393}
5394
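    /*
     * Mirror the MAC's receive address registers, multicast table and
     * relevant Rx control bits into the PHY's wakeup register space, then
     * arm PME in both the MAC (WUC/WUFC) and the PHY (BM_WUC/BM_WUFC) so
     * the PHY itself can recognize wake packets.
     */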
5395static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5396{
5397        struct e1000_hw *hw = &adapter->hw;
5398        u32 i, mac_reg;
5399        u16 phy_reg, wuc_enable;
5400        int retval = 0;
5401
5402        /* copy MAC RARs to PHY RARs */
5403        e1000_copy_rx_addrs_to_phy_ich8lan(hw);
5404
5405        retval = hw->phy.ops.acquire(hw);
5406        if (retval) {
5407                e_err("Could not acquire PHY\n");
5408                return retval;
5409        }
5410
5411        /* Enable access to wakeup registers and set page to BM_WUC_PAGE */
5412        retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5413        if (retval)
5414                goto release;
5415
5416        /* copy MAC MTA to PHY MTA - only needed for pchlan */
5417        for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5418                mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5419                hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5420                                           (u16)(mac_reg & 0xFFFF));
5421                hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5422                                           (u16)((mac_reg >> 16) & 0xFFFF));
5423        }
5424
5425        /* configure PHY Rx Control register */
5426        hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
5427        mac_reg = er32(RCTL);
5428        if (mac_reg & E1000_RCTL_UPE)
5429                phy_reg |= BM_RCTL_UPE;
5430        if (mac_reg & E1000_RCTL_MPE)
5431                phy_reg |= BM_RCTL_MPE;
5432        phy_reg &= ~(BM_RCTL_MO_MASK);
5433        if (mac_reg & E1000_RCTL_MO_3)
5434                phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5435                                << BM_RCTL_MO_SHIFT);
5436        if (mac_reg & E1000_RCTL_BAM)
5437                phy_reg |= BM_RCTL_BAM;
5438        if (mac_reg & E1000_RCTL_PMCF)
5439                phy_reg |= BM_RCTL_PMCF;
5440        mac_reg = er32(CTRL);
5441        if (mac_reg & E1000_CTRL_RFCE)
5442                phy_reg |= BM_RCTL_RFCE;
5443        hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
5444
5445        /* enable PHY wakeup in MAC register */
5446        ew32(WUFC, wufc);
5447        ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5448
5449        /* configure and enable PHY wakeup in PHY registers */
5450        hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
5451        hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5452
5453        /* activate PHY wakeup */
5454        wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5455        retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5456        if (retval)
5457                e_err("Could not set PHY Host Wakeup bit\n");
5458release:
5459        hw->phy.ops.release(hw);
5460
5461        return retval;
5462}
5463
5464static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5465                            bool runtime)
5466{
5467        struct net_device *netdev = pci_get_drvdata(pdev);
5468        struct e1000_adapter *adapter = netdev_priv(netdev);
5469        struct e1000_hw *hw = &adapter->hw;
5470        u32 ctrl, ctrl_ext, rctl, status;
5471        /* Runtime suspend should only enable wakeup for link changes */
5472        u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5473        int retval = 0;
5474
5475        netif_device_detach(netdev);
5476
5477        if (netif_running(netdev)) {
5478                int count = E1000_CHECK_RESET_COUNT;
5479
5480                while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
5481                        usleep_range(10000, 20000);
5482
5483                WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5484                e1000e_down(adapter);
5485                e1000_free_irq(adapter);
5486        }
5487        e1000e_reset_interrupt_capability(adapter);
5488
5489        retval = pci_save_state(pdev);
5490        if (retval)
5491                return retval;
5492
5493        status = er32(STATUS);
5494        if (status & E1000_STATUS_LU)
5495                wufc &= ~E1000_WUFC_LNKC;
5496
5497        if (wufc) {
5498                e1000_setup_rctl(adapter);
5499                e1000e_set_rx_mode(netdev);
5500
5501                /* turn on all-multi mode if wake on multicast is enabled */
5502                if (wufc & E1000_WUFC_MC) {
5503                        rctl = er32(RCTL);
5504                        rctl |= E1000_RCTL_MPE;
5505                        ew32(RCTL, rctl);
5506                }
5507
5508                ctrl = er32(CTRL);
5509                /* advertise wake from D3Cold */
5510                #define E1000_CTRL_ADVD3WUC 0x00100000
5511                /* phy power management enable */
5512                #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5513                ctrl |= E1000_CTRL_ADVD3WUC;
5514                if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5515                        ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
5516                ew32(CTRL, ctrl);
5517
5518                if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5519                    adapter->hw.phy.media_type ==
5520                    e1000_media_type_internal_serdes) {
5521                        /* keep the laser running in D3 */
5522                        ctrl_ext = er32(CTRL_EXT);
5523                        ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
5524                        ew32(CTRL_EXT, ctrl_ext);
5525                }
5526
5527                if (adapter->flags & FLAG_IS_ICH)
5528                        e1000_suspend_workarounds_ich8lan(&adapter->hw);
5529
5530                /* Allow time for pending master requests to run */
5531                e1000e_disable_pcie_master(&adapter->hw);
5532
5533                if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5534                        /* enable wakeup by the PHY */
5535                        retval = e1000_init_phy_wakeup(adapter, wufc);
5536                        if (retval)
5537                                return retval;
5538                } else {
5539                        /* enable wakeup by the MAC */
5540                        ew32(WUFC, wufc);
5541                        ew32(WUC, E1000_WUC_PME_EN);
5542                }
5543        } else {
5544                ew32(WUC, 0);
5545                ew32(WUFC, 0);
5546        }
5547
5548        *enable_wake = !!wufc;
5549
5550        /* make sure adapter isn't asleep if manageability is enabled */
5551        if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
5552            (hw->mac.ops.check_mng_mode(hw)))
5553                *enable_wake = true;
5554
5555        if (adapter->hw.phy.type == e1000_phy_igp_3)
5556                e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5557
5558        /*
5559         * Release control of h/w to f/w.  If f/w is AMT enabled, this
5560         * would have already happened in close and is redundant.
5561         */
5562        e1000e_release_hw_control(adapter);
5563
5564        pci_disable_device(pdev);
5565
5566        return 0;
5567}
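    /*
     * Note: the wufc filter bits used above correspond to the wake flags a
     * user sets with ethtool; e.g. (assuming the standard ethtool WoL
     * letters) "ethtool -s eth0 wol g" requests Magic Packet wake, which
     * the driver's ethtool hooks translate to E1000_WUFC_MAG in
     * adapter->wol.
     */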
5568
5569static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5570{
5571        if (sleep && wake) {
5572                pci_prepare_to_sleep(pdev);
5573                return;
5574        }
5575
5576        pci_wake_from_d3(pdev, wake);
5577        pci_set_power_state(pdev, PCI_D3hot);
5578}
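    /*
     * For reference, pci_prepare_to_sleep() is roughly equivalent to having
     * the PCI core pick the deepest wake-capable target state and then do
     * (sketch of the core's behavior as of this driver's vintage):
     *
     *      pci_enable_wake(pdev, target_state, device_may_wakeup(&pdev->dev));
     *      pci_set_power_state(pdev, target_state);
     */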
5579
5580static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
5581                                    bool wake)
5582{
5583        struct net_device *netdev = pci_get_drvdata(pdev);
5584        struct e1000_adapter *adapter = netdev_priv(netdev);
5585
5586        /*
5587         * The pci-e switch on some quad port adapters will report a
5588         * correctable error when the MAC transitions from D0 to D3.  To
5589         * prevent this we need to mask off the correctable errors on the
5590         * downstream port of the pci-e switch.
5591         */
5592        if (adapter->flags & FLAG_IS_QUAD_PORT) {
5593                struct pci_dev *us_dev = pdev->bus->self;
5594                u16 devctl;
5595
5596                pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
5597                pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
5598                                           (devctl & ~PCI_EXP_DEVCTL_CERE));
5599
5600                e1000_power_off(pdev, sleep, wake);
5601
5602                pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
5603        } else {
5604                e1000_power_off(pdev, sleep, wake);
5605        }
5606}
5607
5608#ifdef CONFIG_PCIEASPM
5609static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5610{
5611        pci_disable_link_state_locked(pdev, state);
5612}
5613#else
5614static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5615{
5616        /*
5617         * Both device and parent should have the same ASPM setting.
5618         * Disable ASPM in downstream component first and then upstream.
5619         */
5620        pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
5621
5622        if (pdev->bus->self)
5623                pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
5624                                           state);
5625}
5626#endif
5627static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5628{
5629        dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5630                 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5631                 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
5632
5633        __e1000e_disable_aspm(pdev, state);
5634}
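    /*
     * ASPM can also be disabled system-wide with the "pcie_aspm=off" kernel
     * parameter; the per-device path above clears only the L0s/L1 states
     * that the errata for the affected parts require.
     */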
5635
5636#ifdef CONFIG_PM
5637static bool e1000e_pm_ready(struct e1000_adapter *adapter)
5638{
5639        return !!adapter->tx_ring->buffer_info;
5640}
5641
5642static int __e1000_resume(struct pci_dev *pdev)
5643{
5644        struct net_device *netdev = pci_get_drvdata(pdev);
5645        struct e1000_adapter *adapter = netdev_priv(netdev);
5646        struct e1000_hw *hw = &adapter->hw;
5647        u16 aspm_disable_flag = 0;
5648        int err;
5649
5650        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5651                aspm_disable_flag = PCIE_LINK_STATE_L0S;
5652        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5653                aspm_disable_flag |= PCIE_LINK_STATE_L1;
5654        if (aspm_disable_flag)
5655                e1000e_disable_aspm(pdev, aspm_disable_flag);
5656
5657        pci_set_power_state(pdev, PCI_D0);
5658        pci_restore_state(pdev);
5659        pci_save_state(pdev);
5660
5661        e1000e_set_interrupt_capability(adapter);
5662        if (netif_running(netdev)) {
5663                err = e1000_request_irq(adapter);
5664                if (err)
5665                        return err;
5666        }
5667
5668        if (hw->mac.type >= e1000_pch2lan)
5669                e1000_resume_workarounds_pchlan(&adapter->hw);
5670
5671        e1000e_power_up_phy(adapter);
5672
5673        /* report the system wakeup cause from S3/S4 */
5674        if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5675                u16 phy_data;
5676
5677                e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
5678                if (phy_data) {
5679                        e_info("PHY Wakeup cause - %s\n",
5680                                phy_data & E1000_WUS_EX ? "Unicast Packet" :
5681                                phy_data & E1000_WUS_MC ? "Multicast Packet" :
5682                                phy_data & E1000_WUS_BC ? "Broadcast Packet" :
5683                                phy_data & E1000_WUS_MAG ? "Magic Packet" :
5684                                phy_data & E1000_WUS_LNKC ?
5685                                "Link Status Change" : "other");
5686                }
5687                e1e_wphy(&adapter->hw, BM_WUS, ~0);
5688        } else {
5689                u32 wus = er32(WUS);
5690                if (wus) {
5691                        e_info("MAC Wakeup cause - %s\n",
5692                                wus & E1000_WUS_EX ? "Unicast Packet" :
5693                                wus & E1000_WUS_MC ? "Multicast Packet" :
5694                                wus & E1000_WUS_BC ? "Broadcast Packet" :
5695                                wus & E1000_WUS_MAG ? "Magic Packet" :
5696                                wus & E1000_WUS_LNKC ? "Link Status Change" :
5697                                "other");
5698                }
5699                ew32(WUS, ~0);
5700        }
5701
5702        e1000e_reset(adapter);
5703
5704        e1000_init_manageability_pt(adapter);
5705
5706        if (netif_running(netdev))
5707                e1000e_up(adapter);
5708
5709        netif_device_attach(netdev);
5710
5711        /*
5712         * If the controller has AMT, do not set DRV_LOAD until the interface
5713         * is up.  For all other cases, let the f/w know that the h/w is now
5714         * under the control of the driver.
5715         */
5716        if (!(adapter->flags & FLAG_HAS_AMT))
5717                e1000e_get_hw_control(adapter);
5718
5719        return 0;
5720}
5721
5722#ifdef CONFIG_PM_SLEEP
5723static int e1000_suspend(struct device *dev)
5724{
5725        struct pci_dev *pdev = to_pci_dev(dev);
5726        int retval;
5727        bool wake;
5728
5729        retval = __e1000_shutdown(pdev, &wake, false);
5730        if (!retval)
5731                e1000_complete_shutdown(pdev, true, wake);
5732
5733        return retval;
5734}
5735
5736static int e1000_resume(struct device *dev)
5737{
5738        struct pci_dev *pdev = to_pci_dev(dev);
5739        struct net_device *netdev = pci_get_drvdata(pdev);
5740        struct e1000_adapter *adapter = netdev_priv(netdev);
5741
5742        if (e1000e_pm_ready(adapter))
5743                adapter->idle_check = true;
5744
5745        return __e1000_resume(pdev);
5746}
5747#endif /* CONFIG_PM_SLEEP */
5748
5749#ifdef CONFIG_PM_RUNTIME
5750static int e1000_runtime_suspend(struct device *dev)
5751{
5752        struct pci_dev *pdev = to_pci_dev(dev);
5753        struct net_device *netdev = pci_get_drvdata(pdev);
5754        struct e1000_adapter *adapter = netdev_priv(netdev);
5755
5756        if (e1000e_pm_ready(adapter)) {
5757                bool wake;
5758
5759                __e1000_shutdown(pdev, &wake, true);
5760        }
5761
5762        return 0;
5763}
5764
5765static int e1000_idle(struct device *dev)
5766{
5767        struct pci_dev *pdev = to_pci_dev(dev);
5768        struct net_device *netdev = pci_get_drvdata(pdev);
5769        struct e1000_adapter *adapter = netdev_priv(netdev);
5770
5771        if (!e1000e_pm_ready(adapter))
5772                return 0;
5773
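            /* idle_check is set on resume so that the first idle callback
             * re-evaluates the link; with no link, a delayed suspend is
             * scheduled.  Returning -EBUSY below keeps the PM core from
             * suspending the device immediately in either case. */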
5774        if (adapter->idle_check) {
5775                adapter->idle_check = false;
5776                if (!e1000e_has_link(adapter))
5777                        pm_schedule_suspend(dev, MSEC_PER_SEC);
5778        }
5779
5780        return -EBUSY;
5781}
5782
5783static int e1000_runtime_resume(struct device *dev)
5784{
5785        struct pci_dev *pdev = to_pci_dev(dev);
5786        struct net_device *netdev = pci_get_drvdata(pdev);
5787        struct e1000_adapter *adapter = netdev_priv(netdev);
5788
5789        if (!e1000e_pm_ready(adapter))
5790                return 0;
5791
5792        adapter->idle_check = !dev->power.runtime_auto;
5793        return __e1000_resume(pdev);
5794}
5795#endif /* CONFIG_PM_RUNTIME */
5796#endif /* CONFIG_PM */
5797
5798static void e1000_shutdown(struct pci_dev *pdev)
5799{
5800        bool wake = false;
5801
5802        __e1000_shutdown(pdev, &wake, false);
5803
5804        if (system_state == SYSTEM_POWER_OFF)
5805                e1000_complete_shutdown(pdev, false, wake);
5806}
5807
5808#ifdef CONFIG_NET_POLL_CONTROLLER
5809
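    /* Netpoll dispatcher for MSI-X mode: invoke each vector's handler with
     * its IRQ masked so the handlers can run safely from netpoll context. */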
5810static irqreturn_t e1000_intr_msix(int irq, void *data)
5811{
5812        struct net_device *netdev = data;
5813        struct e1000_adapter *adapter = netdev_priv(netdev);
5814
5815        if (adapter->msix_entries) {
5816                int vector, msix_irq;
5817
5818                vector = 0;
5819                msix_irq = adapter->msix_entries[vector].vector;
5820                disable_irq(msix_irq);
5821                e1000_intr_msix_rx(msix_irq, netdev);
5822                enable_irq(msix_irq);
5823
5824                vector++;
5825                msix_irq = adapter->msix_entries[vector].vector;
5826                disable_irq(msix_irq);
5827                e1000_intr_msix_tx(msix_irq, netdev);
5828                enable_irq(msix_irq);
5829
5830                vector++;
5831                msix_irq = adapter->msix_entries[vector].vector;
5832                disable_irq(msix_irq);
5833                e1000_msix_other(msix_irq, netdev);
5834                enable_irq(msix_irq);
5835        }
5836
5837        return IRQ_HANDLED;
5838}
5839
5840/*
5841 * Polling 'interrupt' - used by things like netconsole to send skbs
5842 * without having to re-enable interrupts. It's not called while
5843 * the interrupt routine is executing.
5844 */
5845static void e1000_netpoll(struct net_device *netdev)
5846{
5847        struct e1000_adapter *adapter = netdev_priv(netdev);
5848
5849        switch (adapter->int_mode) {
5850        case E1000E_INT_MODE_MSIX:
5851                e1000_intr_msix(adapter->pdev->irq, netdev);
5852                break;
5853        case E1000E_INT_MODE_MSI:
5854                disable_irq(adapter->pdev->irq);
5855                e1000_intr_msi(adapter->pdev->irq, netdev);
5856                enable_irq(adapter->pdev->irq);
5857                break;
5858        default: /* E1000E_INT_MODE_LEGACY */
5859                disable_irq(adapter->pdev->irq);
5860                e1000_intr(adapter->pdev->irq, netdev);
5861                enable_irq(adapter->pdev->irq);
5862                break;
5863        }
5864}
5865#endif
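    /*
     * The netpoll path above is exercised by netconsole, e.g. (illustrative
     * syntax, per Documentation/networking/netconsole.txt):
     *
     *      modprobe netconsole netconsole=@/eth0,6666@10.0.0.2/
     */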
5866
5867/**
5868 * e1000_io_error_detected - called when PCI error is detected
5869 * @pdev: Pointer to PCI device
5870 * @state: The current pci connection state
5871 *
5872 * This function is called after a PCI bus error affecting
5873 * this device has been detected.
5874 */
5875static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5876                                                pci_channel_state_t state)
5877{
5878        struct net_device *netdev = pci_get_drvdata(pdev);
5879        struct e1000_adapter *adapter = netdev_priv(netdev);
5880
5881        netif_device_detach(netdev);
5882
5883        if (state == pci_channel_io_perm_failure)
5884                return PCI_ERS_RESULT_DISCONNECT;
5885
5886        if (netif_running(netdev))
5887                e1000e_down(adapter);
5888        pci_disable_device(pdev);
5889
5890        /* Request a slot reset. */
5891        return PCI_ERS_RESULT_NEED_RESET;
5892}
5893
5894/**
5895 * e1000_io_slot_reset - called after the pci bus has been reset.
5896 * @pdev: Pointer to PCI device
5897 *
5898 * Restart the card from scratch, as if from a cold-boot. Implementation
5899 * resembles the first-half of the e1000_resume routine.
5900 */
5901static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5902{
5903        struct net_device *netdev = pci_get_drvdata(pdev);
5904        struct e1000_adapter *adapter = netdev_priv(netdev);
5905        struct e1000_hw *hw = &adapter->hw;
5906        u16 aspm_disable_flag = 0;
5907        int err;
5908        pci_ers_result_t result;
5909
5910        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5911                aspm_disable_flag = PCIE_LINK_STATE_L0S;
5912        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5913                aspm_disable_flag |= PCIE_LINK_STATE_L1;
5914        if (aspm_disable_flag)
5915                e1000e_disable_aspm(pdev, aspm_disable_flag);
5916
5917        err = pci_enable_device_mem(pdev);
5918        if (err) {
5919                dev_err(&pdev->dev,
5920                        "Cannot re-enable PCI device after reset.\n");
5921                result = PCI_ERS_RESULT_DISCONNECT;
5922        } else {
5923                pci_set_master(pdev);
5924                pdev->state_saved = true;
5925                pci_restore_state(pdev);
5926
5927                pci_enable_wake(pdev, PCI_D3hot, 0);
5928                pci_enable_wake(pdev, PCI_D3cold, 0);
5929
5930                e1000e_reset(adapter);
5931                ew32(WUS, ~0);
5932                result = PCI_ERS_RESULT_RECOVERED;
5933        }
5934
5935        pci_cleanup_aer_uncorrect_error_status(pdev);
5936
5937        return result;
5938}
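    /*
     * These handlers can be exercised without real hardware faults via the
     * PCIe AER injection facility (CONFIG_PCIEAER_INJECT plus the userspace
     * aer-inject tool); see Documentation/PCI/pcieaer-howto.txt.
     */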
5939
5940/**
5941 * e1000_io_resume - called when traffic can start flowing again.
5942 * @pdev: Pointer to PCI device
5943 *
5944 * This callback is called when the error recovery driver tells us that
5945 * it's OK to resume normal operation. Implementation resembles the
5946 * second-half of the e1000_resume routine.
5947 */
5948static void e1000_io_resume(struct pci_dev *pdev)
5949{
5950        struct net_device *netdev = pci_get_drvdata(pdev);
5951        struct e1000_adapter *adapter = netdev_priv(netdev);
5952
5953        e1000_init_manageability_pt(adapter);
5954
5955        if (netif_running(netdev)) {
5956                if (e1000e_up(adapter)) {
5957                        dev_err(&pdev->dev,
5958                                "can't bring device back up after reset\n");
5959                        return;
5960                }
5961        }
5962
5963        netif_device_attach(netdev);
5964
5965        /*
5966         * If the controller has AMT, do not set DRV_LOAD until the interface
5967         * is up.  For all other cases, let the f/w know that the h/w is now
5968         * under the control of the driver.
5969         */
5970        if (!(adapter->flags & FLAG_HAS_AMT))
5971                e1000e_get_hw_control(adapter);
5972
5973}
5974
5975static void e1000_print_device_info(struct e1000_adapter *adapter)
5976{
5977        struct e1000_hw *hw = &adapter->hw;
5978        struct net_device *netdev = adapter->netdev;
5979        u32 ret_val;
5980        u8 pba_str[E1000_PBANUM_LENGTH];
5981
5982        /* print bus type/speed/width info */
5983        e_info("(PCI Express:2.5GT/s:%s) %pM\n",
5984               /* bus width */
5985               ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5986                "Width x1"),
5987               /* MAC address */
5988               netdev->dev_addr);
5989        e_info("Intel(R) PRO/%s Network Connection\n",
5990               (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5991        ret_val = e1000_read_pba_string_generic(hw, pba_str,
5992                                                E1000_PBANUM_LENGTH);
5993        if (ret_val)
5994                strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
5995        e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5996               hw->mac.type, hw->phy.type, pba_str);
5997}
5998
5999static void e1000_eeprom_checks(struct e1000_adapter *adapter)
6000{
6001        struct e1000_hw *hw = &adapter->hw;
6002        int ret_val;
6003        u16 buf = 0;
6004
6005        if (hw->mac.type != e1000_82573)
6006                return;
6007
6008        ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
6009        le16_to_cpus(&buf);
6010        if (!ret_val && (!(buf & (1 << 0)))) {
6011                /* Deep Smart Power Down (DSPD) */
6012                dev_warn(&adapter->pdev->dev,
6013                         "Warning: detected DSPD enabled in EEPROM\n");
6014        }
6015}
6016
6017static int e1000_set_features(struct net_device *netdev,
6018                              netdev_features_t features)
6019{
6020        struct e1000_adapter *adapter = netdev_priv(netdev);
6021        netdev_features_t changed = features ^ netdev->features;
6022
6023        if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
6024                adapter->flags |= FLAG_TSO_FORCE;
6025
6026        if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
6027                         NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
6028                         NETIF_F_RXALL)))
6029                return 0;
6030
6031        if (changed & NETIF_F_RXFCS) {
6032                if (features & NETIF_F_RXFCS) {
6033                        adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6034                } else {
6035                        /* We need to take it back to defaults, which might mean
6036                         * stripping is still disabled at the adapter level.
6037                         */
6038                        if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
6039                                adapter->flags2 |= FLAG2_CRC_STRIPPING;
6040                        else
6041                                adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
6042                }
6043        }
6044
6045        netdev->features = features;
6046
6047        if (netif_running(netdev))
6048                e1000e_reinit_locked(adapter);
6049        else
6050                e1000e_reset(adapter);
6051
6052        return 0;
6053}
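    /*
     * These feature toggles are driven from userspace via ethtool; e.g.
     * (assuming the standard feature strings) "ethtool -K eth0 rx-fcs on"
     * flips NETIF_F_RXFCS and lands here through ndo_set_features.
     */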
6054
6055static const struct net_device_ops e1000e_netdev_ops = {
6056        .ndo_open               = e1000_open,
6057        .ndo_stop               = e1000_close,
6058        .ndo_start_xmit         = e1000_xmit_frame,
6059        .ndo_get_stats64        = e1000e_get_stats64,
6060        .ndo_set_rx_mode        = e1000e_set_rx_mode,
6061        .ndo_set_mac_address    = e1000_set_mac,
6062        .ndo_change_mtu         = e1000_change_mtu,
6063        .ndo_do_ioctl           = e1000_ioctl,
6064        .ndo_tx_timeout         = e1000_tx_timeout,
6065        .ndo_validate_addr      = eth_validate_addr,
6066
6067        .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
6068        .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
6069#ifdef CONFIG_NET_POLL_CONTROLLER
6070        .ndo_poll_controller    = e1000_netpoll,
6071#endif
6072        .ndo_set_features = e1000_set_features,
6073};
6074
6075/**
6076 * e1000_probe - Device Initialization Routine
6077 * @pdev: PCI device information struct
6078 * @ent: entry in e1000_pci_tbl
6079 *
6080 * Returns 0 on success, negative on failure
6081 *
6082 * e1000_probe initializes an adapter identified by a pci_dev structure.
6083 * The OS initialization, configuration of the adapter private structure,
6084 * and a hardware reset occur.
6085 **/
6086static int __devinit e1000_probe(struct pci_dev *pdev,
6087                                 const struct pci_device_id *ent)
6088{
6089        struct net_device *netdev;
6090        struct e1000_adapter *adapter;
6091        struct e1000_hw *hw;
6092        const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
6093        resource_size_t mmio_start, mmio_len;
6094        resource_size_t flash_start, flash_len;
6095        static int cards_found;
6096        u16 aspm_disable_flag = 0;
6097        int i, err, pci_using_dac;
6098        u16 eeprom_data = 0;
6099        u16 eeprom_apme_mask = E1000_EEPROM_APME;
6100
6101        if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
6102                aspm_disable_flag = PCIE_LINK_STATE_L0S;
6103        if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
6104                aspm_disable_flag |= PCIE_LINK_STATE_L1;
6105        if (aspm_disable_flag)
6106                e1000e_disable_aspm(pdev, aspm_disable_flag);
6107
6108        err = pci_enable_device_mem(pdev);
6109        if (err)
6110                return err;
6111
6112        pci_using_dac = 0;
6113        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6114        if (!err) {
6115                err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
6116                if (!err)
6117                        pci_using_dac = 1;
6118        } else {
6119                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6120                if (err) {
6121                        err = dma_set_coherent_mask(&pdev->dev,
6122                                                    DMA_BIT_MASK(32));
6123                        if (err) {
6124                                dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
6125                                goto err_dma;
6126                        }
6127                }
6128        }
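            /* Later kernels provide dma_set_mask_and_coherent(), which sets
             * both masks in one call and simplifies this 64-then-32-bit
             * fallback. */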
6129
6130        err = pci_request_selected_regions_exclusive(pdev,
6131                                          pci_select_bars(pdev, IORESOURCE_MEM),
6132                                          e1000e_driver_name);
6133        if (err)
6134                goto err_pci_reg;
6135
6136        /* AER (Advanced Error Reporting) hooks */
6137        pci_enable_pcie_error_reporting(pdev);
6138
6139        pci_set_master(pdev);
6140        /* PCI config space info */
6141        err = pci_save_state(pdev);
6142        if (err)
6143                goto err_alloc_etherdev;
6144
6145        err = -ENOMEM;
6146        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
6147        if (!netdev)
6148                goto err_alloc_etherdev;
6149
6150        SET_NETDEV_DEV(netdev, &pdev->dev);
6151
6152        netdev->irq = pdev->irq;
6153
6154        pci_set_drvdata(pdev, netdev);
6155        adapter = netdev_priv(netdev);
6156        hw = &adapter->hw;
6157        adapter->netdev = netdev;
6158        adapter->pdev = pdev;
6159        adapter->ei = ei;
6160        adapter->pba = ei->pba;
6161        adapter->flags = ei->flags;
6162        adapter->flags2 = ei->flags2;
6163        adapter->hw.adapter = adapter;
6164        adapter->hw.mac.type = ei->mac;
6165        adapter->max_hw_frame_size = ei->max_hw_frame_size;
6166        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6167
6168        mmio_start = pci_resource_start(pdev, 0);
6169        mmio_len = pci_resource_len(pdev, 0);
6170
6171        err = -EIO;
6172        adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
6173        if (!adapter->hw.hw_addr)
6174                goto err_ioremap;
6175
6176        if ((adapter->flags & FLAG_HAS_FLASH) &&
6177            (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6178                flash_start = pci_resource_start(pdev, 1);
6179                flash_len = pci_resource_len(pdev, 1);
6180                adapter->hw.flash_address = ioremap(flash_start, flash_len);
6181                if (!adapter->hw.flash_address)
6182                        goto err_flashmap;
6183        }
6184
6185        /* construct the net_device struct */
6186        netdev->netdev_ops              = &e1000e_netdev_ops;
6187        e1000e_set_ethtool_ops(netdev);
6188        netdev->watchdog_timeo          = 5 * HZ;
6189        netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6190        strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6191
6192        netdev->mem_start = mmio_start;
6193        netdev->mem_end = mmio_start + mmio_len;
6194
6195        adapter->bd_number = cards_found++;
6196
6197        e1000e_check_options(adapter);
6198
6199        /* setup adapter struct */
6200        err = e1000_sw_init(adapter);
6201        if (err)
6202                goto err_sw_init;
6203
6204        memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6205        memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6206        memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6207
6208        err = ei->get_variants(adapter);
6209        if (err)
6210                goto err_hw_init;
6211
6212        if ((adapter->flags & FLAG_IS_ICH) &&
6213            (adapter->flags & FLAG_READ_ONLY_NVM))
6214                e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6215
6216        hw->mac.ops.get_bus_info(&adapter->hw);
6217
6218        adapter->hw.phy.autoneg_wait_to_complete = 0;
6219
6220        /* Copper options */
6221        if (adapter->hw.phy.media_type == e1000_media_type_copper) {
6222                adapter->hw.phy.mdix = AUTO_ALL_MODES;
6223                adapter->hw.phy.disable_polarity_correction = 0;
6224                adapter->hw.phy.ms_type = e1000_ms_hw_default;
6225        }
6226
6227        if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6228                dev_info(&pdev->dev,
6229                         "PHY reset is blocked due to SOL/IDER session.\n");
6230
6231        /* Set initial default active device features */
6232        netdev->features = (NETIF_F_SG |
6233                            NETIF_F_HW_VLAN_RX |
6234                            NETIF_F_HW_VLAN_TX |
6235                            NETIF_F_TSO |
6236                            NETIF_F_TSO6 |
6237                            NETIF_F_RXHASH |
6238                            NETIF_F_RXCSUM |
6239                            NETIF_F_HW_CSUM);
6240
6241        /* Set user-changeable features (subset of all device features) */
6242        netdev->hw_features = netdev->features;
6243        netdev->hw_features |= NETIF_F_RXFCS;
6244        netdev->priv_flags |= IFF_SUPP_NOFCS;
6245        netdev->hw_features |= NETIF_F_RXALL;
6246
6247        if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
6248                netdev->features |= NETIF_F_HW_VLAN_FILTER;
6249
6250        netdev->vlan_features |= (NETIF_F_SG |
6251                                  NETIF_F_TSO |
6252                                  NETIF_F_TSO6 |
6253                                  NETIF_F_HW_CSUM);
6254
6255        netdev->priv_flags |= IFF_UNICAST_FLT;
6256
6257        if (pci_using_dac) {
6258                netdev->features |= NETIF_F_HIGHDMA;
6259                netdev->vlan_features |= NETIF_F_HIGHDMA;
6260        }
6261
6262        if (e1000e_enable_mng_pass_thru(&adapter->hw))
6263                adapter->flags |= FLAG_MNG_PT_ENABLED;
6264
6265        /*
6266         * before reading the NVM, reset the controller to
6267         * put the device in a known good starting state
6268         */
6269        adapter->hw.mac.ops.reset_hw(&adapter->hw);
6270
6271        /*
6272         * systems with ASPM and others may see the checksum fail on the first
6273         * attempt. Let's give it a few tries
6274         */
6275        for (i = 0;; i++) {
6276                if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6277                        break;
6278                if (i == 2) {
6279                        dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6280                        err = -EIO;
6281                        goto err_eeprom;
6282                }
6283        }
6284
6285        e1000_eeprom_checks(adapter);
6286
6287        /* copy the MAC address */
6288        if (e1000e_read_mac_addr(&adapter->hw))
6289                dev_err(&pdev->dev,
6290                        "NVM Read Error while reading MAC address\n");
6291
6292        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6293        memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
6294
6295        if (!is_valid_ether_addr(netdev->perm_addr)) {
6296                dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
6297                        netdev->perm_addr);
6298                err = -EIO;
6299                goto err_eeprom;
6300        }
6301
6302        init_timer(&adapter->watchdog_timer);
6303        adapter->watchdog_timer.function = e1000_watchdog;
6304        adapter->watchdog_timer.data = (unsigned long) adapter;
6305
6306        init_timer(&adapter->phy_info_timer);
6307        adapter->phy_info_timer.function = e1000_update_phy_info;
6308        adapter->phy_info_timer.data = (unsigned long) adapter;
6309
6310        INIT_WORK(&adapter->reset_task, e1000_reset_task);
6311        INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
6312        INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6313        INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
6314        INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
6315
6316        /* Initialize link parameters. User can change them with ethtool */
6317        adapter->hw.mac.autoneg = 1;
6318        adapter->fc_autoneg = true;
6319        adapter->hw.fc.requested_mode = e1000_fc_default;
6320        adapter->hw.fc.current_mode = e1000_fc_default;
6321        adapter->hw.phy.autoneg_advertised = 0x2f;
6322
6323        /* ring size defaults */
6324        adapter->rx_ring->count = E1000_DEFAULT_RXD;
6325        adapter->tx_ring->count = E1000_DEFAULT_TXD;
6326
6327        /*
6328         * Initial Wake on LAN setting - If APM wake is enabled in
6329         * the EEPROM, enable the ACPI Magic Packet filter
6330         */
6331        if (adapter->flags & FLAG_APME_IN_WUC) {
6332                /* APME bit in EEPROM is mapped to WUC.APME */
6333                eeprom_data = er32(WUC);
6334                eeprom_apme_mask = E1000_WUC_APME;
6335                if ((hw->mac.type > e1000_ich10lan) &&
6336                    (eeprom_data & E1000_WUC_PHY_WAKE))
6337                        adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
6338        } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6339                if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6340                    (adapter->hw.bus.func == 1))
6341                        e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
6342                                       1, &eeprom_data);
6343                else
6344                        e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
6345                                       1, &eeprom_data);
6346        }
6347
6348        /* fetch WoL from EEPROM */
6349        if (eeprom_data & eeprom_apme_mask)
6350                adapter->eeprom_wol |= E1000_WUFC_MAG;
6351
6352        /*
6353         * now that we have the eeprom settings, apply the special cases
6354         * where the eeprom may be wrong or the board simply won't support
6355         * wake on lan on a particular port
6356         */
6357        if (!(adapter->flags & FLAG_HAS_WOL))
6358                adapter->eeprom_wol = 0;
6359
6360        /* initialize the wol settings based on the eeprom settings */
6361        adapter->wol = adapter->eeprom_wol;
6362        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
6363
6364        /* save off EEPROM version number */
6365        e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
6366
6367        /* reset the hardware with the new settings */
6368        e1000e_reset(adapter);
6369
6370        /*
6371         * If the controller has AMT, do not set DRV_LOAD until the interface
6372         * is up.  For all other cases, let the f/w know that the h/w is now
6373         * under the control of the driver.
6374         */
6375        if (!(adapter->flags & FLAG_HAS_AMT))
6376                e1000e_get_hw_control(adapter);
6377
6378        strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
6379        err = register_netdev(netdev);
6380        if (err)
6381                goto err_register;
6382
6383        /* carrier off reporting is important to ethtool even BEFORE open */
6384        netif_carrier_off(netdev);
6385
6386        e1000_print_device_info(adapter);
6387
6388        if (pci_dev_run_wake(pdev))
6389                pm_runtime_put_noidle(&pdev->dev);
6390
6391        return 0;
6392
6393err_register:
6394        if (!(adapter->flags & FLAG_HAS_AMT))
6395                e1000e_release_hw_control(adapter);
6396err_eeprom:
6397        if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
6398                e1000_phy_hw_reset(&adapter->hw);
6399err_hw_init:
6400        kfree(adapter->tx_ring);
6401        kfree(adapter->rx_ring);
6402err_sw_init:
6403        if (adapter->hw.flash_address)
6404                iounmap(adapter->hw.flash_address);
6405        e1000e_reset_interrupt_capability(adapter);
6406err_flashmap:
6407        iounmap(adapter->hw.hw_addr);
6408err_ioremap:
6409        free_netdev(netdev);
6410err_alloc_etherdev:
6411        pci_release_selected_regions(pdev,
6412                                     pci_select_bars(pdev, IORESOURCE_MEM));
6413err_pci_reg:
6414err_dma:
6415        pci_disable_device(pdev);
6416        return err;
6417}
6418
6419/**
6420 * e1000_remove - Device Removal Routine
6421 * @pdev: PCI device information struct
6422 *
6423 * e1000_remove is called by the PCI subsystem to alert the driver
6424 * that it should release a PCI device.  This could be caused by a
6425 * Hot-Plug event, or because the driver is going to be removed from
6426 * memory.
6427 **/
6428static void __devexit e1000_remove(struct pci_dev *pdev)
6429{
6430        struct net_device *netdev = pci_get_drvdata(pdev);
6431        struct e1000_adapter *adapter = netdev_priv(netdev);
6432        bool down = test_bit(__E1000_DOWN, &adapter->state);
6433
6434        /*
6435         * The timers may be rescheduled, so explicitly disable them
6436         * from being rescheduled.
6437         */
6438        if (!down)
6439                set_bit(__E1000_DOWN, &adapter->state);
6440        del_timer_sync(&adapter->watchdog_timer);
6441        del_timer_sync(&adapter->phy_info_timer);
6442
6443        cancel_work_sync(&adapter->reset_task);
6444        cancel_work_sync(&adapter->watchdog_task);
6445        cancel_work_sync(&adapter->downshift_task);
6446        cancel_work_sync(&adapter->update_phy_task);
6447        cancel_work_sync(&adapter->print_hang_task);
6448
6449        if (!(netdev->flags & IFF_UP))
6450                e1000_power_down_phy(adapter);
6451
6452        /* Don't lie to e1000_close() down the road. */
6453        if (!down)
6454                clear_bit(__E1000_DOWN, &adapter->state);
6455        unregister_netdev(netdev);
6456
6457        if (pci_dev_run_wake(pdev))
6458                pm_runtime_get_noresume(&pdev->dev);
6459
6460        /*
6461         * Release control of h/w to f/w.  If f/w is AMT enabled, this
6462         * would have already happened in close and is redundant.
6463         */
6464        e1000e_release_hw_control(adapter);
6465
6466        e1000e_reset_interrupt_capability(adapter);
6467        kfree(adapter->tx_ring);
6468        kfree(adapter->rx_ring);
6469
6470        iounmap(adapter->hw.hw_addr);
6471        if (adapter->hw.flash_address)
6472                iounmap(adapter->hw.flash_address);
6473        pci_release_selected_regions(pdev,
6474                                     pci_select_bars(pdev, IORESOURCE_MEM));
6475
6476        free_netdev(netdev);
6477
6478        /* AER disable */
6479        pci_disable_pcie_error_reporting(pdev);
6480
6481        pci_disable_device(pdev);
6482}
6483
6484/* PCI Error Recovery (ERS) */
6485static const struct pci_error_handlers e1000_err_handler = {
6486        .error_detected = e1000_io_error_detected,
6487        .slot_reset = e1000_io_slot_reset,
6488        .resume = e1000_io_resume,
6489};
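    /*
     * Recovery flow for the handlers above (see
     * Documentation/PCI/pci-error-recovery.txt): the PCI core calls
     * error_detected on all affected drivers first, performs a slot reset
     * if any driver requested one, then calls slot_reset, and finally
     * resume once traffic may flow again.
     */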
6490
6491static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6492        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
6493        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
6494        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
6495        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
6496        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
6497        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
6498        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
6499        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
6500        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
6501
6502        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
6503        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
6504        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
6505        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
6506
6507        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
6508        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
6509        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
6510
6511        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
6512        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
6513        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
6514
6515        { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
6516          board_80003es2lan },
6517        { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
6518          board_80003es2lan },
6519        { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
6520          board_80003es2lan },
6521        { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
6522          board_80003es2lan },
6523
6524        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
6525        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
6526        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
6527        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
6528        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
6529        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
6530        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
6531        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
6532
6533        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
6534        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
6535        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
6536        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
6537        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
6538        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
6539        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
6540        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
6541        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
6542
6543        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
6544        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
6545        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
6546
6547        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
6548        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
6549        { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
6550
6551        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
6552        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
6553        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
6554        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
6555
6556        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6557        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6558
6559        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
6560        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
6561        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
6562        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
6563
6564        { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
6565};
6566MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
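    /*
     * MODULE_DEVICE_TABLE() exports the IDs above as module aliases so udev
     * can autoload the driver on device discovery; they can be listed with,
     * e.g., "modinfo e1000e | grep alias".
     */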
6567
6568#ifdef CONFIG_PM
6569static const struct dev_pm_ops e1000_pm_ops = {
6570        SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6571        SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
6572                                e1000_runtime_resume, e1000_idle)
6573};
6574#endif
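    /*
     * The runtime PM callbacks above take effect once userspace enables
     * runtime PM for the device, e.g. (device address is an example):
     *
     *      echo auto > /sys/bus/pci/devices/0000:00:19.0/power/control
     */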
6575
6576/* PCI Device API Driver */
6577static struct pci_driver e1000_driver = {
6578        .name     = e1000e_driver_name,
6579        .id_table = e1000_pci_tbl,
6580        .probe    = e1000_probe,
6581        .remove   = __devexit_p(e1000_remove),
6582#ifdef CONFIG_PM
6583        .driver   = {
6584                .pm = &e1000_pm_ops,
6585        },
6586#endif
6587        .shutdown = e1000_shutdown,
6588        .err_handler = &e1000_err_handler
6589};
6590
6591/**
6592 * e1000_init_module - Driver Registration Routine
6593 *
6594 * e1000_init_module is the first routine called when the driver is
6595 * loaded. All it does is register with the PCI subsystem.
6596 **/
6597static int __init e1000_init_module(void)
6598{
6599        int ret;
6600        pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
6601                e1000e_driver_version);
6602        pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
6603        ret = pci_register_driver(&e1000_driver);
6604
6605        return ret;
6606}
6607module_init(e1000_init_module);
6608
6609/**
6610 * e1000_exit_module - Driver Exit Cleanup Routine
6611 *
6612 * e1000_exit_module is called just before the driver is removed
6613 * from memory.
6614 **/
6615static void __exit e1000_exit_module(void)
6616{
6617        pci_unregister_driver(&e1000_driver);
6618}
6619module_exit(e1000_exit_module);
6620
6621
6622MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
6623MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
6624MODULE_LICENSE("GPL");
6625MODULE_VERSION(DRV_VERSION);
6626
6627/* netdev.c */
6628