linux/drivers/net/ethernet/realtek/8139cp.c
/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
        Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

        Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
        Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
        Copyright 2001 Manfred Spraul                               [natsemi.c]
        Copyright 1999-2001 by Donald Becker.                       [natsemi.c]
        Written 1997-2001 by Donald Becker.                         [8139too.c]
        Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        See the file COPYING in this distribution for more information.

        Contributors:

                Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
                PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
                LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

        TODO:
        * Test Tx checksumming thoroughly

        Low priority TODO:
        * Complete reset on PciErr
        * Consider Rx interrupt mitigation using TimerIntr
        * Investigate using skb->priority with h/w VLAN priority
        * Investigate using High Priority Tx Queue with skb->priority
        * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
        * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
        * Implement Tx software interrupt mitigation via
          Tx descriptor bit
        * The real minimum of CP_MIN_MTU is 4 bytes.  However,
          for this to be supported, one must(?) turn on packet padding.
        * Support external MII transceivers (patch available)

        NOTES:
        * TX checksumming is considered experimental.  It is off by
          default; use ethtool to turn it on.

 */
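
/* To enable the experimental Tx checksum offload from userspace, run e.g.
 * "ethtool -K eth0 tx on" (assuming the interface is named eth0).
 */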

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME                "8139cp"
#define DRV_VERSION             "1.3"
#define DRV_RELDATE             "Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define CP_DEF_MSG_ENABLE       (NETIF_MSG_DRV          | \
                                 NETIF_MSG_PROBE        | \
                                 NETIF_MSG_LINK)
#define CP_NUM_STATS            14      /* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE           64      /* size in bytes of DMA stats block */
#define CP_REGS_SIZE            (0xff + 1)
#define CP_REGS_VER             1               /* version 1 */
#define CP_RX_RING_SIZE         64
#define CP_TX_RING_SIZE         64
#define CP_RING_BYTES           \
                ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +   \
                 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +   \
                 CP_STATS_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)              (((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)                                      \
        (((CP)->tx_tail <= (CP)->tx_head) ?                     \
          (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :       \
          (CP)->tx_tail - (CP)->tx_head - 1)
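
/* Example (hypothetical index values): with CP_TX_RING_SIZE = 64,
 * tx_tail = 2 and tx_head = 5, TX_BUFFS_AVAIL() yields 2 + 63 - 5 = 60
 * free slots.  NEXT_TX()/NEXT_RX() wrap with a cheap AND because the ring
 * sizes are powers of two, and one slot is always left unused so that a
 * full ring can be told apart from an empty one.
 */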

#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/
#define CP_INTERNAL_PHY         32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH          5       /* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST            4       /* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST            6       /* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH         256     /* Early Tx threshold, in bytes */
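
/* Worked example of the log_2(bytes)-4 encoding above: a field value n
 * selects 16 << n bytes, so RX_FIFO_THRESH = 5 means 512 bytes of Rx FIFO
 * fill before the first PCI transfer begins, RX_DMA_BURST = 4 means
 * 256-byte bursts, and TX_DMA_BURST = 6 means 1024-byte bursts.
 */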

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU              60      /* TODO: allow lower, but pad */
#define CP_MAX_MTU              4096

enum {
        /* NIC register offsets */
        MAC0            = 0x00, /* Ethernet hardware address. */
        MAR0            = 0x08, /* Multicast filter. */
        StatsAddr       = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
        TxRingAddr      = 0x20, /* 64-bit start addr of Tx ring */
        HiTxRingAddr    = 0x28, /* 64-bit start addr of high priority Tx ring */
        Cmd             = 0x37, /* Command register */
        IntrMask        = 0x3C, /* Interrupt mask */
        IntrStatus      = 0x3E, /* Interrupt status */
        TxConfig        = 0x40, /* Tx configuration */
        ChipVersion     = 0x43, /* 8-bit chip version, inside TxConfig */
        RxConfig        = 0x44, /* Rx configuration */
        RxMissed        = 0x4C, /* 24 bits valid, write clears */
        Cfg9346         = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
        Config1         = 0x52, /* Config1 */
        Config3         = 0x59, /* Config3 */
        Config4         = 0x5A, /* Config4 */
        MultiIntr       = 0x5C, /* Multiple interrupt select */
        BasicModeCtrl   = 0x62, /* MII BMCR */
        BasicModeStatus = 0x64, /* MII BMSR */
        NWayAdvert      = 0x66, /* MII ADVERTISE */
        NWayLPAR        = 0x68, /* MII LPA */
        NWayExpansion   = 0x6A, /* MII Expansion */
        Config5         = 0xD8, /* Config5 */
        TxPoll          = 0xD9, /* Tell chip to check Tx descriptors for work */
        RxMaxSize       = 0xDA, /* Max size of an Rx packet (8169 only) */
        CpCmd           = 0xE0, /* C+ Command register (C+ mode only) */
        IntrMitigate    = 0xE2, /* rx/tx interrupt mitigation control */
        RxRingAddr      = 0xE4, /* 64-bit start addr of Rx ring */
        TxThresh        = 0xEC, /* Early Tx threshold */
        OldRxBufAddr    = 0x30, /* DMA address of Rx ring buffer (C mode) */
        OldTSD0         = 0x10, /* DMA address of first Tx desc (C mode) */

        /* Tx and Rx status descriptors */
        DescOwn         = (1 << 31), /* Descriptor is owned by NIC */
        RingEnd         = (1 << 30), /* End of descriptor ring */
        FirstFrag       = (1 << 29), /* First segment of a packet */
        LastFrag        = (1 << 28), /* Final segment of a packet */
        LargeSend       = (1 << 27), /* TCP Large Send Offload (TSO) */
        MSSShift        = 16,        /* MSS value position */
        MSSMask         = 0x7ff,     /* MSS value: 11 bits (must not reach
                                        the LargeSend bit above) */
        TxError         = (1 << 23), /* Tx error summary */
        RxError         = (1 << 20), /* Rx error summary */
        IPCS            = (1 << 18), /* Calculate IP checksum */
        UDPCS           = (1 << 17), /* Calculate UDP/IP checksum */
        TCPCS           = (1 << 16), /* Calculate TCP/IP checksum */
        TxVlanTag       = (1 << 17), /* Add VLAN tag */
        RxVlanTagged    = (1 << 16), /* Rx VLAN tag available */
        IPFail          = (1 << 15), /* IP checksum failed */
        UDPFail         = (1 << 14), /* UDP/IP checksum failed */
        TCPFail         = (1 << 13), /* TCP/IP checksum failed */
        NormalTxPoll    = (1 << 6),  /* One or more normal Tx packets to send */
        PID1            = (1 << 17), /* 2 protocol id bits:  0==non-IP, */
        PID0            = (1 << 16), /* 1==TCP/IP, 2==UDP/IP, 3==IP */
        RxProtoTCP      = 1,
        RxProtoUDP      = 2,
        RxProtoIP       = 3,
        TxFIFOUnder     = (1 << 25), /* Tx FIFO underrun */
        TxOWC           = (1 << 22), /* Tx Out-of-window collision */
        TxLinkFail      = (1 << 21), /* Link failed during Tx of packet */
        TxMaxCol        = (1 << 20), /* Tx aborted due to excessive collisions */
        TxColCntShift   = 16,        /* Shift, to get 4-bit Tx collision cnt */
        TxColCntMask    = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
        RxErrFrame      = (1 << 27), /* Rx frame alignment error */
        RxMcast         = (1 << 26), /* Rx multicast packet rcv'd */
        RxErrCRC        = (1 << 18), /* Rx CRC error */
        RxErrRunt       = (1 << 19), /* Rx error, packet < 64 bytes */
        RxErrLong       = (1 << 21), /* Rx error, packet > 4096 bytes */
        RxErrFIFO       = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

        /* StatsAddr register */
        DumpStats       = (1 << 3),  /* Begin stats dump */

        /* RxConfig register */
        RxCfgFIFOShift  = 13,        /* Shift, to get Rx FIFO thresh value */
        RxCfgDMAShift   = 8,         /* Shift, to get Rx Max DMA value */
        AcceptErr       = 0x20,      /* Accept packets with CRC errors */
        AcceptRunt      = 0x10,      /* Accept runt (<64 bytes) packets */
        AcceptBroadcast = 0x08,      /* Accept broadcast packets */
        AcceptMulticast = 0x04,      /* Accept multicast packets */
        AcceptMyPhys    = 0x02,      /* Accept pkts with our MAC as dest */
        AcceptAllPhys   = 0x01,      /* Accept all pkts w/ physical dest */

        /* IntrMask / IntrStatus registers */
        PciErr          = (1 << 15), /* System error on the PCI bus */
        TimerIntr       = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
        LenChg          = (1 << 13), /* Cable length change */
        SWInt           = (1 << 8),  /* Software-requested interrupt */
        TxEmpty         = (1 << 7),  /* No Tx descriptors available */
        RxFIFOOvr       = (1 << 6),  /* Rx FIFO Overflow */
        LinkChg         = (1 << 5),  /* Packet underrun, or link change */
        RxEmpty         = (1 << 4),  /* No Rx descriptors available */
        TxErr           = (1 << 3),  /* Tx error */
        TxOK            = (1 << 2),  /* Tx packet sent */
        RxErr           = (1 << 1),  /* Rx error */
        RxOK            = (1 << 0),  /* Rx packet received */
        IntrResvd       = (1 << 10), /* reserved, according to RealTek engineers,
                                        but hardware likes to raise it */

        IntrAll         = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
                          RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
                          RxErr | RxOK | IntrResvd,

        /* C mode command register */
        CmdReset        = (1 << 4),  /* Enable to reset; self-clearing */
        RxOn            = (1 << 3),  /* Rx mode enable */
        TxOn            = (1 << 2),  /* Tx mode enable */

        /* C+ mode command register */
        RxVlanOn        = (1 << 6),  /* Rx VLAN de-tagging enable */
        RxChkSum        = (1 << 5),  /* Rx checksum offload enable */
        PCIDAC          = (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
        PCIMulRW        = (1 << 3),  /* Enable PCI read/write multiple */
        CpRxOn          = (1 << 1),  /* Rx mode enable */
        CpTxOn          = (1 << 0),  /* Tx mode enable */

        /* Cfg9346 EEPROM control register */
        Cfg9346_Lock    = 0x00,      /* Lock ConfigX/MII register access */
        Cfg9346_Unlock  = 0xC0,      /* Unlock ConfigX/MII register access */
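
        /* ConfigX register writes only take effect between an unlock/lock
         * pair; the pattern (used by cp_init_hw() and mdio_write() below,
         * with "value" as a placeholder for any ConfigX update) is:
         *
         *      cpw8_f(Cfg9346, Cfg9346_Unlock);
         *      cpw8(Config3, value);
         *      cpw8_f(Cfg9346, Cfg9346_Lock);
         */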

        /* TxConfig register */
        IFG             = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
        TxDMAShift      = 8,         /* DMA burst value (0-7) is shift this many bits */

        /* Early Tx Threshold register */
        TxThreshMask    = 0x3f,      /* Mask bits 5-0 */
        TxThreshMax     = 2048,      /* Max early Tx threshold */

        /* Config1 register */
        DriverLoaded    = (1 << 5),  /* Software marker, driver is loaded */
        LWACT           = (1 << 4),  /* LWAKE active mode */
        PMEnable        = (1 << 0),  /* Enable various PM features of chip */

        /* Config3 register */
        PARMEnable      = (1 << 6),  /* Enable auto-loading of PHY parms */
        MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
        LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */

        /* Config4 register */
        LWPTN           = (1 << 1),  /* LWAKE Pattern */
        LWPME           = (1 << 4),  /* LANWAKE vs PMEB */

        /* Config5 register */
        BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
        MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
        UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
        LANWake         = (1 << 1),  /* Enable LANWake signal */
        PMEStatus       = (1 << 0),  /* PME status can be reset by PCI RST# */

        cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
        cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
        cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
          (RX_FIFO_THRESH << RxCfgFIFOShift) |
          (RX_DMA_BURST << RxCfgDMAShift);

struct cp_desc {
        __le32          opts1;
        __le32          opts2;
        __le64          addr;
};

struct cp_dma_stats {
        __le64                  tx_ok;
        __le64                  rx_ok;
        __le64                  tx_err;
        __le32                  rx_err;
        __le16                  rx_fifo;
        __le16                  frame_align;
        __le32                  tx_ok_1col;
        __le32                  tx_ok_mcol;
        __le64                  rx_ok_phys;
        __le64                  rx_ok_bcast;
        __le32                  rx_ok_mcast;
        __le16                  tx_abort;
        __le16                  tx_underrun;
} __packed;
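
/* cp_dma_stats mirrors, field for field, the 64-byte statistics block that
 * the chip DMAs to host memory (consumed in cp_get_ethtool_stats()); the
 * __packed attribute keeps the compiler from inserting padding that would
 * shift the hardware-defined layout.
 */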

struct cp_extra_stats {
        unsigned long           rx_frags;
};

struct cp_private {
        void                    __iomem *regs;
        struct net_device       *dev;
        spinlock_t              lock;
        u32                     msg_enable;

        struct napi_struct      napi;

        struct pci_dev          *pdev;
        u32                     rx_config;
        u16                     cpcmd;

        struct cp_extra_stats   cp_stats;

        unsigned                rx_head         ____cacheline_aligned;
        unsigned                rx_tail;
        struct cp_desc          *rx_ring;
        struct sk_buff          *rx_skb[CP_RX_RING_SIZE];

        unsigned                tx_head         ____cacheline_aligned;
        unsigned                tx_tail;
        struct cp_desc          *tx_ring;
        struct sk_buff          *tx_skb[CP_TX_RING_SIZE];

        unsigned                rx_buf_sz;
        unsigned                wol_enabled : 1; /* Is Wake-on-LAN enabled? */

        dma_addr_t              ring_dma;

        struct mii_if_info      mii_if;
};

#define cpr8(reg)       readb(cp->regs + (reg))
#define cpr16(reg)      readw(cp->regs + (reg))
#define cpr32(reg)      readl(cp->regs + (reg))
#define cpw8(reg,val)   writeb((val), cp->regs + (reg))
#define cpw16(reg,val)  writew((val), cp->regs + (reg))
#define cpw32(reg,val)  writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {                    \
        writeb((val), cp->regs + (reg));        \
        readb(cp->regs + (reg));                \
        } while (0)
#define cpw16_f(reg,val) do {                   \
        writew((val), cp->regs + (reg));        \
        readw(cp->regs + (reg));                \
        } while (0)
#define cpw32_f(reg,val) do {                   \
        writel((val), cp->regs + (reg));        \
        readl(cp->regs + (reg));                \
        } while (0)
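
/* The _f ("flush") variants read the register back right after writing it;
 * on PCI this forces the posted write out to the device before the CPU
 * continues, which matters whenever ordering against the next action is
 * required (e.g. unmasking interrupts or ringing the TxPoll doorbell).
 */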


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);

static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
        { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
        { },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct {
        const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "tx_ok" },
        { "rx_ok" },
        { "tx_err" },
        { "rx_err" },
        { "rx_fifo" },
        { "frame_align" },
        { "tx_ok_1col" },
        { "tx_ok_mcol" },
        { "rx_ok_phys" },
        { "rx_ok_bcast" },
        { "rx_ok_mcast" },
        { "tx_abort" },
        { "tx_underrun" },
        { "rx_frags" },
};


static inline void cp_set_rxbufsize (struct cp_private *cp)
{
        unsigned int mtu = cp->dev->mtu;

        if (mtu > ETH_DATA_LEN)
                /* MTU + ethernet header + FCS + optional VLAN tag */
                cp->rx_buf_sz = mtu + ETH_HLEN + 8;
        else
                cp->rx_buf_sz = PKT_BUF_SZ;
}
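
/* Example of the sizing above (hypothetical MTUs): mtu = 1500 is within
 * ETH_DATA_LEN, so the default PKT_BUF_SZ = 1536 applies; mtu = 4000 gives
 * 4000 + 14 (Ethernet header) + 4 (FCS) + 4 (VLAN tag) = 4022 bytes.
 */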

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
                              struct cp_desc *desc)
{
        u32 opts2 = le32_to_cpu(desc->opts2);

        skb->protocol = eth_type_trans (skb, cp->dev);

        cp->dev->stats.rx_packets++;
        cp->dev->stats.rx_bytes += skb->len;

        if (opts2 & RxVlanTagged)
                __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));

        napi_gro_receive(&cp->napi, skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
                            u32 status, u32 len)
{
        netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
                  rx_tail, status, len);
        cp->dev->stats.rx_errors++;
        if (status & RxErrFrame)
                cp->dev->stats.rx_frame_errors++;
        if (status & RxErrCRC)
                cp->dev->stats.rx_crc_errors++;
        if ((status & RxErrRunt) || (status & RxErrLong))
                cp->dev->stats.rx_length_errors++;
        if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
                cp->dev->stats.rx_length_errors++;
        if (status & RxErrFIFO)
                cp->dev->stats.rx_fifo_errors++;
}

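/* The hardware reports the parsed protocol of each received frame in
 * status bits 17:16 (PID1, PID0); e.g. a value of 1 identifies a TCP/IP
 * frame (RxProtoTCP), which counts as checksum-OK below only when the
 * TCPFail bit is also clear.
 */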
static inline unsigned int cp_rx_csum_ok (u32 status)
{
        unsigned int protocol = (status >> 16) & 0x3;

        if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
            ((protocol == RxProtoUDP) && !(status & UDPFail)))
                return 1;
        else
                return 0;
}

static int cp_rx_poll(struct napi_struct *napi, int budget)
{
        struct cp_private *cp = container_of(napi, struct cp_private, napi);
        struct net_device *dev = cp->dev;
        unsigned int rx_tail = cp->rx_tail;
        int rx;

rx_status_loop:
        rx = 0;
        cpw16(IntrStatus, cp_rx_intr_mask);

        while (1) {
                u32 status, len;
                dma_addr_t mapping;
                struct sk_buff *skb, *new_skb;
                struct cp_desc *desc;
                const unsigned buflen = cp->rx_buf_sz;

                skb = cp->rx_skb[rx_tail];
                BUG_ON(!skb);

                desc = &cp->rx_ring[rx_tail];
                status = le32_to_cpu(desc->opts1);
                if (status & DescOwn)
                        break;

                len = (status & 0x1fff) - 4;
                mapping = le64_to_cpu(desc->addr);

                if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
                        /* we don't support incoming fragmented frames.
                         * instead, we attempt to ensure that the
                         * pre-allocated RX skbs are properly sized such
                         * that RX fragments are never encountered
                         */
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        dev->stats.rx_dropped++;
                        cp->cp_stats.rx_frags++;
                        goto rx_next;
                }

                if (status & (RxError | RxErrFIFO)) {
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        goto rx_next;
                }

                netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
                          rx_tail, status, len);

                new_skb = netdev_alloc_skb_ip_align(dev, buflen);
                if (!new_skb) {
                        dev->stats.rx_dropped++;
                        goto rx_next;
                }

                dma_unmap_single(&cp->pdev->dev, mapping,
                                 buflen, PCI_DMA_FROMDEVICE);

                /* Handle checksum offloading for incoming packets. */
                if (cp_rx_csum_ok(status))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                skb_put(skb, len);

                mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
                                         PCI_DMA_FROMDEVICE);
                cp->rx_skb[rx_tail] = new_skb;

                cp_rx_skb(cp, skb, desc);
                rx++;

rx_next:
                cp->rx_ring[rx_tail].opts2 = 0;
                cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
                if (rx_tail == (CP_RX_RING_SIZE - 1))
                        desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
                                                  cp->rx_buf_sz);
                else
                        desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
                rx_tail = NEXT_RX(rx_tail);

                if (rx >= budget)
                        break;
        }

        cp->rx_tail = rx_tail;

        /* if we did not reach work limit, then we're done with
         * this round of polling
         */
        if (rx < budget) {
                unsigned long flags;

                if (cpr16(IntrStatus) & cp_rx_intr_mask)
                        goto rx_status_loop;

                napi_gro_flush(napi, false);
                spin_lock_irqsave(&cp->lock, flags);
                __napi_complete(napi);
                cpw16_f(IntrMask, cp_intr_mask);
                spin_unlock_irqrestore(&cp->lock, flags);
        }

        return rx;
}

static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct cp_private *cp;
        u16 status;

        if (unlikely(dev == NULL))
                return IRQ_NONE;
        cp = netdev_priv(dev);

        status = cpr16(IntrStatus);
        if (!status || (status == 0xFFFF))
                return IRQ_NONE;

        netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
                  status, cpr8(Cmd), cpr16(CpCmd));

        cpw16(IntrStatus, status & ~cp_rx_intr_mask);

        spin_lock(&cp->lock);

        /* close possible races with dev_close */
        if (unlikely(!netif_running(dev))) {
                cpw16(IntrMask, 0);
                spin_unlock(&cp->lock);
                return IRQ_HANDLED;
        }

        if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
                if (napi_schedule_prep(&cp->napi)) {
                        cpw16_f(IntrMask, cp_norx_intr_mask);
                        __napi_schedule(&cp->napi);
                }

        if (status & (TxOK | TxErr | TxEmpty | SWInt))
                cp_tx(cp);
        if (status & LinkChg)
                mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

        spin_unlock(&cp->lock);

        if (status & PciErr) {
                u16 pci_status;

                pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
                pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
                netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
                           status, pci_status);

                /* TODO: reset hardware */
        }

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        const int irq = cp->pdev->irq;

        disable_irq(irq);
        cp_interrupt(irq, dev);
        enable_irq(irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
        unsigned tx_head = cp->tx_head;
        unsigned tx_tail = cp->tx_tail;

        while (tx_tail != tx_head) {
                struct cp_desc *txd = cp->tx_ring + tx_tail;
                struct sk_buff *skb;
                u32 status;

                rmb();
                status = le32_to_cpu(txd->opts1);
                if (status & DescOwn)
                        break;

                skb = cp->tx_skb[tx_tail];
                BUG_ON(!skb);

                dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                                 le32_to_cpu(txd->opts1) & 0xffff,
                                 PCI_DMA_TODEVICE);

                if (status & LastFrag) {
                        if (status & (TxError | TxFIFOUnder)) {
                                netif_dbg(cp, tx_err, cp->dev,
                                          "tx err, status 0x%x\n", status);
                                cp->dev->stats.tx_errors++;
                                if (status & TxOWC)
                                        cp->dev->stats.tx_window_errors++;
                                if (status & TxMaxCol)
                                        cp->dev->stats.tx_aborted_errors++;
                                if (status & TxLinkFail)
                                        cp->dev->stats.tx_carrier_errors++;
                                if (status & TxFIFOUnder)
                                        cp->dev->stats.tx_fifo_errors++;
                        } else {
                                cp->dev->stats.collisions +=
                                        ((status >> TxColCntShift) & TxColCntMask);
                                cp->dev->stats.tx_packets++;
                                cp->dev->stats.tx_bytes += skb->len;
                                netif_dbg(cp, tx_done, cp->dev,
                                          "tx done, slot %d\n", tx_tail);
                        }
                        dev_kfree_skb_irq(skb);
                }

                cp->tx_skb[tx_tail] = NULL;

                tx_tail = NEXT_TX(tx_tail);
        }

        cp->tx_tail = tx_tail;

        if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
                netif_wake_queue(cp->dev);
}

static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) ?
                TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}

static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned entry;
        u32 eor, flags;
        unsigned long intr_flags;
        __le32 opts2;
        int mss = 0;

        spin_lock_irqsave(&cp->lock, intr_flags);

        /* This is a hard error, log it. */
        if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&cp->lock, intr_flags);
                netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }

        entry = cp->tx_head;
        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
        mss = skb_shinfo(skb)->gso_size;

        opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));

        if (skb_shinfo(skb)->nr_frags == 0) {
                struct cp_desc *txd = &cp->tx_ring[entry];
                u32 len;
                dma_addr_t mapping;

                len = skb->len;
                mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
                txd->opts2 = opts2;
                txd->addr = cpu_to_le64(mapping);
                wmb();
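                /* The barrier above publishes the buffer address and opts2
                 * before opts1 below hands the descriptor to the chip via
                 * DescOwn; the second wmb() after opts1 then orders the
                 * descriptor write ahead of the TxPoll doorbell.
                 */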

                flags = eor | len | DescOwn | FirstFrag | LastFrag;

                if (mss)
                        flags |= LargeSend | ((mss & MSSMask) << MSSShift);
                else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        const struct iphdr *ip = ip_hdr(skb);
                        if (ip->protocol == IPPROTO_TCP)
                                flags |= IPCS | TCPCS;
                        else if (ip->protocol == IPPROTO_UDP)
                                flags |= IPCS | UDPCS;
                        else
                                WARN_ON(1);     /* we need a WARN() */
                }

                txd->opts1 = cpu_to_le32(flags);
                wmb();

                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);
        } else {
                struct cp_desc *txd;
                u32 first_len, first_eor;
                dma_addr_t first_mapping;
                int frag, first_entry = entry;
                const struct iphdr *ip = ip_hdr(skb);

                /* We must give this initial chunk to the device last.
                 * Otherwise we could race with the device.
                 */
                first_eor = eor;
                first_len = skb_headlen(skb);
                first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                               first_len, PCI_DMA_TODEVICE);
                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);

                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                        const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
                        u32 len;
                        u32 ctrl;
                        dma_addr_t mapping;

                        len = skb_frag_size(this_frag);
                        mapping = dma_map_single(&cp->pdev->dev,
                                                 skb_frag_address(this_frag),
                                                 len, PCI_DMA_TODEVICE);
                        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

                        ctrl = eor | len | DescOwn;

                        if (mss)
                                ctrl |= LargeSend |
                                        ((mss & MSSMask) << MSSShift);
                        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                if (ip->protocol == IPPROTO_TCP)
                                        ctrl |= IPCS | TCPCS;
                                else if (ip->protocol == IPPROTO_UDP)
                                        ctrl |= IPCS | UDPCS;
                                else
                                        BUG();
                        }

                        if (frag == skb_shinfo(skb)->nr_frags - 1)
                                ctrl |= LastFrag;

                        txd = &cp->tx_ring[entry];
                        txd->opts2 = opts2;
                        txd->addr = cpu_to_le64(mapping);
                        wmb();

                        txd->opts1 = cpu_to_le32(ctrl);
                        wmb();

                        cp->tx_skb[entry] = skb;
                        entry = NEXT_TX(entry);
                }

                txd = &cp->tx_ring[first_entry];
                txd->opts2 = opts2;
                txd->addr = cpu_to_le64(first_mapping);
                wmb();

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        if (ip->protocol == IPPROTO_TCP)
                                txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                         FirstFrag | DescOwn |
                                                         IPCS | TCPCS);
                        else if (ip->protocol == IPPROTO_UDP)
                                txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                         FirstFrag | DescOwn |
                                                         IPCS | UDPCS);
                        else
                                BUG();
                } else
                        txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                 FirstFrag | DescOwn);
                wmb();
        }
        cp->tx_head = entry;
        netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
                  entry, skb->len);
        if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
                netif_stop_queue(dev);

        spin_unlock_irqrestore(&cp->lock, intr_flags);

        cpw8(TxPoll, NormalTxPoll);

        return NETDEV_TX_OK;
}

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        u32 mc_filter[2];       /* Multicast hash filter */
        int rx_mode;

        /* Note: do not reorder, GCC is clever about common statements. */
        if (dev->flags & IFF_PROMISC) {
                /* Unconditionally log net taps. */
                rx_mode =
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter perfectly -- accept all multicasts. */
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else {
                struct netdev_hw_addr *ha;
                rx_mode = AcceptBroadcast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
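                        /* ether_crc() computes the Ethernet CRC-32 of the
                         * address; its top 6 bits select one of the 64
                         * hash bins, bit_nr >> 5 picks the 32-bit MAR word
                         * and bit_nr & 31 the bit within that word.
                         */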

                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                        rx_mode |= AcceptMulticast;
                }
        }

        /* We can safely update without stopping the chip. */
        cp->rx_config = cp_rx_config | rx_mode;
        cpw32_f(RxConfig, cp->rx_config);

        cpw32_f (MAR0 + 0, mc_filter[0]);
        cpw32_f (MAR0 + 4, mc_filter[1]);
}

static void cp_set_rx_mode (struct net_device *dev)
{
        unsigned long flags;
        struct cp_private *cp = netdev_priv(dev);

        spin_lock_irqsave (&cp->lock, flags);
        __cp_set_rx_mode(dev);
        spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
        /* only lower 24 bits valid; write any value to clear */
        cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
        cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        /* The chip need only report frames it silently dropped. */
        spin_lock_irqsave(&cp->lock, flags);
        if (netif_running(dev) && netif_device_present(dev))
                __cp_get_stats(cp);
        spin_unlock_irqrestore(&cp->lock, flags);

        return &dev->stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
        cpw16(IntrStatus, ~(cpr16(IntrStatus)));
        cpw16_f(IntrMask, 0);
        cpw8(Cmd, 0);
        cpw16_f(CpCmd, 0);
        cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
}

static void cp_reset_hw (struct cp_private *cp)
{
        unsigned work = 1000;

        cpw8(Cmd, CmdReset);

        while (work--) {
                if (!(cpr8(Cmd) & CmdReset))
                        return;

                schedule_timeout_uninterruptible(10);
        }

        netdev_err(cp->dev, "hardware reset timeout\n");
}

static inline void cp_start_hw (struct cp_private *cp)
{
        cpw16(CpCmd, cp->cpcmd);
        cpw8(Cmd, RxOn | TxOn);
}

static void cp_enable_irq(struct cp_private *cp)
{
        cpw16_f(IntrMask, cp_intr_mask);
}

static void cp_init_hw (struct cp_private *cp)
{
        struct net_device *dev = cp->dev;
        dma_addr_t ring_dma;

        cp_reset_hw(cp);

        cpw8_f (Cfg9346, Cfg9346_Unlock);

        /* Restore our idea of the MAC address. */
        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

        __cp_set_rx_mode(dev);
        cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

        cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
        /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
        cpw8(Config3, PARMEnable);
        cp->wol_enabled = 0;

        cpw8(Config5, cpr8(Config5) & PMEStatus);

        cpw32_f(HiTxRingAddr, 0);
        cpw32_f(HiTxRingAddr + 4, 0);

        ring_dma = cp->ring_dma;
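        /* Program each 64-bit ring base as two 32-bit halves; the double
         * 16-bit shift for the high word avoids a shift by 32, which is
         * undefined when dma_addr_t is only 32 bits wide.
         */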
        cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

        ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
        cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

        cpw16(MultiIntr, 0);

        cpw8_f(Cfg9346, Cfg9346_Lock);
}

static int cp_refill_rx(struct cp_private *cp)
{
        struct net_device *dev = cp->dev;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                dma_addr_t mapping;

                skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
                if (!skb)
                        goto err_out;

                mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                cp->rx_skb[i] = skb;

                cp->rx_ring[i].opts2 = 0;
                cp->rx_ring[i].addr = cpu_to_le64(mapping);
                if (i == (CP_RX_RING_SIZE - 1))
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
                else
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | cp->rx_buf_sz);
        }

        return 0;

err_out:
        cp_clean_rings(cp);
        return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
        cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

        cp_init_rings_index(cp);

        return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
        struct device *d = &cp->pdev->dev;
        void *mem;
        int rc;

        mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        cp->rx_ring = mem;
        cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

        rc = cp_init_rings(cp);
        if (rc < 0)
                dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);

        return rc;
}

static void cp_clean_rings (struct cp_private *cp)
{
        struct cp_desc *desc;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                if (cp->rx_skb[i]) {
                        desc = cp->rx_ring + i;
                        dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(cp->rx_skb[i]);
                }
        }

        for (i = 0; i < CP_TX_RING_SIZE; i++) {
                if (cp->tx_skb[i]) {
                        struct sk_buff *skb = cp->tx_skb[i];

                        desc = cp->tx_ring + i;
                        dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
                                         le32_to_cpu(desc->opts1) & 0xffff,
                                         PCI_DMA_TODEVICE);
                        if (le32_to_cpu(desc->opts1) & LastFrag)
                                dev_kfree_skb(skb);
                        cp->dev->stats.tx_dropped++;
                }
        }

        memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

        memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
        memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
        cp_clean_rings(cp);
        dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
                          cp->ring_dma);
        cp->rx_ring = NULL;
        cp->tx_ring = NULL;
}

static int cp_open (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        const int irq = cp->pdev->irq;
        int rc;

        netif_dbg(cp, ifup, dev, "enabling interface\n");

        rc = cp_alloc_rings(cp);
        if (rc)
                return rc;

        napi_enable(&cp->napi);

        cp_init_hw(cp);

        rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                goto err_out_hw;

        cp_enable_irq(cp);

        netif_carrier_off(dev);
        mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
        netif_start_queue(dev);

        return 0;

err_out_hw:
        napi_disable(&cp->napi);
        cp_stop_hw(cp);
        cp_free_rings(cp);
        return rc;
}

static int cp_close (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        napi_disable(&cp->napi);

        netif_dbg(cp, ifdown, dev, "disabling interface\n");

        spin_lock_irqsave(&cp->lock, flags);

        netif_stop_queue(dev);
        netif_carrier_off(dev);

        cp_stop_hw(cp);

        spin_unlock_irqrestore(&cp->lock, flags);

        free_irq(cp->pdev->irq, dev);

        cp_free_rings(cp);
        return 0;
}

static void cp_tx_timeout(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
        int rc;

        netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
                    cpr8(Cmd), cpr16(CpCmd),
                    cpr16(IntrStatus), cpr16(IntrMask));

        spin_lock_irqsave(&cp->lock, flags);

        cp_stop_hw(cp);
        cp_clean_rings(cp);
        rc = cp_init_rings(cp);
        cp_start_hw(cp);

        netif_wake_queue(dev);

        spin_unlock_irqrestore(&cp->lock, flags);
}

#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        /* check for invalid MTU, according to hardware limits */
        if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
                return -EINVAL;

        /* if network interface not up, no need for complexity */
        if (!netif_running(dev)) {
                dev->mtu = new_mtu;
                cp_set_rxbufsize(cp);   /* set new rx buf size */
                return 0;
        }

        spin_lock_irqsave(&cp->lock, flags);

        cp_stop_hw(cp);                 /* stop h/w and free rings */
        cp_clean_rings(cp);

        dev->mtu = new_mtu;
        cp_set_rxbufsize(cp);           /* set new rx buf size */

        rc = cp_init_rings(cp);         /* realloc and restart h/w */
        cp_start_hw(cp);

        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}
#endif /* BROKEN */

static const char mii_2_8139_map[8] = {
        BasicModeCtrl,
        BasicModeStatus,
        0,
        0,
        NWayAdvert,
        NWayLPAR,
        NWayExpansion,
        0
};
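
/* The internal PHY of the 8139C+ has no true MDIO interface; generic MII
 * register numbers are instead mapped onto chip registers by the table
 * above.  For example, a read of MII register 0 (BMCR) returns the
 * BasicModeCtrl register at offset 0x62, while unmapped locations read
 * back as 0.
 */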

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct cp_private *cp = netdev_priv(dev);

        return location < 8 && mii_2_8139_map[location] ?
               readw(cp->regs + mii_2_8139_map[location]) : 0;
}


static void mdio_write(struct net_device *dev, int phy_id, int location,
                       int value)
{
        struct cp_private *cp = netdev_priv(dev);

        if (location == 0) {
                cpw8(Cfg9346, Cfg9346_Unlock);
                cpw16(BasicModeCtrl, value);
                cpw8(Cfg9346, Cfg9346_Lock);
        } else if (location < 8 && mii_2_8139_map[location])
                cpw16(mii_2_8139_map[location], value);
}

/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
                           const struct ethtool_wolinfo *wol)
{
        u8 options;

        options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
        /* If WOL is being disabled, no need for complexity */
        if (wol->wolopts) {
                if (wol->wolopts & WAKE_PHY)    options |= LinkUp;
                if (wol->wolopts & WAKE_MAGIC)  options |= MagicPacket;
        }

        cpw8 (Cfg9346, Cfg9346_Unlock);
        cpw8 (Config3, options);
        cpw8 (Cfg9346, Cfg9346_Lock);

        options = 0; /* Paranoia setting */
        options = cpr8 (Config5) & ~(UWF | MWF | BWF);
        /* If WOL is being disabled, no need for complexity */
        if (wol->wolopts) {
                if (wol->wolopts & WAKE_UCAST)  options |= UWF;
                if (wol->wolopts & WAKE_BCAST)  options |= BWF;
                if (wol->wolopts & WAKE_MCAST)  options |= MWF;
        }

        cpw8 (Config5, options);

        cp->wol_enabled = (wol->wolopts) ? 1 : 0;

        return 0;
}

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
                     struct ethtool_wolinfo *wol)
{
        u8 options;

        wol->wolopts   = 0; /* Start from scratch */
        wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
                         WAKE_MCAST | WAKE_UCAST;
        /* We don't need to go on if WOL is disabled */
        if (!cp->wol_enabled) return;

        options        = cpr8 (Config3);
        if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
        if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;

        options        = 0; /* Paranoia setting */
        options        = cpr8 (Config5);
        if (options & UWF)           wol->wolopts |= WAKE_UCAST;
        if (options & BWF)           wol->wolopts |= WAKE_BCAST;
        if (options & MWF)           wol->wolopts |= WAKE_MCAST;
}

static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct cp_private *cp = netdev_priv(dev);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}

static void cp_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *ring)
{
        ring->rx_max_pending = CP_RX_RING_SIZE;
        ring->tx_max_pending = CP_TX_RING_SIZE;
        ring->rx_pending = CP_RX_RING_SIZE;
        ring->tx_pending = CP_TX_RING_SIZE;
}

static int cp_get_regs_len(struct net_device *dev)
{
        return CP_REGS_SIZE;
}

static int cp_get_sset_count (struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return CP_NUM_STATS;
        default:
                return -EOPNOTSUPP;
        }
}

static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        rc = mii_ethtool_gset(&cp->mii_if, cmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}

static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        rc = mii_ethtool_sset(&cp->mii_if, cmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return mii_nway_restart(&cp->mii_if);
}

static u32 cp_get_msglevel(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
        struct cp_private *cp = netdev_priv(dev);
        cp->msg_enable = value;
}

static int cp_set_features(struct net_device *dev, netdev_features_t features)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        if (!((dev->features ^ features) & NETIF_F_RXCSUM))
                return 0;

        spin_lock_irqsave(&cp->lock, flags);

        if (features & NETIF_F_RXCSUM)
                cp->cpcmd |= RxChkSum;
        else
                cp->cpcmd &= ~RxChkSum;

        if (features & NETIF_F_HW_VLAN_RX)
                cp->cpcmd |= RxVlanOn;
        else
                cp->cpcmd &= ~RxVlanOn;

        cpw16_f(CpCmd, cp->cpcmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return 0;
}

static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                        void *p)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        if (regs->len < CP_REGS_SIZE)
                return /* -EINVAL */;

        regs->version = CP_REGS_VER;

        spin_lock_irqsave(&cp->lock, flags);
        memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
        spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave (&cp->lock, flags);
        netdev_get_wol (cp, wol);
        spin_unlock_irqrestore (&cp->lock, flags);
}

static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
        int rc;

        spin_lock_irqsave (&cp->lock, flags);
        rc = netdev_set_wol (cp, wol);
        spin_unlock_irqrestore (&cp->lock, flags);

        return rc;
}

static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        default:
                BUG();
                break;
        }
}

static void cp_get_ethtool_stats (struct net_device *dev,
                                  struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct cp_private *cp = netdev_priv(dev);
        struct cp_dma_stats *nic_stats;
        dma_addr_t dma;
        int i;

        nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
                                       &dma, GFP_KERNEL);
        if (!nic_stats)
                return;

        /* begin NIC statistics dump */
        cpw32(StatsAddr + 4, (u64)dma >> 32);
        cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
        cpr32(StatsAddr);
1501
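            /* The chip clears DumpStats once the counters have been DMAed
             * into the buffer; poll for that, bounded at 1000 * 10us.
             */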
1502        for (i = 0; i < 1000; i++) {
1503                if ((cpr32(StatsAddr) & DumpStats) == 0)
1504                        break;
1505                udelay(10);
1506        }
1507        cpw32(StatsAddr, 0);
1508        cpw32(StatsAddr + 4, 0);
1509        cpr32(StatsAddr);
1510
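            /* Copy the counters out in the order of ethtool_stats_keys[];
             * the BUG_ON() below cross-checks the total against CP_NUM_STATS.
             */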
1511        i = 0;
1512        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1513        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1514        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1515        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1516        tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1517        tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1518        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1519        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1520        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1521        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1522        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1523        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1524        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1525        tmp_stats[i++] = cp->cp_stats.rx_frags;
1526        BUG_ON(i != CP_NUM_STATS);
1527
1528        dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1529}
1530
1531static const struct ethtool_ops cp_ethtool_ops = {
1532        .get_drvinfo            = cp_get_drvinfo,
1533        .get_regs_len           = cp_get_regs_len,
1534        .get_sset_count         = cp_get_sset_count,
1535        .get_settings           = cp_get_settings,
1536        .set_settings           = cp_set_settings,
1537        .nway_reset             = cp_nway_reset,
1538        .get_link               = ethtool_op_get_link,
1539        .get_msglevel           = cp_get_msglevel,
1540        .set_msglevel           = cp_set_msglevel,
1541        .get_regs               = cp_get_regs,
1542        .get_wol                = cp_get_wol,
1543        .set_wol                = cp_set_wol,
1544        .get_strings            = cp_get_strings,
1545        .get_ethtool_stats      = cp_get_ethtool_stats,
1546        .get_eeprom_len         = cp_get_eeprom_len,
1547        .get_eeprom             = cp_get_eeprom,
1548        .set_eeprom             = cp_set_eeprom,
1549        .get_ringparam          = cp_get_ringparam,
1550};
1551
1552static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1553{
1554        struct cp_private *cp = netdev_priv(dev);
1555        int rc;
1556        unsigned long flags;
1557
1558        if (!netif_running(dev))
1559                return -EINVAL;
1560
1561        spin_lock_irqsave(&cp->lock, flags);
1562        rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1563        spin_unlock_irqrestore(&cp->lock, flags);
1564        return rc;
1565}
1566
1567static int cp_set_mac_address(struct net_device *dev, void *p)
1568{
1569        struct cp_private *cp = netdev_priv(dev);
1570        struct sockaddr *addr = p;
1571
1572        if (!is_valid_ether_addr(addr->sa_data))
1573                return -EADDRNOTAVAIL;
1574
1575        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1576
1577        spin_lock_irq(&cp->lock);
1578
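            /* The MAC0 ID registers are write-protected; unlock the config
             * registers around the update.
             */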
1579        cpw8_f(Cfg9346, Cfg9346_Unlock);
1580        cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1581        cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1582        cpw8_f(Cfg9346, Cfg9346_Lock);
1583
1584        spin_unlock_irq(&cp->lock);
1585
1586        return 0;
1587}
1588
1589/* Serial EEPROM section. */
1590
1591/*  EEPROM_Ctrl bits. */
1592#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
1593#define EE_CS                   0x08    /* EEPROM chip select. */
1594#define EE_DATA_WRITE   0x02    /* EEPROM chip data in. */
1595#define EE_WRITE_0              0x00
1596#define EE_WRITE_1              0x02
1597#define EE_DATA_READ    0x01    /* EEPROM chip data out. */
1598#define EE_ENB                  (0x80 | EE_CS)
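    /* Cfg9346 bit 7 selects EEPROM programming mode, which maps the
     * 93C46/93C56 pins onto the low bits of this register; EE_ENB is
     * that mode bit plus chip select.
     */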
1599
1600/* Delay between EEPROM clock transitions.
1601   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1602 */
1603
1604#define eeprom_delay()  readb(ee_addr)
1605
1606/* The EEPROM commands include the always-set leading bit. */
1607#define EE_EXTEND_CMD   (4)
1608#define EE_WRITE_CMD    (5)
1609#define EE_READ_CMD             (6)
1610#define EE_ERASE_CMD    (7)
1611
1612#define EE_EWDS_ADDR    (0)
1613#define EE_WRAL_ADDR    (1)
1614#define EE_ERAL_ADDR    (2)
1615#define EE_EWEN_ADDR    (3)
1616
1617#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1618
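    /* Start condition: enter programming mode with chip select low, then
     * raise chip select before the first opcode bit is shifted out.
     */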
1619static void eeprom_cmd_start(void __iomem *ee_addr)
1620{
1621        writeb (EE_ENB & ~EE_CS, ee_addr);
1622        writeb (EE_ENB, ee_addr);
1623        eeprom_delay ();
1624}
1625
1626static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1627{
1628        int i;
1629
1630        /* Shift the command bits out. */
1631        for (i = cmd_len - 1; i >= 0; i--) {
1632                int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1633                writeb (EE_ENB | dataval, ee_addr);
1634                eeprom_delay ();
1635                writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1636                eeprom_delay ();
1637        }
1638        writeb (EE_ENB, ee_addr);
1639        eeprom_delay ();
1640}
1641
1642static void eeprom_cmd_end(void __iomem *ee_addr)
1643{
1644        writeb(0, ee_addr);
1645        eeprom_delay ();
1646}
1647
1648static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1649                              int addr_len)
1650{
1651        int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1652
1653        eeprom_cmd_start(ee_addr);
1654        eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1655        eeprom_cmd_end(ee_addr);
1656}
1657
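    /* Read one 16-bit word: clock out the start bit, the EE_READ_CMD
     * opcode and the word address, then clock in 16 data bits MSB-first
     * from the EE_DATA_READ pin.
     */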
1658static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1659{
1660        int i;
1661        u16 retval = 0;
1662        void __iomem *ee_addr = ioaddr + Cfg9346;
1663        int read_cmd = location | (EE_READ_CMD << addr_len);
1664
1665        eeprom_cmd_start(ee_addr);
1666        eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1667
1668        for (i = 16; i > 0; i--) {
1669                writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1670                eeprom_delay ();
1671                retval =
1672                    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1673                                     0);
1674                writeb (EE_ENB, ee_addr);
1675                eeprom_delay ();
1676        }
1677
1678        eeprom_cmd_end(ee_addr);
1679
1680        return retval;
1681}
1682
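    /* Write one 16-bit word: write-enable the part (EWEN), shift out the
     * opcode, address and data, then poll EE_DATA_READ, which the 93Cxx
     * parts drive high once the internal write cycle finishes, and
     * finally write-protect the part again (EWDS).
     */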
1683static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1684                         int addr_len)
1685{
1686        int i;
1687        void __iomem *ee_addr = ioaddr + Cfg9346;
1688        int write_cmd = location | (EE_WRITE_CMD << addr_len);
1689
1690        eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1691
1692        eeprom_cmd_start(ee_addr);
1693        eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1694        eeprom_cmd(ee_addr, val, 16);
1695        eeprom_cmd_end(ee_addr);
1696
1697        eeprom_cmd_start(ee_addr);
1698        for (i = 0; i < 20000; i++)
1699                if (readb(ee_addr) & EE_DATA_READ)
1700                        break;
1701        eeprom_cmd_end(ee_addr);
1702
1703        eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1704}
1705
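    /* Word 0 doubles as a size probe: 0x8129 there indicates the larger
     * part addressed with 8 bits (256 bytes); anything else means the
     * 6-bit, 128-byte part.  The same test selects addr_len elsewhere.
     */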
1706static int cp_get_eeprom_len(struct net_device *dev)
1707{
1708        struct cp_private *cp = netdev_priv(dev);
1709        int size;
1710
1711        spin_lock_irq(&cp->lock);
1712        size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1713        spin_unlock_irq(&cp->lock);
1714
1715        return size;
1716}
1717
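    /* The EEPROM is word-addressed: an odd starting offset takes only the
     * high byte of its word, and an odd end takes only the low byte.
     */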
1718static int cp_get_eeprom(struct net_device *dev,
1719                         struct ethtool_eeprom *eeprom, u8 *data)
1720{
1721        struct cp_private *cp = netdev_priv(dev);
1722        unsigned int addr_len;
1723        u16 val;
1724        u32 offset = eeprom->offset >> 1;
1725        u32 len = eeprom->len;
1726        u32 i = 0;
1727
1728        eeprom->magic = CP_EEPROM_MAGIC;
1729
1730        spin_lock_irq(&cp->lock);
1731
1732        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1733
1734        if (eeprom->offset & 1) {
1735                val = read_eeprom(cp->regs, offset, addr_len);
1736                data[i++] = (u8)(val >> 8);
1737                offset++;
1738        }
1739
1740        while (i < len - 1) {
1741                val = read_eeprom(cp->regs, offset, addr_len);
1742                data[i++] = (u8)val;
1743                data[i++] = (u8)(val >> 8);
1744                offset++;
1745        }
1746
1747        if (i < len) {
1748                val = read_eeprom(cp->regs, offset, addr_len);
1749                data[i] = (u8)val;
1750        }
1751
1752        spin_unlock_irq(&cp->lock);
1753        return 0;
1754}
1755
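    /* Mirror of cp_get_eeprom: unaligned first and last bytes use a
     * read-modify-write of the containing word so the adjacent byte is
     * preserved.
     */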
1756static int cp_set_eeprom(struct net_device *dev,
1757                         struct ethtool_eeprom *eeprom, u8 *data)
1758{
1759        struct cp_private *cp = netdev_priv(dev);
1760        unsigned int addr_len;
1761        u16 val;
1762        u32 offset = eeprom->offset >> 1;
1763        u32 len = eeprom->len;
1764        u32 i = 0;
1765
1766        if (eeprom->magic != CP_EEPROM_MAGIC)
1767                return -EINVAL;
1768
1769        spin_lock_irq(&cp->lock);
1770
1771        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1772
1773        if (eeprom->offset & 1) {
1774                val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1775                val |= (u16)data[i++] << 8;
1776                write_eeprom(cp->regs, offset, val, addr_len);
1777                offset++;
1778        }
1779
1780        while (i < len - 1) {
1781                val = (u16)data[i++];
1782                val |= (u16)data[i++] << 8;
1783                write_eeprom(cp->regs, offset, val, addr_len);
1784                offset++;
1785        }
1786
1787        if (i < len) {
1788                val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1789                val |= (u16)data[i];
1790                write_eeprom(cp->regs, offset, val, addr_len);
1791        }
1792
1793        spin_unlock_irq(&cp->lock);
1794        return 0;
1795}
1796
1797/* Put the board into D3hot state and wait for the WakeUp signal */
1798static void cp_set_d3_state (struct cp_private *cp)
1799{
1800        pci_enable_wake (cp->pdev, PCI_D0, 1); /* Enable PME# generation */
1801        pci_set_power_state (cp->pdev, PCI_D3hot);
1802}
1803
1804static const struct net_device_ops cp_netdev_ops = {
1805        .ndo_open               = cp_open,
1806        .ndo_stop               = cp_close,
1807        .ndo_validate_addr      = eth_validate_addr,
1808        .ndo_set_mac_address    = cp_set_mac_address,
1809        .ndo_set_rx_mode        = cp_set_rx_mode,
1810        .ndo_get_stats          = cp_get_stats,
1811        .ndo_do_ioctl           = cp_ioctl,
1812        .ndo_start_xmit         = cp_start_xmit,
1813        .ndo_tx_timeout         = cp_tx_timeout,
1814        .ndo_set_features       = cp_set_features,
1815#ifdef BROKEN
1816        .ndo_change_mtu         = cp_change_mtu,
1817#endif
1818
1819#ifdef CONFIG_NET_POLL_CONTROLLER
1820        .ndo_poll_controller    = cp_poll_controller,
1821#endif
1822};
1823
1824static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1825{
1826        struct net_device *dev;
1827        struct cp_private *cp;
1828        int rc;
1829        void __iomem *regs;
1830        resource_size_t pciaddr;
1831        unsigned int addr_len, i, pci_using_dac;
1832
1833#ifndef MODULE
1834        static int version_printed;
1835        if (version_printed++ == 0)
1836                pr_info("%s", version);
1837#endif
1838
1839        if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1840            pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1841                dev_info(&pdev->dev,
1842                         "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1843                         pdev->vendor, pdev->device, pdev->revision);
1844                return -ENODEV;
1845        }
1846
1847        dev = alloc_etherdev(sizeof(struct cp_private));
1848        if (!dev)
1849                return -ENOMEM;
1850        SET_NETDEV_DEV(dev, &pdev->dev);
1851
1852        cp = netdev_priv(dev);
1853        cp->pdev = pdev;
1854        cp->dev = dev;
1855        cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1856        spin_lock_init (&cp->lock);
1857        cp->mii_if.dev = dev;
1858        cp->mii_if.mdio_read = mdio_read;
1859        cp->mii_if.mdio_write = mdio_write;
1860        cp->mii_if.phy_id = CP_INTERNAL_PHY;
1861        cp->mii_if.phy_id_mask = 0x1f;
1862        cp->mii_if.reg_num_mask = 0x1f;
1863        cp_set_rxbufsize(cp);
1864
1865        rc = pci_enable_device(pdev);
1866        if (rc)
1867                goto err_out_free;
1868
1869        rc = pci_set_mwi(pdev);
1870        if (rc)
1871                goto err_out_disable;
1872
1873        rc = pci_request_regions(pdev, DRV_NAME);
1874        if (rc)
1875                goto err_out_mwi;
1876
1877        pciaddr = pci_resource_start(pdev, 1);
1878        if (!pciaddr) {
1879                rc = -EIO;
1880                dev_err(&pdev->dev, "no MMIO resource\n");
1881                goto err_out_res;
1882        }
1883        if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1884                rc = -EIO;
1885                dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1886                       (unsigned long long)pci_resource_len(pdev, 1));
1887                goto err_out_res;
1888        }
1889
1890        /* Configure DMA attributes. */
1891        if ((sizeof(dma_addr_t) > 4) &&
1892            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1893            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1894                pci_using_dac = 1;
1895        } else {
1896                pci_using_dac = 0;
1897
1898                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1899                if (rc) {
1900                        dev_err(&pdev->dev,
1901                                "No usable DMA configuration, aborting\n");
1902                        goto err_out_res;
1903                }
1904                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1905                if (rc) {
1906                        dev_err(&pdev->dev,
1907                                "No usable consistent DMA configuration, aborting\n");
1908                        goto err_out_res;
1909                }
1910        }
1911
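            /* PCIDAC requests dual-address-cycle (64-bit) descriptor DMA and
             * is set only when the 64-bit mask was accepted above.
             */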
1912        cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1913                    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1914
1915        dev->features |= NETIF_F_RXCSUM;
1916        dev->hw_features |= NETIF_F_RXCSUM;
1917
1918        regs = ioremap(pciaddr, CP_REGS_SIZE);
1919        if (!regs) {
1920                rc = -EIO;
1921                dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1922                        (unsigned long long)pci_resource_len(pdev, 1),
1923                       (unsigned long long)pciaddr);
1924                goto err_out_res;
1925        }
1926        cp->regs = regs;
1927
1928        cp_stop_hw(cp);
1929
1930        /* read MAC address from EEPROM */
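            /* (three little-endian 16-bit words at EEPROM offsets 7..9) */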
1931        addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1932        for (i = 0; i < 3; i++)
1933                ((__le16 *) (dev->dev_addr))[i] =
1934                    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1935        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1936
1937        dev->netdev_ops = &cp_netdev_ops;
1938        netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1939        dev->ethtool_ops = &cp_ethtool_ops;
1940        dev->watchdog_timeo = TX_TIMEOUT;
1941
1942        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1943
1944        if (pci_using_dac)
1945                dev->features |= NETIF_F_HIGHDMA;
1946
1947        /* disabled by default until verified */
1948        dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1949                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1950        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1951                NETIF_F_HIGHDMA;
1952
1953        rc = register_netdev(dev);
1954        if (rc)
1955                goto err_out_iomap;
1956
1957        netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
1958                    regs, dev->dev_addr, pdev->irq);
1959
1960        pci_set_drvdata(pdev, dev);
1961
1962        /* enable busmastering and memory-write-invalidate */
1963        pci_set_master(pdev);
1964
1965        if (cp->wol_enabled)
1966                cp_set_d3_state (cp);
1967
1968        return 0;
1969
1970err_out_iomap:
1971        iounmap(regs);
1972err_out_res:
1973        pci_release_regions(pdev);
1974err_out_mwi:
1975        pci_clear_mwi(pdev);
1976err_out_disable:
1977        pci_disable_device(pdev);
1978err_out_free:
1979        free_netdev(dev);
1980        return rc;
1981}
1982
1983static void cp_remove_one (struct pci_dev *pdev)
1984{
1985        struct net_device *dev = pci_get_drvdata(pdev);
1986        struct cp_private *cp = netdev_priv(dev);
1987
1988        unregister_netdev(dev);
1989        iounmap(cp->regs);
1990        if (cp->wol_enabled)
1991                pci_set_power_state (pdev, PCI_D0);
1992        pci_release_regions(pdev);
1993        pci_clear_mwi(pdev);
1994        pci_disable_device(pdev);
1995        pci_set_drvdata(pdev, NULL);
1996        free_netdev(dev);
1997}
1998
1999#ifdef CONFIG_PM
2000static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2001{
2002        struct net_device *dev = pci_get_drvdata(pdev);
2003        struct cp_private *cp = netdev_priv(dev);
2004        unsigned long flags;
2005
2006        if (!netif_running(dev))
2007                return 0;
2008
2009        netif_device_detach (dev);
2010        netif_stop_queue (dev);
2011
2012        spin_lock_irqsave (&cp->lock, flags);
2013
2014        /* Disable Rx and Tx */
2015        cpw16 (IntrMask, 0);
2016        cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2017
2018        spin_unlock_irqrestore (&cp->lock, flags);
2019
2020        pci_save_state(pdev);
2021        pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2022        pci_set_power_state(pdev, pci_choose_state(pdev, state));
2023
2024        return 0;
2025}
2026
2027static int cp_resume (struct pci_dev *pdev)
2028{
2029        struct net_device *dev = pci_get_drvdata (pdev);
2030        struct cp_private *cp = netdev_priv(dev);
2031        unsigned long flags;
2032
2033        if (!netif_running(dev))
2034                return 0;
2035
2036        netif_device_attach (dev);
2037
2038        pci_set_power_state(pdev, PCI_D0);
2039        pci_restore_state(pdev);
2040        pci_enable_wake(pdev, PCI_D0, 0);
2041
2042        /* FIXME: bad things may happen if the Rx ring buffer is depleted */
2043        cp_init_rings_index (cp);
2044        cp_init_hw (cp);
2045        cp_enable_irq(cp);
2046        netif_start_queue (dev);
2047
2048        spin_lock_irqsave (&cp->lock, flags);
2049
2050        mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2051
2052        spin_unlock_irqrestore (&cp->lock, flags);
2053
2054        return 0;
2055}
2056#endif /* CONFIG_PM */
2057
2058static struct pci_driver cp_driver = {
2059        .name         = DRV_NAME,
2060        .id_table     = cp_pci_tbl,
2061        .probe        = cp_init_one,
2062        .remove       = cp_remove_one,
2063#ifdef CONFIG_PM
2064        .resume       = cp_resume,
2065        .suspend      = cp_suspend,
2066#endif
2067};
2068
2069static int __init cp_init (void)
2070{
2071#ifdef MODULE
2072        pr_info("%s", version);
2073#endif
2074        return pci_register_driver(&cp_driver);
2075}
2076
2077static void __exit cp_exit (void)
2078{
2079        pci_unregister_driver (&cp_driver);
2080}
2081
2082module_init(cp_init);
2083module_exit(cp_exit);
2084