/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
        Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

        Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
        Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
        Copyright 2001 Manfred Spraul                               [natsemi.c]
        Copyright 1999-2001 by Donald Becker.                       [natsemi.c]
        Written 1997-2001 by Donald Becker.                         [8139too.c]
        Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        See the file COPYING in this distribution for more information.

        Contributors:

                Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
                PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
                LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

        TODO:
        * Test Tx checksumming thoroughly

        Low priority TODO:
        * Complete reset on PciErr
        * Consider Rx interrupt mitigation using TimerIntr
        * Investigate using skb->priority with h/w VLAN priority
        * Investigate using High Priority Tx Queue with skb->priority
        * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
        * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
        * Implement Tx software interrupt mitigation via
          Tx descriptor bit
        * The real minimum of CP_MIN_MTU is 4 bytes.  However,
          for this to be supported, one must(?) turn on packet padding.
        * Support external MII transceivers (patch available)

        NOTES:
        * TX checksumming is considered experimental.  It is off by
          default, use ethtool to turn it on.

 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME                "8139cp"
#define DRV_VERSION             "1.3"
#define DRV_RELDATE             "Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* VLAN tagging feature enable/disable */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
        do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
        do { (tx_desc)->opts2 = 0; } while (0)
#endif

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define CP_DEF_MSG_ENABLE       (NETIF_MSG_DRV          | \
                                 NETIF_MSG_PROBE        | \
                                 NETIF_MSG_LINK)
#define CP_NUM_STATS            14      /* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE           64      /* size in bytes of DMA stats block */
#define CP_REGS_SIZE            (0xff + 1)
#define CP_REGS_VER             1               /* version 1 */
#define CP_RX_RING_SIZE         64
#define CP_TX_RING_SIZE         64
#define CP_RING_BYTES           \
                ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +   \
                 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +   \
                 CP_STATS_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)              (((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)                                      \
        (((CP)->tx_tail <= (CP)->tx_head) ?                     \
          (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :       \
          (CP)->tx_tail - (CP)->tx_head - 1)
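
/*
 * Worked example (illustrative): with CP_TX_RING_SIZE == 64,
 * TX_BUFFS_AVAIL() deliberately reports at most 63 free slots so that
 * tx_head can never wrap all the way around onto tx_tail.  E.g. with
 * tx_tail == 5 and tx_head == 10 it yields 5 + 63 - 10 == 58 free
 * descriptors; with head == tail (ring empty) it yields 63.
 */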

#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/
#define CP_INTERNAL_PHY         32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH          5       /* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST            4       /* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST            6       /* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH         256     /* Early Tx threshold, in bytes */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU              60      /* TODO: allow lower, but pad */
#define CP_MAX_MTU              4096

enum {
        /* NIC register offsets */
        MAC0            = 0x00, /* Ethernet hardware address. */
        MAR0            = 0x08, /* Multicast filter. */
        StatsAddr       = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
        TxRingAddr      = 0x20, /* 64-bit start addr of Tx ring */
        HiTxRingAddr    = 0x28, /* 64-bit start addr of high priority Tx ring */
        Cmd             = 0x37, /* Command register */
        IntrMask        = 0x3C, /* Interrupt mask */
        IntrStatus      = 0x3E, /* Interrupt status */
        TxConfig        = 0x40, /* Tx configuration */
        ChipVersion     = 0x43, /* 8-bit chip version, inside TxConfig */
        RxConfig        = 0x44, /* Rx configuration */
        RxMissed        = 0x4C, /* 24 bits valid, write clears */
        Cfg9346         = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
        Config1         = 0x52, /* Config1 */
        Config3         = 0x59, /* Config3 */
        Config4         = 0x5A, /* Config4 */
        MultiIntr       = 0x5C, /* Multiple interrupt select */
        BasicModeCtrl   = 0x62, /* MII BMCR */
        BasicModeStatus = 0x64, /* MII BMSR */
        NWayAdvert      = 0x66, /* MII ADVERTISE */
        NWayLPAR        = 0x68, /* MII LPA */
        NWayExpansion   = 0x6A, /* MII Expansion */
        Config5         = 0xD8, /* Config5 */
        TxPoll          = 0xD9, /* Tell chip to check Tx descriptors for work */
        RxMaxSize       = 0xDA, /* Max size of an Rx packet (8169 only) */
        CpCmd           = 0xE0, /* C+ Command register (C+ mode only) */
        IntrMitigate    = 0xE2, /* rx/tx interrupt mitigation control */
        RxRingAddr      = 0xE4, /* 64-bit start addr of Rx ring */
        TxThresh        = 0xEC, /* Early Tx threshold */
        OldRxBufAddr    = 0x30, /* DMA address of Rx ring buffer (C mode) */
        OldTSD0         = 0x10, /* DMA address of first Tx desc (C mode) */

        /* Tx and Rx status descriptors */
        DescOwn         = (1 << 31), /* Descriptor is owned by NIC */
        RingEnd         = (1 << 30), /* End of descriptor ring */
        FirstFrag       = (1 << 29), /* First segment of a packet */
        LastFrag        = (1 << 28), /* Final segment of a packet */
        LargeSend       = (1 << 27), /* TCP Large Send Offload (TSO) */
        MSSShift        = 16,        /* MSS value position */
        MSSMask         = 0xfff,     /* MSS value: 11 bits */
        TxError         = (1 << 23), /* Tx error summary */
        RxError         = (1 << 20), /* Rx error summary */
        IPCS            = (1 << 18), /* Calculate IP checksum */
        UDPCS           = (1 << 17), /* Calculate UDP/IP checksum */
        TCPCS           = (1 << 16), /* Calculate TCP/IP checksum */
        TxVlanTag       = (1 << 17), /* Add VLAN tag */
        RxVlanTagged    = (1 << 16), /* Rx VLAN tag available */
        IPFail          = (1 << 15), /* IP checksum failed */
        UDPFail         = (1 << 14), /* UDP/IP checksum failed */
        TCPFail         = (1 << 13), /* TCP/IP checksum failed */
        NormalTxPoll    = (1 << 6),  /* One or more normal Tx packets to send */
        PID1            = (1 << 17), /* 2 protocol id bits:  0==non-IP, */
        PID0            = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
        RxProtoTCP      = 1,
        RxProtoUDP      = 2,
        RxProtoIP       = 3,
        TxFIFOUnder     = (1 << 25), /* Tx FIFO underrun */
        TxOWC           = (1 << 22), /* Tx Out-of-window collision */
        TxLinkFail      = (1 << 21), /* Link failed during Tx of packet */
        TxMaxCol        = (1 << 20), /* Tx aborted due to excessive collisions */
        TxColCntShift   = 16,        /* Shift, to get 4-bit Tx collision cnt */
        TxColCntMask    = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
        RxErrFrame      = (1 << 27), /* Rx frame alignment error */
        RxMcast         = (1 << 26), /* Rx multicast packet rcv'd */
        RxErrCRC        = (1 << 18), /* Rx CRC error */
        RxErrRunt       = (1 << 19), /* Rx error, packet < 64 bytes */
        RxErrLong       = (1 << 21), /* Rx error, packet > 4096 bytes */
        RxErrFIFO       = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

        /* StatsAddr register */
        DumpStats       = (1 << 3),  /* Begin stats dump */

        /* RxConfig register */
        RxCfgFIFOShift  = 13,        /* Shift, to get Rx FIFO thresh value */
        RxCfgDMAShift   = 8,         /* Shift, to get Rx Max DMA value */
        AcceptErr       = 0x20,      /* Accept packets with CRC errors */
        AcceptRunt      = 0x10,      /* Accept runt (<64 bytes) packets */
        AcceptBroadcast = 0x08,      /* Accept broadcast packets */
        AcceptMulticast = 0x04,      /* Accept multicast packets */
        AcceptMyPhys    = 0x02,      /* Accept pkts with our MAC as dest */
        AcceptAllPhys   = 0x01,      /* Accept all pkts w/ physical dest */

        /* IntrMask / IntrStatus registers */
        PciErr          = (1 << 15), /* System error on the PCI bus */
        TimerIntr       = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
        LenChg          = (1 << 13), /* Cable length change */
        SWInt           = (1 << 8),  /* Software-requested interrupt */
        TxEmpty         = (1 << 7),  /* No Tx descriptors available */
        RxFIFOOvr       = (1 << 6),  /* Rx FIFO Overflow */
        LinkChg         = (1 << 5),  /* Packet underrun, or link change */
        RxEmpty         = (1 << 4),  /* No Rx descriptors available */
        TxErr           = (1 << 3),  /* Tx error */
        TxOK            = (1 << 2),  /* Tx packet sent */
        RxErr           = (1 << 1),  /* Rx error */
        RxOK            = (1 << 0),  /* Rx packet received */
        IntrResvd       = (1 << 10), /* reserved, according to RealTek engineers,
                                        but hardware likes to raise it */

        IntrAll         = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
                          RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
                          RxErr | RxOK | IntrResvd,

        /* C mode command register */
        CmdReset        = (1 << 4),  /* Enable to reset; self-clearing */
        RxOn            = (1 << 3),  /* Rx mode enable */
        TxOn            = (1 << 2),  /* Tx mode enable */

        /* C+ mode command register */
        RxVlanOn        = (1 << 6),  /* Rx VLAN de-tagging enable */
        RxChkSum        = (1 << 5),  /* Rx checksum offload enable */
        PCIDAC          = (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
        PCIMulRW        = (1 << 3),  /* Enable PCI read/write multiple */
        CpRxOn          = (1 << 1),  /* Rx mode enable */
        CpTxOn          = (1 << 0),  /* Tx mode enable */

        /* Cfg9346 EEPROM control register */
        Cfg9346_Lock    = 0x00,      /* Lock ConfigX/MII register access */
        Cfg9346_Unlock  = 0xC0,      /* Unlock ConfigX/MII register access */

        /* TxConfig register */
        IFG             = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
        TxDMAShift      = 8,         /* DMA burst value (0-7) is shift this many bits */

        /* Early Tx Threshold register */
        TxThreshMask    = 0x3f,      /* Mask bits 5-0 */
        TxThreshMax     = 2048,      /* Max early Tx threshold */

        /* Config1 register */
        DriverLoaded    = (1 << 5),  /* Software marker, driver is loaded */
        LWACT           = (1 << 4),  /* LWAKE active mode */
        PMEnable        = (1 << 0),  /* Enable various PM features of chip */

        /* Config3 register */
        PARMEnable      = (1 << 6),  /* Enable auto-loading of PHY parms */
        MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
        LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */

        /* Config4 register */
        LWPTN           = (1 << 1),  /* LWAKE Pattern */
        LWPME           = (1 << 4),  /* LANWAKE vs PMEB */

        /* Config5 register */
        BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
        MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
        UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
        LANWake         = (1 << 1),  /* Enable LANWake signal */
        PMEStatus       = (1 << 0),  /* PME status can be reset by PCI RST# */

        cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
        cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
        cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
          (RX_FIFO_THRESH << RxCfgFIFOShift) |
          (RX_DMA_BURST << RxCfgDMAShift);
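
/*
 * Illustrative arithmetic: with RX_FIFO_THRESH == 5 and RX_DMA_BURST == 4,
 * cp_rx_config evaluates to (5 << 13) | (4 << 8) == 0xa400, i.e. a
 * 512-byte Rx FIFO threshold and a 256-byte max Rx DMA burst (both in
 * the log_2(bytes)-4 encoding described above).
 */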

struct cp_desc {
        __le32          opts1;
        __le32          opts2;
        __le64          addr;
};

struct cp_dma_stats {
        __le64                  tx_ok;
        __le64                  rx_ok;
        __le64                  tx_err;
        __le32                  rx_err;
        __le16                  rx_fifo;
        __le16                  frame_align;
        __le32                  tx_ok_1col;
        __le32                  tx_ok_mcol;
        __le64                  rx_ok_phys;
        __le64                  rx_ok_bcast;
        __le32                  rx_ok_mcast;
        __le16                  tx_abort;
        __le16                  tx_underrun;
} __packed;

struct cp_extra_stats {
        unsigned long           rx_frags;
};

struct cp_private {
        void                    __iomem *regs;
        struct net_device       *dev;
        spinlock_t              lock;
        u32                     msg_enable;

        struct napi_struct      napi;

        struct pci_dev          *pdev;
        u32                     rx_config;
        u16                     cpcmd;

        struct cp_extra_stats   cp_stats;

        unsigned                rx_head         ____cacheline_aligned;
        unsigned                rx_tail;
        struct cp_desc          *rx_ring;
        struct sk_buff          *rx_skb[CP_RX_RING_SIZE];

        unsigned                tx_head         ____cacheline_aligned;
        unsigned                tx_tail;
        struct cp_desc          *tx_ring;
        struct sk_buff          *tx_skb[CP_TX_RING_SIZE];

        unsigned                rx_buf_sz;
        unsigned                wol_enabled : 1; /* Is Wake-on-LAN enabled? */

#if CP_VLAN_TAG_USED
        struct vlan_group       *vlgrp;
#endif
        dma_addr_t              ring_dma;

        struct mii_if_info      mii_if;
};

#define cpr8(reg)       readb(cp->regs + (reg))
#define cpr16(reg)      readw(cp->regs + (reg))
#define cpr32(reg)      readl(cp->regs + (reg))
#define cpw8(reg,val)   writeb((val), cp->regs + (reg))
#define cpw16(reg,val)  writew((val), cp->regs + (reg))
#define cpw32(reg,val)  writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {                    \
        writeb((val), cp->regs + (reg));        \
        readb(cp->regs + (reg));                \
        } while (0)
#define cpw16_f(reg,val) do {                   \
        writew((val), cp->regs + (reg));        \
        readw(cp->regs + (reg));                \
        } while (0)
#define cpw32_f(reg,val) do {                   \
        writel((val), cp->regs + (reg));        \
        readl(cp->regs + (reg));                \
        } while (0)
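
/*
 * The _f ("flush") variants force a posted PCI write to actually reach
 * the chip by reading the register straight back; the read cannot
 * complete until the write has.  Minimal usage sketch (illustrative
 * only, not driver code):
 *
 *      cpw16_f(IntrMask, 0);   // masked, and the chip has seen it
 *      cpw16(IntrStatus, ~0);  // plain write, may still be in flight
 */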


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);

static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
        { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
        { },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct {
        const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "tx_ok" },
        { "rx_ok" },
        { "tx_err" },
        { "rx_err" },
        { "rx_fifo" },
        { "frame_align" },
        { "tx_ok_1col" },
        { "tx_ok_mcol" },
        { "rx_ok_phys" },
        { "rx_ok_bcast" },
        { "rx_ok_mcast" },
        { "tx_abort" },
        { "tx_underrun" },
        { "rx_frags" },
};


#if CP_VLAN_TAG_USED
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        cp->vlgrp = grp;
        if (grp)
                cp->cpcmd |= RxVlanOn;
        else
                cp->cpcmd &= ~RxVlanOn;

        cpw16(CpCmd, cp->cpcmd);
        spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */

static inline void cp_set_rxbufsize (struct cp_private *cp)
{
        unsigned int mtu = cp->dev->mtu;

        if (mtu > ETH_DATA_LEN)
                /* MTU + ethernet header + FCS + optional VLAN tag */
                cp->rx_buf_sz = mtu + ETH_HLEN + 8;
        else
                cp->rx_buf_sz = PKT_BUF_SZ;
}
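
/*
 * Example sizing (illustrative): at the default MTU of 1500 the buffer
 * stays at PKT_BUF_SZ (1536 bytes); at an MTU of 4000 it becomes
 * 4000 + 14 (Ethernet header) + 8 (FCS plus optional VLAN tag) == 4022.
 */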

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
                              struct cp_desc *desc)
{
        skb->protocol = eth_type_trans (skb, cp->dev);

        cp->dev->stats.rx_packets++;
        cp->dev->stats.rx_bytes += skb->len;

#if CP_VLAN_TAG_USED
        if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
                vlan_hwaccel_receive_skb(skb, cp->vlgrp,
                                         swab16(le32_to_cpu(desc->opts2) & 0xffff));
        } else
#endif
                netif_receive_skb(skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
                            u32 status, u32 len)
{
        netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
                  rx_tail, status, len);
        cp->dev->stats.rx_errors++;
        if (status & RxErrFrame)
                cp->dev->stats.rx_frame_errors++;
        if (status & RxErrCRC)
                cp->dev->stats.rx_crc_errors++;
        if ((status & RxErrRunt) || (status & RxErrLong))
                cp->dev->stats.rx_length_errors++;
        if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
                cp->dev->stats.rx_length_errors++;
        if (status & RxErrFIFO)
                cp->dev->stats.rx_fifo_errors++;
}

static inline unsigned int cp_rx_csum_ok (u32 status)
{
        unsigned int protocol = (status >> 16) & 0x3;

        if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
            ((protocol == RxProtoUDP) && !(status & UDPFail)))
                return 1;
        else
                return 0;
}
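
/*
 * Note (illustrative): the two bits extracted above are PID1:PID0 from
 * the Rx status word (bits 17:16); the checksum is trusted only for
 * frames the chip classified as TCP (RxProtoTCP) or UDP (RxProtoUDP)
 * whose respective *Fail bit is clear.  Plain IP and non-IP frames
 * always fall through to software checksumming.
 */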

static int cp_rx_poll(struct napi_struct *napi, int budget)
{
        struct cp_private *cp = container_of(napi, struct cp_private, napi);
        struct net_device *dev = cp->dev;
        unsigned int rx_tail = cp->rx_tail;
        int rx;

rx_status_loop:
        rx = 0;
        cpw16(IntrStatus, cp_rx_intr_mask);

        while (1) {
                u32 status, len;
                dma_addr_t mapping;
                struct sk_buff *skb, *new_skb;
                struct cp_desc *desc;
                const unsigned buflen = cp->rx_buf_sz;

                skb = cp->rx_skb[rx_tail];
                BUG_ON(!skb);

                desc = &cp->rx_ring[rx_tail];
                status = le32_to_cpu(desc->opts1);
                if (status & DescOwn)
                        break;

                len = (status & 0x1fff) - 4;
                mapping = le64_to_cpu(desc->addr);

                if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
                        /* we don't support incoming fragmented frames.
                         * instead, we attempt to ensure that the
                         * pre-allocated RX skbs are properly sized such
                         * that RX fragments are never encountered
                         */
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        dev->stats.rx_dropped++;
                        cp->cp_stats.rx_frags++;
                        goto rx_next;
                }

                if (status & (RxError | RxErrFIFO)) {
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        goto rx_next;
                }

                netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
                          rx_tail, status, len);

                new_skb = netdev_alloc_skb_ip_align(dev, buflen);
                if (!new_skb) {
                        dev->stats.rx_dropped++;
                        goto rx_next;
                }

                dma_unmap_single(&cp->pdev->dev, mapping,
                                 buflen, PCI_DMA_FROMDEVICE);

                /* Handle checksum offloading for incoming packets. */
                if (cp_rx_csum_ok(status))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                skb_put(skb, len);

                mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
                                         PCI_DMA_FROMDEVICE);
                cp->rx_skb[rx_tail] = new_skb;

                cp_rx_skb(cp, skb, desc);
                rx++;

rx_next:
                cp->rx_ring[rx_tail].opts2 = 0;
                cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
                if (rx_tail == (CP_RX_RING_SIZE - 1))
                        desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
                                                  cp->rx_buf_sz);
                else
                        desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
                rx_tail = NEXT_RX(rx_tail);

                if (rx >= budget)
                        break;
        }

        cp->rx_tail = rx_tail;

        /* if we did not reach work limit, then we're done with
         * this round of polling
         */
        if (rx < budget) {
                unsigned long flags;

                if (cpr16(IntrStatus) & cp_rx_intr_mask)
                        goto rx_status_loop;

                spin_lock_irqsave(&cp->lock, flags);
                __napi_complete(napi);
                cpw16_f(IntrMask, cp_intr_mask);
                spin_unlock_irqrestore(&cp->lock, flags);
        }

        return rx;
}
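
/*
 * Design note (illustrative): cp_rx_poll() acks the Rx interrupt causes
 * before walking the ring, then re-reads IntrStatus before completing
 * NAPI.  If a new Rx event slipped in after the final DescOwn check,
 * the "goto rx_status_loop" above re-runs the poll instead of going
 * idle, which is what keeps the ack-first scheme from losing events.
 */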

static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct cp_private *cp;
        u16 status;

        if (unlikely(dev == NULL))
                return IRQ_NONE;
        cp = netdev_priv(dev);

        status = cpr16(IntrStatus);
        if (!status || (status == 0xFFFF))
                return IRQ_NONE;

        netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
                  status, cpr8(Cmd), cpr16(CpCmd));

        cpw16(IntrStatus, status & ~cp_rx_intr_mask);

        spin_lock(&cp->lock);

        /* close possible races with dev_close */
        if (unlikely(!netif_running(dev))) {
                cpw16(IntrMask, 0);
                spin_unlock(&cp->lock);
                return IRQ_HANDLED;
        }

        if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
                if (napi_schedule_prep(&cp->napi)) {
                        cpw16_f(IntrMask, cp_norx_intr_mask);
                        __napi_schedule(&cp->napi);
                }

        if (status & (TxOK | TxErr | TxEmpty | SWInt))
                cp_tx(cp);
        if (status & LinkChg)
                mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

        spin_unlock(&cp->lock);

        if (status & PciErr) {
                u16 pci_status;

                pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
                pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
                netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
                           status, pci_status);

                /* TODO: reset hardware */
        }

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        cp_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
        unsigned tx_head = cp->tx_head;
        unsigned tx_tail = cp->tx_tail;

        while (tx_tail != tx_head) {
                struct cp_desc *txd = cp->tx_ring + tx_tail;
                struct sk_buff *skb;
                u32 status;

                rmb();
                status = le32_to_cpu(txd->opts1);
                if (status & DescOwn)
                        break;

                skb = cp->tx_skb[tx_tail];
                BUG_ON(!skb);

                dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                                 le32_to_cpu(txd->opts1) & 0xffff,
                                 PCI_DMA_TODEVICE);

                if (status & LastFrag) {
                        if (status & (TxError | TxFIFOUnder)) {
                                netif_dbg(cp, tx_err, cp->dev,
                                          "tx err, status 0x%x\n", status);
                                cp->dev->stats.tx_errors++;
                                if (status & TxOWC)
                                        cp->dev->stats.tx_window_errors++;
                                if (status & TxMaxCol)
                                        cp->dev->stats.tx_aborted_errors++;
                                if (status & TxLinkFail)
                                        cp->dev->stats.tx_carrier_errors++;
                                if (status & TxFIFOUnder)
                                        cp->dev->stats.tx_fifo_errors++;
                        } else {
                                cp->dev->stats.collisions +=
                                        ((status >> TxColCntShift) & TxColCntMask);
                                cp->dev->stats.tx_packets++;
                                cp->dev->stats.tx_bytes += skb->len;
                                netif_dbg(cp, tx_done, cp->dev,
                                          "tx done, slot %d\n", tx_tail);
                        }
                        dev_kfree_skb_irq(skb);
                }

                cp->tx_skb[tx_tail] = NULL;

                tx_tail = NEXT_TX(tx_tail);
        }

        cp->tx_tail = tx_tail;

        if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
                netif_wake_queue(cp->dev);
}
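
/*
 * Worked example (illustrative): a completed LastFrag descriptor whose
 * status is (FirstFrag | LastFrag | (3 << TxColCntShift)) reports a
 * clean transmit after 3 collisions; (status >> TxColCntShift) &
 * TxColCntMask masks off the 4-bit collision counter that is added to
 * dev->stats.collisions above.
 */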

static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned entry;
        u32 eor, flags;
        unsigned long intr_flags;
#if CP_VLAN_TAG_USED
        u32 vlan_tag = 0;
#endif
        int mss = 0;

        spin_lock_irqsave(&cp->lock, intr_flags);

        /* This is a hard error, log it. */
        if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&cp->lock, intr_flags);
                netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }

#if CP_VLAN_TAG_USED
        if (vlan_tx_tag_present(skb))
                vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
#endif

        entry = cp->tx_head;
        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
        if (dev->features & NETIF_F_TSO)
                mss = skb_shinfo(skb)->gso_size;

        if (skb_shinfo(skb)->nr_frags == 0) {
                struct cp_desc *txd = &cp->tx_ring[entry];
                u32 len;
                dma_addr_t mapping;

                len = skb->len;
                mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
                CP_VLAN_TX_TAG(txd, vlan_tag);
                txd->addr = cpu_to_le64(mapping);
                wmb();

                flags = eor | len | DescOwn | FirstFrag | LastFrag;

                if (mss)
                        flags |= LargeSend | ((mss & MSSMask) << MSSShift);
                else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        const struct iphdr *ip = ip_hdr(skb);
                        if (ip->protocol == IPPROTO_TCP)
                                flags |= IPCS | TCPCS;
                        else if (ip->protocol == IPPROTO_UDP)
                                flags |= IPCS | UDPCS;
                        else
                                WARN_ON(1);     /* we need a WARN() */
                }

                txd->opts1 = cpu_to_le32(flags);
                wmb();

                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);
        } else {
                struct cp_desc *txd;
                u32 first_len, first_eor;
                dma_addr_t first_mapping;
                int frag, first_entry = entry;
                const struct iphdr *ip = ip_hdr(skb);

                /* We must give this initial chunk to the device last.
                 * Otherwise we could race with the device.
                 */
                first_eor = eor;
                first_len = skb_headlen(skb);
                first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                               first_len, PCI_DMA_TODEVICE);
                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);

                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                        skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
                        u32 len;
                        u32 ctrl;
                        dma_addr_t mapping;

                        len = this_frag->size;
                        mapping = dma_map_single(&cp->pdev->dev,
                                                 ((void *) page_address(this_frag->page) +
                                                  this_frag->page_offset),
                                                 len, PCI_DMA_TODEVICE);
                        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

                        ctrl = eor | len | DescOwn;

                        if (mss)
                                ctrl |= LargeSend |
                                        ((mss & MSSMask) << MSSShift);
                        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                if (ip->protocol == IPPROTO_TCP)
                                        ctrl |= IPCS | TCPCS;
                                else if (ip->protocol == IPPROTO_UDP)
                                        ctrl |= IPCS | UDPCS;
                                else
                                        BUG();
                        }

                        if (frag == skb_shinfo(skb)->nr_frags - 1)
                                ctrl |= LastFrag;

                        txd = &cp->tx_ring[entry];
                        CP_VLAN_TX_TAG(txd, vlan_tag);
                        txd->addr = cpu_to_le64(mapping);
                        wmb();

                        txd->opts1 = cpu_to_le32(ctrl);
                        wmb();

                        cp->tx_skb[entry] = skb;
                        entry = NEXT_TX(entry);
                }

                txd = &cp->tx_ring[first_entry];
                CP_VLAN_TX_TAG(txd, vlan_tag);
                txd->addr = cpu_to_le64(first_mapping);
                wmb();

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        if (ip->protocol == IPPROTO_TCP)
                                txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                         FirstFrag | DescOwn |
                                                         IPCS | TCPCS);
                        else if (ip->protocol == IPPROTO_UDP)
                                txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                         FirstFrag | DescOwn |
                                                         IPCS | UDPCS);
                        else
                                BUG();
                } else
                        txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                 FirstFrag | DescOwn);
                wmb();
        }
        cp->tx_head = entry;
        netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
                  entry, skb->len);
        if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
                netif_stop_queue(dev);

        spin_unlock_irqrestore(&cp->lock, intr_flags);

        cpw8(TxPoll, NormalTxPoll);

        return NETDEV_TX_OK;
}

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        u32 mc_filter[2];       /* Multicast hash filter */
        int rx_mode;
        u32 tmp;

        /* Note: do not reorder, GCC is clever about common statements. */
        if (dev->flags & IFF_PROMISC) {
                /* Unconditionally log net taps. */
                rx_mode =
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter perfectly -- accept all multicasts. */
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else {
                struct netdev_hw_addr *ha;
                rx_mode = AcceptBroadcast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                        rx_mode |= AcceptMulticast;
                }
        }

        /* We can safely update without stopping the chip. */
        tmp = cp_rx_config | rx_mode;
        if (cp->rx_config != tmp) {
                cpw32_f (RxConfig, tmp);
                cp->rx_config = tmp;
        }
        cpw32_f (MAR0 + 0, mc_filter[0]);
        cpw32_f (MAR0 + 4, mc_filter[1]);
}
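
/*
 * Worked example (illustrative): for each multicast address, ether_crc()
 * over its 6 bytes is taken and the top six CRC bits (crc >> 26) select
 * one of the 64 hash positions; bit_nr >> 5 then picks MAR0 or MAR4 and
 * 1 << (bit_nr & 31) sets the bit inside that 32-bit filter register.
 */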

static void cp_set_rx_mode (struct net_device *dev)
{
        unsigned long flags;
        struct cp_private *cp = netdev_priv(dev);

        spin_lock_irqsave (&cp->lock, flags);
        __cp_set_rx_mode(dev);
        spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
        /* only lower 24 bits valid; write any value to clear */
        cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
        cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        /* The chip only needs to report frames it silently dropped. */
        spin_lock_irqsave(&cp->lock, flags);
        if (netif_running(dev) && netif_device_present(dev))
                __cp_get_stats(cp);
        spin_unlock_irqrestore(&cp->lock, flags);

        return &dev->stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
        cpw16(IntrStatus, ~(cpr16(IntrStatus)));
        cpw16_f(IntrMask, 0);
        cpw8(Cmd, 0);
        cpw16_f(CpCmd, 0);
        cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
}

static void cp_reset_hw (struct cp_private *cp)
{
        unsigned work = 1000;

        cpw8(Cmd, CmdReset);

        while (work--) {
                if (!(cpr8(Cmd) & CmdReset))
                        return;

                schedule_timeout_uninterruptible(10);
        }

        netdev_err(cp->dev, "hardware reset timeout\n");
}

static inline void cp_start_hw (struct cp_private *cp)
{
        cpw16(CpCmd, cp->cpcmd);
        cpw8(Cmd, RxOn | TxOn);
}

static void cp_init_hw (struct cp_private *cp)
{
        struct net_device *dev = cp->dev;
        dma_addr_t ring_dma;

        cp_reset_hw(cp);

        cpw8_f (Cfg9346, Cfg9346_Unlock);

        /* Restore our idea of the MAC address. */
        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

        __cp_set_rx_mode(dev);
        cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

        cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
        /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
        cpw8(Config3, PARMEnable);
        cp->wol_enabled = 0;

        cpw8(Config5, cpr8(Config5) & PMEStatus);

        cpw32_f(HiTxRingAddr, 0);
        cpw32_f(HiTxRingAddr + 4, 0);

        ring_dma = cp->ring_dma;
        cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

        ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
        cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
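
        /*
         * Note (illustrative): the high dword is computed as
         * (ring_dma >> 16) >> 16 rather than ring_dma >> 32 so the
         * shift stays well-defined when dma_addr_t is only 32 bits
         * wide, in which case the upper half is simply zero.
         */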

        cpw16(MultiIntr, 0);

        cpw16_f(IntrMask, cp_intr_mask);

        cpw8_f(Cfg9346, Cfg9346_Lock);
}

static int cp_refill_rx(struct cp_private *cp)
{
        struct net_device *dev = cp->dev;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                dma_addr_t mapping;

                skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
                if (!skb)
                        goto err_out;

                mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                cp->rx_skb[i] = skb;

                cp->rx_ring[i].opts2 = 0;
                cp->rx_ring[i].addr = cpu_to_le64(mapping);
                if (i == (CP_RX_RING_SIZE - 1))
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
                else
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | cp->rx_buf_sz);
        }

        return 0;

err_out:
        cp_clean_rings(cp);
        return -ENOMEM;
}
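
/*
 * Design note (illustrative): cp_refill_rx() hands each buffer to the
 * NIC by setting DescOwn in opts1, with the buffer size in the low bits
 * bounding the DMA; only the final descriptor carries RingEnd, which is
 * what makes the chip wrap back to entry 0.
 */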

static void cp_init_rings_index (struct cp_private *cp)
{
        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
        cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

        cp_init_rings_index(cp);

        return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
        void *mem;

        mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
                                 &cp->ring_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        cp->rx_ring = mem;
        cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

        return cp_init_rings(cp);
}

static void cp_clean_rings (struct cp_private *cp)
{
        struct cp_desc *desc;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                if (cp->rx_skb[i]) {
                        desc = cp->rx_ring + i;
                        dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(cp->rx_skb[i]);
                }
        }

        for (i = 0; i < CP_TX_RING_SIZE; i++) {
                if (cp->tx_skb[i]) {
                        struct sk_buff *skb = cp->tx_skb[i];

                        desc = cp->tx_ring + i;
                        dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
                                         le32_to_cpu(desc->opts1) & 0xffff,
                                         PCI_DMA_TODEVICE);
                        if (le32_to_cpu(desc->opts1) & LastFrag)
                                dev_kfree_skb(skb);
                        cp->dev->stats.tx_dropped++;
                }
        }

        memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

        memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
        memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
        cp_clean_rings(cp);
        dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
                          cp->ring_dma);
        cp->rx_ring = NULL;
        cp->tx_ring = NULL;
}

static int cp_open (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;

        netif_dbg(cp, ifup, dev, "enabling interface\n");

        rc = cp_alloc_rings(cp);
        if (rc)
                return rc;

        napi_enable(&cp->napi);

        cp_init_hw(cp);

        rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                goto err_out_hw;

        netif_carrier_off(dev);
        mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
        netif_start_queue(dev);

        return 0;

err_out_hw:
        napi_disable(&cp->napi);
        cp_stop_hw(cp);
        cp_free_rings(cp);
        return rc;
}

static int cp_close (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        napi_disable(&cp->napi);

        netif_dbg(cp, ifdown, dev, "disabling interface\n");

        spin_lock_irqsave(&cp->lock, flags);

        netif_stop_queue(dev);
        netif_carrier_off(dev);

        cp_stop_hw(cp);

        spin_unlock_irqrestore(&cp->lock, flags);

        free_irq(dev->irq, dev);

        cp_free_rings(cp);
        return 0;
}

static void cp_tx_timeout(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
        int rc;

        netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
                    cpr8(Cmd), cpr16(CpCmd),
                    cpr16(IntrStatus), cpr16(IntrMask));

        spin_lock_irqsave(&cp->lock, flags);

        cp_stop_hw(cp);
        cp_clean_rings(cp);
        rc = cp_init_rings(cp);
        cp_start_hw(cp);

        netif_wake_queue(dev);

        spin_unlock_irqrestore(&cp->lock, flags);
}

#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        /* check for invalid MTU, according to hardware limits */
        if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
                return -EINVAL;

        /* if network interface not up, no need for complexity */
        if (!netif_running(dev)) {
                dev->mtu = new_mtu;
                cp_set_rxbufsize(cp);   /* set new rx buf size */
                return 0;
        }

        spin_lock_irqsave(&cp->lock, flags);

        cp_stop_hw(cp);                 /* stop h/w and free rings */
        cp_clean_rings(cp);

        dev->mtu = new_mtu;
        cp_set_rxbufsize(cp);           /* set new rx buf size */

        rc = cp_init_rings(cp);         /* realloc and restart h/w */
        cp_start_hw(cp);

        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}
#endif /* BROKEN */

static const char mii_2_8139_map[8] = {
        BasicModeCtrl,
        BasicModeStatus,
        0,
        0,
        NWayAdvert,
        NWayLPAR,
        NWayExpansion,
        0
};
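
/*
 * Illustrative mapping: the internal PHY has no real MDIO bus, so
 * generic MII register numbers are translated to 8139C+ register
 * offsets, e.g. MII_BMCR (0) -> BasicModeCtrl (0x62) and MII_ADVERTISE
 * (4) -> NWayAdvert (0x66); the zero entries (registers 2, 3 and 7)
 * simply read back as 0 in mdio_read() below.
 */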
1272
1273static int mdio_read(struct net_device *dev, int phy_id, int location)
1274{
1275        struct cp_private *cp = netdev_priv(dev);
1276
1277        return location < 8 && mii_2_8139_map[location] ?
1278               readw(cp->regs + mii_2_8139_map[location]) : 0;
1279}
1280
1281
1282static void mdio_write(struct net_device *dev, int phy_id, int location,
1283                       int value)
1284{
1285        struct cp_private *cp = netdev_priv(dev);
1286
1287        if (location == 0) {
1288                cpw8(Cfg9346, Cfg9346_Unlock);
1289                cpw16(BasicModeCtrl, value);
1290                cpw8(Cfg9346, Cfg9346_Lock);
1291        } else if (location < 8 && mii_2_8139_map[location])
1292                cpw16(mii_2_8139_map[location], value);
1293}
1294
1295/* Set the ethtool Wake-on-LAN settings */
1296static int netdev_set_wol (struct cp_private *cp,
1297                           const struct ethtool_wolinfo *wol)
1298{
1299        u8 options;
1300
1301        options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1302        /* If WOL is being disabled, no need for complexity */
1303        if (wol->wolopts) {
1304                if (wol->wolopts & WAKE_PHY)    options |= LinkUp;
1305                if (wol->wolopts & WAKE_MAGIC)  options |= MagicPacket;
1306        }
1307
1308        cpw8 (Cfg9346, Cfg9346_Unlock);
1309        cpw8 (Config3, options);
1310        cpw8 (Cfg9346, Cfg9346_Lock);
1311
1312        options = 0; /* Paranoia setting */
1313        options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1314        /* If WOL is being disabled, no need for complexity */
1315        if (wol->wolopts) {
1316                if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1317                if (wol->wolopts & WAKE_BCAST)  options |= BWF;
1318                if (wol->wolopts & WAKE_MCAST)  options |= MWF;
1319        }
1320
1321        cpw8 (Config5, options);
1322
1323        cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1324
1325        return 0;
1326}
1327
1328/* Get the ethtool Wake-on-LAN settings */
1329static void netdev_get_wol (struct cp_private *cp,
1330                     struct ethtool_wolinfo *wol)
1331{
1332        u8 options;
1333
1334        wol->wolopts   = 0; /* Start from scratch */
1335        wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1336                         WAKE_MCAST | WAKE_UCAST;
1337        /* We don't need to go on if WOL is disabled */
1338        if (!cp->wol_enabled) return;
1339
1340        options        = cpr8 (Config3);
1341        if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1342        if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1343
1344        options        = 0; /* Paranoia setting */
1345        options        = cpr8 (Config5);
1346        if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1347        if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1348        if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1349}
1350
1351static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1352{
1353        struct cp_private *cp = netdev_priv(dev);
1354
1355        strcpy (info->driver, DRV_NAME);
1356        strcpy (info->version, DRV_VERSION);
1357        strcpy (info->bus_info, pci_name(cp->pdev));
1358}

static int cp_get_regs_len(struct net_device *dev)
{
        return CP_REGS_SIZE;
}

static int cp_get_sset_count (struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return CP_NUM_STATS;
        default:
                return -EOPNOTSUPP;
        }
}

static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        rc = mii_ethtool_gset(&cp->mii_if, cmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}

static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        rc = mii_ethtool_sset(&cp->mii_if, cmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return mii_nway_restart(&cp->mii_if);
}

static u32 cp_get_msglevel(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
        struct cp_private *cp = netdev_priv(dev);
        cp->msg_enable = value;
}

static u32 cp_get_rx_csum(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
}

static int cp_set_rx_csum(struct net_device *dev, u32 data)
{
        struct cp_private *cp = netdev_priv(dev);
        u16 cmd = cp->cpcmd, newcmd;

        newcmd = cmd;

        if (data)
                newcmd |= RxChkSum;
        else
                newcmd &= ~RxChkSum;

        if (newcmd != cmd) {
                unsigned long flags;

                spin_lock_irqsave(&cp->lock, flags);
                cp->cpcmd = newcmd;
                cpw16_f(CpCmd, newcmd);
                spin_unlock_irqrestore(&cp->lock, flags);
        }

        return 0;
}

static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                        void *p)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        if (regs->len < CP_REGS_SIZE)
                return /* -EINVAL */;

        regs->version = CP_REGS_VER;

        spin_lock_irqsave(&cp->lock, flags);
        memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
        spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave (&cp->lock, flags);
        netdev_get_wol (cp, wol);
        spin_unlock_irqrestore (&cp->lock, flags);
}

static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
        int rc;

        spin_lock_irqsave (&cp->lock, flags);
        rc = netdev_set_wol (cp, wol);
        spin_unlock_irqrestore (&cp->lock, flags);

        return rc;
}

static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        default:
                BUG();
                break;
        }
}

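/*
 * Hardware statistics are fetched by pointing StatsAddr at a
 * DMA-coherent buffer and setting the DumpStats bit; the chip clears
 * DumpStats once it has written the counters back.  The poll below
 * allows roughly 10 ms (1000 * 10 us) for that to happen; if the dump
 * times out, the buffer may still hold stale values.
 */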
static void cp_get_ethtool_stats (struct net_device *dev,
                                  struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct cp_private *cp = netdev_priv(dev);
        struct cp_dma_stats *nic_stats;
        dma_addr_t dma;
        int i;

        nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
                                       &dma, GFP_KERNEL);
        if (!nic_stats)
                return;

        /* begin NIC statistics dump */
        cpw32(StatsAddr + 4, (u64)dma >> 32);
        cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
        cpr32(StatsAddr);

        for (i = 0; i < 1000; i++) {
                if ((cpr32(StatsAddr) & DumpStats) == 0)
                        break;
                udelay(10);
        }
        cpw32(StatsAddr, 0);
        cpw32(StatsAddr + 4, 0);
        cpr32(StatsAddr);

        i = 0;
        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
        tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
        tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
        tmp_stats[i++] = cp->cp_stats.rx_frags;
        BUG_ON(i != CP_NUM_STATS);

        dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
}

static const struct ethtool_ops cp_ethtool_ops = {
        .get_drvinfo            = cp_get_drvinfo,
        .get_regs_len           = cp_get_regs_len,
        .get_sset_count         = cp_get_sset_count,
        .get_settings           = cp_get_settings,
        .set_settings           = cp_set_settings,
        .nway_reset             = cp_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_msglevel           = cp_get_msglevel,
        .set_msglevel           = cp_set_msglevel,
        .get_rx_csum            = cp_get_rx_csum,
        .set_rx_csum            = cp_set_rx_csum,
        .set_tx_csum            = ethtool_op_set_tx_csum, /* local! */
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = ethtool_op_set_tso,
        .get_regs               = cp_get_regs,
        .get_wol                = cp_get_wol,
        .set_wol                = cp_set_wol,
        .get_strings            = cp_get_strings,
        .get_ethtool_stats      = cp_get_ethtool_stats,
        .get_eeprom_len         = cp_get_eeprom_len,
        .get_eeprom             = cp_get_eeprom,
        .set_eeprom             = cp_set_eeprom,
};

static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        if (!netif_running(dev))
                return -EINVAL;

        spin_lock_irqsave(&cp->lock, flags);
        rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
        spin_unlock_irqrestore(&cp->lock, flags);
        return rc;
}

static int cp_set_mac_address(struct net_device *dev, void *p)
{
        struct cp_private *cp = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&cp->lock);

        cpw8_f(Cfg9346, Cfg9346_Unlock);
        cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
        cpw8_f(Cfg9346, Cfg9346_Lock);

        spin_unlock_irq(&cp->lock);

        return 0;
}
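
/*
 * The MAC0 bytes, like the Config registers touched in the WOL code
 * above, only accept writes while Cfg9346 is in the unlocked
 * (config-write) state, hence the unlock/lock bracket around the two
 * 32-bit stores.
 */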

/* Serial EEPROM section. */

/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
#define EE_CS           0x08    /* EEPROM chip select. */
#define EE_DATA_WRITE   0x02    /* EEPROM chip data in. */
#define EE_WRITE_0      0x00
#define EE_WRITE_1      0x02
#define EE_DATA_READ    0x01    /* EEPROM chip data out. */
#define EE_ENB          (0x80 | EE_CS)

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
 */

#define eeprom_delay()  readl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD   (4)
#define EE_WRITE_CMD    (5)
#define EE_READ_CMD     (6)
#define EE_ERASE_CMD    (7)

#define EE_EWDS_ADDR    (0)
#define EE_WRAL_ADDR    (1)
#define EE_ERAL_ADDR    (2)
#define EE_EWEN_ADDR    (3)

#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
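
/*
 * The part behaves like a 93C46/93C56 microwire serial EEPROM,
 * bit-banged through Cfg9346: raise chip select, clock out a command
 * MSB first, then clock 16 data bits in or out.  The 3-bit command
 * values above already include the always-set start bit, so e.g.
 * EE_READ_CMD (6) is start + opcode 10.  The EWEN/EWDS/ERAL/WRAL
 * operations all use EE_EXTEND_CMD (start + opcode 00) and encode the
 * sub-operation in the two uppermost address bits, which is what
 * eeprom_extend_cmd() assembles.
 */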

static void eeprom_cmd_start(void __iomem *ee_addr)
{
        writeb (EE_ENB & ~EE_CS, ee_addr);
        writeb (EE_ENB, ee_addr);
        eeprom_delay ();
}

static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
        int i;

        /* Shift the command bits out. */
        for (i = cmd_len - 1; i >= 0; i--) {
                int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
                writeb (EE_ENB | dataval, ee_addr);
                eeprom_delay ();
                writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
                eeprom_delay ();
        }
        writeb (EE_ENB, ee_addr);
        eeprom_delay ();
}

static void eeprom_cmd_end(void __iomem *ee_addr)
{
        writeb (~EE_CS, ee_addr);
        eeprom_delay ();
}

static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
                              int addr_len)
{
        int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

        eeprom_cmd_start(ee_addr);
        eeprom_cmd(ee_addr, cmd, 3 + addr_len);
        eeprom_cmd_end(ee_addr);
}

static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
        int i;
        u16 retval = 0;
        void __iomem *ee_addr = ioaddr + Cfg9346;
        int read_cmd = location | (EE_READ_CMD << addr_len);

        eeprom_cmd_start(ee_addr);
        eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

        for (i = 16; i > 0; i--) {
                writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
                eeprom_delay ();
                retval = (retval << 1) |
                         ((readb (ee_addr) & EE_DATA_READ) ? 1 : 0);
                writeb (EE_ENB, ee_addr);
                eeprom_delay ();
        }

        eeprom_cmd_end(ee_addr);

        return retval;
}
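
/*
 * Example only (not used by the driver): a minimal sketch of how the
 * station address is laid out in the EEPROM, mirroring the probe-time
 * read in cp_init_one() below.  Words 7..9 hold the six MAC bytes,
 * low byte first.  The helper name is ours, not the driver's.
 */
static inline void cp_example_read_mac(void __iomem *regs, u8 *mac,
                                       int addr_len)
{
        int i;

        for (i = 0; i < 3; i++) {
                u16 w = read_eeprom(regs, i + 7, addr_len);

                mac[i * 2]     = w & 0xff;      /* low byte first */
                mac[i * 2 + 1] = w >> 8;
        }
}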

static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
                         int addr_len)
{
        int i;
        void __iomem *ee_addr = ioaddr + Cfg9346;
        int write_cmd = location | (EE_WRITE_CMD << addr_len);

        eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);

        eeprom_cmd_start(ee_addr);
        eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
        eeprom_cmd(ee_addr, val, 16);
        eeprom_cmd_end(ee_addr);

        eeprom_cmd_start(ee_addr);
        for (i = 0; i < 20000; i++)
                if (readb(ee_addr) & EE_DATA_READ)
                        break;
        eeprom_cmd_end(ee_addr);

        eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}
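
/*
 * Writes are bracketed by EWEN (erase/write enable) and EWDS
 * (erase/write disable) so stray bus cycles cannot corrupt the part,
 * and the busy-poll above watches EE_DATA_READ, which the EEPROM holds
 * low until its internal write cycle completes.
 */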

static int cp_get_eeprom_len(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        int size;

        spin_lock_irq(&cp->lock);
        size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
        spin_unlock_irq(&cp->lock);

        return size;
}
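
/*
 * Word 0 of a programmed EEPROM holds the 0x8129 chip signature.  If
 * that signature reads back correctly with 8 address bits, a larger
 * 93C56/93C66-type part (256 bytes) is fitted; otherwise the common
 * 93C46 (128 bytes, 6 address bits) is assumed.  The same test picks
 * addr_len everywhere in this file.
 */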

static int cp_get_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned int addr_len;
        u16 val;
        u32 offset = eeprom->offset >> 1;
        u32 len = eeprom->len;
        u32 i = 0;

        eeprom->magic = CP_EEPROM_MAGIC;

        spin_lock_irq(&cp->lock);

        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

        if (eeprom->offset & 1) {
                val = read_eeprom(cp->regs, offset, addr_len);
                data[i++] = (u8)(val >> 8);
                offset++;
        }

        while (i < len - 1) {
                val = read_eeprom(cp->regs, offset, addr_len);
                data[i++] = (u8)val;
                data[i++] = (u8)(val >> 8);
                offset++;
        }

        if (i < len) {
                val = read_eeprom(cp->regs, offset, addr_len);
                data[i] = (u8)val;
        }

        spin_unlock_irq(&cp->lock);
        return 0;
}
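
/*
 * Userspace reaches these through the standard ethtool EEPROM ioctls,
 * e.g. "ethtool -e eth0" to dump the contents, or "ethtool -E eth0
 * magic 0x8139 offset N value V" to patch a byte (interface name and
 * offset are examples; the magic must match CP_EEPROM_MAGIC).
 */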

static int cp_set_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned int addr_len;
        u16 val;
        u32 offset = eeprom->offset >> 1;
        u32 len = eeprom->len;
        u32 i = 0;

        if (eeprom->magic != CP_EEPROM_MAGIC)
                return -EINVAL;

        spin_lock_irq(&cp->lock);

        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;

        if (eeprom->offset & 1) {
                val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
                val |= (u16)data[i++] << 8;
                write_eeprom(cp->regs, offset, val, addr_len);
                offset++;
        }

        while (i < len - 1) {
                val = (u16)data[i++];
                val |= (u16)data[i++] << 8;
                write_eeprom(cp->regs, offset, val, addr_len);
                offset++;
        }

        if (i < len) {
                val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
                val |= (u16)data[i];
                write_eeprom(cp->regs, offset, val, addr_len);
        }

        spin_unlock_irq(&cp->lock);
        return 0;
}
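
/*
 * Because the EEPROM is word-addressed, a write with an odd offset or
 * length reads back the untouched half of the boundary word and merges
 * it before writing, so single-byte updates never clobber a neighbour.
 */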

/* Put the board into the D3hot low-power state and wait for the WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
        pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
        pci_set_power_state (cp->pdev, PCI_D3hot);
}

static const struct net_device_ops cp_netdev_ops = {
        .ndo_open               = cp_open,
        .ndo_stop               = cp_close,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = cp_set_mac_address,
        .ndo_set_multicast_list = cp_set_rx_mode,
        .ndo_get_stats          = cp_get_stats,
        .ndo_do_ioctl           = cp_ioctl,
        .ndo_start_xmit         = cp_start_xmit,
        .ndo_tx_timeout         = cp_tx_timeout,
#if CP_VLAN_TAG_USED
        .ndo_vlan_rx_register   = cp_vlan_rx_register,
#endif
#ifdef BROKEN
        .ndo_change_mtu         = cp_change_mtu,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cp_poll_controller,
#endif
};

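/*
 * Probe acquires resources in a fixed order (PCI enable, MWI, regions,
 * DMA masks, ioremap, register_netdev), and the error labels at the
 * bottom release them in exactly the reverse order, each goto target
 * undoing one acquisition step.
 */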
static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct cp_private *cp;
        int rc;
        void __iomem *regs;
        resource_size_t pciaddr;
        unsigned int addr_len, i, pci_using_dac;

#ifndef MODULE
        static int version_printed;
        if (version_printed++ == 0)
                pr_info("%s", version);
#endif

        if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
            pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
                dev_info(&pdev->dev,
                         "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
                         pdev->vendor, pdev->device, pdev->revision);
                return -ENODEV;
        }

        dev = alloc_etherdev(sizeof(struct cp_private));
        if (!dev)
                return -ENOMEM;
        SET_NETDEV_DEV(dev, &pdev->dev);

        cp = netdev_priv(dev);
        cp->pdev = pdev;
        cp->dev = dev;
        cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
        spin_lock_init (&cp->lock);
        cp->mii_if.dev = dev;
        cp->mii_if.mdio_read = mdio_read;
        cp->mii_if.mdio_write = mdio_write;
        cp->mii_if.phy_id = CP_INTERNAL_PHY;
        cp->mii_if.phy_id_mask = 0x1f;
        cp->mii_if.reg_num_mask = 0x1f;
        cp_set_rxbufsize(cp);

        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out_free;

        rc = pci_set_mwi(pdev);
        if (rc)
                goto err_out_disable;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out_mwi;

        pciaddr = pci_resource_start(pdev, 1);
        if (!pciaddr) {
                rc = -EIO;
                dev_err(&pdev->dev, "no MMIO resource\n");
                goto err_out_res;
        }
        if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
                rc = -EIO;
                dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
                        (unsigned long long)pci_resource_len(pdev, 1));
                goto err_out_res;
        }

        /* Configure DMA attributes. */
        if ((sizeof(dma_addr_t) > 4) &&
            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
                pci_using_dac = 0;

                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
                        goto err_out_res;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_err(&pdev->dev,
                                "No usable consistent DMA configuration, aborting\n");
                        goto err_out_res;
                }
        }

        cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
                    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;

        regs = ioremap(pciaddr, CP_REGS_SIZE);
        if (!regs) {
                rc = -EIO;
                dev_err(&pdev->dev, "Cannot map PCI MMIO (%llx@%llx)\n",
                        (unsigned long long)pci_resource_len(pdev, 1),
                        (unsigned long long)pciaddr);
                goto err_out_res;
        }
        dev->base_addr = (unsigned long) regs;
        cp->regs = regs;

        cp_stop_hw(cp);

        /* read MAC address from EEPROM (words 7..9) */
        addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
        for (i = 0; i < 3; i++)
                ((__le16 *) (dev->dev_addr))[i] =
                    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

        dev->netdev_ops = &cp_netdev_ops;
        netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
        dev->ethtool_ops = &cp_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;

#if CP_VLAN_TAG_USED
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

        if (pci_using_dac)
                dev->features |= NETIF_F_HIGHDMA;

#if 0 /* disabled by default until verified */
        dev->features |= NETIF_F_TSO;
#endif

        dev->irq = pdev->irq;

        rc = register_netdev(dev);
        if (rc)
                goto err_out_iomap;

        netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
                    dev->base_addr, dev->dev_addr, dev->irq);

        pci_set_drvdata(pdev, dev);

        /* enable busmastering and memory-write-invalidate */
        pci_set_master(pdev);

        if (cp->wol_enabled)
                cp_set_d3_state (cp);

        return 0;

err_out_iomap:
        iounmap(regs);
err_out_res:
        pci_release_regions(pdev);
err_out_mwi:
        pci_clear_mwi(pdev);
err_out_disable:
        pci_disable_device(pdev);
err_out_free:
        free_netdev(dev);
        return rc;
}

static void cp_remove_one (struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct cp_private *cp = netdev_priv(dev);

        unregister_netdev(dev);
        iounmap(cp->regs);
        if (cp->wol_enabled)
                pci_set_power_state (pdev, PCI_D0);
        pci_release_regions(pdev);
        pci_clear_mwi(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
}

#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        if (!netif_running(dev))
                return 0;

        netif_device_detach (dev);
        netif_stop_queue (dev);

        spin_lock_irqsave (&cp->lock, flags);

        /* Disable Rx and Tx */
        cpw16 (IntrMask, 0);
        cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));

        spin_unlock_irqrestore (&cp->lock, flags);

        pci_save_state(pdev);
        pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}
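
/*
 * Suspend order matters: the device is detached and its queue stopped
 * before interrupts are masked and the Rx/Tx engines are turned off
 * under the lock, so nothing can poke the hardware once it powers
 * down.  PME# wake is armed only if WOL was configured via ethtool.
 */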

static int cp_resume (struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata (pdev);
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        if (!netif_running(dev))
                return 0;

        netif_device_attach (dev);

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_enable_wake(pdev, PCI_D0, 0);

        /* FIXME: trouble may occur if the Rx ring buffer is depleted */
        cp_init_rings_index (cp);
        cp_init_hw (cp);
        netif_start_queue (dev);

        spin_lock_irqsave (&cp->lock, flags);

        mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

        spin_unlock_irqrestore (&cp->lock, flags);

        return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver cp_driver = {
        .name         = DRV_NAME,
        .id_table     = cp_pci_tbl,
        .probe        = cp_init_one,
        .remove       = cp_remove_one,
#ifdef CONFIG_PM
        .suspend      = cp_suspend,
        .resume       = cp_resume,
#endif
};

static int __init cp_init (void)
{
#ifdef MODULE
        pr_info("%s", version);
#endif
        return pci_register_driver(&cp_driver);
}

static void __exit cp_exit (void)
{
        pci_unregister_driver (&cp_driver);
}

module_init(cp_init);
module_exit(cp_exit);