/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
        Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

        Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
        Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
        Copyright 2001 Manfred Spraul                               [natsemi.c]
        Copyright 1999-2001 by Donald Becker.                       [natsemi.c]
        Written 1997-2001 by Donald Becker.                         [8139too.c]
        Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        See the file COPYING in this distribution for more information.

        Contributors:

                Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
                PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
                LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

        TODO:
        * Test Tx checksumming thoroughly

        Low priority TODO:
        * Complete reset on PciErr
        * Consider Rx interrupt mitigation using TimerIntr
        * Investigate using skb->priority with h/w VLAN priority
        * Investigate using High Priority Tx Queue with skb->priority
        * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
        * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
        * Implement Tx software interrupt mitigation via
          Tx descriptor bit
        * The real minimum of CP_MIN_MTU is 4 bytes.  However,
          for this to be supported, one must(?) turn on packet padding.
        * Support external MII transceivers (patch available)

        NOTES:
        * TX checksumming is considered experimental.  It is off by
          default, use ethtool to turn it on.

 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME                "8139cp"
#define DRV_VERSION             "1.3"
#define DRV_RELDATE             "Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define CP_DEF_MSG_ENABLE       (NETIF_MSG_DRV          | \
                                 NETIF_MSG_PROBE        | \
                                 NETIF_MSG_LINK)
#define CP_NUM_STATS            14      /* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE           64      /* size in bytes of DMA stats block */
#define CP_REGS_SIZE            (0xff + 1)
#define CP_REGS_VER             1               /* version 1 */
#define CP_RX_RING_SIZE         64
#define CP_TX_RING_SIZE         64
#define CP_RING_BYTES           \
                ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +   \
                 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +   \
                 CP_STATS_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)              (((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)                                      \
        (((CP)->tx_tail <= (CP)->tx_head) ?                     \
          (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :       \
          (CP)->tx_tail - (CP)->tx_head - 1)
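
/*
 * TX_BUFFS_AVAIL never reports more than CP_TX_RING_SIZE - 1 free slots:
 * one descriptor is always held back, so tx_head == tx_tail can only
 * mean "ring empty" and is never ambiguous with "ring full".
 */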

#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/
#define CP_INTERNAL_PHY         32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH          5       /* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST            4       /* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST            6       /* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH         256     /* Early Tx threshold, in bytes */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU              60      /* TODO: allow lower, but pad */
#define CP_MAX_MTU              4096

enum {
        /* NIC register offsets */
        MAC0            = 0x00, /* Ethernet hardware address. */
        MAR0            = 0x08, /* Multicast filter. */
        StatsAddr       = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
        TxRingAddr      = 0x20, /* 64-bit start addr of Tx ring */
        HiTxRingAddr    = 0x28, /* 64-bit start addr of high priority Tx ring */
        Cmd             = 0x37, /* Command register */
        IntrMask        = 0x3C, /* Interrupt mask */
        IntrStatus      = 0x3E, /* Interrupt status */
        TxConfig        = 0x40, /* Tx configuration */
        ChipVersion     = 0x43, /* 8-bit chip version, inside TxConfig */
        RxConfig        = 0x44, /* Rx configuration */
        RxMissed        = 0x4C, /* 24 bits valid, write clears */
        Cfg9346         = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
        Config1         = 0x52, /* Config1 */
        Config3         = 0x59, /* Config3 */
        Config4         = 0x5A, /* Config4 */
        MultiIntr       = 0x5C, /* Multiple interrupt select */
        BasicModeCtrl   = 0x62, /* MII BMCR */
        BasicModeStatus = 0x64, /* MII BMSR */
        NWayAdvert      = 0x66, /* MII ADVERTISE */
        NWayLPAR        = 0x68, /* MII LPA */
        NWayExpansion   = 0x6A, /* MII Expansion */
        Config5         = 0xD8, /* Config5 */
        TxPoll          = 0xD9, /* Tell chip to check Tx descriptors for work */
        RxMaxSize       = 0xDA, /* Max size of an Rx packet (8169 only) */
        CpCmd           = 0xE0, /* C+ Command register (C+ mode only) */
        IntrMitigate    = 0xE2, /* rx/tx interrupt mitigation control */
        RxRingAddr      = 0xE4, /* 64-bit start addr of Rx ring */
        TxThresh        = 0xEC, /* Early Tx threshold */
        OldRxBufAddr    = 0x30, /* DMA address of Rx ring buffer (C mode) */
        OldTSD0         = 0x10, /* DMA address of first Tx desc (C mode) */

        /* Tx and Rx status descriptors */
        DescOwn         = (1 << 31), /* Descriptor is owned by NIC */
        RingEnd         = (1 << 30), /* End of descriptor ring */
        FirstFrag       = (1 << 29), /* First segment of a packet */
        LastFrag        = (1 << 28), /* Final segment of a packet */
        LargeSend       = (1 << 27), /* TCP Large Send Offload (TSO) */
        MSSShift        = 16,        /* MSS value position */
        MSSMask         = 0xfff,     /* MSS value: 11 bits */
        TxError         = (1 << 23), /* Tx error summary */
        RxError         = (1 << 20), /* Rx error summary */
        IPCS            = (1 << 18), /* Calculate IP checksum */
        UDPCS           = (1 << 17), /* Calculate UDP/IP checksum */
        TCPCS           = (1 << 16), /* Calculate TCP/IP checksum */
        TxVlanTag       = (1 << 17), /* Add VLAN tag */
        RxVlanTagged    = (1 << 16), /* Rx VLAN tag available */
        IPFail          = (1 << 15), /* IP checksum failed */
        UDPFail         = (1 << 14), /* UDP/IP checksum failed */
        TCPFail         = (1 << 13), /* TCP/IP checksum failed */
        NormalTxPoll    = (1 << 6),  /* One or more normal Tx packets to send */
        PID1            = (1 << 17), /* 2 protocol id bits:  0==non-IP, */
        PID0            = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
        RxProtoTCP      = 1,
        RxProtoUDP      = 2,
        RxProtoIP       = 3,
        TxFIFOUnder     = (1 << 25), /* Tx FIFO underrun */
        TxOWC           = (1 << 22), /* Tx Out-of-window collision */
        TxLinkFail      = (1 << 21), /* Link failed during Tx of packet */
        TxMaxCol        = (1 << 20), /* Tx aborted due to excessive collisions */
        TxColCntShift   = 16,        /* Shift, to get 4-bit Tx collision cnt */
        TxColCntMask    = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
        RxErrFrame      = (1 << 27), /* Rx frame alignment error */
        RxMcast         = (1 << 26), /* Rx multicast packet rcv'd */
        RxErrCRC        = (1 << 18), /* Rx CRC error */
        RxErrRunt       = (1 << 19), /* Rx error, packet < 64 bytes */
        RxErrLong       = (1 << 21), /* Rx error, packet > 4096 bytes */
        RxErrFIFO       = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

        /* StatsAddr register */
        DumpStats       = (1 << 3),  /* Begin stats dump */

        /* RxConfig register */
        RxCfgFIFOShift  = 13,        /* Shift, to get Rx FIFO thresh value */
        RxCfgDMAShift   = 8,         /* Shift, to get Rx Max DMA value */
        AcceptErr       = 0x20,      /* Accept packets with CRC errors */
        AcceptRunt      = 0x10,      /* Accept runt (<64 bytes) packets */
        AcceptBroadcast = 0x08,      /* Accept broadcast packets */
        AcceptMulticast = 0x04,      /* Accept multicast packets */
        AcceptMyPhys    = 0x02,      /* Accept pkts with our MAC as dest */
        AcceptAllPhys   = 0x01,      /* Accept all pkts w/ physical dest */

        /* IntrMask / IntrStatus registers */
        PciErr          = (1 << 15), /* System error on the PCI bus */
        TimerIntr       = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
        LenChg          = (1 << 13), /* Cable length change */
        SWInt           = (1 << 8),  /* Software-requested interrupt */
        TxEmpty         = (1 << 7),  /* No Tx descriptors available */
        RxFIFOOvr       = (1 << 6),  /* Rx FIFO Overflow */
        LinkChg         = (1 << 5),  /* Packet underrun, or link change */
        RxEmpty         = (1 << 4),  /* No Rx descriptors available */
        TxErr           = (1 << 3),  /* Tx error */
        TxOK            = (1 << 2),  /* Tx packet sent */
        RxErr           = (1 << 1),  /* Rx error */
        RxOK            = (1 << 0),  /* Rx packet received */
        IntrResvd       = (1 << 10), /* reserved, according to RealTek engineers,
                                        but hardware likes to raise it */

        IntrAll         = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
                          RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
                          RxErr | RxOK | IntrResvd,

        /* C mode command register */
        CmdReset        = (1 << 4),  /* Enable to reset; self-clearing */
        RxOn            = (1 << 3),  /* Rx mode enable */
        TxOn            = (1 << 2),  /* Tx mode enable */

        /* C+ mode command register */
        RxVlanOn        = (1 << 6),  /* Rx VLAN de-tagging enable */
        RxChkSum        = (1 << 5),  /* Rx checksum offload enable */
        PCIDAC          = (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
        PCIMulRW        = (1 << 3),  /* Enable PCI read/write multiple */
        CpRxOn          = (1 << 1),  /* Rx mode enable */
        CpTxOn          = (1 << 0),  /* Tx mode enable */

        /* Cfg9346 EEPROM control register */
        Cfg9346_Lock    = 0x00,      /* Lock ConfigX/MII register access */
        Cfg9346_Unlock  = 0xC0,      /* Unlock ConfigX/MII register access */

        /* TxConfig register */
        IFG             = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
        TxDMAShift      = 8,         /* DMA burst value (0-7) is shift this many bits */

        /* Early Tx Threshold register */
        TxThreshMask    = 0x3f,      /* Mask bits 5-0 */
        TxThreshMax     = 2048,      /* Max early Tx threshold */

        /* Config1 register */
        DriverLoaded    = (1 << 5),  /* Software marker, driver is loaded */
        LWACT           = (1 << 4),  /* LWAKE active mode */
        PMEnable        = (1 << 0),  /* Enable various PM features of chip */

        /* Config3 register */
        PARMEnable      = (1 << 6),  /* Enable auto-loading of PHY parms */
        MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
        LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */

        /* Config4 register */
        LWPTN           = (1 << 1),  /* LWAKE Pattern */
        LWPME           = (1 << 4),  /* LANWAKE vs PMEB */

        /* Config5 register */
        BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
        MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
        UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
        LANWake         = (1 << 1),  /* Enable LANWake signal */
        PMEStatus       = (1 << 0),  /* PME status can be reset by PCI RST# */

        cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
        cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
        cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
          (RX_FIFO_THRESH << RxCfgFIFOShift) |
          (RX_DMA_BURST << RxCfgDMAShift);

struct cp_desc {
        __le32          opts1;
        __le32          opts2;
        __le64          addr;
};
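
/*
 * Ownership handoff: a descriptor belongs to the NIC while DescOwn is
 * set in opts1.  Both the Rx refill and Tx submit paths therefore fill
 * in addr and opts2 first, issue wmb(), and only then write opts1, so
 * the chip can never see a half-initialized descriptor.
 */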

struct cp_dma_stats {
        __le64                  tx_ok;
        __le64                  rx_ok;
        __le64                  tx_err;
        __le32                  rx_err;
        __le16                  rx_fifo;
        __le16                  frame_align;
        __le32                  tx_ok_1col;
        __le32                  tx_ok_mcol;
        __le64                  rx_ok_phys;
        __le64                  rx_ok_bcast;
        __le32                  rx_ok_mcast;
        __le16                  tx_abort;
        __le16                  tx_underrun;
} __packed;

struct cp_extra_stats {
        unsigned long           rx_frags;
};

struct cp_private {
        void                    __iomem *regs;
        struct net_device       *dev;
        spinlock_t              lock;
        u32                     msg_enable;

        struct napi_struct      napi;

        struct pci_dev          *pdev;
        u32                     rx_config;
        u16                     cpcmd;

        struct cp_extra_stats   cp_stats;

        unsigned                rx_head         ____cacheline_aligned;
        unsigned                rx_tail;
        struct cp_desc          *rx_ring;
        struct sk_buff          *rx_skb[CP_RX_RING_SIZE];

        unsigned                tx_head         ____cacheline_aligned;
        unsigned                tx_tail;
        struct cp_desc          *tx_ring;
        struct sk_buff          *tx_skb[CP_TX_RING_SIZE];

        unsigned                rx_buf_sz;
        unsigned                wol_enabled : 1; /* Is Wake-on-LAN enabled? */

        dma_addr_t              ring_dma;

        struct mii_if_info      mii_if;
};

#define cpr8(reg)       readb(cp->regs + (reg))
#define cpr16(reg)      readw(cp->regs + (reg))
#define cpr32(reg)      readl(cp->regs + (reg))
#define cpw8(reg,val)   writeb((val), cp->regs + (reg))
#define cpw16(reg,val)  writew((val), cp->regs + (reg))
#define cpw32(reg,val)  writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {                    \
        writeb((val), cp->regs + (reg));        \
        readb(cp->regs + (reg));                \
        } while (0)
#define cpw16_f(reg,val) do {                   \
        writew((val), cp->regs + (reg));        \
        readw(cp->regs + (reg));                \
        } while (0)
#define cpw32_f(reg,val) do {                   \
        writel((val), cp->regs + (reg));        \
        readl(cp->regs + (reg));                \
        } while (0)
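
/*
 * The *_f ("flush") variants read the register back after writing it,
 * forcing any posted PCI write out to the chip before continuing.
 */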


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);

static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
        { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
        { },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct {
        const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "tx_ok" },
        { "rx_ok" },
        { "tx_err" },
        { "rx_err" },
        { "rx_fifo" },
        { "frame_align" },
        { "tx_ok_1col" },
        { "tx_ok_mcol" },
        { "rx_ok_phys" },
        { "rx_ok_bcast" },
        { "rx_ok_mcast" },
        { "tx_abort" },
        { "tx_underrun" },
        { "rx_frags" },
};


static inline void cp_set_rxbufsize (struct cp_private *cp)
{
        unsigned int mtu = cp->dev->mtu;

        if (mtu > ETH_DATA_LEN)
                /* MTU + ethernet header + FCS + optional VLAN tag */
                cp->rx_buf_sz = mtu + ETH_HLEN + 8;
        else
                cp->rx_buf_sz = PKT_BUF_SZ;
}

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
                              struct cp_desc *desc)
{
        u32 opts2 = le32_to_cpu(desc->opts2);

        skb->protocol = eth_type_trans (skb, cp->dev);

        cp->dev->stats.rx_packets++;
        cp->dev->stats.rx_bytes += skb->len;

        if (opts2 & RxVlanTagged)
                __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));

        napi_gro_receive(&cp->napi, skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
                            u32 status, u32 len)
{
        netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
                  rx_tail, status, len);
        cp->dev->stats.rx_errors++;
        if (status & RxErrFrame)
                cp->dev->stats.rx_frame_errors++;
        if (status & RxErrCRC)
                cp->dev->stats.rx_crc_errors++;
        if ((status & RxErrRunt) || (status & RxErrLong))
                cp->dev->stats.rx_length_errors++;
        if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
                cp->dev->stats.rx_length_errors++;
        if (status & RxErrFIFO)
                cp->dev->stats.rx_fifo_errors++;
}

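/* Bits 17:16 of the Rx status hold the PID1:PID0 protocol id
 * (RxProtoTCP/RxProtoUDP/RxProtoIP above); hardware checksum results
 * are only meaningful for TCP and UDP frames.
 */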
static inline unsigned int cp_rx_csum_ok (u32 status)
{
        unsigned int protocol = (status >> 16) & 0x3;

        if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
            ((protocol == RxProtoUDP) && !(status & UDPFail)))
                return 1;
        else
                return 0;
}

static int cp_rx_poll(struct napi_struct *napi, int budget)
{
        struct cp_private *cp = container_of(napi, struct cp_private, napi);
        struct net_device *dev = cp->dev;
        unsigned int rx_tail = cp->rx_tail;
        int rx;

rx_status_loop:
        rx = 0;
        cpw16(IntrStatus, cp_rx_intr_mask);

        while (1) {
                u32 status, len;
                dma_addr_t mapping;
                struct sk_buff *skb, *new_skb;
                struct cp_desc *desc;
                const unsigned buflen = cp->rx_buf_sz;

                skb = cp->rx_skb[rx_tail];
                BUG_ON(!skb);

                desc = &cp->rx_ring[rx_tail];
                status = le32_to_cpu(desc->opts1);
                if (status & DescOwn)
                        break;

                len = (status & 0x1fff) - 4;
                mapping = le64_to_cpu(desc->addr);

                if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
                        /* we don't support incoming fragmented frames.
                         * instead, we attempt to ensure that the
                         * pre-allocated RX skbs are properly sized such
                         * that RX fragments are never encountered
                         */
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        dev->stats.rx_dropped++;
                        cp->cp_stats.rx_frags++;
                        goto rx_next;
                }

                if (status & (RxError | RxErrFIFO)) {
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        goto rx_next;
                }

                netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
                          rx_tail, status, len);

                new_skb = netdev_alloc_skb_ip_align(dev, buflen);
                if (!new_skb) {
                        dev->stats.rx_dropped++;
                        goto rx_next;
                }

                dma_unmap_single(&cp->pdev->dev, mapping,
                                 buflen, PCI_DMA_FROMDEVICE);

                /* Handle checksum offloading for incoming packets. */
                if (cp_rx_csum_ok(status))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                skb_put(skb, len);

                mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
                                         PCI_DMA_FROMDEVICE);
                cp->rx_skb[rx_tail] = new_skb;

                cp_rx_skb(cp, skb, desc);
                rx++;

rx_next:
                cp->rx_ring[rx_tail].opts2 = 0;
                cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
                if (rx_tail == (CP_RX_RING_SIZE - 1))
                        desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
                                                  cp->rx_buf_sz);
                else
                        desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
                rx_tail = NEXT_RX(rx_tail);

                if (rx >= budget)
                        break;
        }

        cp->rx_tail = rx_tail;

        /* if we did not reach work limit, then we're done with
         * this round of polling
         */
        if (rx < budget) {
                unsigned long flags;

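                /* Re-check IntrStatus before completing NAPI: a packet
                 * that landed after the final descriptor poll would
                 * otherwise sit unprocessed until the next interrupt.
                 */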
                if (cpr16(IntrStatus) & cp_rx_intr_mask)
                        goto rx_status_loop;

                napi_gro_flush(napi);
                spin_lock_irqsave(&cp->lock, flags);
                __napi_complete(napi);
                cpw16_f(IntrMask, cp_intr_mask);
                spin_unlock_irqrestore(&cp->lock, flags);
        }

        return rx;
}

static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct cp_private *cp;
        u16 status;

        if (unlikely(dev == NULL))
                return IRQ_NONE;
        cp = netdev_priv(dev);

        status = cpr16(IntrStatus);
        if (!status || (status == 0xFFFF))
                return IRQ_NONE;

        netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
                  status, cpr8(Cmd), cpr16(CpCmd));

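        /* Acknowledge everything except the Rx bits here; those are
         * acknowledged by cp_rx_poll() itself.
         */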
        cpw16(IntrStatus, status & ~cp_rx_intr_mask);

        spin_lock(&cp->lock);

        /* close possible races with dev_close */
        if (unlikely(!netif_running(dev))) {
                cpw16(IntrMask, 0);
                spin_unlock(&cp->lock);
                return IRQ_HANDLED;
        }

        if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
                if (napi_schedule_prep(&cp->napi)) {
                        cpw16_f(IntrMask, cp_norx_intr_mask);
                        __napi_schedule(&cp->napi);
                }

        if (status & (TxOK | TxErr | TxEmpty | SWInt))
                cp_tx(cp);
        if (status & LinkChg)
                mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

        spin_unlock(&cp->lock);

        if (status & PciErr) {
                u16 pci_status;

                pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
                pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
                netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
                           status, pci_status);

                /* TODO: reset hardware */
        }

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        cp_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
        unsigned tx_head = cp->tx_head;
        unsigned tx_tail = cp->tx_tail;

        while (tx_tail != tx_head) {
                struct cp_desc *txd = cp->tx_ring + tx_tail;
                struct sk_buff *skb;
                u32 status;

                rmb();
                status = le32_to_cpu(txd->opts1);
                if (status & DescOwn)
                        break;

                skb = cp->tx_skb[tx_tail];
                BUG_ON(!skb);

                dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                                 le32_to_cpu(txd->opts1) & 0xffff,
                                 PCI_DMA_TODEVICE);

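                /* Completion status is only valid in the descriptor that
                 * carries LastFrag, and the skb is freed exactly once,
                 * when that final fragment is reaped.
                 */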
                if (status & LastFrag) {
                        if (status & (TxError | TxFIFOUnder)) {
                                netif_dbg(cp, tx_err, cp->dev,
                                          "tx err, status 0x%x\n", status);
                                cp->dev->stats.tx_errors++;
                                if (status & TxOWC)
                                        cp->dev->stats.tx_window_errors++;
                                if (status & TxMaxCol)
                                        cp->dev->stats.tx_aborted_errors++;
                                if (status & TxLinkFail)
                                        cp->dev->stats.tx_carrier_errors++;
                                if (status & TxFIFOUnder)
                                        cp->dev->stats.tx_fifo_errors++;
                        } else {
                                cp->dev->stats.collisions +=
                                        ((status >> TxColCntShift) & TxColCntMask);
                                cp->dev->stats.tx_packets++;
                                cp->dev->stats.tx_bytes += skb->len;
                                netif_dbg(cp, tx_done, cp->dev,
                                          "tx done, slot %d\n", tx_tail);
                        }
                        dev_kfree_skb_irq(skb);
                }

                cp->tx_skb[tx_tail] = NULL;

                tx_tail = NEXT_TX(tx_tail);
        }

        cp->tx_tail = tx_tail;

        if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
                netif_wake_queue(cp->dev);
}

static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) ?
                TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}

static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned entry;
        u32 eor, flags;
        unsigned long intr_flags;
        __le32 opts2;
        int mss = 0;

        spin_lock_irqsave(&cp->lock, intr_flags);

        /* This is a hard error, log it. */
        if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&cp->lock, intr_flags);
                netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }

        entry = cp->tx_head;
        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
        mss = skb_shinfo(skb)->gso_size;

        opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));

        if (skb_shinfo(skb)->nr_frags == 0) {
                struct cp_desc *txd = &cp->tx_ring[entry];
                u32 len;
                dma_addr_t mapping;

                len = skb->len;
                mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
                txd->opts2 = opts2;
                txd->addr = cpu_to_le64(mapping);
                wmb();

                flags = eor | len | DescOwn | FirstFrag | LastFrag;

                if (mss)
                        flags |= LargeSend | ((mss & MSSMask) << MSSShift);
                else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        const struct iphdr *ip = ip_hdr(skb);
                        if (ip->protocol == IPPROTO_TCP)
                                flags |= IPCS | TCPCS;
                        else if (ip->protocol == IPPROTO_UDP)
                                flags |= IPCS | UDPCS;
                        else
                                WARN_ON(1);     /* we need a WARN() */
                }

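                /* Write opts1 (which carries DescOwn) last; the barriers
                 * above and below keep the NIC from seeing the descriptor
                 * before it is fully formed.
                 */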
                txd->opts1 = cpu_to_le32(flags);
                wmb();

                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);
        } else {
                struct cp_desc *txd;
                u32 first_len, first_eor;
                dma_addr_t first_mapping;
                int frag, first_entry = entry;
                const struct iphdr *ip = ip_hdr(skb);

                /* We must give this initial chunk to the device last.
                 * Otherwise we could race with the device.
                 */
                first_eor = eor;
                first_len = skb_headlen(skb);
                first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                               first_len, PCI_DMA_TODEVICE);
                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);

                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                        const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
                        u32 len;
                        u32 ctrl;
                        dma_addr_t mapping;

                        len = skb_frag_size(this_frag);
                        mapping = dma_map_single(&cp->pdev->dev,
                                                 skb_frag_address(this_frag),
                                                 len, PCI_DMA_TODEVICE);
                        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

                        ctrl = eor | len | DescOwn;

                        if (mss)
                                ctrl |= LargeSend |
                                        ((mss & MSSMask) << MSSShift);
                        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                if (ip->protocol == IPPROTO_TCP)
                                        ctrl |= IPCS | TCPCS;
                                else if (ip->protocol == IPPROTO_UDP)
                                        ctrl |= IPCS | UDPCS;
                                else
                                        BUG();
                        }

                        if (frag == skb_shinfo(skb)->nr_frags - 1)
                                ctrl |= LastFrag;

                        txd = &cp->tx_ring[entry];
                        txd->opts2 = opts2;
                        txd->addr = cpu_to_le64(mapping);
                        wmb();

                        txd->opts1 = cpu_to_le32(ctrl);
                        wmb();

                        cp->tx_skb[entry] = skb;
                        entry = NEXT_TX(entry);
                }

                txd = &cp->tx_ring[first_entry];
                txd->opts2 = opts2;
                txd->addr = cpu_to_le64(first_mapping);
                wmb();

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        if (ip->protocol == IPPROTO_TCP)
                                txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                         FirstFrag | DescOwn |
                                                         IPCS | TCPCS);
                        else if (ip->protocol == IPPROTO_UDP)
                                txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                         FirstFrag | DescOwn |
                                                         IPCS | UDPCS);
                        else
                                BUG();
                } else
                        txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                 FirstFrag | DescOwn);
                wmb();
        }
        cp->tx_head = entry;
        netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
                  entry, skb->len);
        if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
                netif_stop_queue(dev);

        spin_unlock_irqrestore(&cp->lock, intr_flags);

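        /* Kick the chip to start scanning the normal-priority Tx ring. */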
        cpw8(TxPoll, NormalTxPoll);

        return NETDEV_TX_OK;
}

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        u32 mc_filter[2];       /* Multicast hash filter */
        int rx_mode;

        /* Note: do not reorder, GCC is clever about common statements. */
        if (dev->flags & IFF_PROMISC) {
                /* Unconditionally log net taps. */
                rx_mode =
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter perfectly -- accept all multicasts. */
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else {
                struct netdev_hw_addr *ha;
                rx_mode = AcceptBroadcast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0;
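                /* Hash each address into the 64-bit multicast filter:
                 * the top 6 bits of the Ethernet CRC pick one of 64 bits
                 * across the two 32-bit MAR registers.
                 */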
                netdev_for_each_mc_addr(ha, dev) {
                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                        rx_mode |= AcceptMulticast;
                }
        }

        /* We can safely update without stopping the chip. */
        cp->rx_config = cp_rx_config | rx_mode;
        cpw32_f(RxConfig, cp->rx_config);

        cpw32_f (MAR0 + 0, mc_filter[0]);
        cpw32_f (MAR0 + 4, mc_filter[1]);
}

static void cp_set_rx_mode (struct net_device *dev)
{
        unsigned long flags;
        struct cp_private *cp = netdev_priv(dev);

        spin_lock_irqsave (&cp->lock, flags);
        __cp_set_rx_mode(dev);
        spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
        /* only lower 24 bits valid; write any value to clear */
        cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
        cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        /* The chip only needs to report frames it silently dropped. */
        spin_lock_irqsave(&cp->lock, flags);
        if (netif_running(dev) && netif_device_present(dev))
                __cp_get_stats(cp);
        spin_unlock_irqrestore(&cp->lock, flags);

        return &dev->stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
        cpw16(IntrStatus, ~(cpr16(IntrStatus)));
        cpw16_f(IntrMask, 0);
        cpw8(Cmd, 0);
        cpw16_f(CpCmd, 0);
        cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
}

static void cp_reset_hw (struct cp_private *cp)
{
        unsigned work = 1000;

        cpw8(Cmd, CmdReset);

        while (work--) {
                if (!(cpr8(Cmd) & CmdReset))
                        return;

                schedule_timeout_uninterruptible(10);
        }

        netdev_err(cp->dev, "hardware reset timeout\n");
}

static inline void cp_start_hw (struct cp_private *cp)
{
        cpw16(CpCmd, cp->cpcmd);
        cpw8(Cmd, RxOn | TxOn);
}

static void cp_enable_irq(struct cp_private *cp)
{
        cpw16_f(IntrMask, cp_intr_mask);
}

static void cp_init_hw (struct cp_private *cp)
{
        struct net_device *dev = cp->dev;
        dma_addr_t ring_dma;

        cp_reset_hw(cp);

        cpw8_f (Cfg9346, Cfg9346_Unlock);

        /* Restore our idea of the MAC address. */
        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

        __cp_set_rx_mode(dev);
        cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

        cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
        /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
        cpw8(Config3, PARMEnable);
        cp->wol_enabled = 0;

        cpw8(Config5, cpr8(Config5) & PMEStatus);

        cpw32_f(HiTxRingAddr, 0);
        cpw32_f(HiTxRingAddr + 4, 0);

        ring_dma = cp->ring_dma;
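
        /* Program the 64-bit ring base addresses.  The (x >> 16) >> 16
         * idiom extracts the high half without a single 32-bit-or-larger
         * shift, which would be undefined when dma_addr_t is only 32
         * bits wide.
         */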
        cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

        ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
        cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

        cpw16(MultiIntr, 0);

        cpw8_f(Cfg9346, Cfg9346_Lock);
}

static int cp_refill_rx(struct cp_private *cp)
{
        struct net_device *dev = cp->dev;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                dma_addr_t mapping;

                skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
                if (!skb)
                        goto err_out;

                mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                cp->rx_skb[i] = skb;

                cp->rx_ring[i].opts2 = 0;
                cp->rx_ring[i].addr = cpu_to_le64(mapping);
                if (i == (CP_RX_RING_SIZE - 1))
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
                else
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | cp->rx_buf_sz);
        }

        return 0;

err_out:
        cp_clean_rings(cp);
        return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
        cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

        cp_init_rings_index(cp);

        return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
        void *mem;

        mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
                                 &cp->ring_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

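        /* Carve up the single coherent allocation: Rx descriptors first,
         * Tx descriptors after (CP_RING_BYTES also reserves CP_STATS_SIZE
         * bytes at the end).
         */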
        cp->rx_ring = mem;
        cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

        return cp_init_rings(cp);
}

static void cp_clean_rings (struct cp_private *cp)
{
        struct cp_desc *desc;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                if (cp->rx_skb[i]) {
                        desc = cp->rx_ring + i;
                        dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(cp->rx_skb[i]);
                }
        }

        for (i = 0; i < CP_TX_RING_SIZE; i++) {
                if (cp->tx_skb[i]) {
                        struct sk_buff *skb = cp->tx_skb[i];

                        desc = cp->tx_ring + i;
                        dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                         le32_to_cpu(desc->opts1) & 0xffff,
                                         PCI_DMA_TODEVICE);
                        if (le32_to_cpu(desc->opts1) & LastFrag)
                                dev_kfree_skb(skb);
                        cp->dev->stats.tx_dropped++;
                }
        }

        memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

        memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
        memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
        cp_clean_rings(cp);
        dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
                          cp->ring_dma);
        cp->rx_ring = NULL;
        cp->tx_ring = NULL;
}

static int cp_open (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;

        netif_dbg(cp, ifup, dev, "enabling interface\n");

        rc = cp_alloc_rings(cp);
        if (rc)
                return rc;

        napi_enable(&cp->napi);

        cp_init_hw(cp);

        rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                goto err_out_hw;

        cp_enable_irq(cp);

        netif_carrier_off(dev);
        mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
        netif_start_queue(dev);

        return 0;

err_out_hw:
        napi_disable(&cp->napi);
        cp_stop_hw(cp);
        cp_free_rings(cp);
        return rc;
}

static int cp_close (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        napi_disable(&cp->napi);

        netif_dbg(cp, ifdown, dev, "disabling interface\n");

        spin_lock_irqsave(&cp->lock, flags);

        netif_stop_queue(dev);
        netif_carrier_off(dev);

        cp_stop_hw(cp);

        spin_unlock_irqrestore(&cp->lock, flags);

        free_irq(dev->irq, dev);

        cp_free_rings(cp);
        return 0;
}

static void cp_tx_timeout(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
        int rc;

        netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
                    cpr8(Cmd), cpr16(CpCmd),
                    cpr16(IntrStatus), cpr16(IntrMask));

        spin_lock_irqsave(&cp->lock, flags);

        cp_stop_hw(cp);
        cp_clean_rings(cp);
        rc = cp_init_rings(cp);
        cp_start_hw(cp);

        netif_wake_queue(dev);

        spin_unlock_irqrestore(&cp->lock, flags);
}

#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        /* check for invalid MTU, according to hardware limits */
        if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
                return -EINVAL;

        /* if network interface not up, no need for complexity */
        if (!netif_running(dev)) {
                dev->mtu = new_mtu;
                cp_set_rxbufsize(cp);   /* set new rx buf size */
                return 0;
        }

        spin_lock_irqsave(&cp->lock, flags);

        cp_stop_hw(cp);                 /* stop h/w and free rings */
        cp_clean_rings(cp);

        dev->mtu = new_mtu;
        cp_set_rxbufsize(cp);           /* set new rx buf size */

        rc = cp_init_rings(cp);         /* realloc and restart h/w */
        cp_start_hw(cp);

        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}
#endif /* BROKEN */

static const char mii_2_8139_map[8] = {
        BasicModeCtrl,
        BasicModeStatus,
        0,
        0,
        NWayAdvert,
        NWayLPAR,
        NWayExpansion,
        0
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct cp_private *cp = netdev_priv(dev);

        return location < 8 && mii_2_8139_map[location] ?
               readw(cp->regs + mii_2_8139_map[location]) : 0;
}


static void mdio_write(struct net_device *dev, int phy_id, int location,
                       int value)
{
        struct cp_private *cp = netdev_priv(dev);

        if (location == 0) {
                cpw8(Cfg9346, Cfg9346_Unlock);
                cpw16(BasicModeCtrl, value);
                cpw8(Cfg9346, Cfg9346_Lock);
        } else if (location < 8 && mii_2_8139_map[location])
                cpw16(mii_2_8139_map[location], value);
}

/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
                           const struct ethtool_wolinfo *wol)
{
        u8 options;

        options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
        /* If WOL is being disabled, no need for complexity */
        if (wol->wolopts) {
                if (wol->wolopts & WAKE_PHY)    options |= LinkUp;
                if (wol->wolopts & WAKE_MAGIC)  options |= MagicPacket;
        }

        cpw8 (Cfg9346, Cfg9346_Unlock);
        cpw8 (Config3, options);
        cpw8 (Cfg9346, Cfg9346_Lock);

        options = 0; /* Paranoia setting */
        options = cpr8 (Config5) & ~(UWF | MWF | BWF);
        /* If WOL is being disabled, no need for complexity */
        if (wol->wolopts) {
                if (wol->wolopts & WAKE_UCAST)  options |= UWF;
                if (wol->wolopts & WAKE_BCAST)  options |= BWF;
                if (wol->wolopts & WAKE_MCAST)  options |= MWF;
        }

        cpw8 (Config5, options);

        cp->wol_enabled = (wol->wolopts) ? 1 : 0;

        return 0;
}

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
                     struct ethtool_wolinfo *wol)
{
        u8 options;

        wol->wolopts   = 0; /* Start from scratch */
        wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
                         WAKE_MCAST | WAKE_UCAST;
        /* We don't need to go on if WOL is disabled */
        if (!cp->wol_enabled) return;

        options        = cpr8 (Config3);
        if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
        if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;

        options        = 0; /* Paranoia setting */
        options        = cpr8 (Config5);
        if (options & UWF)           wol->wolopts |= WAKE_UCAST;
        if (options & BWF)           wol->wolopts |= WAKE_BCAST;
        if (options & MWF)           wol->wolopts |= WAKE_MCAST;
}

static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct cp_private *cp = netdev_priv(dev);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}

static void cp_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *ring)
{
        ring->rx_max_pending = CP_RX_RING_SIZE;
        ring->tx_max_pending = CP_TX_RING_SIZE;
        ring->rx_pending = CP_RX_RING_SIZE;
        ring->tx_pending = CP_TX_RING_SIZE;
}

static int cp_get_regs_len(struct net_device *dev)
{
        return CP_REGS_SIZE;
}

static int cp_get_sset_count (struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return CP_NUM_STATS;
        default:
                return -EOPNOTSUPP;
        }
}

static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        rc = mii_ethtool_gset(&cp->mii_if, cmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}

static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        rc = mii_ethtool_sset(&cp->mii_if, cmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return mii_nway_restart(&cp->mii_if);
}

static u32 cp_get_msglevel(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
        struct cp_private *cp = netdev_priv(dev);
        cp->msg_enable = value;
}
1397
1398static int cp_set_features(struct net_device *dev, netdev_features_t features)
1399{
1400        struct cp_private *cp = netdev_priv(dev);
1401        unsigned long flags;
1402
1403        if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1404                return 0;
1405
1406        spin_lock_irqsave(&cp->lock, flags);
1407
1408        if (features & NETIF_F_RXCSUM)
1409                cp->cpcmd |= RxChkSum;
1410        else
1411                cp->cpcmd &= ~RxChkSum;
1412
1413        if (features & NETIF_F_HW_VLAN_RX)
1414                cp->cpcmd |= RxVlanOn;
1415        else
1416                cp->cpcmd &= ~RxVlanOn;
1417
1418        cpw16_f(CpCmd, cp->cpcmd);
1419        spin_unlock_irqrestore(&cp->lock, flags);
1420
1421        return 0;
1422}
1423
1424static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1425                        void *p)
1426{
1427        struct cp_private *cp = netdev_priv(dev);
1428        unsigned long flags;
1429
1430        if (regs->len < CP_REGS_SIZE)
1431                return /* -EINVAL */;
1432
1433        regs->version = CP_REGS_VER;
1434
1435        spin_lock_irqsave(&cp->lock, flags);
1436        memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1437        spin_unlock_irqrestore(&cp->lock, flags);
1438}
1439
1440static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1441{
1442        struct cp_private *cp = netdev_priv(dev);
1443        unsigned long flags;
1444
1445        spin_lock_irqsave (&cp->lock, flags);
1446        netdev_get_wol (cp, wol);
1447        spin_unlock_irqrestore (&cp->lock, flags);
1448}
1449
1450static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1451{
1452        struct cp_private *cp = netdev_priv(dev);
1453        unsigned long flags;
1454        int rc;
1455
1456        spin_lock_irqsave (&cp->lock, flags);
1457        rc = netdev_set_wol (cp, wol);
1458        spin_unlock_irqrestore (&cp->lock, flags);
1459
1460        return rc;
1461}
1462
1463static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1464{
1465        switch (stringset) {
1466        case ETH_SS_STATS:
1467                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1468                break;
1469        default:
1470                BUG();
1471                break;
1472        }
1473}
1474
1475static void cp_get_ethtool_stats (struct net_device *dev,
1476                                  struct ethtool_stats *estats, u64 *tmp_stats)
1477{
1478        struct cp_private *cp = netdev_priv(dev);
1479        struct cp_dma_stats *nic_stats;
1480        dma_addr_t dma;
1481        int i;
1482
1483        nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1484                                       &dma, GFP_KERNEL);
1485        if (!nic_stats)
1486                return;
1487
1488        /* begin NIC statistics dump */
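        /* The chip DMA-writes a cp_dma_stats block to the 64-bit address
         * programmed into StatsAddr; DumpStats in the low dword kicks off
         * the dump and is cleared by hardware on completion, which the
         * loop below polls for (up to ~10 ms).
         */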
1489        cpw32(StatsAddr + 4, (u64)dma >> 32);
1490        cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1491        cpr32(StatsAddr);
1492
1493        for (i = 0; i < 1000; i++) {
1494                if ((cpr32(StatsAddr) & DumpStats) == 0)
1495                        break;
1496                udelay(10);
1497        }
1498        cpw32(StatsAddr, 0);
1499        cpw32(StatsAddr + 4, 0);
1500        cpr32(StatsAddr);
1501
1502        i = 0;
1503        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1504        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1505        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1506        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1507        tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1508        tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1509        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1510        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1511        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1512        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1513        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1514        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1515        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1516        tmp_stats[i++] = cp->cp_stats.rx_frags;
1517        BUG_ON(i != CP_NUM_STATS);
1518
1519        dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1520}
1521
1522static const struct ethtool_ops cp_ethtool_ops = {
1523        .get_drvinfo            = cp_get_drvinfo,
1524        .get_regs_len           = cp_get_regs_len,
1525        .get_sset_count         = cp_get_sset_count,
1526        .get_settings           = cp_get_settings,
1527        .set_settings           = cp_set_settings,
1528        .nway_reset             = cp_nway_reset,
1529        .get_link               = ethtool_op_get_link,
1530        .get_msglevel           = cp_get_msglevel,
1531        .set_msglevel           = cp_set_msglevel,
1532        .get_regs               = cp_get_regs,
1533        .get_wol                = cp_get_wol,
1534        .set_wol                = cp_set_wol,
1535        .get_strings            = cp_get_strings,
1536        .get_ethtool_stats      = cp_get_ethtool_stats,
1537        .get_eeprom_len         = cp_get_eeprom_len,
1538        .get_eeprom             = cp_get_eeprom,
1539        .set_eeprom             = cp_set_eeprom,
1540        .get_ringparam          = cp_get_ringparam,
1541};
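
/* These hooks back the ethtool(8) ioctls: e.g. "ethtool -i" calls
 * cp_get_drvinfo, "ethtool -S" cp_get_ethtool_stats, "ethtool -g"
 * cp_get_ringparam, and "ethtool -e"/"-E" the EEPROM accessors.
 */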
1542
1543static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1544{
1545        struct cp_private *cp = netdev_priv(dev);
1546        int rc;
1547        unsigned long flags;
1548
1549        if (!netif_running(dev))
1550                return -EINVAL;
1551
1552        spin_lock_irqsave(&cp->lock, flags);
1553        rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1554        spin_unlock_irqrestore(&cp->lock, flags);
1555        return rc;
1556}
1557
1558static int cp_set_mac_address(struct net_device *dev, void *p)
1559{
1560        struct cp_private *cp = netdev_priv(dev);
1561        struct sockaddr *addr = p;
1562
1563        if (!is_valid_ether_addr(addr->sa_data))
1564                return -EADDRNOTAVAIL;
1565
1566        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1567
1568        spin_lock_irq(&cp->lock);
1569
1570        cpw8_f(Cfg9346, Cfg9346_Unlock);
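        /* Each 32-bit write stores four address bytes in ascending
         * register order: e.g. for 00:11:22:33:44:55 the first write
         * puts 0x33221100 into MAC0..MAC3.
         */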
1571        cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1572        cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1573        cpw8_f(Cfg9346, Cfg9346_Lock);
1574
1575        spin_unlock_irq(&cp->lock);
1576
1577        return 0;
1578}
1579
1580/* Serial EEPROM section. */
1581
1582/*  EEPROM_Ctrl bits. */
1583#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
1584#define EE_CS                   0x08    /* EEPROM chip select. */
1585#define EE_DATA_WRITE   0x02    /* EEPROM chip data in. */
1586#define EE_WRITE_0              0x00
1587#define EE_WRITE_1              0x02
1588#define EE_DATA_READ    0x01    /* EEPROM chip data out. */
1589#define EE_ENB                  (0x80 | EE_CS)
1590
1591/* Delay between EEPROM clock transitions.
1592   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1593 */
1594
1595#define eeprom_delay()  readb(ee_addr)
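/* A dummy readb() both flushes the posted write and supplies the delay. */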
1596
1597/* The EEPROM commands include the always-set leading bit. */
1598#define EE_EXTEND_CMD   (4)
1599#define EE_WRITE_CMD    (5)
1600#define EE_READ_CMD             (6)
1601#define EE_ERASE_CMD    (7)
1602
1603#define EE_EWDS_ADDR    (0)
1604#define EE_WRAL_ADDR    (1)
1605#define EE_ERAL_ADDR    (2)
1606#define EE_EWEN_ADDR    (3)
1607
1608#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1609
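/* Worked example of the framing: reading word 7 of a 93C46-sized part
 * (addr_len == 6) sends read_cmd = 7 | (EE_READ_CMD << 6) = 0x187,
 * clocked out MSB-first over 3 + 6 bits: start bit 1, opcode 10,
 * address 000111.
 */
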
1610static void eeprom_cmd_start(void __iomem *ee_addr)
1611{
1612        writeb (EE_ENB & ~EE_CS, ee_addr);
1613        writeb (EE_ENB, ee_addr);
1614        eeprom_delay ();
1615}
1616
1617static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1618{
1619        int i;
1620
1621        /* Shift the command bits out. */
1622        for (i = cmd_len - 1; i >= 0; i--) {
1623                int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1624                writeb (EE_ENB | dataval, ee_addr);
1625                eeprom_delay ();
1626                writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1627                eeprom_delay ();
1628        }
1629        writeb (EE_ENB, ee_addr);
1630        eeprom_delay ();
1631}
1632
1633static void eeprom_cmd_end(void __iomem *ee_addr)
1634{
1635        writeb (~EE_CS, ee_addr);
1636        eeprom_delay ();
1637}
1638
1639static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1640                              int addr_len)
1641{
1642        int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
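        /* e.g. EWEN on a 93C46 (addr_len == 6): cmd = (4 << 6) | (3 << 4)
         * = 0x130 -- start bit 1, opcode 00, top address bits 11. */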
1643
1644        eeprom_cmd_start(ee_addr);
1645        eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1646        eeprom_cmd_end(ee_addr);
1647}
1648
1649static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1650{
1651        int i;
1652        u16 retval = 0;
1653        void __iomem *ee_addr = ioaddr + Cfg9346;
1654        int read_cmd = location | (EE_READ_CMD << addr_len);
1655
1656        eeprom_cmd_start(ee_addr);
1657        eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1658
1659        for (i = 16; i > 0; i--) {
1660                writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1661                eeprom_delay ();
1662                retval = (retval << 1) |
1663                         ((readb (ee_addr) & EE_DATA_READ) ? 1 : 0);
1665                writeb (EE_ENB, ee_addr);
1666                eeprom_delay ();
1667        }
1668
1669        eeprom_cmd_end(ee_addr);
1670
1671        return retval;
1672}
1673
1674static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1675                         int addr_len)
1676{
1677        int i;
1678        void __iomem *ee_addr = ioaddr + Cfg9346;
1679        int write_cmd = location | (EE_WRITE_CMD << addr_len);
1680
1681        eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1682
1683        eeprom_cmd_start(ee_addr);
1684        eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1685        eeprom_cmd(ee_addr, val, 16);
1686        eeprom_cmd_end(ee_addr);
1687
1688        eeprom_cmd_start(ee_addr);
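        /* poll DO: it stays low during the internal write cycle and is
         * driven high once the part is ready for another command */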
1689        for (i = 0; i < 20000; i++)
1690                if (readb(ee_addr) & EE_DATA_READ)
1691                        break;
1692        eeprom_cmd_end(ee_addr);
1693
1694        eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1695}
1696
1697static int cp_get_eeprom_len(struct net_device *dev)
1698{
1699        struct cp_private *cp = netdev_priv(dev);
1700        int size;
1701
1702        spin_lock_irq(&cp->lock);
1703        size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1704        spin_unlock_irq(&cp->lock);
1705
1706        return size;
1707}
1708
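/* EEPROM words are little-endian, so an odd starting offset takes the
 * high byte of the first word: e.g. offset 1, len 3 yields EEPROM
 * bytes 1..3.
 */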
1709static int cp_get_eeprom(struct net_device *dev,
1710                         struct ethtool_eeprom *eeprom, u8 *data)
1711{
1712        struct cp_private *cp = netdev_priv(dev);
1713        unsigned int addr_len;
1714        u16 val;
1715        u32 offset = eeprom->offset >> 1;
1716        u32 len = eeprom->len;
1717        u32 i = 0;
1718
1719        eeprom->magic = CP_EEPROM_MAGIC;
1720
1721        spin_lock_irq(&cp->lock);
1722
1723        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1724
1725        if (eeprom->offset & 1) {
1726                val = read_eeprom(cp->regs, offset, addr_len);
1727                data[i++] = (u8)(val >> 8);
1728                offset++;
1729        }
1730
1731        while (i < len - 1) {
1732                val = read_eeprom(cp->regs, offset, addr_len);
1733                data[i++] = (u8)val;
1734                data[i++] = (u8)(val >> 8);
1735                offset++;
1736        }
1737
1738        if (i < len) {
1739                val = read_eeprom(cp->regs, offset, addr_len);
1740                data[i] = (u8)val;
1741        }
1742
1743        spin_unlock_irq(&cp->lock);
1744        return 0;
1745}
1746
1747static int cp_set_eeprom(struct net_device *dev,
1748                         struct ethtool_eeprom *eeprom, u8 *data)
1749{
1750        struct cp_private *cp = netdev_priv(dev);
1751        unsigned int addr_len;
1752        u16 val;
1753        u32 offset = eeprom->offset >> 1;
1754        u32 len = eeprom->len;
1755        u32 i = 0;
1756
1757        if (eeprom->magic != CP_EEPROM_MAGIC)
1758                return -EINVAL;
1759
1760        spin_lock_irq(&cp->lock);
1761
1762        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1763
1764        if (eeprom->offset & 1) {
1765                val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1766                val |= (u16)data[i++] << 8;
1767                write_eeprom(cp->regs, offset, val, addr_len);
1768                offset++;
1769        }
1770
1771        while (i < len - 1) {
1772                val = (u16)data[i++];
1773                val |= (u16)data[i++] << 8;
1774                write_eeprom(cp->regs, offset, val, addr_len);
1775                offset++;
1776        }
1777
1778        if (i < len) {
1779                val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1780                val |= (u16)data[i];
1781                write_eeprom(cp->regs, offset, val, addr_len);
1782        }
1783
1784        spin_unlock_irq(&cp->lock);
1785        return 0;
1786}
1787
1788/* Put the board into D3hot state and wait for WakeUp signal */
1789static void cp_set_d3_state (struct cp_private *cp)
1790{
1791        pci_enable_wake (cp->pdev, PCI_D0, 1); /* Enable PME# generation */
1792        pci_set_power_state (cp->pdev, PCI_D3hot);
1793}
1794
1795static const struct net_device_ops cp_netdev_ops = {
1796        .ndo_open               = cp_open,
1797        .ndo_stop               = cp_close,
1798        .ndo_validate_addr      = eth_validate_addr,
1799        .ndo_set_mac_address    = cp_set_mac_address,
1800        .ndo_set_rx_mode        = cp_set_rx_mode,
1801        .ndo_get_stats          = cp_get_stats,
1802        .ndo_do_ioctl           = cp_ioctl,
1803        .ndo_start_xmit         = cp_start_xmit,
1804        .ndo_tx_timeout         = cp_tx_timeout,
1805        .ndo_set_features       = cp_set_features,
1806#ifdef BROKEN
1807        .ndo_change_mtu         = cp_change_mtu,
1808#endif
1809
1810#ifdef CONFIG_NET_POLL_CONTROLLER
1811        .ndo_poll_controller    = cp_poll_controller,
1812#endif
1813};
1814
1815static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1816{
1817        struct net_device *dev;
1818        struct cp_private *cp;
1819        int rc;
1820        void __iomem *regs;
1821        resource_size_t pciaddr;
1822        unsigned int addr_len, i, pci_using_dac;
1823
1824#ifndef MODULE
1825        static int version_printed;
1826        if (version_printed++ == 0)
1827                pr_info("%s", version);
1828#endif
1829
1830        if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1831            pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1832                dev_info(&pdev->dev,
1833                         "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1834                         pdev->vendor, pdev->device, pdev->revision);
1835                return -ENODEV;
1836        }
1837
1838        dev = alloc_etherdev(sizeof(struct cp_private));
1839        if (!dev)
1840                return -ENOMEM;
1841        SET_NETDEV_DEV(dev, &pdev->dev);
1842
1843        cp = netdev_priv(dev);
1844        cp->pdev = pdev;
1845        cp->dev = dev;
1846        cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1847        spin_lock_init (&cp->lock);
1848        cp->mii_if.dev = dev;
1849        cp->mii_if.mdio_read = mdio_read;
1850        cp->mii_if.mdio_write = mdio_write;
1851        cp->mii_if.phy_id = CP_INTERNAL_PHY;
1852        cp->mii_if.phy_id_mask = 0x1f;
1853        cp->mii_if.reg_num_mask = 0x1f;
1854        cp_set_rxbufsize(cp);
1855
1856        rc = pci_enable_device(pdev);
1857        if (rc)
1858                goto err_out_free;
1859
1860        rc = pci_set_mwi(pdev);
1861        if (rc)
1862                goto err_out_disable;
1863
1864        rc = pci_request_regions(pdev, DRV_NAME);
1865        if (rc)
1866                goto err_out_mwi;
1867
1868        pciaddr = pci_resource_start(pdev, 1);
1869        if (!pciaddr) {
1870                rc = -EIO;
1871                dev_err(&pdev->dev, "no MMIO resource\n");
1872                goto err_out_res;
1873        }
1874        if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1875                rc = -EIO;
1876                dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1877                        (unsigned long long)pci_resource_len(pdev, 1));
1878                goto err_out_res;
1879        }
1880
1881        /* Configure DMA attributes. */
1882        if ((sizeof(dma_addr_t) > 4) &&
1883            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1884            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1885                pci_using_dac = 1;
1886        } else {
1887                pci_using_dac = 0;
1888
1889                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1890                if (rc) {
1891                        dev_err(&pdev->dev,
1892                                "No usable DMA configuration, aborting\n");
1893                        goto err_out_res;
1894                }
1895                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1896                if (rc) {
1897                        dev_err(&pdev->dev,
1898                                "No usable consistent DMA configuration, aborting\n");
1899                        goto err_out_res;
1900                }
1901        }
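        /* Note: on current kernels the two mask calls collapse to
         * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) with a
         * 32-bit fallback; the legacy pci_* wrappers match this driver's
         * vintage.
         */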
1902
1903        cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1904                    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1905
1906        dev->features |= NETIF_F_RXCSUM;
1907        dev->hw_features |= NETIF_F_RXCSUM;
1908
1909        regs = ioremap(pciaddr, CP_REGS_SIZE);
1910        if (!regs) {
1911                rc = -EIO;
1912                dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1913                        (unsigned long long)pci_resource_len(pdev, 1),
1914                        (unsigned long long)pciaddr);
1915                goto err_out_res;
1916        }
1917        dev->base_addr = (unsigned long) regs;
1918        cp->regs = regs;
1919
1920        cp_stop_hw(cp);
1921
1922        /* read MAC address from EEPROM */
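        /* (the station address occupies EEPROM words 7, 8 and 9) */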
1923        addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1924        for (i = 0; i < 3; i++)
1925                ((__le16 *) (dev->dev_addr))[i] =
1926                    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1927        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1928
1929        dev->netdev_ops = &cp_netdev_ops;
1930        netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1931        dev->ethtool_ops = &cp_ethtool_ops;
1932        dev->watchdog_timeo = TX_TIMEOUT;
1933
1934        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1935
1936        if (pci_using_dac)
1937                dev->features |= NETIF_F_HIGHDMA;
1938
1939        /* disabled by default until verified */
1940        dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1941                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1942        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1943                NETIF_F_HIGHDMA;
1944
1945        dev->irq = pdev->irq;
1946
1947        rc = register_netdev(dev);
1948        if (rc)
1949                goto err_out_iomap;
1950
1951        netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
1952                    dev->base_addr, dev->dev_addr, dev->irq);
1953
1954        pci_set_drvdata(pdev, dev);
1955
1956        /* enable busmastering and memory-write-invalidate */
1957        pci_set_master(pdev);
1958
1959        if (cp->wol_enabled)
1960                cp_set_d3_state (cp);
1961
1962        return 0;
1963
1964err_out_iomap:
1965        iounmap(regs);
1966err_out_res:
1967        pci_release_regions(pdev);
1968err_out_mwi:
1969        pci_clear_mwi(pdev);
1970err_out_disable:
1971        pci_disable_device(pdev);
1972err_out_free:
1973        free_netdev(dev);
1974        return rc;
1975}
1976
1977static void cp_remove_one (struct pci_dev *pdev)
1978{
1979        struct net_device *dev = pci_get_drvdata(pdev);
1980        struct cp_private *cp = netdev_priv(dev);
1981
1982        unregister_netdev(dev);
1983        iounmap(cp->regs);
1984        if (cp->wol_enabled)
1985                pci_set_power_state (pdev, PCI_D0);
1986        pci_release_regions(pdev);
1987        pci_clear_mwi(pdev);
1988        pci_disable_device(pdev);
1989        pci_set_drvdata(pdev, NULL);
1990        free_netdev(dev);
1991}
1992
1993#ifdef CONFIG_PM
1994static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
1995{
1996        struct net_device *dev = pci_get_drvdata(pdev);
1997        struct cp_private *cp = netdev_priv(dev);
1998        unsigned long flags;
1999
2000        if (!netif_running(dev))
2001                return 0;
2002
2003        netif_device_detach (dev);
2004        netif_stop_queue (dev);
2005
2006        spin_lock_irqsave (&cp->lock, flags);
2007
2008        /* Disable Rx and Tx */
2009        cpw16 (IntrMask, 0);
2010        cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2011
2012        spin_unlock_irqrestore (&cp->lock, flags);
2013
2014        pci_save_state(pdev);
2015        pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2016        pci_set_power_state(pdev, pci_choose_state(pdev, state));
2017
2018        return 0;
2019}
2020
2021static int cp_resume (struct pci_dev *pdev)
2022{
2023        struct net_device *dev = pci_get_drvdata (pdev);
2024        struct cp_private *cp = netdev_priv(dev);
2025        unsigned long flags;
2026
2027        if (!netif_running(dev))
2028                return 0;
2029
2030        netif_device_attach (dev);
2031
2032        pci_set_power_state(pdev, PCI_D0);
2033        pci_restore_state(pdev);
2034        pci_enable_wake(pdev, PCI_D0, 0);
2035
2036        /* FIXME: Rx may misbehave if the Rx ring buffer was depleted before suspend */
2037        cp_init_rings_index (cp);
2038        cp_init_hw (cp);
2039        cp_enable_irq(cp);
2040        netif_start_queue (dev);
2041
2042        spin_lock_irqsave (&cp->lock, flags);
2043
2044        mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2045
2046        spin_unlock_irqrestore (&cp->lock, flags);
2047
2048        return 0;
2049}
2050#endif /* CONFIG_PM */
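
/* These legacy pci_driver hooks predate dev_pm_ops; a conversion would
 * wrap them with SIMPLE_DEV_PM_OPS and drop the manual pci_save_state/
 * pci_set_power_state calls, which the PCI core now performs itself.
 */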
2051
2052static struct pci_driver cp_driver = {
2053        .name         = DRV_NAME,
2054        .id_table     = cp_pci_tbl,
2055        .probe        = cp_init_one,
2056        .remove       = cp_remove_one,
2057#ifdef CONFIG_PM
2058        .resume       = cp_resume,
2059        .suspend      = cp_suspend,
2060#endif
2061};
2062
2063static int __init cp_init (void)
2064{
2065#ifdef MODULE
2066        pr_info("%s", version);
2067#endif
2068        return pci_register_driver(&cp_driver);
2069}
2070
2071static void __exit cp_exit (void)
2072{
2073        pci_unregister_driver (&cp_driver);
2074}
2075
2076module_init(cp_init);
2077module_exit(cp_exit);
2078