linux/drivers/net/ethernet/realtek/8139cp.c
/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
        Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

        Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
        Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
        Copyright 2001 Manfred Spraul                               [natsemi.c]
        Copyright 1999-2001 by Donald Becker.                       [natsemi.c]
        Written 1997-2001 by Donald Becker.                         [8139too.c]
        Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        See the file COPYING in this distribution for more information.

        Contributors:

                Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
                PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
                LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

        TODO:
        * Test Tx checksumming thoroughly

        Low priority TODO:
        * Complete reset on PciErr
        * Consider Rx interrupt mitigation using TimerIntr
        * Investigate using skb->priority with h/w VLAN priority
        * Investigate using High Priority Tx Queue with skb->priority
        * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
        * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
        * Implement Tx software interrupt mitigation via
          Tx descriptor bit
        * The real minimum of CP_MIN_MTU is 4 bytes.  However,
          for this to be supported, one must(?) turn on packet padding.
        * Support external MII transceivers (patch available)

        NOTES:
        * TX checksumming is considered experimental.  It is off by
          default, use ethtool to turn it on.

 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME                "8139cp"
#define DRV_VERSION             "1.3"
#define DRV_RELDATE             "Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define CP_DEF_MSG_ENABLE       (NETIF_MSG_DRV          | \
                                 NETIF_MSG_PROBE        | \
                                 NETIF_MSG_LINK)
#define CP_NUM_STATS            14      /* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE           64      /* size in bytes of DMA stats block */
#define CP_REGS_SIZE            (0xff + 1)
#define CP_REGS_VER             1               /* version 1 */
#define CP_RX_RING_SIZE         64
#define CP_TX_RING_SIZE         64
#define CP_RING_BYTES           \
                ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +   \
                 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +   \
                 CP_STATS_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)              (((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)                                      \
        (((CP)->tx_tail <= (CP)->tx_head) ?                     \
          (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :       \
          (CP)->tx_tail - (CP)->tx_head - 1)
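/*
 * Worked example: one slot is always left unused so a full ring can be
 * told apart from an empty one.  With CP_TX_RING_SIZE = 64, tx_tail = 3
 * and tx_head = 5, TX_BUFFS_AVAIL yields 3 + 63 - 5 = 61 free slots;
 * with tx_head == tx_tail the ring is empty and 63 slots are free.
 */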

#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/
#define CP_INTERNAL_PHY         32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH          5       /* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST            4       /* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST            6       /* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH         256     /* Early Tx threshold, in bytes */
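/*
 * Decoding the log_2(bytes)-4 fields above: bytes = 16 << value, so
 * RX_FIFO_THRESH 5 is 512 bytes, RX_DMA_BURST 4 is 256 bytes and
 * TX_DMA_BURST 6 is 1024 bytes, matching the per-line comments.
 */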

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU              60      /* TODO: allow lower, but pad */
#define CP_MAX_MTU              4096

enum {
        /* NIC register offsets */
        MAC0            = 0x00, /* Ethernet hardware address. */
        MAR0            = 0x08, /* Multicast filter. */
        StatsAddr       = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
        TxRingAddr      = 0x20, /* 64-bit start addr of Tx ring */
        HiTxRingAddr    = 0x28, /* 64-bit start addr of high priority Tx ring */
        Cmd             = 0x37, /* Command register */
        IntrMask        = 0x3C, /* Interrupt mask */
        IntrStatus      = 0x3E, /* Interrupt status */
        TxConfig        = 0x40, /* Tx configuration */
        ChipVersion     = 0x43, /* 8-bit chip version, inside TxConfig */
        RxConfig        = 0x44, /* Rx configuration */
        RxMissed        = 0x4C, /* 24 bits valid, write clears */
        Cfg9346         = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
        Config1         = 0x52, /* Config1 */
        Config3         = 0x59, /* Config3 */
        Config4         = 0x5A, /* Config4 */
        MultiIntr       = 0x5C, /* Multiple interrupt select */
        BasicModeCtrl   = 0x62, /* MII BMCR */
        BasicModeStatus = 0x64, /* MII BMSR */
        NWayAdvert      = 0x66, /* MII ADVERTISE */
        NWayLPAR        = 0x68, /* MII LPA */
        NWayExpansion   = 0x6A, /* MII Expansion */
        Config5         = 0xD8, /* Config5 */
        TxPoll          = 0xD9, /* Tell chip to check Tx descriptors for work */
        RxMaxSize       = 0xDA, /* Max size of an Rx packet (8169 only) */
        CpCmd           = 0xE0, /* C+ Command register (C+ mode only) */
        IntrMitigate    = 0xE2, /* rx/tx interrupt mitigation control */
        RxRingAddr      = 0xE4, /* 64-bit start addr of Rx ring */
        TxThresh        = 0xEC, /* Early Tx threshold */
        OldRxBufAddr    = 0x30, /* DMA address of Rx ring buffer (C mode) */
        OldTSD0         = 0x10, /* DMA address of first Tx desc (C mode) */

        /* Tx and Rx status descriptors */
        DescOwn         = (1 << 31), /* Descriptor is owned by NIC */
        RingEnd         = (1 << 30), /* End of descriptor ring */
        FirstFrag       = (1 << 29), /* First segment of a packet */
        LastFrag        = (1 << 28), /* Final segment of a packet */
        LargeSend       = (1 << 27), /* TCP Large Send Offload (TSO) */
        MSSShift        = 16,        /* MSS value position */
        MSSMask         = 0xfff,     /* MSS value: 11 bits */
        TxError         = (1 << 23), /* Tx error summary */
        RxError         = (1 << 20), /* Rx error summary */
        IPCS            = (1 << 18), /* Calculate IP checksum */
        UDPCS           = (1 << 17), /* Calculate UDP/IP checksum */
        TCPCS           = (1 << 16), /* Calculate TCP/IP checksum */
        TxVlanTag       = (1 << 17), /* Add VLAN tag */
        RxVlanTagged    = (1 << 16), /* Rx VLAN tag available */
        IPFail          = (1 << 15), /* IP checksum failed */
        UDPFail         = (1 << 14), /* UDP/IP checksum failed */
        TCPFail         = (1 << 13), /* TCP/IP checksum failed */
        NormalTxPoll    = (1 << 6),  /* One or more normal Tx packets to send */
        PID1            = (1 << 17), /* 2 protocol id bits:  0==non-IP, */
        PID0            = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
        RxProtoTCP      = 1,
        RxProtoUDP      = 2,
        RxProtoIP       = 3,
        TxFIFOUnder     = (1 << 25), /* Tx FIFO underrun */
        TxOWC           = (1 << 22), /* Tx Out-of-window collision */
        TxLinkFail      = (1 << 21), /* Link failed during Tx of packet */
        TxMaxCol        = (1 << 20), /* Tx aborted due to excessive collisions */
        TxColCntShift   = 16,        /* Shift, to get 4-bit Tx collision cnt */
        TxColCntMask    = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
        RxErrFrame      = (1 << 27), /* Rx frame alignment error */
        RxMcast         = (1 << 26), /* Rx multicast packet rcv'd */
        RxErrCRC        = (1 << 18), /* Rx CRC error */
        RxErrRunt       = (1 << 19), /* Rx error, packet < 64 bytes */
        RxErrLong       = (1 << 21), /* Rx error, packet > 4096 bytes */
        RxErrFIFO       = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

        /* StatsAddr register */
        DumpStats       = (1 << 3),  /* Begin stats dump */

        /* RxConfig register */
        RxCfgFIFOShift  = 13,        /* Shift, to get Rx FIFO thresh value */
        RxCfgDMAShift   = 8,         /* Shift, to get Rx Max DMA value */
        AcceptErr       = 0x20,      /* Accept packets with CRC errors */
        AcceptRunt      = 0x10,      /* Accept runt (<64 bytes) packets */
        AcceptBroadcast = 0x08,      /* Accept broadcast packets */
        AcceptMulticast = 0x04,      /* Accept multicast packets */
        AcceptMyPhys    = 0x02,      /* Accept pkts with our MAC as dest */
        AcceptAllPhys   = 0x01,      /* Accept all pkts w/ physical dest */

        /* IntrMask / IntrStatus registers */
        PciErr          = (1 << 15), /* System error on the PCI bus */
        TimerIntr       = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
        LenChg          = (1 << 13), /* Cable length change */
        SWInt           = (1 << 8),  /* Software-requested interrupt */
        TxEmpty         = (1 << 7),  /* No Tx descriptors available */
        RxFIFOOvr       = (1 << 6),  /* Rx FIFO Overflow */
        LinkChg         = (1 << 5),  /* Packet underrun, or link change */
        RxEmpty         = (1 << 4),  /* No Rx descriptors available */
        TxErr           = (1 << 3),  /* Tx error */
        TxOK            = (1 << 2),  /* Tx packet sent */
        RxErr           = (1 << 1),  /* Rx error */
        RxOK            = (1 << 0),  /* Rx packet received */
        IntrResvd       = (1 << 10), /* reserved, according to RealTek engineers,
                                        but hardware likes to raise it */

        IntrAll         = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
                          RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
                          RxErr | RxOK | IntrResvd,

        /* C mode command register */
        CmdReset        = (1 << 4),  /* Enable to reset; self-clearing */
        RxOn            = (1 << 3),  /* Rx mode enable */
        TxOn            = (1 << 2),  /* Tx mode enable */

        /* C+ mode command register */
        RxVlanOn        = (1 << 6),  /* Rx VLAN de-tagging enable */
        RxChkSum        = (1 << 5),  /* Rx checksum offload enable */
        PCIDAC          = (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
        PCIMulRW        = (1 << 3),  /* Enable PCI read/write multiple */
        CpRxOn          = (1 << 1),  /* Rx mode enable */
        CpTxOn          = (1 << 0),  /* Tx mode enable */

        /* Cfg9346 EEPROM control register */
        Cfg9346_Lock    = 0x00,      /* Lock ConfigX/MII register access */
        Cfg9346_Unlock  = 0xC0,      /* Unlock ConfigX/MII register access */

        /* TxConfig register */
        IFG             = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
        TxDMAShift      = 8,         /* DMA burst value (0-7) is shift this many bits */

        /* Early Tx Threshold register */
        TxThreshMask    = 0x3f,      /* Mask bits 5-0 */
        TxThreshMax     = 2048,      /* Max early Tx threshold */

        /* Config1 register */
        DriverLoaded    = (1 << 5),  /* Software marker, driver is loaded */
        LWACT           = (1 << 4),  /* LWAKE active mode */
        PMEnable        = (1 << 0),  /* Enable various PM features of chip */

        /* Config3 register */
        PARMEnable      = (1 << 6),  /* Enable auto-loading of PHY parms */
        MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
        LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */

        /* Config4 register */
        LWPTN           = (1 << 1),  /* LWAKE Pattern */
        LWPME           = (1 << 4),  /* LANWAKE vs PMEB */

        /* Config5 register */
        BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
        MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
        UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
        LANWake         = (1 << 1),  /* Enable LANWake signal */
        PMEStatus       = (1 << 0),  /* PME status can be reset by PCI RST# */

        cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
        cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
        cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
          (RX_FIFO_THRESH << RxCfgFIFOShift) |
          (RX_DMA_BURST << RxCfgDMAShift);
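/* With the defaults above this evaluates to (5 << 13) | (4 << 8) = 0xa400. */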

struct cp_desc {
        __le32          opts1;
        __le32          opts2;
        __le64          addr;
};
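/*
 * One 16-byte descriptor as the chip sees it (little-endian fields):
 * opts1 holds ownership/status bits plus the buffer length, opts2 the
 * VLAN tag, and addr the 64-bit DMA address of the data buffer.
 */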

struct cp_dma_stats {
        __le64                  tx_ok;
        __le64                  rx_ok;
        __le64                  tx_err;
        __le32                  rx_err;
        __le16                  rx_fifo;
        __le16                  frame_align;
        __le32                  tx_ok_1col;
        __le32                  tx_ok_mcol;
        __le64                  rx_ok_phys;
        __le64                  rx_ok_bcast;
        __le32                  rx_ok_mcast;
        __le16                  tx_abort;
        __le16                  tx_underrun;
} __packed;
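/*
 * Field sizes sum to exactly CP_STATS_SIZE (64) bytes: five __le64,
 * four __le32 and four __le16.  __packed keeps the layout byte-for-byte
 * identical to the chip's DMA stats block.
 */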

struct cp_extra_stats {
        unsigned long           rx_frags;
};

struct cp_private {
        void                    __iomem *regs;
        struct net_device       *dev;
        spinlock_t              lock;
        u32                     msg_enable;

        struct napi_struct      napi;

        struct pci_dev          *pdev;
        u32                     rx_config;
        u16                     cpcmd;

        struct cp_extra_stats   cp_stats;

        unsigned                rx_head         ____cacheline_aligned;
        unsigned                rx_tail;
        struct cp_desc          *rx_ring;
        struct sk_buff          *rx_skb[CP_RX_RING_SIZE];

        unsigned                tx_head         ____cacheline_aligned;
        unsigned                tx_tail;
        struct cp_desc          *tx_ring;
        struct sk_buff          *tx_skb[CP_TX_RING_SIZE];

        unsigned                rx_buf_sz;
        unsigned                wol_enabled : 1; /* Is Wake-on-LAN enabled? */

        dma_addr_t              ring_dma;

        struct mii_if_info      mii_if;
};

#define cpr8(reg)       readb(cp->regs + (reg))
#define cpr16(reg)      readw(cp->regs + (reg))
#define cpr32(reg)      readl(cp->regs + (reg))
#define cpw8(reg,val)   writeb((val), cp->regs + (reg))
#define cpw16(reg,val)  writew((val), cp->regs + (reg))
#define cpw32(reg,val)  writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {                    \
        writeb((val), cp->regs + (reg));        \
        readb(cp->regs + (reg));                \
        } while (0)
#define cpw16_f(reg,val) do {                   \
        writew((val), cp->regs + (reg));        \
        readw(cp->regs + (reg));                \
        } while (0)
#define cpw32_f(reg,val) do {                   \
        writel((val), cp->regs + (reg));        \
        readl(cp->regs + (reg));                \
        } while (0)
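/*
 * The _f ("flush") variants read the register straight back after the
 * write.  MMIO writes may be posted on the PCI bus, and the read-back
 * forces them to complete before the macro returns; this matters where
 * the code relies on the write having reached the chip, e.g. when
 * unmasking interrupts or reprogramming DMA addresses.
 */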


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);

static struct {
        const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "tx_ok" },
        { "rx_ok" },
        { "tx_err" },
        { "rx_err" },
        { "rx_fifo" },
        { "frame_align" },
        { "tx_ok_1col" },
        { "tx_ok_mcol" },
        { "rx_ok_phys" },
        { "rx_ok_bcast" },
        { "rx_ok_mcast" },
        { "tx_abort" },
        { "tx_underrun" },
        { "rx_frags" },
};
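/* 14 keys in all, matching CP_NUM_STATS: the 13 cp_dma_stats counters
 * plus the driver-private rx_frags ("plus one" above).
 */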


static inline void cp_set_rxbufsize (struct cp_private *cp)
{
        unsigned int mtu = cp->dev->mtu;

        if (mtu > ETH_DATA_LEN)
                /* MTU + ethernet header + FCS + optional VLAN tag */
                cp->rx_buf_sz = mtu + ETH_HLEN + 8;
        else
                cp->rx_buf_sz = PKT_BUF_SZ;
}
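/*
 * Example: the standard 1500-byte MTU keeps the fixed 1536-byte buffer,
 * while a 4000-byte MTU gets 4000 + 14 (header) + 4 (FCS) + 4 (VLAN
 * tag) = 4022 bytes.
 */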

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
                              struct cp_desc *desc)
{
        u32 opts2 = le32_to_cpu(desc->opts2);

        skb->protocol = eth_type_trans (skb, cp->dev);

        cp->dev->stats.rx_packets++;
        cp->dev->stats.rx_bytes += skb->len;

        if (opts2 & RxVlanTagged)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));

        napi_gro_receive(&cp->napi, skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
                            u32 status, u32 len)
{
        netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
                  rx_tail, status, len);
        cp->dev->stats.rx_errors++;
        if (status & RxErrFrame)
                cp->dev->stats.rx_frame_errors++;
        if (status & RxErrCRC)
                cp->dev->stats.rx_crc_errors++;
        if ((status & RxErrRunt) || (status & RxErrLong))
                cp->dev->stats.rx_length_errors++;
        if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
                cp->dev->stats.rx_length_errors++;
        if (status & RxErrFIFO)
                cp->dev->stats.rx_fifo_errors++;
}

static inline unsigned int cp_rx_csum_ok (u32 status)
{
        unsigned int protocol = (status >> 16) & 0x3;

        if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
            ((protocol == RxProtoUDP) && !(status & UDPFail)))
                return 1;
        else
                return 0;
}
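/*
 * The two bits extracted above are PID1:PID0 from the Rx status word
 * (0 == non-IP, 1 == UDP/IP, 2 == TCP/IP, 3 == IP), so only TCP and
 * UDP frames can ever be reported as checksum-verified.
 */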

static int cp_rx_poll(struct napi_struct *napi, int budget)
{
        struct cp_private *cp = container_of(napi, struct cp_private, napi);
        struct net_device *dev = cp->dev;
        unsigned int rx_tail = cp->rx_tail;
        int rx;

rx_status_loop:
        rx = 0;
        cpw16(IntrStatus, cp_rx_intr_mask);

        while (rx < budget) {
                u32 status, len;
                dma_addr_t mapping, new_mapping;
                struct sk_buff *skb, *new_skb;
                struct cp_desc *desc;
                const unsigned buflen = cp->rx_buf_sz;

                skb = cp->rx_skb[rx_tail];
                BUG_ON(!skb);

                desc = &cp->rx_ring[rx_tail];
                status = le32_to_cpu(desc->opts1);
                if (status & DescOwn)
                        break;

                len = (status & 0x1fff) - 4;
                mapping = le64_to_cpu(desc->addr);

                if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
                        /* we don't support incoming fragmented frames.
                         * instead, we attempt to ensure that the
                         * pre-allocated RX skbs are properly sized such
                         * that RX fragments are never encountered
                         */
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        dev->stats.rx_dropped++;
                        cp->cp_stats.rx_frags++;
                        goto rx_next;
                }

                if (status & (RxError | RxErrFIFO)) {
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        goto rx_next;
                }

                netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
                          rx_tail, status, len);

                new_skb = napi_alloc_skb(napi, buflen);
                if (!new_skb) {
                        dev->stats.rx_dropped++;
                        goto rx_next;
                }

                new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
                        dev->stats.rx_dropped++;
                        kfree_skb(new_skb);
                        goto rx_next;
                }

                dma_unmap_single(&cp->pdev->dev, mapping,
                                 buflen, PCI_DMA_FROMDEVICE);

                /* Handle checksum offloading for incoming packets. */
                if (cp_rx_csum_ok(status))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                skb_put(skb, len);

                cp->rx_skb[rx_tail] = new_skb;

                cp_rx_skb(cp, skb, desc);
                rx++;
                mapping = new_mapping;

rx_next:
                cp->rx_ring[rx_tail].opts2 = 0;
                cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
                if (rx_tail == (CP_RX_RING_SIZE - 1))
                        desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
                                                  cp->rx_buf_sz);
                else
                        desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
                rx_tail = NEXT_RX(rx_tail);
        }

        cp->rx_tail = rx_tail;

        /* if we did not reach work limit, then we're done with
         * this round of polling
         */
        if (rx < budget) {
                unsigned long flags;

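                /* Re-check the hardware status before completing NAPI:
                 * packets that arrived after the loop above drained the
                 * ring would otherwise sit unprocessed until the next
                 * interrupt.
                 */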
                if (cpr16(IntrStatus) & cp_rx_intr_mask)
                        goto rx_status_loop;

                napi_gro_flush(napi, false);
                spin_lock_irqsave(&cp->lock, flags);
                __napi_complete(napi);
                cpw16_f(IntrMask, cp_intr_mask);
                spin_unlock_irqrestore(&cp->lock, flags);
        }

        return rx;
}

static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct cp_private *cp;
        int handled = 0;
        u16 status;

        if (unlikely(dev == NULL))
                return IRQ_NONE;
        cp = netdev_priv(dev);

        spin_lock(&cp->lock);

        status = cpr16(IntrStatus);
        if (!status || (status == 0xFFFF))
                goto out_unlock;

        handled = 1;

        netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
                  status, cpr8(Cmd), cpr16(CpCmd));

        cpw16(IntrStatus, status & ~cp_rx_intr_mask);

        /* close possible races with dev_close */
        if (unlikely(!netif_running(dev))) {
                cpw16(IntrMask, 0);
                goto out_unlock;
        }

        if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
                if (napi_schedule_prep(&cp->napi)) {
                        cpw16_f(IntrMask, cp_norx_intr_mask);
                        __napi_schedule(&cp->napi);
                }

        if (status & (TxOK | TxErr | TxEmpty | SWInt))
                cp_tx(cp);
        if (status & LinkChg)
                mii_check_media(&cp->mii_if, netif_msg_link(cp), false);


        if (status & PciErr) {
                u16 pci_status;

                pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
                pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
                netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
                           status, pci_status);

                /* TODO: reset hardware */
        }

out_unlock:
        spin_unlock(&cp->lock);

        return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        const int irq = cp->pdev->irq;

        disable_irq(irq);
        cp_interrupt(irq, dev);
        enable_irq(irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
        unsigned tx_head = cp->tx_head;
        unsigned tx_tail = cp->tx_tail;
        unsigned bytes_compl = 0, pkts_compl = 0;

        while (tx_tail != tx_head) {
                struct cp_desc *txd = cp->tx_ring + tx_tail;
                struct sk_buff *skb;
                u32 status;

                rmb();
                status = le32_to_cpu(txd->opts1);
                if (status & DescOwn)
                        break;

                skb = cp->tx_skb[tx_tail];
                BUG_ON(!skb);

                dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                                 le32_to_cpu(txd->opts1) & 0xffff,
                                 PCI_DMA_TODEVICE);

                if (status & LastFrag) {
                        if (status & (TxError | TxFIFOUnder)) {
                                netif_dbg(cp, tx_err, cp->dev,
                                          "tx err, status 0x%x\n", status);
                                cp->dev->stats.tx_errors++;
                                if (status & TxOWC)
                                        cp->dev->stats.tx_window_errors++;
                                if (status & TxMaxCol)
                                        cp->dev->stats.tx_aborted_errors++;
                                if (status & TxLinkFail)
                                        cp->dev->stats.tx_carrier_errors++;
                                if (status & TxFIFOUnder)
                                        cp->dev->stats.tx_fifo_errors++;
                        } else {
                                cp->dev->stats.collisions +=
                                        ((status >> TxColCntShift) & TxColCntMask);
                                cp->dev->stats.tx_packets++;
                                cp->dev->stats.tx_bytes += skb->len;
                                netif_dbg(cp, tx_done, cp->dev,
                                          "tx done, slot %d\n", tx_tail);
                        }
                        bytes_compl += skb->len;
                        pkts_compl++;
                        dev_kfree_skb_irq(skb);
                }

                cp->tx_skb[tx_tail] = NULL;

                tx_tail = NEXT_TX(tx_tail);
        }

        cp->tx_tail = tx_tail;

        netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
        if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
                netif_wake_queue(cp->dev);
}

static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
{
        return skb_vlan_tag_present(skb) ?
                TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
}
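/*
 * The byte swap mirrors the swab16() applied to opts2 in cp_rx_skb():
 * the chip evidently keeps the VLAN tag in network (big-endian) byte
 * order inside the otherwise little-endian descriptor.
 */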

static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
                                   int first, int entry_last)
{
        int frag, index;
        struct cp_desc *txd;
        skb_frag_t *this_frag;
        for (frag = 0; frag+first < entry_last; frag++) {
                index = first+frag;
                cp->tx_skb[index] = NULL;
                txd = &cp->tx_ring[index];
                this_frag = &skb_shinfo(skb)->frags[frag];
                dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                                 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
        }
}

static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned entry;
        u32 eor, flags;
        unsigned long intr_flags;
        __le32 opts2;
        int mss = 0;

        spin_lock_irqsave(&cp->lock, intr_flags);

        /* This is a hard error, log it. */
        if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&cp->lock, intr_flags);
                netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }

        entry = cp->tx_head;
        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
        mss = skb_shinfo(skb)->gso_size;

        opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));

        if (skb_shinfo(skb)->nr_frags == 0) {
                struct cp_desc *txd = &cp->tx_ring[entry];
                u32 len;
                dma_addr_t mapping;

                len = skb->len;
                mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(&cp->pdev->dev, mapping))
                        goto out_dma_error;

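                /* Publish the descriptor in two steps: opts2 and addr
                 * first, then opts1 with DescOwn last, each behind a
                 * write barrier, so the NIC can never observe a
                 * half-initialized descriptor that it already owns.
                 */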
                txd->opts2 = opts2;
                txd->addr = cpu_to_le64(mapping);
                wmb();

                flags = eor | len | DescOwn | FirstFrag | LastFrag;

                if (mss)
                        flags |= LargeSend | ((mss & MSSMask) << MSSShift);
                else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        const struct iphdr *ip = ip_hdr(skb);
                        if (ip->protocol == IPPROTO_TCP)
                                flags |= IPCS | TCPCS;
                        else if (ip->protocol == IPPROTO_UDP)
                                flags |= IPCS | UDPCS;
                        else
                                WARN_ON(1);     /* we need a WARN() */
                }

                txd->opts1 = cpu_to_le32(flags);
                wmb();

                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);
        } else {
                struct cp_desc *txd;
                u32 first_len, first_eor;
                dma_addr_t first_mapping;
                int frag, first_entry = entry;
                const struct iphdr *ip = ip_hdr(skb);

                /* We must give this initial chunk to the device last.
                 * Otherwise we could race with the device.
                 */
                first_eor = eor;
                first_len = skb_headlen(skb);
                first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                               first_len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(&cp->pdev->dev, first_mapping))
                        goto out_dma_error;

                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);

                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                        const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
                        u32 len;
                        u32 ctrl;
                        dma_addr_t mapping;

                        len = skb_frag_size(this_frag);
                        mapping = dma_map_single(&cp->pdev->dev,
                                                 skb_frag_address(this_frag),
                                                 len, PCI_DMA_TODEVICE);
                        if (dma_mapping_error(&cp->pdev->dev, mapping)) {
                                unwind_tx_frag_mapping(cp, skb, first_entry, entry);
                                goto out_dma_error;
                        }

                        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

                        ctrl = eor | len | DescOwn;

                        if (mss)
                                ctrl |= LargeSend |
                                        ((mss & MSSMask) << MSSShift);
                        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                if (ip->protocol == IPPROTO_TCP)
                                        ctrl |= IPCS | TCPCS;
                                else if (ip->protocol == IPPROTO_UDP)
                                        ctrl |= IPCS | UDPCS;
                                else
                                        BUG();
                        }

                        if (frag == skb_shinfo(skb)->nr_frags - 1)
                                ctrl |= LastFrag;

                        txd = &cp->tx_ring[entry];
                        txd->opts2 = opts2;
                        txd->addr = cpu_to_le64(mapping);
                        wmb();

                        txd->opts1 = cpu_to_le32(ctrl);
                        wmb();

                        cp->tx_skb[entry] = skb;
                        entry = NEXT_TX(entry);
                }

                txd = &cp->tx_ring[first_entry];
                txd->opts2 = opts2;
                txd->addr = cpu_to_le64(first_mapping);
                wmb();

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        if (ip->protocol == IPPROTO_TCP)
                                txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                         FirstFrag | DescOwn |
                                                         IPCS | TCPCS);
                        else if (ip->protocol == IPPROTO_UDP)
                                txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                         FirstFrag | DescOwn |
                                                         IPCS | UDPCS);
                        else
                                BUG();
                } else
                        txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                 FirstFrag | DescOwn);
                wmb();
        }
        cp->tx_head = entry;

        netdev_sent_queue(dev, skb->len);
        netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
                  entry, skb->len);
        if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
                netif_stop_queue(dev);

out_unlock:
        spin_unlock_irqrestore(&cp->lock, intr_flags);

        cpw8(TxPoll, NormalTxPoll);

        return NETDEV_TX_OK;
out_dma_error:
        dev_kfree_skb_any(skb);
        cp->dev->stats.tx_dropped++;
        goto out_unlock;
}

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        u32 mc_filter[2];       /* Multicast hash filter */
        int rx_mode;

        /* Note: do not reorder, GCC is clever about common statements. */
        if (dev->flags & IFF_PROMISC) {
                /* Unconditionally log net taps. */
                rx_mode =
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter perfectly -- accept all multicasts. */
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else {
                struct netdev_hw_addr *ha;
                rx_mode = AcceptBroadcast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                        rx_mode |= AcceptMulticast;
                }
        }

        /* We can safely update without stopping the chip. */
        cp->rx_config = cp_rx_config | rx_mode;
        cpw32_f(RxConfig, cp->rx_config);

        cpw32_f (MAR0 + 0, mc_filter[0]);
        cpw32_f (MAR0 + 4, mc_filter[1]);
}

static void cp_set_rx_mode (struct net_device *dev)
{
        unsigned long flags;
        struct cp_private *cp = netdev_priv(dev);

        spin_lock_irqsave (&cp->lock, flags);
        __cp_set_rx_mode(dev);
        spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
        /* only lower 24 bits valid; write any value to clear */
        cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
        cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        /* The chip only needs to report frames it silently dropped. */
        spin_lock_irqsave(&cp->lock, flags);
        if (netif_running(dev) && netif_device_present(dev))
                __cp_get_stats(cp);
        spin_unlock_irqrestore(&cp->lock, flags);

        return &dev->stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
        cpw16(IntrStatus, ~(cpr16(IntrStatus)));
        cpw16_f(IntrMask, 0);
        cpw8(Cmd, 0);
        cpw16_f(CpCmd, 0);
        cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;

        netdev_reset_queue(cp->dev);
}

static void cp_reset_hw (struct cp_private *cp)
{
        unsigned work = 1000;

        cpw8(Cmd, CmdReset);

        while (work--) {
                if (!(cpr8(Cmd) & CmdReset))
                        return;

                schedule_timeout_uninterruptible(10);
        }

        netdev_err(cp->dev, "hardware reset timeout\n");
}

static inline void cp_start_hw (struct cp_private *cp)
{
        dma_addr_t ring_dma;

        cpw16(CpCmd, cp->cpcmd);

        /*
         * These (at least TxRingAddr) need to be configured after the
         * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
         * (C+ Command Register) recommends that these and more be configured
         * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
         * it's been observed that the TxRingAddr is actually reset to garbage
         * when C+ mode Tx is enabled in CpCmd.
         */
        cpw32_f(HiTxRingAddr, 0);
        cpw32_f(HiTxRingAddr + 4, 0);

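        /* The high dword is extracted with two 16-bit shifts,
         * presumably because dma_addr_t may be only 32 bits wide and a
         * single >> 32 on a 32-bit type is undefined behaviour in C.
         */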
        ring_dma = cp->ring_dma;
        cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

        ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
        cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

        /*
         * Strictly speaking, the datasheet says this should be enabled
         * *before* setting the descriptor addresses. But what, then, would
         * prevent it from doing DMA to random unconfigured addresses?
         * This variant appears to work fine.
         */
        cpw8(Cmd, RxOn | TxOn);

        netdev_reset_queue(cp->dev);
}

static void cp_enable_irq(struct cp_private *cp)
{
        cpw16_f(IntrMask, cp_intr_mask);
}

static void cp_init_hw (struct cp_private *cp)
{
        struct net_device *dev = cp->dev;

        cp_reset_hw(cp);

        cpw8_f (Cfg9346, Cfg9346_Unlock);

        /* Restore our idea of the MAC address. */
        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

        __cp_set_rx_mode(dev);
        cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

        cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
        /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
        cpw8(Config3, PARMEnable);
        cp->wol_enabled = 0;

        cpw8(Config5, cpr8(Config5) & PMEStatus);

        cpw16(MultiIntr, 0);

        cpw8_f(Cfg9346, Cfg9346_Lock);
}

static int cp_refill_rx(struct cp_private *cp)
{
        struct net_device *dev = cp->dev;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                dma_addr_t mapping;

                skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
                if (!skb)
                        goto err_out;

                mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(&cp->pdev->dev, mapping)) {
                        kfree_skb(skb);
                        goto err_out;
                }
                cp->rx_skb[i] = skb;

                cp->rx_ring[i].opts2 = 0;
                cp->rx_ring[i].addr = cpu_to_le64(mapping);
                if (i == (CP_RX_RING_SIZE - 1))
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
                else
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | cp->rx_buf_sz);
        }

        return 0;

err_out:
        cp_clean_rings(cp);
        return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
        cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

        cp_init_rings_index(cp);

        return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
        struct device *d = &cp->pdev->dev;
        void *mem;
        int rc;

        mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        cp->rx_ring = mem;
        cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

        rc = cp_init_rings(cp);
        if (rc < 0)
                dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);

        return rc;
}

static void cp_clean_rings (struct cp_private *cp)
{
        struct cp_desc *desc;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                if (cp->rx_skb[i]) {
                        desc = cp->rx_ring + i;
                        dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(cp->rx_skb[i]);
                }
        }

        for (i = 0; i < CP_TX_RING_SIZE; i++) {
                if (cp->tx_skb[i]) {
                        struct sk_buff *skb = cp->tx_skb[i];

                        desc = cp->tx_ring + i;
                        dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                         le32_to_cpu(desc->opts1) & 0xffff,
                                         PCI_DMA_TODEVICE);
                        if (le32_to_cpu(desc->opts1) & LastFrag)
                                dev_kfree_skb(skb);
                        cp->dev->stats.tx_dropped++;
                }
        }
        netdev_reset_queue(cp->dev);

        memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

        memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
        memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
        cp_clean_rings(cp);
        dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
                          cp->ring_dma);
        cp->rx_ring = NULL;
        cp->tx_ring = NULL;
}

static int cp_open (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        const int irq = cp->pdev->irq;
        int rc;

        netif_dbg(cp, ifup, dev, "enabling interface\n");

        rc = cp_alloc_rings(cp);
        if (rc)
                return rc;

        napi_enable(&cp->napi);

        cp_init_hw(cp);

        rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                goto err_out_hw;

        cp_enable_irq(cp);

        netif_carrier_off(dev);
        mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
        netif_start_queue(dev);

        return 0;

err_out_hw:
        napi_disable(&cp->napi);
        cp_stop_hw(cp);
        cp_free_rings(cp);
        return rc;
}

static int cp_close (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        napi_disable(&cp->napi);

        netif_dbg(cp, ifdown, dev, "disabling interface\n");

        spin_lock_irqsave(&cp->lock, flags);

        netif_stop_queue(dev);
        netif_carrier_off(dev);

        cp_stop_hw(cp);

        spin_unlock_irqrestore(&cp->lock, flags);

        free_irq(cp->pdev->irq, dev);

        cp_free_rings(cp);
        return 0;
}

static void cp_tx_timeout(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
        int rc;

        netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
                    cpr8(Cmd), cpr16(CpCmd),
                    cpr16(IntrStatus), cpr16(IntrMask));

        spin_lock_irqsave(&cp->lock, flags);

        cp_stop_hw(cp);
        cp_clean_rings(cp);
        rc = cp_init_rings(cp);
        cp_start_hw(cp);
        cp_enable_irq(cp);

        netif_wake_queue(dev);

        spin_unlock_irqrestore(&cp->lock, flags);
}

static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
        struct cp_private *cp = netdev_priv(dev);

        /* check for invalid MTU, according to hardware limits */
        if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
                return -EINVAL;

        /* if network interface not up, no need for complexity */
        if (!netif_running(dev)) {
                dev->mtu = new_mtu;
                cp_set_rxbufsize(cp);   /* set new rx buf size */
                return 0;
        }

        /* network IS up, close it, reset MTU, and come up again. */
        cp_close(dev);
        dev->mtu = new_mtu;
        cp_set_rxbufsize(cp);
        return cp_open(dev);
}

static const char mii_2_8139_map[8] = {
        BasicModeCtrl,
        BasicModeStatus,
        0,
        0,
        NWayAdvert,
        NWayLPAR,
        NWayExpansion,
        0
};
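/*
 * Indexed by the standard MII register number (0 = BMCR, 1 = BMSR,
 * 4 = ADVERTISE, 5 = LPA, 6 = EXPANSION); each entry gives the chip's
 * memory-mapped equivalent, with 0 meaning "not mapped".
 */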
1303
1304static int mdio_read(struct net_device *dev, int phy_id, int location)
1305{
1306        struct cp_private *cp = netdev_priv(dev);
1307
1308        return location < 8 && mii_2_8139_map[location] ?
1309               readw(cp->regs + mii_2_8139_map[location]) : 0;
1310}
1311
1312
1313static void mdio_write(struct net_device *dev, int phy_id, int location,
1314                       int value)
1315{
1316        struct cp_private *cp = netdev_priv(dev);
1317
1318        if (location == 0) {
1319                cpw8(Cfg9346, Cfg9346_Unlock);
1320                cpw16(BasicModeCtrl, value);
1321                cpw8(Cfg9346, Cfg9346_Lock);
1322        } else if (location < 8 && mii_2_8139_map[location])
1323                cpw16(mii_2_8139_map[location], value);
1324}
1325
1326/* Set the ethtool Wake-on-LAN settings */
1327static int netdev_set_wol (struct cp_private *cp,
1328                           const struct ethtool_wolinfo *wol)
1329{
1330        u8 options;
1331
1332        options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1333        /* If WOL is being disabled, no need for complexity */
1334        if (wol->wolopts) {
1335                if (wol->wolopts & WAKE_PHY)    options |= LinkUp;
1336                if (wol->wolopts & WAKE_MAGIC)  options |= MagicPacket;
1337        }
1338
1339        cpw8 (Cfg9346, Cfg9346_Unlock);
1340        cpw8 (Config3, options);
1341        cpw8 (Cfg9346, Cfg9346_Lock);
1342
1343        options = 0; /* Paranoia setting */
1344        options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1345        /* If WOL is being disabled, no need for complexity */
1346        if (wol->wolopts) {
1347                if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1348                if (wol->wolopts & WAKE_BCAST)  options |= BWF;
1349                if (wol->wolopts & WAKE_MCAST)  options |= MWF;
1350        }
1351
1352        cpw8 (Config5, options);
1353
1354        cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1355
1356        return 0;
1357}
1358
1359/* Get the ethtool Wake-on-LAN settings */
1360static void netdev_get_wol (struct cp_private *cp,
1361                     struct ethtool_wolinfo *wol)
1362{
1363        u8 options;
1364
1365        wol->wolopts   = 0; /* Start from scratch */
1366        wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1367                         WAKE_MCAST | WAKE_UCAST;
1368        /* We don't need to go on if WOL is disabled */
1369        if (!cp->wol_enabled) return;
1370
1371        options        = cpr8 (Config3);
1372        if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1373        if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1374
1375        options        = 0; /* Paranoia setting */
1376        options        = cpr8 (Config5);
1377        if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1378        if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1379        if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1380}
1381
1382static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1383{
1384        struct cp_private *cp = netdev_priv(dev);
1385
1386        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1387        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1388        strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1389}
1390
1391static void cp_get_ringparam(struct net_device *dev,
1392                                struct ethtool_ringparam *ring)
1393{
1394        ring->rx_max_pending = CP_RX_RING_SIZE;
1395        ring->tx_max_pending = CP_TX_RING_SIZE;
1396        ring->rx_pending = CP_RX_RING_SIZE;
1397        ring->tx_pending = CP_TX_RING_SIZE;
1398}
1399
1400static int cp_get_regs_len(struct net_device *dev)
1401{
1402        return CP_REGS_SIZE;
1403}
1404
1405static int cp_get_sset_count (struct net_device *dev, int sset)
1406{
1407        switch (sset) {
1408        case ETH_SS_STATS:
1409                return CP_NUM_STATS;
1410        default:
1411                return -EOPNOTSUPP;
1412        }
1413}
1414
1415static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1416{
1417        struct cp_private *cp = netdev_priv(dev);
1418        int rc;
1419        unsigned long flags;
1420
1421        spin_lock_irqsave(&cp->lock, flags);
1422        rc = mii_ethtool_gset(&cp->mii_if, cmd);
1423        spin_unlock_irqrestore(&cp->lock, flags);
1424
1425        return rc;
1426}
1427
1428static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1429{
1430        struct cp_private *cp = netdev_priv(dev);
1431        int rc;
1432        unsigned long flags;
1433
1434        spin_lock_irqsave(&cp->lock, flags);
1435        rc = mii_ethtool_sset(&cp->mii_if, cmd);
1436        spin_unlock_irqrestore(&cp->lock, flags);
1437
1438        return rc;
1439}
1440
1441static int cp_nway_reset(struct net_device *dev)
1442{
1443        struct cp_private *cp = netdev_priv(dev);
1444        return mii_nway_restart(&cp->mii_if);
1445}
1446
1447static u32 cp_get_msglevel(struct net_device *dev)
1448{
1449        struct cp_private *cp = netdev_priv(dev);
1450        return cp->msg_enable;
1451}
1452
1453static void cp_set_msglevel(struct net_device *dev, u32 value)
1454{
1455        struct cp_private *cp = netdev_priv(dev);
1456        cp->msg_enable = value;
1457}
1458
1459static int cp_set_features(struct net_device *dev, netdev_features_t features)
1460{
1461        struct cp_private *cp = netdev_priv(dev);
1462        unsigned long flags;
1463
1464        if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1465                return 0;
1466
1467        spin_lock_irqsave(&cp->lock, flags);
1468
1469        if (features & NETIF_F_RXCSUM)
1470                cp->cpcmd |= RxChkSum;
1471        else
1472                cp->cpcmd &= ~RxChkSum;
1473
1474        if (features & NETIF_F_HW_VLAN_CTAG_RX)
1475                cp->cpcmd |= RxVlanOn;
1476        else
1477                cp->cpcmd &= ~RxVlanOn;
1478
1479        cpw16_f(CpCmd, cp->cpcmd);
1480        spin_unlock_irqrestore(&cp->lock, flags);
1481
1482        return 0;
1483}
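
    /*
     * Added note: dev->features ^ features isolates the bits that are
     * changing, so the early return above fires whenever NETIF_F_RXCSUM
     * keeps its current value.  The same test spelled out:
     *
     *        netdev_features_t changed = dev->features ^ features;
     *        if (changed & NETIF_F_RXCSUM)
     *                ;  // RX checksum offload toggled, reprogram CpCmd
     */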
1484
1485static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1486                        void *p)
1487{
1488        struct cp_private *cp = netdev_priv(dev);
1489        unsigned long flags;
1490
1491        if (regs->len < CP_REGS_SIZE)
1492                return /* -EINVAL */;
1493
1494        regs->version = CP_REGS_VER;
1495
1496        spin_lock_irqsave(&cp->lock, flags);
1497        memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1498        spin_unlock_irqrestore(&cp->lock, flags);
1499}
1500
1501static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1502{
1503        struct cp_private *cp = netdev_priv(dev);
1504        unsigned long flags;
1505
1506        spin_lock_irqsave (&cp->lock, flags);
1507        netdev_get_wol (cp, wol);
1508        spin_unlock_irqrestore (&cp->lock, flags);
1509}
1510
1511static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1512{
1513        struct cp_private *cp = netdev_priv(dev);
1514        unsigned long flags;
1515        int rc;
1516
1517        spin_lock_irqsave (&cp->lock, flags);
1518        rc = netdev_set_wol (cp, wol);
1519        spin_unlock_irqrestore (&cp->lock, flags);
1520
1521        return rc;
1522}
1523
1524static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1525{
1526        switch (stringset) {
1527        case ETH_SS_STATS:
1528                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1529                break;
1530        default:
1531                BUG();
1532                break;
1533        }
1534}
1535
1536static void cp_get_ethtool_stats (struct net_device *dev,
1537                                  struct ethtool_stats *estats, u64 *tmp_stats)
1538{
1539        struct cp_private *cp = netdev_priv(dev);
1540        struct cp_dma_stats *nic_stats;
1541        dma_addr_t dma;
1542        int i;
1543
1544        nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1545                                       &dma, GFP_KERNEL);
1546        if (!nic_stats)
1547                return;
1548
1549        /* begin NIC statistics dump */
1550        cpw32(StatsAddr + 4, (u64)dma >> 32);
1551        cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1552        cpr32(StatsAddr);
1553
1554        for (i = 0; i < 1000; i++) {
1555                if ((cpr32(StatsAddr) & DumpStats) == 0)
1556                        break;
1557                udelay(10);
1558        }
1559        cpw32(StatsAddr, 0);
1560        cpw32(StatsAddr + 4, 0);
1561        cpr32(StatsAddr);
1562
1563        i = 0;
1564        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1565        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1566        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1567        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1568        tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1569        tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1570        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1571        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1572        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1573        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1574        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1575        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1576        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1577        tmp_stats[i++] = cp->cp_stats.rx_frags;
1578        BUG_ON(i != CP_NUM_STATS);
1579
1580        dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1581}
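
    /*
     * Added commentary on the dump handshake above: writing the stats
     * buffer bus address into StatsAddr with the DumpStats command bit
     * set starts the hardware dump, and the chip clears DumpStats once
     * the counters have landed in the coherent buffer.  The poll loop
     * therefore bounds the wait at roughly 1000 * 10us = 10ms before
     * giving up and reporting whatever the buffer holds.
     */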
1582
1583static const struct ethtool_ops cp_ethtool_ops = {
1584        .get_drvinfo            = cp_get_drvinfo,
1585        .get_regs_len           = cp_get_regs_len,
1586        .get_sset_count         = cp_get_sset_count,
1587        .get_settings           = cp_get_settings,
1588        .set_settings           = cp_set_settings,
1589        .nway_reset             = cp_nway_reset,
1590        .get_link               = ethtool_op_get_link,
1591        .get_msglevel           = cp_get_msglevel,
1592        .set_msglevel           = cp_set_msglevel,
1593        .get_regs               = cp_get_regs,
1594        .get_wol                = cp_get_wol,
1595        .set_wol                = cp_set_wol,
1596        .get_strings            = cp_get_strings,
1597        .get_ethtool_stats      = cp_get_ethtool_stats,
1598        .get_eeprom_len         = cp_get_eeprom_len,
1599        .get_eeprom             = cp_get_eeprom,
1600        .set_eeprom             = cp_set_eeprom,
1601        .get_ringparam          = cp_get_ringparam,
1602};
1603
1604static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1605{
1606        struct cp_private *cp = netdev_priv(dev);
1607        int rc;
1608        unsigned long flags;
1609
1610        if (!netif_running(dev))
1611                return -EINVAL;
1612
1613        spin_lock_irqsave(&cp->lock, flags);
1614        rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1615        spin_unlock_irqrestore(&cp->lock, flags);
1616        return rc;
1617}
1618
1619static int cp_set_mac_address(struct net_device *dev, void *p)
1620{
1621        struct cp_private *cp = netdev_priv(dev);
1622        struct sockaddr *addr = p;
1623
1624        if (!is_valid_ether_addr(addr->sa_data))
1625                return -EADDRNOTAVAIL;
1626
1627        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1628
1629        spin_lock_irq(&cp->lock);
1630
1631        cpw8_f(Cfg9346, Cfg9346_Unlock);
1632        cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1633        cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1634        cpw8_f(Cfg9346, Cfg9346_Lock);
1635
1636        spin_unlock_irq(&cp->lock);
1637
1638        return 0;
1639}
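
    /*
     * Worked example (illustration only): the station address is
     * programmed as two little-endian 32-bit writes.  For the address
     * 00:e0:4c:aa:bb:cc, assuming the bytes past the 6-byte address are
     * zero:
     *
     *        dev_addr[] = { 0x00, 0xe0, 0x4c, 0xaa, 0xbb, 0xcc };
     *        MAC0 + 0  <-  0xaa4ce000   // bytes 0-3, LSB first
     *        MAC0 + 4  <-  0x0000ccbb   // bytes 4-5, upper 16 bits don't-care
     *
     * The Cfg9346 unlock/lock pair brackets the update because MAC0 is
     * write-protected in normal operating mode.
     */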
1640
1641/* Serial EEPROM section. */
1642
1643/*  EEPROM_Ctrl bits. */
1644#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
1645#define EE_CS           0x08    /* EEPROM chip select. */
1646#define EE_DATA_WRITE   0x02    /* EEPROM chip data in. */
1647#define EE_WRITE_0      0x00
1648#define EE_WRITE_1      0x02
1649#define EE_DATA_READ    0x01    /* EEPROM chip data out. */
1650#define EE_ENB          (0x80 | EE_CS)
1651
1652/* Delay between EEPROM clock transitions.
1653   No extra delay is needed with 33 MHz PCI, but 66 MHz operation may change this.
1654 */
1655
1656#define eeprom_delay()  readb(ee_addr)
1657
1658/* The EEPROM commands include the always-set leading bit. */
1659#define EE_EXTEND_CMD   (4)
1660#define EE_WRITE_CMD    (5)
1661#define EE_READ_CMD     (6)
1662#define EE_ERASE_CMD    (7)
1663
1664#define EE_EWDS_ADDR    (0)
1665#define EE_WRAL_ADDR    (1)
1666#define EE_ERAL_ADDR    (2)
1667#define EE_EWEN_ADDR    (3)
1668
1669#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
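
    /*
     * Framing sketch (added, not from the original comments): every
     * 93C46/93C66 command is clocked out MSB-first as
     * <start bit><2-bit opcode><address>.  read_eeprom() below builds
     * the whole word as (EE_READ_CMD << addr_len) | location, so with
     * 6-bit addressing a read of word 7 shifts out 3 + 6 = 9 bits:
     *
     *        bits (MSB first):  1  10  000111
     *                           |  |   +------ 6-bit address (word 7)
     *                           |  +---------- READ opcode
     *                           +------------- always-set start bit
     */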
1670
1671static void eeprom_cmd_start(void __iomem *ee_addr)
1672{
1673        writeb (EE_ENB & ~EE_CS, ee_addr);
1674        writeb (EE_ENB, ee_addr);
1675        eeprom_delay ();
1676}
1677
1678static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1679{
1680        int i;
1681
1682        /* Shift the command bits out. */
1683        for (i = cmd_len - 1; i >= 0; i--) {
1684                int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1685                writeb (EE_ENB | dataval, ee_addr);
1686                eeprom_delay ();
1687                writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1688                eeprom_delay ();
1689        }
1690        writeb (EE_ENB, ee_addr);
1691        eeprom_delay ();
1692}
1693
1694static void eeprom_cmd_end(void __iomem *ee_addr)
1695{
1696        writeb(0, ee_addr);
1697        eeprom_delay ();
1698}
1699
1700static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1701                              int addr_len)
1702{
1703        int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1704
1705        eeprom_cmd_start(ee_addr);
1706        eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1707        eeprom_cmd_end(ee_addr);
1708}
1709
1710static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1711{
1712        int i;
1713        u16 retval = 0;
1714        void __iomem *ee_addr = ioaddr + Cfg9346;
1715        int read_cmd = location | (EE_READ_CMD << addr_len);
1716
1717        eeprom_cmd_start(ee_addr);
1718        eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1719
1720        for (i = 16; i > 0; i--) {
1721                writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1722                eeprom_delay ();
1723                retval =
1724                    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1725                                     0);
1726                writeb (EE_ENB, ee_addr);
1727                eeprom_delay ();
1728        }
1729
1730        eeprom_cmd_end(ee_addr);
1731
1732        return retval;
1733}
1734
1735static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1736                         int addr_len)
1737{
1738        int i;
1739        void __iomem *ee_addr = ioaddr + Cfg9346;
1740        int write_cmd = location | (EE_WRITE_CMD << addr_len);
1741
1742        eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1743
1744        eeprom_cmd_start(ee_addr);
1745        eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1746        eeprom_cmd(ee_addr, val, 16);
1747        eeprom_cmd_end(ee_addr);
1748
1749        eeprom_cmd_start(ee_addr);
1750        for (i = 0; i < 20000; i++)
1751                if (readb(ee_addr) & EE_DATA_READ)
1752                        break;
1753        eeprom_cmd_end(ee_addr);
1754
1755        eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1756}
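
    /*
     * Added summary of the sequence above: writes must be bracketed by
     * the EWEN (erase/write enable) and EWDS (disable) extended
     * commands, and once the 16 data bits have been clocked out the
     * part holds DO low until its internal programming cycle finishes;
     * the busy-wait polls EE_DATA_READ for that ready indication before
     * write-protecting the EEPROM again.
     */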
1757
1758static int cp_get_eeprom_len(struct net_device *dev)
1759{
1760        struct cp_private *cp = netdev_priv(dev);
1761        int size;
1762
1763        spin_lock_irq(&cp->lock);
1764        size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1765        spin_unlock_irq(&cp->lock);
1766
1767        return size;
1768}
1769
1770static int cp_get_eeprom(struct net_device *dev,
1771                         struct ethtool_eeprom *eeprom, u8 *data)
1772{
1773        struct cp_private *cp = netdev_priv(dev);
1774        unsigned int addr_len;
1775        u16 val;
1776        u32 offset = eeprom->offset >> 1;
1777        u32 len = eeprom->len;
1778        u32 i = 0;
1779
1780        eeprom->magic = CP_EEPROM_MAGIC;
1781
1782        spin_lock_irq(&cp->lock);
1783
1784        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1785
1786        if (eeprom->offset & 1) {
1787                val = read_eeprom(cp->regs, offset, addr_len);
1788                data[i++] = (u8)(val >> 8);
1789                offset++;
1790        }
1791
1792        while (i < len - 1) {
1793                val = read_eeprom(cp->regs, offset, addr_len);
1794                data[i++] = (u8)val;
1795                data[i++] = (u8)(val >> 8);
1796                offset++;
1797        }
1798
1799        if (i < len) {
1800                val = read_eeprom(cp->regs, offset, addr_len);
1801                data[i] = (u8)val;
1802        }
1803
1804        spin_unlock_irq(&cp->lock);
1805        return 0;
1806}
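
    /*
     * Offset-handling example (illustrative): the EEPROM is addressed
     * in 16-bit words, so a byte-granular ethtool request is split at
     * the word boundaries.  Reading 3 bytes at byte offset 1 becomes:
     *
     *        word 0, high byte   ->  data[0]    // the "offset & 1" head case
     *        word 1, both bytes  ->  data[1..2]
     */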
1807
1808static int cp_set_eeprom(struct net_device *dev,
1809                         struct ethtool_eeprom *eeprom, u8 *data)
1810{
1811        struct cp_private *cp = netdev_priv(dev);
1812        unsigned int addr_len;
1813        u16 val;
1814        u32 offset = eeprom->offset >> 1;
1815        u32 len = eeprom->len;
1816        u32 i = 0;
1817
1818        if (eeprom->magic != CP_EEPROM_MAGIC)
1819                return -EINVAL;
1820
1821        spin_lock_irq(&cp->lock);
1822
1823        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1824
1825        if (eeprom->offset & 1) {
1826                val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1827                val |= (u16)data[i++] << 8;
1828                write_eeprom(cp->regs, offset, val, addr_len);
1829                offset++;
1830        }
1831
1832        while (i < len - 1) {
1833                val = (u16)data[i++];
1834                val |= (u16)data[i++] << 8;
1835                write_eeprom(cp->regs, offset, val, addr_len);
1836                offset++;
1837        }
1838
1839        if (i < len) {
1840                val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1841                val |= (u16)data[i];
1842                write_eeprom(cp->regs, offset, val, addr_len);
1843        }
1844
1845        spin_unlock_irq(&cp->lock);
1846        return 0;
1847}
1848
1849/* Put the board into D3hot state and wait for WakeUp signal */
1850static void cp_set_d3_state (struct cp_private *cp)
1851{
1852        pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */
1853        pci_set_power_state (cp->pdev, PCI_D3hot);
1854}
1855
1856static const struct net_device_ops cp_netdev_ops = {
1857        .ndo_open               = cp_open,
1858        .ndo_stop               = cp_close,
1859        .ndo_validate_addr      = eth_validate_addr,
1860        .ndo_set_mac_address    = cp_set_mac_address,
1861        .ndo_set_rx_mode        = cp_set_rx_mode,
1862        .ndo_get_stats          = cp_get_stats,
1863        .ndo_do_ioctl           = cp_ioctl,
1864        .ndo_start_xmit         = cp_start_xmit,
1865        .ndo_tx_timeout         = cp_tx_timeout,
1866        .ndo_set_features       = cp_set_features,
1867        .ndo_change_mtu         = cp_change_mtu,
1868
1869#ifdef CONFIG_NET_POLL_CONTROLLER
1870        .ndo_poll_controller    = cp_poll_controller,
1871#endif
1872};
1873
1874static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1875{
1876        struct net_device *dev;
1877        struct cp_private *cp;
1878        int rc;
1879        void __iomem *regs;
1880        resource_size_t pciaddr;
1881        unsigned int addr_len, i, pci_using_dac;
1882
1883        pr_info_once("%s", version);
1884
1885        if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1886            pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1887                dev_info(&pdev->dev,
1888                         "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1889                         pdev->vendor, pdev->device, pdev->revision);
1890                return -ENODEV;
1891        }
1892
1893        dev = alloc_etherdev(sizeof(struct cp_private));
1894        if (!dev)
1895                return -ENOMEM;
1896        SET_NETDEV_DEV(dev, &pdev->dev);
1897
1898        cp = netdev_priv(dev);
1899        cp->pdev = pdev;
1900        cp->dev = dev;
1901        cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1902        spin_lock_init (&cp->lock);
1903        cp->mii_if.dev = dev;
1904        cp->mii_if.mdio_read = mdio_read;
1905        cp->mii_if.mdio_write = mdio_write;
1906        cp->mii_if.phy_id = CP_INTERNAL_PHY;
1907        cp->mii_if.phy_id_mask = 0x1f;
1908        cp->mii_if.reg_num_mask = 0x1f;
1909        cp_set_rxbufsize(cp);
1910
1911        rc = pci_enable_device(pdev);
1912        if (rc)
1913                goto err_out_free;
1914
1915        rc = pci_set_mwi(pdev);
1916        if (rc)
1917                goto err_out_disable;
1918
1919        rc = pci_request_regions(pdev, DRV_NAME);
1920        if (rc)
1921                goto err_out_mwi;
1922
1923        pciaddr = pci_resource_start(pdev, 1);
1924        if (!pciaddr) {
1925                rc = -EIO;
1926                dev_err(&pdev->dev, "no MMIO resource\n");
1927                goto err_out_res;
1928        }
1929        if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1930                rc = -EIO;
1931                dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1932                        (unsigned long long)pci_resource_len(pdev, 1));
1933                goto err_out_res;
1934        }
1935
1936        /* Configure DMA attributes. */
1937        if ((sizeof(dma_addr_t) > 4) &&
1938            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1939            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1940                pci_using_dac = 1;
1941        } else {
1942                pci_using_dac = 0;
1943
1944                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1945                if (rc) {
1946                        dev_err(&pdev->dev,
1947                                "No usable DMA configuration, aborting\n");
1948                        goto err_out_res;
1949                }
1950                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1951                if (rc) {
1952                        dev_err(&pdev->dev,
1953                                "No usable consistent DMA configuration, aborting\n");
1954                        goto err_out_res;
1955                }
1956        }
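
            /*
             * Added note: pci_using_dac stays set only when both the
             * coherent and streaming 64-bit masks are accepted; any
             * failure drops the device to 32-bit masks, and the flag
             * later decides whether PCIDAC and NETIF_F_HIGHDMA are
             * enabled.
             */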
1957
1958        cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1959                    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1960
1961        dev->features |= NETIF_F_RXCSUM;
1962        dev->hw_features |= NETIF_F_RXCSUM;
1963
1964        regs = ioremap(pciaddr, CP_REGS_SIZE);
1965        if (!regs) {
1966                rc = -EIO;
1967                dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1968                        (unsigned long long)pci_resource_len(pdev, 1),
1969                        (unsigned long long)pciaddr);
1970                goto err_out_res;
1971        }
1972        cp->regs = regs;
1973
1974        cp_stop_hw(cp);
1975
1976        /* read MAC address from EEPROM */
1977        addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1978        for (i = 0; i < 3; i++)
1979                ((__le16 *) (dev->dev_addr))[i] =
1980                    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1981
1982        dev->netdev_ops = &cp_netdev_ops;
1983        netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1984        dev->ethtool_ops = &cp_ethtool_ops;
1985        dev->watchdog_timeo = TX_TIMEOUT;
1986
1987        dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1988
1989        if (pci_using_dac)
1990                dev->features |= NETIF_F_HIGHDMA;
1991
1992        /* disabled by default until verified */
1993        dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1994                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1995        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1996                NETIF_F_HIGHDMA;
1997
1998        rc = register_netdev(dev);
1999        if (rc)
2000                goto err_out_iomap;
2001
2002        netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
2003                    regs, dev->dev_addr, pdev->irq);
2004
2005        pci_set_drvdata(pdev, dev);
2006
2007        /* enable busmastering; memory-write-invalidate was set up earlier */
2008        pci_set_master(pdev);
2009
2010        if (cp->wol_enabled)
2011                cp_set_d3_state (cp);
2012
2013        return 0;
2014
2015err_out_iomap:
2016        iounmap(regs);
2017err_out_res:
2018        pci_release_regions(pdev);
2019err_out_mwi:
2020        pci_clear_mwi(pdev);
2021err_out_disable:
2022        pci_disable_device(pdev);
2023err_out_free:
2024        free_netdev(dev);
2025        return rc;
2026}
2027
2028static void cp_remove_one (struct pci_dev *pdev)
2029{
2030        struct net_device *dev = pci_get_drvdata(pdev);
2031        struct cp_private *cp = netdev_priv(dev);
2032
2033        unregister_netdev(dev);
2034        iounmap(cp->regs);
2035        if (cp->wol_enabled)
2036                pci_set_power_state (pdev, PCI_D0);
2037        pci_release_regions(pdev);
2038        pci_clear_mwi(pdev);
2039        pci_disable_device(pdev);
2040        free_netdev(dev);
2041}
2042
2043#ifdef CONFIG_PM
2044static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2045{
2046        struct net_device *dev = pci_get_drvdata(pdev);
2047        struct cp_private *cp = netdev_priv(dev);
2048        unsigned long flags;
2049
2050        if (!netif_running(dev))
2051                return 0;
2052
2053        netif_device_detach (dev);
2054        netif_stop_queue (dev);
2055
2056        spin_lock_irqsave (&cp->lock, flags);
2057
2058        /* Disable Rx and Tx */
2059        cpw16 (IntrMask, 0);
2060        cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2061
2062        spin_unlock_irqrestore (&cp->lock, flags);
2063
2064        pci_save_state(pdev);
2065        pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2066        pci_set_power_state(pdev, pci_choose_state(pdev, state));
2067
2068        return 0;
2069}
2070
2071static int cp_resume (struct pci_dev *pdev)
2072{
2073        struct net_device *dev = pci_get_drvdata (pdev);
2074        struct cp_private *cp = netdev_priv(dev);
2075        unsigned long flags;
2076
2077        if (!netif_running(dev))
2078                return 0;
2079
2080        netif_device_attach (dev);
2081
2082        pci_set_power_state(pdev, PCI_D0);
2083        pci_restore_state(pdev);
2084        pci_enable_wake(pdev, PCI_D0, 0);
2085
2086        /* FIXME: bad things may happen if the Rx ring buffer is depleted */
2087        cp_init_rings_index (cp);
2088        cp_init_hw (cp);
2089        cp_enable_irq(cp);
2090        netif_start_queue (dev);
2091
2092        spin_lock_irqsave (&cp->lock, flags);
2093
2094        mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2095
2096        spin_unlock_irqrestore (&cp->lock, flags);
2097
2098        return 0;
2099}
2100#endif /* CONFIG_PM */
2101
2102static const struct pci_device_id cp_pci_tbl[] = {
2103        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
2104        { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
2105        { },
2106};
2107MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
2108
2109static struct pci_driver cp_driver = {
2110        .name         = DRV_NAME,
2111        .id_table     = cp_pci_tbl,
2112        .probe        = cp_init_one,
2113        .remove       = cp_remove_one,
2114#ifdef CONFIG_PM
2115        .resume       = cp_resume,
2116        .suspend      = cp_suspend,
2117#endif
2118};
2119
2120module_pci_driver(cp_driver);
2121