linux/drivers/net/8139cp.c
/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
        Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

        Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
        Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
        Copyright 2001 Manfred Spraul                               [natsemi.c]
        Copyright 1999-2001 by Donald Becker.                       [natsemi.c]
        Written 1997-2001 by Donald Becker.                         [8139too.c]
        Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        See the file COPYING in this distribution for more information.

        Contributors:

                Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
                PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
                LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

        TODO:
        * Test Tx checksumming thoroughly

        Low priority TODO:
        * Complete reset on PciErr
        * Consider Rx interrupt mitigation using TimerIntr
        * Investigate using skb->priority with h/w VLAN priority
        * Investigate using High Priority Tx Queue with skb->priority
        * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
        * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
        * Implement Tx software interrupt mitigation via
          Tx descriptor bit
        * The real minimum of CP_MIN_MTU is 4 bytes.  However,
          for this to be supported, one must(?) turn on packet padding.
        * Support external MII transceivers (patch available)

        NOTES:
        * TX checksumming is considered experimental.  It is off by
          default, use ethtool to turn it on.

 */

#define DRV_NAME                "8139cp"
#define DRV_VERSION             "1.3"
#define DRV_RELDATE             "Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* VLAN tagging feature enable/disable */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define CP_VLAN_TAG_USED 1
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
        do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0)
#else
#define CP_VLAN_TAG_USED 0
#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
        do { (tx_desc)->opts2 = 0; } while (0)
#endif
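/* Note: the chip appears to keep the VLAN tag big-endian inside opts2;
 * callers therefore pass a swab16()'d tag value (see cp_start_xmit()
 * and cp_rx_skb()).
 */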

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define PFX                     DRV_NAME ": "

#define CP_DEF_MSG_ENABLE       (NETIF_MSG_DRV          | \
                                 NETIF_MSG_PROBE        | \
                                 NETIF_MSG_LINK)
#define CP_NUM_STATS            14      /* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE           64      /* size in bytes of DMA stats block */
#define CP_REGS_SIZE            (0xff + 1)
#define CP_REGS_VER             1               /* version 1 */
#define CP_RX_RING_SIZE         64
#define CP_TX_RING_SIZE         64
#define CP_RING_BYTES           \
                ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +   \
                 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +   \
                 CP_STATS_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)              (((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)                                      \
        (((CP)->tx_tail <= (CP)->tx_head) ?                     \
          (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :       \
          (CP)->tx_tail - (CP)->tx_head - 1)
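
/* TX_BUFFS_AVAIL reports one slot less than is physically free: keeping
 * one descriptor unused lets head == tail always mean "ring empty" rather
 * than "ring full".
 */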

#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/
#define CP_INTERNAL_PHY         32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH          5       /* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST            4       /* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST            6       /* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH         256     /* Early Tx threshold, in bytes */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU              60      /* TODO: allow lower, but pad */
#define CP_MAX_MTU              4096

enum {
        /* NIC register offsets */
        MAC0            = 0x00, /* Ethernet hardware address. */
        MAR0            = 0x08, /* Multicast filter. */
        StatsAddr       = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
        TxRingAddr      = 0x20, /* 64-bit start addr of Tx ring */
        HiTxRingAddr    = 0x28, /* 64-bit start addr of high priority Tx ring */
        Cmd             = 0x37, /* Command register */
        IntrMask        = 0x3C, /* Interrupt mask */
        IntrStatus      = 0x3E, /* Interrupt status */
        TxConfig        = 0x40, /* Tx configuration */
        ChipVersion     = 0x43, /* 8-bit chip version, inside TxConfig */
        RxConfig        = 0x44, /* Rx configuration */
        RxMissed        = 0x4C, /* 24 bits valid, write clears */
        Cfg9346         = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
        Config1         = 0x52, /* Config1 */
        Config3         = 0x59, /* Config3 */
        Config4         = 0x5A, /* Config4 */
        MultiIntr       = 0x5C, /* Multiple interrupt select */
        BasicModeCtrl   = 0x62, /* MII BMCR */
        BasicModeStatus = 0x64, /* MII BMSR */
        NWayAdvert      = 0x66, /* MII ADVERTISE */
        NWayLPAR        = 0x68, /* MII LPA */
        NWayExpansion   = 0x6A, /* MII Expansion */
        Config5         = 0xD8, /* Config5 */
        TxPoll          = 0xD9, /* Tell chip to check Tx descriptors for work */
        RxMaxSize       = 0xDA, /* Max size of an Rx packet (8169 only) */
        CpCmd           = 0xE0, /* C+ Command register (C+ mode only) */
        IntrMitigate    = 0xE2, /* rx/tx interrupt mitigation control */
        RxRingAddr      = 0xE4, /* 64-bit start addr of Rx ring */
        TxThresh        = 0xEC, /* Early Tx threshold */
        OldRxBufAddr    = 0x30, /* DMA address of Rx ring buffer (C mode) */
        OldTSD0         = 0x10, /* DMA address of first Tx desc (C mode) */

        /* Tx and Rx status descriptors */
        DescOwn         = (1 << 31), /* Descriptor is owned by NIC */
        RingEnd         = (1 << 30), /* End of descriptor ring */
        FirstFrag       = (1 << 29), /* First segment of a packet */
        LastFrag        = (1 << 28), /* Final segment of a packet */
        LargeSend       = (1 << 27), /* TCP Large Send Offload (TSO) */
        MSSShift        = 16,        /* MSS value position */
        MSSMask         = 0xfff,     /* MSS value: 11 bits */
        TxError         = (1 << 23), /* Tx error summary */
        RxError         = (1 << 20), /* Rx error summary */
        IPCS            = (1 << 18), /* Calculate IP checksum */
        UDPCS           = (1 << 17), /* Calculate UDP/IP checksum */
        TCPCS           = (1 << 16), /* Calculate TCP/IP checksum */
        TxVlanTag       = (1 << 17), /* Add VLAN tag */
        RxVlanTagged    = (1 << 16), /* Rx VLAN tag available */
        IPFail          = (1 << 15), /* IP checksum failed */
        UDPFail         = (1 << 14), /* UDP/IP checksum failed */
        TCPFail         = (1 << 13), /* TCP/IP checksum failed */
        NormalTxPoll    = (1 << 6),  /* One or more normal Tx packets to send */
        PID1            = (1 << 17), /* 2 protocol id bits:  0==non-IP, */
        PID0            = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
        RxProtoTCP      = 1,
        RxProtoUDP      = 2,
        RxProtoIP       = 3,
        TxFIFOUnder     = (1 << 25), /* Tx FIFO underrun */
        TxOWC           = (1 << 22), /* Tx Out-of-window collision */
        TxLinkFail      = (1 << 21), /* Link failed during Tx of packet */
        TxMaxCol        = (1 << 20), /* Tx aborted due to excessive collisions */
        TxColCntShift   = 16,        /* Shift, to get 4-bit Tx collision cnt */
        TxColCntMask    = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
        RxErrFrame      = (1 << 27), /* Rx frame alignment error */
        RxMcast         = (1 << 26), /* Rx multicast packet rcv'd */
        RxErrCRC        = (1 << 18), /* Rx CRC error */
        RxErrRunt       = (1 << 19), /* Rx error, packet < 64 bytes */
        RxErrLong       = (1 << 21), /* Rx error, packet > 4096 bytes */
        RxErrFIFO       = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

        /* StatsAddr register */
        DumpStats       = (1 << 3),  /* Begin stats dump */

        /* RxConfig register */
        RxCfgFIFOShift  = 13,        /* Shift, to get Rx FIFO thresh value */
        RxCfgDMAShift   = 8,         /* Shift, to get Rx Max DMA value */
        AcceptErr       = 0x20,      /* Accept packets with CRC errors */
        AcceptRunt      = 0x10,      /* Accept runt (<64 bytes) packets */
        AcceptBroadcast = 0x08,      /* Accept broadcast packets */
        AcceptMulticast = 0x04,      /* Accept multicast packets */
        AcceptMyPhys    = 0x02,      /* Accept pkts with our MAC as dest */
        AcceptAllPhys   = 0x01,      /* Accept all pkts w/ physical dest */

        /* IntrMask / IntrStatus registers */
        PciErr          = (1 << 15), /* System error on the PCI bus */
        TimerIntr       = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
        LenChg          = (1 << 13), /* Cable length change */
        SWInt           = (1 << 8),  /* Software-requested interrupt */
        TxEmpty         = (1 << 7),  /* No Tx descriptors available */
        RxFIFOOvr       = (1 << 6),  /* Rx FIFO Overflow */
        LinkChg         = (1 << 5),  /* Packet underrun, or link change */
        RxEmpty         = (1 << 4),  /* No Rx descriptors available */
        TxErr           = (1 << 3),  /* Tx error */
        TxOK            = (1 << 2),  /* Tx packet sent */
        RxErr           = (1 << 1),  /* Rx error */
        RxOK            = (1 << 0),  /* Rx packet received */
        IntrResvd       = (1 << 10), /* reserved, according to RealTek engineers,
                                        but hardware likes to raise it */

        IntrAll         = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
                          RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
                          RxErr | RxOK | IntrResvd,

        /* C mode command register */
        CmdReset        = (1 << 4),  /* Enable to reset; self-clearing */
        RxOn            = (1 << 3),  /* Rx mode enable */
        TxOn            = (1 << 2),  /* Tx mode enable */

        /* C+ mode command register */
        RxVlanOn        = (1 << 6),  /* Rx VLAN de-tagging enable */
        RxChkSum        = (1 << 5),  /* Rx checksum offload enable */
        PCIDAC          = (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
        PCIMulRW        = (1 << 3),  /* Enable PCI read/write multiple */
        CpRxOn          = (1 << 1),  /* Rx mode enable */
        CpTxOn          = (1 << 0),  /* Tx mode enable */
        /* Cfg9346 EEPROM control register */
        Cfg9346_Lock    = 0x00,      /* Lock ConfigX/MII register access */
        Cfg9346_Unlock  = 0xC0,      /* Unlock ConfigX/MII register access */

        /* TxConfig register */
        IFG             = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
        TxDMAShift      = 8,         /* DMA burst value (0-7) is shift this many bits */

        /* Early Tx Threshold register */
        TxThreshMask    = 0x3f,      /* Mask bits 5-0 */
        TxThreshMax     = 2048,      /* Max early Tx threshold */

        /* Config1 register */
        DriverLoaded    = (1 << 5),  /* Software marker, driver is loaded */
        LWACT           = (1 << 4),  /* LWAKE active mode */
        PMEnable        = (1 << 0),  /* Enable various PM features of chip */

        /* Config3 register */
        PARMEnable      = (1 << 6),  /* Enable auto-loading of PHY parms */
        MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
        LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */

        /* Config4 register */
        LWPTN           = (1 << 1),  /* LWAKE Pattern */
        LWPME           = (1 << 4),  /* LANWAKE vs PMEB */

        /* Config5 register */
        BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
        MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
        UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
        LANWake         = (1 << 1),  /* Enable LANWake signal */
        PMEStatus       = (1 << 0),  /* PME status can be reset by PCI RST# */

        cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
        cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
        cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
          (RX_FIFO_THRESH << RxCfgFIFOShift) |
          (RX_DMA_BURST << RxCfgDMAShift);

struct cp_desc {
        __le32          opts1;
        __le32          opts2;
        __le64          addr;
};

struct cp_dma_stats {
        __le64                  tx_ok;
        __le64                  rx_ok;
        __le64                  tx_err;
        __le32                  rx_err;
        __le16                  rx_fifo;
        __le16                  frame_align;
        __le32                  tx_ok_1col;
        __le32                  tx_ok_mcol;
        __le64                  rx_ok_phys;
        __le64                  rx_ok_bcast;
        __le32                  rx_ok_mcast;
        __le16                  tx_abort;
        __le16                  tx_underrun;
} __attribute__((packed));

struct cp_extra_stats {
        unsigned long           rx_frags;
};

struct cp_private {
        void                    __iomem *regs;
        struct net_device       *dev;
        spinlock_t              lock;
        u32                     msg_enable;

        struct napi_struct      napi;

        struct pci_dev          *pdev;
        u32                     rx_config;
        u16                     cpcmd;

        struct cp_extra_stats   cp_stats;

        unsigned                rx_head         ____cacheline_aligned;
        unsigned                rx_tail;
        struct cp_desc          *rx_ring;
        struct sk_buff          *rx_skb[CP_RX_RING_SIZE];

        unsigned                tx_head         ____cacheline_aligned;
        unsigned                tx_tail;
        struct cp_desc          *tx_ring;
        struct sk_buff          *tx_skb[CP_TX_RING_SIZE];

        unsigned                rx_buf_sz;
        unsigned                wol_enabled : 1; /* Is Wake-on-LAN enabled? */

#if CP_VLAN_TAG_USED
        struct vlan_group       *vlgrp;
#endif
        dma_addr_t              ring_dma;

        struct mii_if_info      mii_if;
};

#define cpr8(reg)       readb(cp->regs + (reg))
#define cpr16(reg)      readw(cp->regs + (reg))
#define cpr32(reg)      readl(cp->regs + (reg))
#define cpw8(reg,val)   writeb((val), cp->regs + (reg))
#define cpw16(reg,val)  writew((val), cp->regs + (reg))
#define cpw32(reg,val)  writel((val), cp->regs + (reg))
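
/* The *_f ("flush") variants read the register straight back, forcing
 * the posted PCI write to reach the chip before the caller proceeds.
 */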
#define cpw8_f(reg,val) do {                    \
        writeb((val), cp->regs + (reg));        \
        readb(cp->regs + (reg));                \
        } while (0)
#define cpw16_f(reg,val) do {                   \
        writew((val), cp->regs + (reg));        \
        readw(cp->regs + (reg));                \
        } while (0)
#define cpw32_f(reg,val) do {                   \
        writel((val), cp->regs + (reg));        \
        readl(cp->regs + (reg));                \
        } while (0)


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);

static struct pci_device_id cp_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
        { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
        { },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);

static struct {
        const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "tx_ok" },
        { "rx_ok" },
        { "tx_err" },
        { "rx_err" },
        { "rx_fifo" },
        { "frame_align" },
        { "tx_ok_1col" },
        { "tx_ok_mcol" },
        { "rx_ok_phys" },
        { "rx_ok_bcast" },
        { "rx_ok_mcast" },
        { "tx_abort" },
        { "tx_underrun" },
        { "rx_frags" },
};


#if CP_VLAN_TAG_USED
static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        cp->vlgrp = grp;
        if (grp)
                cp->cpcmd |= RxVlanOn;
        else
                cp->cpcmd &= ~RxVlanOn;

        cpw16(CpCmd, cp->cpcmd);
        spin_unlock_irqrestore(&cp->lock, flags);
}
#endif /* CP_VLAN_TAG_USED */

static inline void cp_set_rxbufsize (struct cp_private *cp)
{
        unsigned int mtu = cp->dev->mtu;

        if (mtu > ETH_DATA_LEN)
                /* MTU + ethernet header + FCS + optional VLAN tag */
                cp->rx_buf_sz = mtu + ETH_HLEN + 8;
        else
                cp->rx_buf_sz = PKT_BUF_SZ;
}

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
                              struct cp_desc *desc)
{
        skb->protocol = eth_type_trans (skb, cp->dev);

        cp->dev->stats.rx_packets++;
        cp->dev->stats.rx_bytes += skb->len;

#if CP_VLAN_TAG_USED
        if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
                vlan_hwaccel_receive_skb(skb, cp->vlgrp,
                                         swab16(le32_to_cpu(desc->opts2) & 0xffff));
        } else
#endif
                netif_receive_skb(skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
                            u32 status, u32 len)
{
        if (netif_msg_rx_err (cp))
                pr_debug("%s: rx err, slot %d status 0x%x len %d\n",
                        cp->dev->name, rx_tail, status, len);
        cp->dev->stats.rx_errors++;
        if (status & RxErrFrame)
                cp->dev->stats.rx_frame_errors++;
        if (status & RxErrCRC)
                cp->dev->stats.rx_crc_errors++;
        if ((status & RxErrRunt) || (status & RxErrLong))
                cp->dev->stats.rx_length_errors++;
        if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
                cp->dev->stats.rx_length_errors++;
        if (status & RxErrFIFO)
                cp->dev->stats.rx_fifo_errors++;
}

static inline unsigned int cp_rx_csum_ok (u32 status)
{
        unsigned int protocol = (status >> 16) & 0x3;

        if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
                return 1;
        else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
                return 1;
        else if ((protocol == RxProtoIP) && (!(status & IPFail)))
                return 1;
        return 0;
}

static int cp_rx_poll(struct napi_struct *napi, int budget)
{
        struct cp_private *cp = container_of(napi, struct cp_private, napi);
        struct net_device *dev = cp->dev;
        unsigned int rx_tail = cp->rx_tail;
        int rx;

rx_status_loop:
        rx = 0;
        cpw16(IntrStatus, cp_rx_intr_mask);

        while (1) {
                u32 status, len;
                dma_addr_t mapping;
                struct sk_buff *skb, *new_skb;
                struct cp_desc *desc;
                const unsigned buflen = cp->rx_buf_sz;

                skb = cp->rx_skb[rx_tail];
                BUG_ON(!skb);

                desc = &cp->rx_ring[rx_tail];
                status = le32_to_cpu(desc->opts1);
                if (status & DescOwn)
                        break;

                len = (status & 0x1fff) - 4;
                mapping = le64_to_cpu(desc->addr);

                if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
                        /* we don't support incoming fragmented frames.
                         * instead, we attempt to ensure that the
                         * pre-allocated RX skbs are properly sized such
                         * that RX fragments are never encountered
                         */
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        dev->stats.rx_dropped++;
                        cp->cp_stats.rx_frags++;
                        goto rx_next;
                }

                if (status & (RxError | RxErrFIFO)) {
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        goto rx_next;
                }

                if (netif_msg_rx_status(cp))
                        pr_debug("%s: rx slot %d status 0x%x len %d\n",
                               dev->name, rx_tail, status, len);

                new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
                if (!new_skb) {
                        dev->stats.rx_dropped++;
                        goto rx_next;
                }

                skb_reserve(new_skb, NET_IP_ALIGN);

                dma_unmap_single(&cp->pdev->dev, mapping,
                                 buflen, PCI_DMA_FROMDEVICE);

                /* Handle checksum offloading for incoming packets. */
                if (cp_rx_csum_ok(status))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                skb_put(skb, len);

                mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
                                         PCI_DMA_FROMDEVICE);
                cp->rx_skb[rx_tail] = new_skb;

                cp_rx_skb(cp, skb, desc);
                rx++;

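                /* If buffer replacement failed above, "mapping" still holds
                 * the old buffer's DMA address, so the descriptor below is
                 * simply re-armed with the old buffer and the frame is lost.
                 */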
rx_next:
                cp->rx_ring[rx_tail].opts2 = 0;
                cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
                if (rx_tail == (CP_RX_RING_SIZE - 1))
                        desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
                                                  cp->rx_buf_sz);
                else
                        desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
                rx_tail = NEXT_RX(rx_tail);

                if (rx >= budget)
                        break;
        }

        cp->rx_tail = rx_tail;

        /* if we did not reach work limit, then we're done with
         * this round of polling
         */
        if (rx < budget) {
                unsigned long flags;

                if (cpr16(IntrStatus) & cp_rx_intr_mask)
                        goto rx_status_loop;

                spin_lock_irqsave(&cp->lock, flags);
                cpw16_f(IntrMask, cp_intr_mask);
                __napi_complete(napi);
                spin_unlock_irqrestore(&cp->lock, flags);
        }

        return rx;
}

static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct cp_private *cp;
        u16 status;

        if (unlikely(dev == NULL))
                return IRQ_NONE;
        cp = netdev_priv(dev);

        status = cpr16(IntrStatus);
        if (!status || (status == 0xFFFF))
                return IRQ_NONE;

        if (netif_msg_intr(cp))
                pr_debug("%s: intr, status %04x cmd %02x cpcmd %04x\n",
                        dev->name, status, cpr8(Cmd), cpr16(CpCmd));

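        /* Ack everything except the Rx bits here; those are acked by the
         * NAPI poller in cp_rx_poll(), so Rx events are not lost while a
         * poll is pending.
         */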
        cpw16(IntrStatus, status & ~cp_rx_intr_mask);

        spin_lock(&cp->lock);

        /* close possible races with dev_close */
        if (unlikely(!netif_running(dev))) {
                cpw16(IntrMask, 0);
                spin_unlock(&cp->lock);
                return IRQ_HANDLED;
        }

        if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
                if (napi_schedule_prep(&cp->napi)) {
                        cpw16_f(IntrMask, cp_norx_intr_mask);
                        __napi_schedule(&cp->napi);
                }

        if (status & (TxOK | TxErr | TxEmpty | SWInt))
                cp_tx(cp);
        if (status & LinkChg)
                mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

        spin_unlock(&cp->lock);

        if (status & PciErr) {
                u16 pci_status;

                pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
                pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
                pr_err("%s: PCI bus error, status=%04x, PCI status=%04x\n",
                       dev->name, status, pci_status);

                /* TODO: reset hardware */
        }

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        cp_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
        unsigned tx_head = cp->tx_head;
        unsigned tx_tail = cp->tx_tail;

        while (tx_tail != tx_head) {
                struct cp_desc *txd = cp->tx_ring + tx_tail;
                struct sk_buff *skb;
                u32 status;

                rmb();
                status = le32_to_cpu(txd->opts1);
                if (status & DescOwn)
                        break;

                skb = cp->tx_skb[tx_tail];
                BUG_ON(!skb);

                dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                                 le32_to_cpu(txd->opts1) & 0xffff,
                                 PCI_DMA_TODEVICE);

                if (status & LastFrag) {
                        if (status & (TxError | TxFIFOUnder)) {
                                if (netif_msg_tx_err(cp))
                                        pr_debug("%s: tx err, status 0x%x\n",
                                               cp->dev->name, status);
                                cp->dev->stats.tx_errors++;
                                if (status & TxOWC)
                                        cp->dev->stats.tx_window_errors++;
                                if (status & TxMaxCol)
                                        cp->dev->stats.tx_aborted_errors++;
                                if (status & TxLinkFail)
                                        cp->dev->stats.tx_carrier_errors++;
                                if (status & TxFIFOUnder)
                                        cp->dev->stats.tx_fifo_errors++;
                        } else {
                                cp->dev->stats.collisions +=
                                        ((status >> TxColCntShift) & TxColCntMask);
                                cp->dev->stats.tx_packets++;
                                cp->dev->stats.tx_bytes += skb->len;
                                if (netif_msg_tx_done(cp))
                                        pr_debug("%s: tx done, slot %d\n", cp->dev->name, tx_tail);
                        }
                        dev_kfree_skb_irq(skb);
                }

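                /* The skb pointer was stored in every slot it spans, but
                 * the skb itself is freed only once, on its LastFrag slot
                 * above.
                 */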
                cp->tx_skb[tx_tail] = NULL;

                tx_tail = NEXT_TX(tx_tail);
        }

        cp->tx_tail = tx_tail;

        if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
                netif_wake_queue(cp->dev);
}

static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned entry;
        u32 eor, flags;
        unsigned long intr_flags;
#if CP_VLAN_TAG_USED
        u32 vlan_tag = 0;
#endif
        int mss = 0;

        spin_lock_irqsave(&cp->lock, intr_flags);

        /* This is a hard error, log it. */
        if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&cp->lock, intr_flags);
                pr_err(PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                return NETDEV_TX_BUSY;
        }

#if CP_VLAN_TAG_USED
        if (cp->vlgrp && vlan_tx_tag_present(skb))
                vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
#endif

        entry = cp->tx_head;
        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
        if (dev->features & NETIF_F_TSO)
                mss = skb_shinfo(skb)->gso_size;

        if (skb_shinfo(skb)->nr_frags == 0) {
                struct cp_desc *txd = &cp->tx_ring[entry];
                u32 len;
                dma_addr_t mapping;

                len = skb->len;
                mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
                CP_VLAN_TX_TAG(txd, vlan_tag);
                txd->addr = cpu_to_le64(mapping);
                wmb();

                flags = eor | len | DescOwn | FirstFrag | LastFrag;

                if (mss)
                        flags |= LargeSend | ((mss & MSSMask) << MSSShift);
                else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        const struct iphdr *ip = ip_hdr(skb);
                        if (ip->protocol == IPPROTO_TCP)
                                flags |= IPCS | TCPCS;
                        else if (ip->protocol == IPPROTO_UDP)
                                flags |= IPCS | UDPCS;
                        else
                                WARN_ON(1);     /* we need a WARN() */
                }

                txd->opts1 = cpu_to_le32(flags);
                wmb();

                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);
        } else {
                struct cp_desc *txd;
                u32 first_len, first_eor;
                dma_addr_t first_mapping;
                int frag, first_entry = entry;
                const struct iphdr *ip = ip_hdr(skb);

                /* We must give this initial chunk to the device last.
                 * Otherwise we could race with the device.
                 */
                first_eor = eor;
                first_len = skb_headlen(skb);
                first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                               first_len, PCI_DMA_TODEVICE);
                cp->tx_skb[entry] = skb;
                entry = NEXT_TX(entry);

                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                        skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
                        u32 len;
                        u32 ctrl;
                        dma_addr_t mapping;

                        len = this_frag->size;
                        mapping = dma_map_single(&cp->pdev->dev,
                                                 ((void *) page_address(this_frag->page) +
                                                  this_frag->page_offset),
                                                 len, PCI_DMA_TODEVICE);
                        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

                        ctrl = eor | len | DescOwn;

                        if (mss)
                                ctrl |= LargeSend |
                                        ((mss & MSSMask) << MSSShift);
                        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                if (ip->protocol == IPPROTO_TCP)
                                        ctrl |= IPCS | TCPCS;
                                else if (ip->protocol == IPPROTO_UDP)
                                        ctrl |= IPCS | UDPCS;
                                else
                                        BUG();
                        }

                        if (frag == skb_shinfo(skb)->nr_frags - 1)
                                ctrl |= LastFrag;

                        txd = &cp->tx_ring[entry];
                        CP_VLAN_TX_TAG(txd, vlan_tag);
                        txd->addr = cpu_to_le64(mapping);
                        wmb();

                        txd->opts1 = cpu_to_le32(ctrl);
                        wmb();

                        cp->tx_skb[entry] = skb;
                        entry = NEXT_TX(entry);
                }

                txd = &cp->tx_ring[first_entry];
                CP_VLAN_TX_TAG(txd, vlan_tag);
                txd->addr = cpu_to_le64(first_mapping);
                wmb();

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        if (ip->protocol == IPPROTO_TCP)
                                txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                         FirstFrag | DescOwn |
                                                         IPCS | TCPCS);
                        else if (ip->protocol == IPPROTO_UDP)
                                txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                         FirstFrag | DescOwn |
                                                         IPCS | UDPCS);
                        else
                                BUG();
                } else
                        txd->opts1 = cpu_to_le32(first_eor | first_len |
                                                 FirstFrag | DescOwn);
                wmb();
        }
        cp->tx_head = entry;
        if (netif_msg_tx_queued(cp))
                pr_debug("%s: tx queued, slot %d, skblen %d\n",
                       dev->name, entry, skb->len);
        if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
                netif_stop_queue(dev);

        spin_unlock_irqrestore(&cp->lock, intr_flags);

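        /* Kick the transmitter: writing NormalTxPoll tells the chip to
         * re-scan the Tx ring for the descriptors just handed over.
         */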
        cpw8(TxPoll, NormalTxPoll);
        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        u32 mc_filter[2];       /* Multicast hash filter */
        int i, rx_mode;
        u32 tmp;

        /* Note: do not reorder, GCC is clever about common statements. */
        if (dev->flags & IFF_PROMISC) {
                /* Unconditionally log net taps. */
                rx_mode =
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else if ((dev->mc_count > multicast_filter_limit)
                   || (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter perfectly -- accept all multicasts. */
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else {
                struct dev_mc_list *mclist;
                rx_mode = AcceptBroadcast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0;
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                        rx_mode |= AcceptMulticast;
                }
        }

        /* We can safely update without stopping the chip. */
        tmp = cp_rx_config | rx_mode;
        if (cp->rx_config != tmp) {
                cpw32_f (RxConfig, tmp);
                cp->rx_config = tmp;
        }
        cpw32_f (MAR0 + 0, mc_filter[0]);
        cpw32_f (MAR0 + 4, mc_filter[1]);
}

static void cp_set_rx_mode (struct net_device *dev)
{
        unsigned long flags;
        struct cp_private *cp = netdev_priv(dev);

        spin_lock_irqsave (&cp->lock, flags);
        __cp_set_rx_mode(dev);
        spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
        /* only lower 24 bits valid; write any value to clear */
        cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
        cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        /* The chip only needs to report frames it silently dropped. */
        spin_lock_irqsave(&cp->lock, flags);
        if (netif_running(dev) && netif_device_present(dev))
                __cp_get_stats(cp);
        spin_unlock_irqrestore(&cp->lock, flags);

        return &dev->stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
        cpw16(IntrStatus, ~(cpr16(IntrStatus)));
        cpw16_f(IntrMask, 0);
        cpw8(Cmd, 0);
        cpw16_f(CpCmd, 0);
        cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
}

static void cp_reset_hw (struct cp_private *cp)
{
        unsigned work = 1000;

        cpw8(Cmd, CmdReset);

        while (work--) {
                if (!(cpr8(Cmd) & CmdReset))
                        return;

                schedule_timeout_uninterruptible(10);
        }

        pr_err("%s: hardware reset timeout\n", cp->dev->name);
}

static inline void cp_start_hw (struct cp_private *cp)
{
        cpw16(CpCmd, cp->cpcmd);
        cpw8(Cmd, RxOn | TxOn);
}

static void cp_init_hw (struct cp_private *cp)
{
        struct net_device *dev = cp->dev;
        dma_addr_t ring_dma;

        cp_reset_hw(cp);

        cpw8_f (Cfg9346, Cfg9346_Unlock);

        /* Restore our idea of the MAC address. */
        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

        __cp_set_rx_mode(dev);
        cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

        cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
        /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
        cpw8(Config3, PARMEnable);
        cp->wol_enabled = 0;

        cpw8(Config5, cpr8(Config5) & PMEStatus);

        cpw32_f(HiTxRingAddr, 0);
        cpw32_f(HiTxRingAddr + 4, 0);

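        /* Program each 64-bit ring address as two 32-bit halves.  The
         * double 16-bit shift extracts the high dword without a 64-bit
         * shift, which would be undefined when dma_addr_t is 32 bits wide.
         */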
        ring_dma = cp->ring_dma;
        cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

        ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
        cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

        cpw16(MultiIntr, 0);

        cpw16_f(IntrMask, cp_intr_mask);

        cpw8_f(Cfg9346, Cfg9346_Lock);
}

static int cp_refill_rx(struct cp_private *cp)
{
        struct net_device *dev = cp->dev;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                dma_addr_t mapping;

                skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
                if (!skb)
                        goto err_out;

                skb_reserve(skb, NET_IP_ALIGN);

                mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                cp->rx_skb[i] = skb;

                cp->rx_ring[i].opts2 = 0;
                cp->rx_ring[i].addr = cpu_to_le64(mapping);
                if (i == (CP_RX_RING_SIZE - 1))
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
                else
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | cp->rx_buf_sz);
        }

        return 0;

err_out:
        cp_clean_rings(cp);
        return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
        cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

        cp_init_rings_index(cp);

        return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
        void *mem;

        mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
                                 &cp->ring_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

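        /* One coherent allocation holds the Rx ring, then the Tx ring,
         * then the 64-byte DMA stats area (see CP_RING_BYTES).
         */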
        cp->rx_ring = mem;
        cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

        return cp_init_rings(cp);
}

static void cp_clean_rings (struct cp_private *cp)
{
        struct cp_desc *desc;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                if (cp->rx_skb[i]) {
                        desc = cp->rx_ring + i;
                        dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(cp->rx_skb[i]);
                }
        }

        for (i = 0; i < CP_TX_RING_SIZE; i++) {
                if (cp->tx_skb[i]) {
                        struct sk_buff *skb = cp->tx_skb[i];

                        desc = cp->tx_ring + i;
                        dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                         le32_to_cpu(desc->opts1) & 0xffff,
                                         PCI_DMA_TODEVICE);
                        if (le32_to_cpu(desc->opts1) & LastFrag)
                                dev_kfree_skb(skb);
                        cp->dev->stats.tx_dropped++;
                }
        }

        memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

        memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
        memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
        cp_clean_rings(cp);
        dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
                          cp->ring_dma);
        cp->rx_ring = NULL;
        cp->tx_ring = NULL;
}

static int cp_open (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;

        if (netif_msg_ifup(cp))
                pr_debug("%s: enabling interface\n", dev->name);

        rc = cp_alloc_rings(cp);
        if (rc)
                return rc;

        napi_enable(&cp->napi);

        cp_init_hw(cp);

        rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                goto err_out_hw;

        netif_carrier_off(dev);
        mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
        netif_start_queue(dev);

        return 0;

err_out_hw:
        napi_disable(&cp->napi);
        cp_stop_hw(cp);
        cp_free_rings(cp);
        return rc;
}

static int cp_close (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        napi_disable(&cp->napi);

        if (netif_msg_ifdown(cp))
                pr_debug("%s: disabling interface\n", dev->name);

        spin_lock_irqsave(&cp->lock, flags);

        netif_stop_queue(dev);
        netif_carrier_off(dev);

        cp_stop_hw(cp);

        spin_unlock_irqrestore(&cp->lock, flags);

        free_irq(dev->irq, dev);

        cp_free_rings(cp);
        return 0;
}

static void cp_tx_timeout(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
        int rc;

        pr_warning("%s: Transmit timeout, status %2x %4x %4x %4x\n",
               dev->name, cpr8(Cmd), cpr16(CpCmd),
               cpr16(IntrStatus), cpr16(IntrMask));

        spin_lock_irqsave(&cp->lock, flags);

        cp_stop_hw(cp);
        cp_clean_rings(cp);
        rc = cp_init_rings(cp);
        cp_start_hw(cp);

        netif_wake_queue(dev);

        spin_unlock_irqrestore(&cp->lock, flags);

        return;
}

#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        /* check for invalid MTU, according to hardware limits */
        if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
                return -EINVAL;

        /* if network interface not up, no need for complexity */
        if (!netif_running(dev)) {
                dev->mtu = new_mtu;
                cp_set_rxbufsize(cp);   /* set new rx buf size */
                return 0;
        }

        spin_lock_irqsave(&cp->lock, flags);

        cp_stop_hw(cp);                 /* stop h/w and free rings */
        cp_clean_rings(cp);

        dev->mtu = new_mtu;
        cp_set_rxbufsize(cp);           /* set new rx buf size */

        rc = cp_init_rings(cp);         /* realloc and restart h/w */
        cp_start_hw(cp);

        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}
#endif /* BROKEN */

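/* The internal PHY's MII registers are windowed onto chip registers; only
 * BMCR, BMSR, ADVERTISE, LPA and EXPANSION are mapped, the rest read as 0.
 */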
static const char mii_2_8139_map[8] = {
        BasicModeCtrl,
        BasicModeStatus,
        0,
        0,
        NWayAdvert,
        NWayLPAR,
        NWayExpansion,
        0
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct cp_private *cp = netdev_priv(dev);

        return location < 8 && mii_2_8139_map[location] ?
               readw(cp->regs + mii_2_8139_map[location]) : 0;
}


static void mdio_write(struct net_device *dev, int phy_id, int location,
                       int value)
{
        struct cp_private *cp = netdev_priv(dev);

        if (location == 0) {
                cpw8(Cfg9346, Cfg9346_Unlock);
                cpw16(BasicModeCtrl, value);
                cpw8(Cfg9346, Cfg9346_Lock);
        } else if (location < 8 && mii_2_8139_map[location])
                cpw16(mii_2_8139_map[location], value);
}

/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
                           const struct ethtool_wolinfo *wol)
{
        u8 options;

        options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
        /* If WOL is being disabled, no need for complexity */
        if (wol->wolopts) {
                if (wol->wolopts & WAKE_PHY)    options |= LinkUp;
                if (wol->wolopts & WAKE_MAGIC)  options |= MagicPacket;
        }

        cpw8 (Cfg9346, Cfg9346_Unlock);
        cpw8 (Config3, options);
        cpw8 (Cfg9346, Cfg9346_Lock);

        options = 0; /* Paranoia setting */
        options = cpr8 (Config5) & ~(UWF | MWF | BWF);
        /* If WOL is being disabled, no need for complexity */
        if (wol->wolopts) {
                if (wol->wolopts & WAKE_UCAST)  options |= UWF;
                if (wol->wolopts & WAKE_BCAST)  options |= BWF;
                if (wol->wolopts & WAKE_MCAST)  options |= MWF;
        }

        cpw8 (Config5, options);

        cp->wol_enabled = (wol->wolopts) ? 1 : 0;

        return 0;
}

/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
                     struct ethtool_wolinfo *wol)
{
        u8 options;

        wol->wolopts   = 0; /* Start from scratch */
        wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
                         WAKE_MCAST | WAKE_UCAST;
        /* We don't need to go on if WOL is disabled */
        if (!cp->wol_enabled) return;

        options        = cpr8 (Config3);
        if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
        if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;

        options        = 0; /* Paranoia setting */
        options        = cpr8 (Config5);
        if (options & UWF)           wol->wolopts |= WAKE_UCAST;
        if (options & BWF)           wol->wolopts |= WAKE_BCAST;
        if (options & MWF)           wol->wolopts |= WAKE_MCAST;
}

static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct cp_private *cp = netdev_priv(dev);

        strcpy (info->driver, DRV_NAME);
        strcpy (info->version, DRV_VERSION);
        strcpy (info->bus_info, pci_name(cp->pdev));
}

static int cp_get_regs_len(struct net_device *dev)
{
        return CP_REGS_SIZE;
}

static int cp_get_sset_count (struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return CP_NUM_STATS;
        default:
                return -EOPNOTSUPP;
        }
}

static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        rc = mii_ethtool_gset(&cp->mii_if, cmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}

static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cp_private *cp = netdev_priv(dev);
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&cp->lock, flags);
        rc = mii_ethtool_sset(&cp->mii_if, cmd);
        spin_unlock_irqrestore(&cp->lock, flags);

        return rc;
}

static int cp_nway_reset(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return mii_nway_restart(&cp->mii_if);
}

static u32 cp_get_msglevel(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return cp->msg_enable;
}

static void cp_set_msglevel(struct net_device *dev, u32 value)
{
        struct cp_private *cp = netdev_priv(dev);
        cp->msg_enable = value;
}

static u32 cp_get_rx_csum(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
}

static int cp_set_rx_csum(struct net_device *dev, u32 data)
{
        struct cp_private *cp = netdev_priv(dev);
        u16 cmd = cp->cpcmd, newcmd;

        newcmd = cmd;

        if (data)
                newcmd |= RxChkSum;
        else
                newcmd &= ~RxChkSum;

        if (newcmd != cmd) {
                unsigned long flags;

                spin_lock_irqsave(&cp->lock, flags);
                cp->cpcmd = newcmd;
                cpw16_f(CpCmd, newcmd);
                spin_unlock_irqrestore(&cp->lock, flags);
        }

        return 0;
}

static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                        void *p)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        if (regs->len < CP_REGS_SIZE)
                return /* -EINVAL */;

        regs->version = CP_REGS_VER;

        spin_lock_irqsave(&cp->lock, flags);
        memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
        spin_unlock_irqrestore(&cp->lock, flags);
}

static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave (&cp->lock, flags);
        netdev_get_wol (cp, wol);
        spin_unlock_irqrestore (&cp->lock, flags);
}

static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
        int rc;

        spin_lock_irqsave (&cp->lock, flags);
        rc = netdev_set_wol (cp, wol);
        spin_unlock_irqrestore (&cp->lock, flags);

        return rc;
}

static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        default:
                BUG();
                break;
        }
}

static void cp_get_ethtool_stats (struct net_device *dev,
                                  struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct cp_private *cp = netdev_priv(dev);
        struct cp_dma_stats *nic_stats;
        dma_addr_t dma;
        int i;
1525        nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1526                                       &dma, GFP_KERNEL);
1527        if (!nic_stats)
1528                return;
1529
1530        /* begin NIC statistics dump */
1531        cpw32(StatsAddr + 4, (u64)dma >> 32);
1532        cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1533        cpr32(StatsAddr);
1534
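            /*
             * The chip clears DumpStats once the statistics block has been
             * DMA'd to the address programmed above; bound the wait at
             * 1000 * 10us = 10ms so a wedged chip cannot hang us here.
             */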
1535        for (i = 0; i < 1000; i++) {
1536                if ((cpr32(StatsAddr) & DumpStats) == 0)
1537                        break;
1538                udelay(10);
1539        }
1540        cpw32(StatsAddr, 0);
1541        cpw32(StatsAddr + 4, 0);
1542        cpr32(StatsAddr);
1543
1544        i = 0;
1545        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1546        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1547        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1548        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1549        tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1550        tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1551        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1552        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1553        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1554        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1555        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1556        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1557        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1558        tmp_stats[i++] = cp->cp_stats.rx_frags;
1559        BUG_ON(i != CP_NUM_STATS);
1560
1561        dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1562}
1563
1564static const struct ethtool_ops cp_ethtool_ops = {
1565        .get_drvinfo            = cp_get_drvinfo,
1566        .get_regs_len           = cp_get_regs_len,
1567        .get_sset_count         = cp_get_sset_count,
1568        .get_settings           = cp_get_settings,
1569        .set_settings           = cp_set_settings,
1570        .nway_reset             = cp_nway_reset,
1571        .get_link               = ethtool_op_get_link,
1572        .get_msglevel           = cp_get_msglevel,
1573        .set_msglevel           = cp_set_msglevel,
1574        .get_rx_csum            = cp_get_rx_csum,
1575        .set_rx_csum            = cp_set_rx_csum,
1576        .set_tx_csum            = ethtool_op_set_tx_csum, /* local! */
1577        .set_sg                 = ethtool_op_set_sg,
1578        .set_tso                = ethtool_op_set_tso,
1579        .get_regs               = cp_get_regs,
1580        .get_wol                = cp_get_wol,
1581        .set_wol                = cp_set_wol,
1582        .get_strings            = cp_get_strings,
1583        .get_ethtool_stats      = cp_get_ethtool_stats,
1584        .get_eeprom_len         = cp_get_eeprom_len,
1585        .get_eeprom             = cp_get_eeprom,
1586        .set_eeprom             = cp_set_eeprom,
1587};
1588
1589static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1590{
1591        struct cp_private *cp = netdev_priv(dev);
1592        int rc;
1593        unsigned long flags;
1594
1595        if (!netif_running(dev))
1596                return -EINVAL;
1597
1598        spin_lock_irqsave(&cp->lock, flags);
1599        rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1600        spin_unlock_irqrestore(&cp->lock, flags);
1601        return rc;
1602}
1603
1604static int cp_set_mac_address(struct net_device *dev, void *p)
1605{
1606        struct cp_private *cp = netdev_priv(dev);
1607        struct sockaddr *addr = p;
1608
1609        if (!is_valid_ether_addr(addr->sa_data))
1610                return -EADDRNOTAVAIL;
1611
1612        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1613
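            /* MAC0..MAC5 are write-protected: unlock the config registers,
               update the address as two 32-bit little-endian writes, then
               re-lock. */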
1614        spin_lock_irq(&cp->lock);
1615
1616        cpw8_f(Cfg9346, Cfg9346_Unlock);
1617        cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1618        cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1619        cpw8_f(Cfg9346, Cfg9346_Lock);
1620
1621        spin_unlock_irq(&cp->lock);
1622
1623        return 0;
1624}
1625
1626/* Serial EEPROM section. */
1627
1628/*  EEPROM_Ctrl bits. */
1629#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
1630#define EE_CS                   0x08    /* EEPROM chip select. */
1631#define EE_DATA_WRITE   0x02    /* EEPROM chip data in. */
1632#define EE_WRITE_0              0x00
1633#define EE_WRITE_1              0x02
1634#define EE_DATA_READ    0x01    /* EEPROM chip data out. */
1635#define EE_ENB                  (0x80 | EE_CS)
1636
1637/* Delay between EEPROM clock transitions.
1638   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1639 */
1640
1641#define eeprom_delay()  readl(ee_addr)
1642
1643/* The EEPROM commands include the always-set leading bit. */
1644#define EE_EXTEND_CMD   (4)
1645#define EE_WRITE_CMD    (5)
1646#define EE_READ_CMD             (6)
1647#define EE_ERASE_CMD    (7)
1648
1649#define EE_EWDS_ADDR    (0)
1650#define EE_WRAL_ADDR    (1)
1651#define EE_ERAL_ADDR    (2)
1652#define EE_EWEN_ADDR    (3)
1653
1654#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1655
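    /*
     * The EEPROM is a 93C46/93C56-style 3-wire serial part hanging off
     * the Cfg9346 register: chip select, clock and data-in are bit-banged
     * through the bits defined above, and data-out is sampled via
     * EE_DATA_READ.  Commands are clocked out MSB first: the always-set
     * start bit, a 2-bit opcode, then the address.
     */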
1656static void eeprom_cmd_start(void __iomem *ee_addr)
1657{
1658        writeb (EE_ENB & ~EE_CS, ee_addr);
1659        writeb (EE_ENB, ee_addr);
1660        eeprom_delay ();
1661}
1662
1663static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1664{
1665        int i;
1666
1667        /* Shift the command bits out. */
1668        for (i = cmd_len - 1; i >= 0; i--) {
1669                int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1670                writeb (EE_ENB | dataval, ee_addr);
1671                eeprom_delay ();
1672                writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1673                eeprom_delay ();
1674        }
1675        writeb (EE_ENB, ee_addr);
1676        eeprom_delay ();
1677}
1678
1679static void eeprom_cmd_end(void __iomem *ee_addr)
1680{
1681        writeb (~EE_CS, ee_addr);
1682        eeprom_delay ();
1683}
1684
1685static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1686                              int addr_len)
1687{
1688        int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1689
1690        eeprom_cmd_start(ee_addr);
1691        eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1692        eeprom_cmd_end(ee_addr);
1693}
1694
1695static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1696{
1697        int i;
1698        u16 retval = 0;
1699        void __iomem *ee_addr = ioaddr + Cfg9346;
1700        int read_cmd = location | (EE_READ_CMD << addr_len);
1701
1702        eeprom_cmd_start(ee_addr);
1703        eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1704
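            /* Clock the 16 data bits out of the EEPROM, MSB first. */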
1705        for (i = 16; i > 0; i--) {
1706                writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1707                eeprom_delay ();
1708                retval =
1709                    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1710                                     0);
1711                writeb (EE_ENB, ee_addr);
1712                eeprom_delay ();
1713        }
1714
1715        eeprom_cmd_end(ee_addr);
1716
1717        return retval;
1718}
1719
1720static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1721                         int addr_len)
1722{
1723        int i;
1724        void __iomem *ee_addr = ioaddr + Cfg9346;
1725        int write_cmd = location | (EE_WRITE_CMD << addr_len);
1726
1727        eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1728
1729        eeprom_cmd_start(ee_addr);
1730        eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1731        eeprom_cmd(ee_addr, val, 16);
1732        eeprom_cmd_end(ee_addr);
1733
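            /* The part raises DO (EE_DATA_READ) once the internal write
               cycle completes; poll for it instead of sleeping for the
               worst-case programming time. */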
1734        eeprom_cmd_start(ee_addr);
1735        for (i = 0; i < 20000; i++)
1736                if (readb(ee_addr) & EE_DATA_READ)
1737                        break;
1738        eeprom_cmd_end(ee_addr);
1739
1740        eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1741}
1742
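    /*
     * ethtool EEPROM access.  The contents can be dumped, and rewritten
     * with care, from userspace; e.g. with a stock ethtool binary
     * (interface name hypothetical):
     *
     *      ethtool -e eth0                                   # dump
     *      ethtool -E eth0 magic 0x8139 offset 0 value 0x29  # write a byte
     *
     * Writes are accepted only when the supplied magic matches
     * CP_EEPROM_MAGIC (0x8139).
     */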
1743static int cp_get_eeprom_len(struct net_device *dev)
1744{
1745        struct cp_private *cp = netdev_priv(dev);
1746        int size;
1747
1748        spin_lock_irq(&cp->lock);
1749        size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1750        spin_unlock_irq(&cp->lock);
1751
1752        return size;
1753}
1754
1755static int cp_get_eeprom(struct net_device *dev,
1756                         struct ethtool_eeprom *eeprom, u8 *data)
1757{
1758        struct cp_private *cp = netdev_priv(dev);
1759        unsigned int addr_len;
1760        u16 val;
1761        u32 offset = eeprom->offset >> 1;
1762        u32 len = eeprom->len;
1763        u32 i = 0;
1764
1765        eeprom->magic = CP_EEPROM_MAGIC;
1766
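            /*
             * The EEPROM is addressed in 16-bit words, so transfers that
             * start or end on an odd byte boundary must peel off half a
             * word at the edges.
             */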
1767        spin_lock_irq(&cp->lock);
1768
1769        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1770
1771        if (eeprom->offset & 1) {
1772                val = read_eeprom(cp->regs, offset, addr_len);
1773                data[i++] = (u8)(val >> 8);
1774                offset++;
1775        }
1776
1777        while (i < len - 1) {
1778                val = read_eeprom(cp->regs, offset, addr_len);
1779                data[i++] = (u8)val;
1780                data[i++] = (u8)(val >> 8);
1781                offset++;
1782        }
1783
1784        if (i < len) {
1785                val = read_eeprom(cp->regs, offset, addr_len);
1786                data[i] = (u8)val;
1787        }
1788
1789        spin_unlock_irq(&cp->lock);
1790        return 0;
1791}
1792
1793static int cp_set_eeprom(struct net_device *dev,
1794                         struct ethtool_eeprom *eeprom, u8 *data)
1795{
1796        struct cp_private *cp = netdev_priv(dev);
1797        unsigned int addr_len;
1798        u16 val;
1799        u32 offset = eeprom->offset >> 1;
1800        u32 len = eeprom->len;
1801        u32 i = 0;
1802
1803        if (eeprom->magic != CP_EEPROM_MAGIC)
1804                return -EINVAL;
1805
1806        spin_lock_irq(&cp->lock);
1807
1808        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1809
1810        if (eeprom->offset & 1) {
1811                val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1812                val |= (u16)data[i++] << 8;
1813                write_eeprom(cp->regs, offset, val, addr_len);
1814                offset++;
1815        }
1816
1817        while (i < len - 1) {
1818                val = (u16)data[i++];
1819                val |= (u16)data[i++] << 8;
1820                write_eeprom(cp->regs, offset, val, addr_len);
1821                offset++;
1822        }
1823
1824        if (i < len) {
1825                val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1826                val |= (u16)data[i];
1827                write_eeprom(cp->regs, offset, val, addr_len);
1828        }
1829
1830        spin_unlock_irq(&cp->lock);
1831        return 0;
1832}
1833
1834/* Put the board into the D3hot low-power state and wait for a WakeUp signal */
1835static void cp_set_d3_state (struct cp_private *cp)
1836{
1837        pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1838        pci_set_power_state (cp->pdev, PCI_D3hot);
1839}
1840
1841static const struct net_device_ops cp_netdev_ops = {
1842        .ndo_open               = cp_open,
1843        .ndo_stop               = cp_close,
1844        .ndo_validate_addr      = eth_validate_addr,
1845        .ndo_set_mac_address    = cp_set_mac_address,
1846        .ndo_set_multicast_list = cp_set_rx_mode,
1847        .ndo_get_stats          = cp_get_stats,
1848        .ndo_do_ioctl           = cp_ioctl,
1849        .ndo_start_xmit         = cp_start_xmit,
1850        .ndo_tx_timeout         = cp_tx_timeout,
1851#if CP_VLAN_TAG_USED
1852        .ndo_vlan_rx_register   = cp_vlan_rx_register,
1853#endif
1854#ifdef BROKEN
1855        .ndo_change_mtu         = cp_change_mtu,
1856#endif
1857
1858#ifdef CONFIG_NET_POLL_CONTROLLER
1859        .ndo_poll_controller    = cp_poll_controller,
1860#endif
1861};
1862
1863static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1864{
1865        struct net_device *dev;
1866        struct cp_private *cp;
1867        int rc;
1868        void __iomem *regs;
1869        resource_size_t pciaddr;
1870        unsigned int addr_len, i, pci_using_dac;
1871
1872#ifndef MODULE
1873        static int version_printed;
1874        if (version_printed++ == 0)
1875                pr_info("%s", version);
1876#endif
1877
1878        if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1879            pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1880                dev_info(&pdev->dev,
1881                           "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1882                           pdev->vendor, pdev->device, pdev->revision);
1883                return -ENODEV;
1884        }
1885
1886        dev = alloc_etherdev(sizeof(struct cp_private));
1887        if (!dev)
1888                return -ENOMEM;
1889        SET_NETDEV_DEV(dev, &pdev->dev);
1890
1891        cp = netdev_priv(dev);
1892        cp->pdev = pdev;
1893        cp->dev = dev;
1894        cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1895        spin_lock_init (&cp->lock);
1896        cp->mii_if.dev = dev;
1897        cp->mii_if.mdio_read = mdio_read;
1898        cp->mii_if.mdio_write = mdio_write;
1899        cp->mii_if.phy_id = CP_INTERNAL_PHY;
1900        cp->mii_if.phy_id_mask = 0x1f;
1901        cp->mii_if.reg_num_mask = 0x1f;
1902        cp_set_rxbufsize(cp);
1903
1904        rc = pci_enable_device(pdev);
1905        if (rc)
1906                goto err_out_free;
1907
1908        rc = pci_set_mwi(pdev);
1909        if (rc)
1910                goto err_out_disable;
1911
1912        rc = pci_request_regions(pdev, DRV_NAME);
1913        if (rc)
1914                goto err_out_mwi;
1915
1916        pciaddr = pci_resource_start(pdev, 1);
1917        if (!pciaddr) {
1918                rc = -EIO;
1919                dev_err(&pdev->dev, "no MMIO resource\n");
1920                goto err_out_res;
1921        }
1922        if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1923                rc = -EIO;
1924                dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1925                       (unsigned long long)pci_resource_len(pdev, 1));
1926                goto err_out_res;
1927        }
1928
1929        /* Configure DMA attributes. */
1930        if ((sizeof(dma_addr_t) > 4) &&
1931            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1932            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1933                pci_using_dac = 1;
1934        } else {
1935                pci_using_dac = 0;
1936
1937                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1938                if (rc) {
1939                        dev_err(&pdev->dev,
1940                                   "No usable DMA configuration, aborting.\n");
1941                        goto err_out_res;
1942                }
1943                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1944                if (rc) {
1945                        dev_err(&pdev->dev,
1946                                   "No usable consistent DMA configuration, "
1947                                   "aborting.\n");
1948                        goto err_out_res;
1949                }
1950        }
1951
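            /* PCIDAC asks the chip to emit PCI dual address cycles so DMA
               can reach buffers above 4GB; set it only when a 64-bit DMA
               mask was successfully negotiated above. */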
1952        cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1953                    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1954
1955        regs = ioremap(pciaddr, CP_REGS_SIZE);
1956        if (!regs) {
1957                rc = -EIO;
1958                dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1959                       (unsigned long long)pci_resource_len(pdev, 1),
1960                       (unsigned long long)pciaddr);
1961                goto err_out_res;
1962        }
1963        dev->base_addr = (unsigned long) regs;
1964        cp->regs = regs;
1965
1966        cp_stop_hw(cp);
1967
1968        /* read MAC address from EEPROM */
1969        addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1970        for (i = 0; i < 3; i++)
1971                ((__le16 *) (dev->dev_addr))[i] =
1972                    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1973        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1974
1975        dev->netdev_ops = &cp_netdev_ops;
1976        netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1977        dev->ethtool_ops = &cp_ethtool_ops;
1978        dev->watchdog_timeo = TX_TIMEOUT;
1979
1980#if CP_VLAN_TAG_USED
1981        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1982#endif
1983
1984        if (pci_using_dac)
1985                dev->features |= NETIF_F_HIGHDMA;
1986
1987#if 0 /* disabled by default until verified */
1988        dev->features |= NETIF_F_TSO;
1989#endif
1990
1991        dev->irq = pdev->irq;
1992
1993        rc = register_netdev(dev);
1994        if (rc)
1995                goto err_out_iomap;
1996
1997        pr_info("%s: RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
1998                dev->name,
1999                dev->base_addr,
2000                dev->dev_addr,
2001                dev->irq);
2002
2003        pci_set_drvdata(pdev, dev);
2004
2005        /* enable busmastering; MWI was already enabled by pci_set_mwi() above */
2006        pci_set_master(pdev);
2007
2008        if (cp->wol_enabled)
2009                cp_set_d3_state (cp);
2010
2011        return 0;
2012
2013err_out_iomap:
2014        iounmap(regs);
2015err_out_res:
2016        pci_release_regions(pdev);
2017err_out_mwi:
2018        pci_clear_mwi(pdev);
2019err_out_disable:
2020        pci_disable_device(pdev);
2021err_out_free:
2022        free_netdev(dev);
2023        return rc;
2024}
2025
2026static void cp_remove_one (struct pci_dev *pdev)
2027{
2028        struct net_device *dev = pci_get_drvdata(pdev);
2029        struct cp_private *cp = netdev_priv(dev);
2030
2031        unregister_netdev(dev);
2032        iounmap(cp->regs);
2033        if (cp->wol_enabled)
2034                pci_set_power_state (pdev, PCI_D0);
2035        pci_release_regions(pdev);
2036        pci_clear_mwi(pdev);
2037        pci_disable_device(pdev);
2038        pci_set_drvdata(pdev, NULL);
2039        free_netdev(dev);
2040}
2041
2042#ifdef CONFIG_PM
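    /*
     * Legacy PCI PM hooks.  cp_suspend quiesces the chip and enters the
     * chosen D-state with PME# armed only when WOL was configured;
     * cp_resume must fully reprogram the chip, since register state is
     * not preserved across the power transition.
     */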
2043static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2044{
2045        struct net_device *dev = pci_get_drvdata(pdev);
2046        struct cp_private *cp = netdev_priv(dev);
2047        unsigned long flags;
2048
2049        if (!netif_running(dev))
2050                return 0;
2051
2052        netif_device_detach (dev);
2053        netif_stop_queue (dev);
2054
2055        spin_lock_irqsave (&cp->lock, flags);
2056
2057        /* Disable Rx and Tx */
2058        cpw16 (IntrMask, 0);
2059        cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2060
2061        spin_unlock_irqrestore (&cp->lock, flags);
2062
2063        pci_save_state(pdev);
2064        pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2065        pci_set_power_state(pdev, pci_choose_state(pdev, state));
2066
2067        return 0;
2068}
2069
2070static int cp_resume (struct pci_dev *pdev)
2071{
2072        struct net_device *dev = pci_get_drvdata (pdev);
2073        struct cp_private *cp = netdev_priv(dev);
2074        unsigned long flags;
2075
2076        if (!netif_running(dev))
2077                return 0;
2078
2079        netif_device_attach (dev);
2080
2081        pci_set_power_state(pdev, PCI_D0);
2082        pci_restore_state(pdev);
2083        pci_enable_wake(pdev, PCI_D0, 0);
2084
2085        /* FIXME: breakage may occur if the Rx ring buffer was depleted while suspended */
2086        cp_init_rings_index (cp);
2087        cp_init_hw (cp);
2088        netif_start_queue (dev);
2089
2090        spin_lock_irqsave (&cp->lock, flags);
2091
2092        mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2093
2094        spin_unlock_irqrestore (&cp->lock, flags);
2095
2096        return 0;
2097}
2098#endif /* CONFIG_PM */
2099
2100static struct pci_driver cp_driver = {
2101        .name         = DRV_NAME,
2102        .id_table     = cp_pci_tbl,
2103        .probe        = cp_init_one,
2104        .remove       = cp_remove_one,
2105#ifdef CONFIG_PM
2106        .resume       = cp_resume,
2107        .suspend      = cp_suspend,
2108#endif
2109};
2110
2111static int __init cp_init (void)
2112{
2113#ifdef MODULE
2114        pr_info("%s", version);
2115#endif
2116        return pci_register_driver(&cp_driver);
2117}
2118
2119static void __exit cp_exit (void)
2120{
2121        pci_unregister_driver (&cp_driver);
2122}
2123
2124module_init(cp_init);
2125module_exit(cp_exit);
2126