linux/drivers/net/ethernet/realtek/8139cp.c
   1/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
   2/*
   3        Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
   4
   5        Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
   6        Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
   7        Copyright 2001 Manfred Spraul                               [natsemi.c]
   8        Copyright 1999-2001 by Donald Becker.                       [natsemi.c]
   9        Written 1997-2001 by Donald Becker.                         [8139too.c]
  10        Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
  11
  12        This software may be used and distributed according to the terms of
  13        the GNU General Public License (GPL), incorporated herein by reference.
  14        Drivers based on or derived from this code fall under the GPL and must
  15        retain the authorship, copyright and license notice.  This file is not
  16        a complete program and may only be used when the entire operating
  17        system is licensed under the GPL.
  18
  19        See the file COPYING in this distribution for more information.
  20
  21        Contributors:
  22
  23                Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
  24                PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
  25                LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>
  26
  27        TODO:
  28        * Test Tx checksumming thoroughly
  29
  30        Low priority TODO:
  31        * Complete reset on PciErr
  32        * Consider Rx interrupt mitigation using TimerIntr
  33        * Investigate using skb->priority with h/w VLAN priority
  34        * Investigate using High Priority Tx Queue with skb->priority
  35        * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
  36        * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
  37        * Implement Tx software interrupt mitigation via
  38          Tx descriptor bit
  39        * The real minimum of CP_MIN_MTU is 4 bytes.  However,
  40          for this to be supported, one must(?) turn on packet padding.
  41        * Support external MII transceivers (patch available)
  42
  43        NOTES:
  44        * TX checksumming is considered experimental.  It is off by
   45          default; use ethtool to turn it on.
  46
  47 */
  48
  49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  50
  51#define DRV_NAME                "8139cp"
  52#define DRV_VERSION             "1.3"
  53#define DRV_RELDATE             "Mar 22, 2004"
  54
  55
  56#include <linux/module.h>
  57#include <linux/moduleparam.h>
  58#include <linux/kernel.h>
  59#include <linux/compiler.h>
  60#include <linux/netdevice.h>
  61#include <linux/etherdevice.h>
  62#include <linux/init.h>
  63#include <linux/interrupt.h>
  64#include <linux/pci.h>
  65#include <linux/dma-mapping.h>
  66#include <linux/delay.h>
  67#include <linux/ethtool.h>
  68#include <linux/gfp.h>
  69#include <linux/mii.h>
  70#include <linux/if_vlan.h>
  71#include <linux/crc32.h>
  72#include <linux/in.h>
  73#include <linux/ip.h>
  74#include <linux/tcp.h>
  75#include <linux/udp.h>
  76#include <linux/cache.h>
  77#include <asm/io.h>
  78#include <asm/irq.h>
  79#include <linux/uaccess.h>
  80
  81/* These identify the driver base version and may not be removed. */
  82static char version[] =
  83DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
  84
  85MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
  86MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
  87MODULE_VERSION(DRV_VERSION);
  88MODULE_LICENSE("GPL");
  89
  90static int debug = -1;
  91module_param(debug, int, 0);
  92MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
  93
  94/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
  95   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
  96static int multicast_filter_limit = 32;
  97module_param(multicast_filter_limit, int, 0);
  98MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
  99
 100#define CP_DEF_MSG_ENABLE       (NETIF_MSG_DRV          | \
 101                                 NETIF_MSG_PROBE        | \
 102                                 NETIF_MSG_LINK)
 103#define CP_NUM_STATS            14      /* struct cp_dma_stats, plus one */
 104#define CP_STATS_SIZE           64      /* size in bytes of DMA stats block */
 105#define CP_REGS_SIZE            (0xff + 1)
 106#define CP_REGS_VER             1               /* version 1 */
 107#define CP_RX_RING_SIZE         64
 108#define CP_TX_RING_SIZE         64
 109#define CP_RING_BYTES           \
 110                ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +   \
 111                 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +   \
 112                 CP_STATS_SIZE)
 113#define NEXT_TX(N)              (((N) + 1) & (CP_TX_RING_SIZE - 1))
 114#define NEXT_RX(N)              (((N) + 1) & (CP_RX_RING_SIZE - 1))
 115#define TX_BUFFS_AVAIL(CP)                                      \
 116        (((CP)->tx_tail <= (CP)->tx_head) ?                     \
 117          (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :       \
 118          (CP)->tx_tail - (CP)->tx_head - 1)
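/*
 * Illustrative note (not in the original source): NEXT_TX/NEXT_RX rely on the
 * ring sizes being powers of two, so "(N + 1) & (SIZE - 1)" wraps the index.
 * TX_BUFFS_AVAIL counts free Tx descriptors while always leaving one slot
 * unused so tx_head can never catch up with tx_tail.  For example, with
 * CP_TX_RING_SIZE = 64, tx_head = 10 and tx_tail = 4 there are six
 * descriptors in flight and TX_BUFFS_AVAIL evaluates to 4 + 63 - 10 = 57.
 */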
 119
 120#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/
 121#define CP_INTERNAL_PHY         32
 122
 123/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
 124#define RX_FIFO_THRESH          5       /* Rx buffer level before first PCI xfer.  */
 125#define RX_DMA_BURST            4       /* Maximum PCI burst, '4' is 256 */
 126#define TX_DMA_BURST            6       /* Maximum PCI burst, '6' is 1024 */
 127#define TX_EARLY_THRESH         256     /* Early Tx threshold, in bytes */
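/*
 * Worked example of the log2 encoding above (added for clarity): a field
 * value v stands for 16 << v bytes, so RX_FIFO_THRESH = 5 is a 512 byte
 * threshold, RX_DMA_BURST = 4 is a 256 byte burst, TX_DMA_BURST = 6 is
 * 1024 bytes, and 7 means "no limit until end of packet".
 */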
 128
 129/* Time in jiffies before concluding the transmitter is hung. */
 130#define TX_TIMEOUT              (6*HZ)
 131
 132/* hardware minimum and maximum for a single frame's data payload */
 133#define CP_MIN_MTU              60      /* TODO: allow lower, but pad */
 134#define CP_MAX_MTU              4096
 135
 136enum {
 137        /* NIC register offsets */
 138        MAC0            = 0x00, /* Ethernet hardware address. */
 139        MAR0            = 0x08, /* Multicast filter. */
 140        StatsAddr       = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
 141        TxRingAddr      = 0x20, /* 64-bit start addr of Tx ring */
 142        HiTxRingAddr    = 0x28, /* 64-bit start addr of high priority Tx ring */
 143        Cmd             = 0x37, /* Command register */
 144        IntrMask        = 0x3C, /* Interrupt mask */
 145        IntrStatus      = 0x3E, /* Interrupt status */
 146        TxConfig        = 0x40, /* Tx configuration */
 147        ChipVersion     = 0x43, /* 8-bit chip version, inside TxConfig */
 148        RxConfig        = 0x44, /* Rx configuration */
 149        RxMissed        = 0x4C, /* 24 bits valid, write clears */
 150        Cfg9346         = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
 151        Config1         = 0x52, /* Config1 */
 152        Config3         = 0x59, /* Config3 */
 153        Config4         = 0x5A, /* Config4 */
 154        MultiIntr       = 0x5C, /* Multiple interrupt select */
 155        BasicModeCtrl   = 0x62, /* MII BMCR */
 156        BasicModeStatus = 0x64, /* MII BMSR */
 157        NWayAdvert      = 0x66, /* MII ADVERTISE */
 158        NWayLPAR        = 0x68, /* MII LPA */
 159        NWayExpansion   = 0x6A, /* MII Expansion */
 160        TxDmaOkLowDesc  = 0x82, /* Low 16 bit address of a Tx descriptor. */
 161        Config5         = 0xD8, /* Config5 */
 162        TxPoll          = 0xD9, /* Tell chip to check Tx descriptors for work */
 163        RxMaxSize       = 0xDA, /* Max size of an Rx packet (8169 only) */
 164        CpCmd           = 0xE0, /* C+ Command register (C+ mode only) */
 165        IntrMitigate    = 0xE2, /* rx/tx interrupt mitigation control */
 166        RxRingAddr      = 0xE4, /* 64-bit start addr of Rx ring */
 167        TxThresh        = 0xEC, /* Early Tx threshold */
 168        OldRxBufAddr    = 0x30, /* DMA address of Rx ring buffer (C mode) */
 169        OldTSD0         = 0x10, /* DMA address of first Tx desc (C mode) */
 170
 171        /* Tx and Rx status descriptors */
 172        DescOwn         = (1 << 31), /* Descriptor is owned by NIC */
 173        RingEnd         = (1 << 30), /* End of descriptor ring */
 174        FirstFrag       = (1 << 29), /* First segment of a packet */
 175        LastFrag        = (1 << 28), /* Final segment of a packet */
 176        LargeSend       = (1 << 27), /* TCP Large Send Offload (TSO) */
 177        MSSShift        = 16,        /* MSS value position */
 178        MSSMask         = 0x7ff,     /* MSS value: 11 bits */
 179        TxError         = (1 << 23), /* Tx error summary */
 180        RxError         = (1 << 20), /* Rx error summary */
 181        IPCS            = (1 << 18), /* Calculate IP checksum */
 182        UDPCS           = (1 << 17), /* Calculate UDP/IP checksum */
 183        TCPCS           = (1 << 16), /* Calculate TCP/IP checksum */
 184        TxVlanTag       = (1 << 17), /* Add VLAN tag */
 185        RxVlanTagged    = (1 << 16), /* Rx VLAN tag available */
 186        IPFail          = (1 << 15), /* IP checksum failed */
 187        UDPFail         = (1 << 14), /* UDP/IP checksum failed */
 188        TCPFail         = (1 << 13), /* TCP/IP checksum failed */
 189        NormalTxPoll    = (1 << 6),  /* One or more normal Tx packets to send */
 190        PID1            = (1 << 17), /* 2 protocol id bits:  0==non-IP, */
 191        PID0            = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
 192        RxProtoTCP      = 1,
 193        RxProtoUDP      = 2,
 194        RxProtoIP       = 3,
 195        TxFIFOUnder     = (1 << 25), /* Tx FIFO underrun */
 196        TxOWC           = (1 << 22), /* Tx Out-of-window collision */
 197        TxLinkFail      = (1 << 21), /* Link failed during Tx of packet */
 198        TxMaxCol        = (1 << 20), /* Tx aborted due to excessive collisions */
 199        TxColCntShift   = 16,        /* Shift, to get 4-bit Tx collision cnt */
 200        TxColCntMask    = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
 201        RxErrFrame      = (1 << 27), /* Rx frame alignment error */
 202        RxMcast         = (1 << 26), /* Rx multicast packet rcv'd */
 203        RxErrCRC        = (1 << 18), /* Rx CRC error */
 204        RxErrRunt       = (1 << 19), /* Rx error, packet < 64 bytes */
 205        RxErrLong       = (1 << 21), /* Rx error, packet > 4096 bytes */
 206        RxErrFIFO       = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
 207
 208        /* StatsAddr register */
 209        DumpStats       = (1 << 3),  /* Begin stats dump */
 210
 211        /* RxConfig register */
 212        RxCfgFIFOShift  = 13,        /* Shift, to get Rx FIFO thresh value */
 213        RxCfgDMAShift   = 8,         /* Shift, to get Rx Max DMA value */
 214        AcceptErr       = 0x20,      /* Accept packets with CRC errors */
 215        AcceptRunt      = 0x10,      /* Accept runt (<64 bytes) packets */
 216        AcceptBroadcast = 0x08,      /* Accept broadcast packets */
 217        AcceptMulticast = 0x04,      /* Accept multicast packets */
 218        AcceptMyPhys    = 0x02,      /* Accept pkts with our MAC as dest */
 219        AcceptAllPhys   = 0x01,      /* Accept all pkts w/ physical dest */
 220
 221        /* IntrMask / IntrStatus registers */
 222        PciErr          = (1 << 15), /* System error on the PCI bus */
 223        TimerIntr       = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
 224        LenChg          = (1 << 13), /* Cable length change */
 225        SWInt           = (1 << 8),  /* Software-requested interrupt */
 226        TxEmpty         = (1 << 7),  /* No Tx descriptors available */
 227        RxFIFOOvr       = (1 << 6),  /* Rx FIFO Overflow */
 228        LinkChg         = (1 << 5),  /* Packet underrun, or link change */
 229        RxEmpty         = (1 << 4),  /* No Rx descriptors available */
 230        TxErr           = (1 << 3),  /* Tx error */
 231        TxOK            = (1 << 2),  /* Tx packet sent */
 232        RxErr           = (1 << 1),  /* Rx error */
 233        RxOK            = (1 << 0),  /* Rx packet received */
 234        IntrResvd       = (1 << 10), /* reserved, according to RealTek engineers,
 235                                        but hardware likes to raise it */
 236
 237        IntrAll         = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
 238                          RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
 239                          RxErr | RxOK | IntrResvd,
 240
 241        /* C mode command register */
 242        CmdReset        = (1 << 4),  /* Enable to reset; self-clearing */
 243        RxOn            = (1 << 3),  /* Rx mode enable */
 244        TxOn            = (1 << 2),  /* Tx mode enable */
 245
 246        /* C+ mode command register */
 247        RxVlanOn        = (1 << 6),  /* Rx VLAN de-tagging enable */
 248        RxChkSum        = (1 << 5),  /* Rx checksum offload enable */
 249        PCIDAC          = (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
 250        PCIMulRW        = (1 << 3),  /* Enable PCI read/write multiple */
 251        CpRxOn          = (1 << 1),  /* Rx mode enable */
 252        CpTxOn          = (1 << 0),  /* Tx mode enable */
 253
  254        /* Cfg9346 EEPROM control register */
 255        Cfg9346_Lock    = 0x00,      /* Lock ConfigX/MII register access */
 256        Cfg9346_Unlock  = 0xC0,      /* Unlock ConfigX/MII register access */
 257
 258        /* TxConfig register */
 259        IFG             = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
 260        TxDMAShift      = 8,         /* DMA burst value (0-7) is shift this many bits */
 261
 262        /* Early Tx Threshold register */
 263        TxThreshMask    = 0x3f,      /* Mask bits 5-0 */
 264        TxThreshMax     = 2048,      /* Max early Tx threshold */
 265
 266        /* Config1 register */
 267        DriverLoaded    = (1 << 5),  /* Software marker, driver is loaded */
 268        LWACT           = (1 << 4),  /* LWAKE active mode */
 269        PMEnable        = (1 << 0),  /* Enable various PM features of chip */
 270
 271        /* Config3 register */
 272        PARMEnable      = (1 << 6),  /* Enable auto-loading of PHY parms */
 273        MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
 274        LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */
 275
 276        /* Config4 register */
 277        LWPTN           = (1 << 1),  /* LWAKE Pattern */
 278        LWPME           = (1 << 4),  /* LANWAKE vs PMEB */
 279
 280        /* Config5 register */
 281        BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
 282        MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
 283        UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
 284        LANWake         = (1 << 1),  /* Enable LANWake signal */
 285        PMEStatus       = (1 << 0),  /* PME status can be reset by PCI RST# */
 286
 287        cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
 288        cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
 289        cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
 290};
 291
 292static const unsigned int cp_rx_config =
 293          (RX_FIFO_THRESH << RxCfgFIFOShift) |
 294          (RX_DMA_BURST << RxCfgDMAShift);
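/*
 * Added note: with the defaults above this constant works out to
 * (5 << 13) | (4 << 8) = 0xa400; the accept-mode bits (AcceptBroadcast and
 * friends) are OR-ed in later by __cp_set_rx_mode().
 */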
 295
 296struct cp_desc {
 297        __le32          opts1;
 298        __le32          opts2;
 299        __le64          addr;
 300};
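/*
 * Added note: each descriptor is 16 bytes and little-endian as seen by the
 * NIC.  opts1 carries DescOwn/RingEnd, the fragment flags and the buffer or
 * frame length, opts2 carries the VLAN tag, and addr is the 64-bit DMA
 * address of the data buffer.  cp_refill_rx() shows how an Rx slot is armed:
 * opts1 = DescOwn | rx_buf_sz, plus RingEnd on the final slot.
 */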
 301
 302struct cp_dma_stats {
 303        __le64                  tx_ok;
 304        __le64                  rx_ok;
 305        __le64                  tx_err;
 306        __le32                  rx_err;
 307        __le16                  rx_fifo;
 308        __le16                  frame_align;
 309        __le32                  tx_ok_1col;
 310        __le32                  tx_ok_mcol;
 311        __le64                  rx_ok_phys;
 312        __le64                  rx_ok_bcast;
 313        __le32                  rx_ok_mcast;
 314        __le16                  tx_abort;
 315        __le16                  tx_underrun;
 316} __packed;
 317
 318struct cp_extra_stats {
 319        unsigned long           rx_frags;
 320};
 321
 322struct cp_private {
 323        void                    __iomem *regs;
 324        struct net_device       *dev;
 325        spinlock_t              lock;
 326        u32                     msg_enable;
 327
 328        struct napi_struct      napi;
 329
 330        struct pci_dev          *pdev;
 331        u32                     rx_config;
 332        u16                     cpcmd;
 333
 334        struct cp_extra_stats   cp_stats;
 335
 336        unsigned                rx_head         ____cacheline_aligned;
 337        unsigned                rx_tail;
 338        struct cp_desc          *rx_ring;
 339        struct sk_buff          *rx_skb[CP_RX_RING_SIZE];
 340
 341        unsigned                tx_head         ____cacheline_aligned;
 342        unsigned                tx_tail;
 343        struct cp_desc          *tx_ring;
 344        struct sk_buff          *tx_skb[CP_TX_RING_SIZE];
 345        u32                     tx_opts[CP_TX_RING_SIZE];
 346
 347        unsigned                rx_buf_sz;
 348        unsigned                wol_enabled : 1; /* Is Wake-on-LAN enabled? */
 349
 350        dma_addr_t              ring_dma;
 351
 352        struct mii_if_info      mii_if;
 353};
 354
 355#define cpr8(reg)       readb(cp->regs + (reg))
 356#define cpr16(reg)      readw(cp->regs + (reg))
 357#define cpr32(reg)      readl(cp->regs + (reg))
 358#define cpw8(reg,val)   writeb((val), cp->regs + (reg))
 359#define cpw16(reg,val)  writew((val), cp->regs + (reg))
 360#define cpw32(reg,val)  writel((val), cp->regs + (reg))
 361#define cpw8_f(reg,val) do {                    \
 362        writeb((val), cp->regs + (reg));        \
 363        readb(cp->regs + (reg));                \
 364        } while (0)
 365#define cpw16_f(reg,val) do {                   \
 366        writew((val), cp->regs + (reg));        \
 367        readw(cp->regs + (reg));                \
 368        } while (0)
 369#define cpw32_f(reg,val) do {                   \
 370        writel((val), cp->regs + (reg));        \
 371        readl(cp->regs + (reg));                \
 372        } while (0)
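/*
 * Added note: the *_f ("flush") variants read the register straight back
 * after writing it.  MMIO writes may be posted on the PCI bus, so the dummy
 * read forces the write to reach the chip before the driver continues, which
 * matters for ordering-sensitive steps such as changing the interrupt mask.
 */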
 373
 374
 375static void __cp_set_rx_mode (struct net_device *dev);
 376static void cp_tx (struct cp_private *cp);
 377static void cp_clean_rings (struct cp_private *cp);
 378#ifdef CONFIG_NET_POLL_CONTROLLER
 379static void cp_poll_controller(struct net_device *dev);
 380#endif
 381static int cp_get_eeprom_len(struct net_device *dev);
 382static int cp_get_eeprom(struct net_device *dev,
 383                         struct ethtool_eeprom *eeprom, u8 *data);
 384static int cp_set_eeprom(struct net_device *dev,
 385                         struct ethtool_eeprom *eeprom, u8 *data);
 386
 387static struct {
 388        const char str[ETH_GSTRING_LEN];
 389} ethtool_stats_keys[] = {
 390        { "tx_ok" },
 391        { "rx_ok" },
 392        { "tx_err" },
 393        { "rx_err" },
 394        { "rx_fifo" },
 395        { "frame_align" },
 396        { "tx_ok_1col" },
 397        { "tx_ok_mcol" },
 398        { "rx_ok_phys" },
 399        { "rx_ok_bcast" },
 400        { "rx_ok_mcast" },
 401        { "tx_abort" },
 402        { "tx_underrun" },
 403        { "rx_frags" },
 404};
 405
 406
 407static inline void cp_set_rxbufsize (struct cp_private *cp)
 408{
 409        unsigned int mtu = cp->dev->mtu;
 410
 411        if (mtu > ETH_DATA_LEN)
 412                /* MTU + ethernet header + FCS + optional VLAN tag */
 413                cp->rx_buf_sz = mtu + ETH_HLEN + 8;
 414        else
 415                cp->rx_buf_sz = PKT_BUF_SZ;
 416}
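/*
 * Added example: with the default MTU of 1500 (ETH_DATA_LEN) the fixed
 * 1536 byte PKT_BUF_SZ is used; for a larger MTU such as 4000 the buffer
 * becomes 4000 + 14 (Ethernet header) + 8 (FCS plus optional VLAN tag)
 * = 4022 bytes.
 */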
 417
 418static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
 419                              struct cp_desc *desc)
 420{
 421        u32 opts2 = le32_to_cpu(desc->opts2);
 422
 423        skb->protocol = eth_type_trans (skb, cp->dev);
 424
 425        cp->dev->stats.rx_packets++;
 426        cp->dev->stats.rx_bytes += skb->len;
 427
 428        if (opts2 & RxVlanTagged)
 429                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
 430
 431        napi_gro_receive(&cp->napi, skb);
 432}
 433
 434static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
 435                            u32 status, u32 len)
 436{
 437        netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
 438                  rx_tail, status, len);
 439        cp->dev->stats.rx_errors++;
 440        if (status & RxErrFrame)
 441                cp->dev->stats.rx_frame_errors++;
 442        if (status & RxErrCRC)
 443                cp->dev->stats.rx_crc_errors++;
 444        if ((status & RxErrRunt) || (status & RxErrLong))
 445                cp->dev->stats.rx_length_errors++;
 446        if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
 447                cp->dev->stats.rx_length_errors++;
 448        if (status & RxErrFIFO)
 449                cp->dev->stats.rx_fifo_errors++;
 450}
 451
 452static inline unsigned int cp_rx_csum_ok (u32 status)
 453{
 454        unsigned int protocol = (status >> 16) & 0x3;
 455
 456        if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
 457            ((protocol == RxProtoUDP) && !(status & UDPFail)))
 458                return 1;
 459        else
 460                return 0;
 461}
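/*
 * Added note: (status >> 16) & 0x3 extracts the two protocol-ID bits
 * (PID1:PID0) from the Rx descriptor.  The hardware checksum is trusted only
 * when the frame was classified as TCP or UDP and the matching
 * TCPFail/UDPFail bit is clear; otherwise cp_rx_poll() leaves the skb as
 * CHECKSUM_NONE and the stack verifies the checksum in software.
 */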
 462
 463static int cp_rx_poll(struct napi_struct *napi, int budget)
 464{
 465        struct cp_private *cp = container_of(napi, struct cp_private, napi);
 466        struct net_device *dev = cp->dev;
 467        unsigned int rx_tail = cp->rx_tail;
 468        int rx = 0;
 469
 470        cpw16(IntrStatus, cp_rx_intr_mask);
 471
 472        while (rx < budget) {
 473                u32 status, len;
 474                dma_addr_t mapping, new_mapping;
 475                struct sk_buff *skb, *new_skb;
 476                struct cp_desc *desc;
 477                const unsigned buflen = cp->rx_buf_sz;
 478
 479                skb = cp->rx_skb[rx_tail];
 480                BUG_ON(!skb);
 481
 482                desc = &cp->rx_ring[rx_tail];
 483                status = le32_to_cpu(desc->opts1);
 484                if (status & DescOwn)
 485                        break;
 486
 487                len = (status & 0x1fff) - 4;
 488                mapping = le64_to_cpu(desc->addr);
 489
 490                if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
 491                        /* we don't support incoming fragmented frames.
 492                         * instead, we attempt to ensure that the
 493                         * pre-allocated RX skbs are properly sized such
 494                         * that RX fragments are never encountered
 495                         */
 496                        cp_rx_err_acct(cp, rx_tail, status, len);
 497                        dev->stats.rx_dropped++;
 498                        cp->cp_stats.rx_frags++;
 499                        goto rx_next;
 500                }
 501
 502                if (status & (RxError | RxErrFIFO)) {
 503                        cp_rx_err_acct(cp, rx_tail, status, len);
 504                        goto rx_next;
 505                }
 506
 507                netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
 508                          rx_tail, status, len);
 509
 510                new_skb = napi_alloc_skb(napi, buflen);
 511                if (!new_skb) {
 512                        dev->stats.rx_dropped++;
 513                        goto rx_next;
 514                }
 515
 516                new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
 517                                         DMA_FROM_DEVICE);
 518                if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
 519                        dev->stats.rx_dropped++;
 520                        kfree_skb(new_skb);
 521                        goto rx_next;
 522                }
 523
 524                dma_unmap_single(&cp->pdev->dev, mapping,
 525                                 buflen, DMA_FROM_DEVICE);
 526
 527                /* Handle checksum offloading for incoming packets. */
 528                if (cp_rx_csum_ok(status))
 529                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 530                else
 531                        skb_checksum_none_assert(skb);
 532
 533                skb_put(skb, len);
 534
 535                cp->rx_skb[rx_tail] = new_skb;
 536
 537                cp_rx_skb(cp, skb, desc);
 538                rx++;
 539                mapping = new_mapping;
 540
 541rx_next:
 542                cp->rx_ring[rx_tail].opts2 = 0;
 543                cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
 544                if (rx_tail == (CP_RX_RING_SIZE - 1))
 545                        desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
 546                                                  cp->rx_buf_sz);
 547                else
 548                        desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
 549                rx_tail = NEXT_RX(rx_tail);
 550        }
 551
 552        cp->rx_tail = rx_tail;
 553
 554        /* if we did not reach work limit, then we're done with
 555         * this round of polling
 556         */
 557        if (rx < budget && napi_complete_done(napi, rx)) {
 558                unsigned long flags;
 559
 560                spin_lock_irqsave(&cp->lock, flags);
 561                cpw16_f(IntrMask, cp_intr_mask);
 562                spin_unlock_irqrestore(&cp->lock, flags);
 563        }
 564
 565        return rx;
 566}
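/*
 * Added note: this is the usual NAPI shape.  cp_interrupt() masks the Rx
 * interrupt sources and schedules the poll; cp_rx_poll() then consumes up to
 * "budget" packets, and only when it finishes below budget does it call
 * napi_complete_done() and restore the full interrupt mask, so under load
 * the chip stays in polled mode instead of interrupting per packet.
 */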
 567
 568static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 569{
 570        struct net_device *dev = dev_instance;
 571        struct cp_private *cp;
 572        int handled = 0;
 573        u16 status;
 574        u16 mask;
 575
 576        if (unlikely(dev == NULL))
 577                return IRQ_NONE;
 578        cp = netdev_priv(dev);
 579
 580        spin_lock(&cp->lock);
 581
 582        mask = cpr16(IntrMask);
 583        if (!mask)
 584                goto out_unlock;
 585
 586        status = cpr16(IntrStatus);
 587        if (!status || (status == 0xFFFF))
 588                goto out_unlock;
 589
 590        handled = 1;
 591
 592        netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
 593                  status, cpr8(Cmd), cpr16(CpCmd));
 594
 595        cpw16(IntrStatus, status & ~cp_rx_intr_mask);
 596
  597        /* close possible races with dev_close */
 598        if (unlikely(!netif_running(dev))) {
 599                cpw16(IntrMask, 0);
 600                goto out_unlock;
 601        }
 602
 603        if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
 604                if (napi_schedule_prep(&cp->napi)) {
 605                        cpw16_f(IntrMask, cp_norx_intr_mask);
 606                        __napi_schedule(&cp->napi);
 607                }
 608
 609        if (status & (TxOK | TxErr | TxEmpty | SWInt))
 610                cp_tx(cp);
 611        if (status & LinkChg)
 612                mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
 613
 614
 615        if (status & PciErr) {
 616                u16 pci_status;
 617
 618                pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
 619                pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
 620                netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
 621                           status, pci_status);
 622
 623                /* TODO: reset hardware */
 624        }
 625
 626out_unlock:
 627        spin_unlock(&cp->lock);
 628
 629        return IRQ_RETVAL(handled);
 630}
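/*
 * Added note: the IRQ is requested with IRQF_SHARED, so this handler may be
 * called for another device's interrupt.  A status of 0 means "not ours",
 * and 0xFFFF is what reads return after the device has been powered down or
 * surprise-removed; both cases bail out and report IRQ_NONE.
 */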
 631
 632#ifdef CONFIG_NET_POLL_CONTROLLER
 633/*
 634 * Polling receive - used by netconsole and other diagnostic tools
 635 * to allow network i/o with interrupts disabled.
 636 */
 637static void cp_poll_controller(struct net_device *dev)
 638{
 639        struct cp_private *cp = netdev_priv(dev);
 640        const int irq = cp->pdev->irq;
 641
 642        disable_irq(irq);
 643        cp_interrupt(irq, dev);
 644        enable_irq(irq);
 645}
 646#endif
 647
 648static void cp_tx (struct cp_private *cp)
 649{
 650        unsigned tx_head = cp->tx_head;
 651        unsigned tx_tail = cp->tx_tail;
 652        unsigned bytes_compl = 0, pkts_compl = 0;
 653
 654        while (tx_tail != tx_head) {
 655                struct cp_desc *txd = cp->tx_ring + tx_tail;
 656                struct sk_buff *skb;
 657                u32 status;
 658
 659                rmb();
 660                status = le32_to_cpu(txd->opts1);
 661                if (status & DescOwn)
 662                        break;
 663
 664                skb = cp->tx_skb[tx_tail];
 665                BUG_ON(!skb);
 666
 667                dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
 668                                 cp->tx_opts[tx_tail] & 0xffff,
 669                                 DMA_TO_DEVICE);
 670
 671                if (status & LastFrag) {
 672                        if (status & (TxError | TxFIFOUnder)) {
 673                                netif_dbg(cp, tx_err, cp->dev,
 674                                          "tx err, status 0x%x\n", status);
 675                                cp->dev->stats.tx_errors++;
 676                                if (status & TxOWC)
 677                                        cp->dev->stats.tx_window_errors++;
 678                                if (status & TxMaxCol)
 679                                        cp->dev->stats.tx_aborted_errors++;
 680                                if (status & TxLinkFail)
 681                                        cp->dev->stats.tx_carrier_errors++;
 682                                if (status & TxFIFOUnder)
 683                                        cp->dev->stats.tx_fifo_errors++;
 684                        } else {
 685                                cp->dev->stats.collisions +=
 686                                        ((status >> TxColCntShift) & TxColCntMask);
 687                                cp->dev->stats.tx_packets++;
 688                                cp->dev->stats.tx_bytes += skb->len;
 689                                netif_dbg(cp, tx_done, cp->dev,
 690                                          "tx done, slot %d\n", tx_tail);
 691                        }
 692                        bytes_compl += skb->len;
 693                        pkts_compl++;
 694                        dev_consume_skb_irq(skb);
 695                }
 696
 697                cp->tx_skb[tx_tail] = NULL;
 698
 699                tx_tail = NEXT_TX(tx_tail);
 700        }
 701
 702        cp->tx_tail = tx_tail;
 703
 704        netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
 705        if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
 706                netif_wake_queue(cp->dev);
 707}
 708
 709static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
 710{
 711        return skb_vlan_tag_present(skb) ?
 712                TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
 713}
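/*
 * Added note: the tag is byte-swapped because the chip evidently expects the
 * VLAN TCI in network byte order inside the otherwise little-endian opts2
 * word; the Rx path undoes this with the matching swab16() in cp_rx_skb().
 */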
 714
 715static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
 716                                   int first, int entry_last)
 717{
 718        int frag, index;
 719        struct cp_desc *txd;
 720        skb_frag_t *this_frag;
 721        for (frag = 0; frag+first < entry_last; frag++) {
 722                index = first+frag;
 723                cp->tx_skb[index] = NULL;
 724                txd = &cp->tx_ring[index];
 725                this_frag = &skb_shinfo(skb)->frags[frag];
 726                dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
 727                                 skb_frag_size(this_frag), DMA_TO_DEVICE);
 728        }
 729}
 730
 731static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 732                                        struct net_device *dev)
 733{
 734        struct cp_private *cp = netdev_priv(dev);
 735        unsigned entry;
 736        u32 eor, opts1;
 737        unsigned long intr_flags;
 738        __le32 opts2;
 739        int mss = 0;
 740
 741        spin_lock_irqsave(&cp->lock, intr_flags);
 742
 743        /* This is a hard error, log it. */
 744        if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 745                netif_stop_queue(dev);
 746                spin_unlock_irqrestore(&cp->lock, intr_flags);
 747                netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 748                return NETDEV_TX_BUSY;
 749        }
 750
 751        entry = cp->tx_head;
 752        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 753        mss = skb_shinfo(skb)->gso_size;
 754
 755        if (mss > MSSMask) {
 756                netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
 757                                 mss);
 758                goto out_dma_error;
 759        }
 760
 761        opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
 762        opts1 = DescOwn;
 763        if (mss)
 764                opts1 |= LargeSend | (mss << MSSShift);
 765        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 766                const struct iphdr *ip = ip_hdr(skb);
 767                if (ip->protocol == IPPROTO_TCP)
 768                        opts1 |= IPCS | TCPCS;
 769                else if (ip->protocol == IPPROTO_UDP)
 770                        opts1 |= IPCS | UDPCS;
 771                else {
 772                        WARN_ONCE(1,
 773                                  "Net bug: asked to checksum invalid Legacy IP packet\n");
 774                        goto out_dma_error;
 775                }
 776        }
 777
 778        if (skb_shinfo(skb)->nr_frags == 0) {
 779                struct cp_desc *txd = &cp->tx_ring[entry];
 780                u32 len;
 781                dma_addr_t mapping;
 782
 783                len = skb->len;
 784                mapping = dma_map_single(&cp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
 785                if (dma_mapping_error(&cp->pdev->dev, mapping))
 786                        goto out_dma_error;
 787
 788                txd->opts2 = opts2;
 789                txd->addr = cpu_to_le64(mapping);
 790                wmb();
 791
 792                opts1 |= eor | len | FirstFrag | LastFrag;
 793
 794                txd->opts1 = cpu_to_le32(opts1);
 795                wmb();
 796
 797                cp->tx_skb[entry] = skb;
 798                cp->tx_opts[entry] = opts1;
 799                netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
 800                          entry, skb->len);
 801        } else {
 802                struct cp_desc *txd;
 803                u32 first_len, first_eor, ctrl;
 804                dma_addr_t first_mapping;
 805                int frag, first_entry = entry;
 806
 807                /* We must give this initial chunk to the device last.
 808                 * Otherwise we could race with the device.
 809                 */
 810                first_eor = eor;
 811                first_len = skb_headlen(skb);
 812                first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
 813                                               first_len, DMA_TO_DEVICE);
 814                if (dma_mapping_error(&cp->pdev->dev, first_mapping))
 815                        goto out_dma_error;
 816
 817                cp->tx_skb[entry] = skb;
 818
 819                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 820                        const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
 821                        u32 len;
 822                        dma_addr_t mapping;
 823
 824                        entry = NEXT_TX(entry);
 825
 826                        len = skb_frag_size(this_frag);
 827                        mapping = dma_map_single(&cp->pdev->dev,
 828                                                 skb_frag_address(this_frag),
 829                                                 len, DMA_TO_DEVICE);
 830                        if (dma_mapping_error(&cp->pdev->dev, mapping)) {
 831                                unwind_tx_frag_mapping(cp, skb, first_entry, entry);
 832                                goto out_dma_error;
 833                        }
 834
 835                        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 836
 837                        ctrl = opts1 | eor | len;
 838
 839                        if (frag == skb_shinfo(skb)->nr_frags - 1)
 840                                ctrl |= LastFrag;
 841
 842                        txd = &cp->tx_ring[entry];
 843                        txd->opts2 = opts2;
 844                        txd->addr = cpu_to_le64(mapping);
 845                        wmb();
 846
 847                        txd->opts1 = cpu_to_le32(ctrl);
 848                        wmb();
 849
 850                        cp->tx_opts[entry] = ctrl;
 851                        cp->tx_skb[entry] = skb;
 852                }
 853
 854                txd = &cp->tx_ring[first_entry];
 855                txd->opts2 = opts2;
 856                txd->addr = cpu_to_le64(first_mapping);
 857                wmb();
 858
 859                ctrl = opts1 | first_eor | first_len | FirstFrag;
 860                txd->opts1 = cpu_to_le32(ctrl);
 861                wmb();
 862
 863                cp->tx_opts[first_entry] = ctrl;
 864                netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
 865                          first_entry, entry, skb->len);
 866        }
 867        cp->tx_head = NEXT_TX(entry);
 868
 869        netdev_sent_queue(dev, skb->len);
 870        if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 871                netif_stop_queue(dev);
 872
 873out_unlock:
 874        spin_unlock_irqrestore(&cp->lock, intr_flags);
 875
 876        cpw8(TxPoll, NormalTxPoll);
 877
 878        return NETDEV_TX_OK;
 879out_dma_error:
 880        dev_kfree_skb_any(skb);
 881        cp->dev->stats.tx_dropped++;
 882        goto out_unlock;
 883}
 884
 885/* Set or clear the multicast filter for this adaptor.
 886   This routine is not state sensitive and need not be SMP locked. */
 887
 888static void __cp_set_rx_mode (struct net_device *dev)
 889{
 890        struct cp_private *cp = netdev_priv(dev);
 891        u32 mc_filter[2];       /* Multicast hash filter */
 892        int rx_mode;
 893
 894        /* Note: do not reorder, GCC is clever about common statements. */
 895        if (dev->flags & IFF_PROMISC) {
 896                /* Unconditionally log net taps. */
 897                rx_mode =
 898                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 899                    AcceptAllPhys;
 900                mc_filter[1] = mc_filter[0] = 0xffffffff;
 901        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
 902                   (dev->flags & IFF_ALLMULTI)) {
 903                /* Too many to filter perfectly -- accept all multicasts. */
 904                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 905                mc_filter[1] = mc_filter[0] = 0xffffffff;
 906        } else {
 907                struct netdev_hw_addr *ha;
 908                rx_mode = AcceptBroadcast | AcceptMyPhys;
 909                mc_filter[1] = mc_filter[0] = 0;
 910                netdev_for_each_mc_addr(ha, dev) {
 911                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 912
 913                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 914                        rx_mode |= AcceptMulticast;
 915                }
 916        }
 917
 918        /* We can safely update without stopping the chip. */
 919        cp->rx_config = cp_rx_config | rx_mode;
 920        cpw32_f(RxConfig, cp->rx_config);
 921
 922        cpw32_f (MAR0 + 0, mc_filter[0]);
 923        cpw32_f (MAR0 + 4, mc_filter[1]);
 924}
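/*
 * Added example: the multicast filter is a 64-bit hash table in MAR0..MAR7.
 * ether_crc() of the address is shifted down to a 6-bit index (0..63);
 * bit_nr >> 5 selects the 32-bit half (MAR0 or MAR0 + 4) and
 * 1 << (bit_nr & 31) sets the bit, e.g. bit_nr = 37 sets bit 5 of
 * mc_filter[1].
 */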
 925
 926static void cp_set_rx_mode (struct net_device *dev)
 927{
 928        unsigned long flags;
 929        struct cp_private *cp = netdev_priv(dev);
 930
 931        spin_lock_irqsave (&cp->lock, flags);
 932        __cp_set_rx_mode(dev);
 933        spin_unlock_irqrestore (&cp->lock, flags);
 934}
 935
 936static void __cp_get_stats(struct cp_private *cp)
 937{
 938        /* only lower 24 bits valid; write any value to clear */
 939        cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
 940        cpw32 (RxMissed, 0);
 941}
 942
 943static struct net_device_stats *cp_get_stats(struct net_device *dev)
 944{
 945        struct cp_private *cp = netdev_priv(dev);
 946        unsigned long flags;
 947
  948        /* The chip only needs to report frames it silently dropped. */
 949        spin_lock_irqsave(&cp->lock, flags);
 950        if (netif_running(dev) && netif_device_present(dev))
 951                __cp_get_stats(cp);
 952        spin_unlock_irqrestore(&cp->lock, flags);
 953
 954        return &dev->stats;
 955}
 956
 957static void cp_stop_hw (struct cp_private *cp)
 958{
 959        cpw16(IntrStatus, ~(cpr16(IntrStatus)));
 960        cpw16_f(IntrMask, 0);
 961        cpw8(Cmd, 0);
 962        cpw16_f(CpCmd, 0);
 963        cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
 964
 965        cp->rx_tail = 0;
 966        cp->tx_head = cp->tx_tail = 0;
 967
 968        netdev_reset_queue(cp->dev);
 969}
 970
 971static void cp_reset_hw (struct cp_private *cp)
 972{
 973        unsigned work = 1000;
 974
 975        cpw8(Cmd, CmdReset);
 976
 977        while (work--) {
 978                if (!(cpr8(Cmd) & CmdReset))
 979                        return;
 980
 981                schedule_timeout_uninterruptible(10);
 982        }
 983
 984        netdev_err(cp->dev, "hardware reset timeout\n");
 985}
 986
 987static inline void cp_start_hw (struct cp_private *cp)
 988{
 989        dma_addr_t ring_dma;
 990
 991        cpw16(CpCmd, cp->cpcmd);
 992
 993        /*
 994         * These (at least TxRingAddr) need to be configured after the
 995         * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
 996         * (C+ Command Register) recommends that these and more be configured
 997         * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
 998         * it's been observed that the TxRingAddr is actually reset to garbage
 999         * when C+ mode Tx is enabled in CpCmd.
1000         */
1001        cpw32_f(HiTxRingAddr, 0);
1002        cpw32_f(HiTxRingAddr + 4, 0);
1003
1004        ring_dma = cp->ring_dma;
1005        cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1006        cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1007
1008        ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1009        cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1010        cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1011
1012        /*
1013         * Strictly speaking, the datasheet says this should be enabled
1014         * *before* setting the descriptor addresses. But what, then, would
1015         * prevent it from doing DMA to random unconfigured addresses?
1016         * This variant appears to work fine.
1017         */
1018        cpw8(Cmd, RxOn | TxOn);
1019
1020        netdev_reset_queue(cp->dev);
1021}
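/*
 * Added note: the high half of each ring address is written as
 * (ring_dma >> 16) >> 16 rather than ring_dma >> 32 so the expression stays
 * well defined (and warning-free) when dma_addr_t is a 32-bit type; on such
 * systems it simply writes zero.
 */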
1022
1023static void cp_enable_irq(struct cp_private *cp)
1024{
1025        cpw16_f(IntrMask, cp_intr_mask);
1026}
1027
1028static void cp_init_hw (struct cp_private *cp)
1029{
1030        struct net_device *dev = cp->dev;
1031
1032        cp_reset_hw(cp);
1033
1034        cpw8_f (Cfg9346, Cfg9346_Unlock);
1035
1036        /* Restore our idea of the MAC address. */
1037        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1038        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1039
1040        cp_start_hw(cp);
1041        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1042
1043        __cp_set_rx_mode(dev);
1044        cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1045
1046        cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1047        /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1048        cpw8(Config3, PARMEnable);
1049        cp->wol_enabled = 0;
1050
1051        cpw8(Config5, cpr8(Config5) & PMEStatus);
1052
1053        cpw16(MultiIntr, 0);
1054
1055        cpw8_f(Cfg9346, Cfg9346_Lock);
1056}
1057
1058static int cp_refill_rx(struct cp_private *cp)
1059{
1060        struct net_device *dev = cp->dev;
1061        unsigned i;
1062
1063        for (i = 0; i < CP_RX_RING_SIZE; i++) {
1064                struct sk_buff *skb;
1065                dma_addr_t mapping;
1066
1067                skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1068                if (!skb)
1069                        goto err_out;
1070
1071                mapping = dma_map_single(&cp->pdev->dev, skb->data,
1072                                         cp->rx_buf_sz, DMA_FROM_DEVICE);
1073                if (dma_mapping_error(&cp->pdev->dev, mapping)) {
1074                        kfree_skb(skb);
1075                        goto err_out;
1076                }
1077                cp->rx_skb[i] = skb;
1078
1079                cp->rx_ring[i].opts2 = 0;
1080                cp->rx_ring[i].addr = cpu_to_le64(mapping);
1081                if (i == (CP_RX_RING_SIZE - 1))
1082                        cp->rx_ring[i].opts1 =
1083                                cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1084                else
1085                        cp->rx_ring[i].opts1 =
1086                                cpu_to_le32(DescOwn | cp->rx_buf_sz);
1087        }
1088
1089        return 0;
1090
1091err_out:
1092        cp_clean_rings(cp);
1093        return -ENOMEM;
1094}
1095
1096static void cp_init_rings_index (struct cp_private *cp)
1097{
1098        cp->rx_tail = 0;
1099        cp->tx_head = cp->tx_tail = 0;
1100}
1101
1102static int cp_init_rings (struct cp_private *cp)
1103{
1104        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1105        cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1106        memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1107
1108        cp_init_rings_index(cp);
1109
1110        return cp_refill_rx (cp);
1111}
1112
1113static int cp_alloc_rings (struct cp_private *cp)
1114{
1115        struct device *d = &cp->pdev->dev;
1116        void *mem;
1117        int rc;
1118
1119        mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
1120        if (!mem)
1121                return -ENOMEM;
1122
1123        cp->rx_ring = mem;
1124        cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1125
1126        rc = cp_init_rings(cp);
1127        if (rc < 0)
1128                dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1129
1130        return rc;
1131}
1132
1133static void cp_clean_rings (struct cp_private *cp)
1134{
1135        struct cp_desc *desc;
1136        unsigned i;
1137
1138        for (i = 0; i < CP_RX_RING_SIZE; i++) {
1139                if (cp->rx_skb[i]) {
1140                        desc = cp->rx_ring + i;
1141                        dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1142                                         cp->rx_buf_sz, DMA_FROM_DEVICE);
1143                        dev_kfree_skb_any(cp->rx_skb[i]);
1144                }
1145        }
1146
1147        for (i = 0; i < CP_TX_RING_SIZE; i++) {
1148                if (cp->tx_skb[i]) {
1149                        struct sk_buff *skb = cp->tx_skb[i];
1150
1151                        desc = cp->tx_ring + i;
1152                        dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1153                                         le32_to_cpu(desc->opts1) & 0xffff,
1154                                         DMA_TO_DEVICE);
1155                        if (le32_to_cpu(desc->opts1) & LastFrag)
1156                                dev_kfree_skb_any(skb);
1157                        cp->dev->stats.tx_dropped++;
1158                }
1159        }
1160        netdev_reset_queue(cp->dev);
1161
1162        memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1163        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1164        memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1165
1166        memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1167        memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1168}
1169
1170static void cp_free_rings (struct cp_private *cp)
1171{
1172        cp_clean_rings(cp);
1173        dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1174                          cp->ring_dma);
1175        cp->rx_ring = NULL;
1176        cp->tx_ring = NULL;
1177}
1178
1179static int cp_open (struct net_device *dev)
1180{
1181        struct cp_private *cp = netdev_priv(dev);
1182        const int irq = cp->pdev->irq;
1183        int rc;
1184
1185        netif_dbg(cp, ifup, dev, "enabling interface\n");
1186
1187        rc = cp_alloc_rings(cp);
1188        if (rc)
1189                return rc;
1190
1191        napi_enable(&cp->napi);
1192
1193        cp_init_hw(cp);
1194
1195        rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1196        if (rc)
1197                goto err_out_hw;
1198
1199        cp_enable_irq(cp);
1200
1201        netif_carrier_off(dev);
1202        mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1203        netif_start_queue(dev);
1204
1205        return 0;
1206
1207err_out_hw:
1208        napi_disable(&cp->napi);
1209        cp_stop_hw(cp);
1210        cp_free_rings(cp);
1211        return rc;
1212}
1213
1214static int cp_close (struct net_device *dev)
1215{
1216        struct cp_private *cp = netdev_priv(dev);
1217        unsigned long flags;
1218
1219        napi_disable(&cp->napi);
1220
1221        netif_dbg(cp, ifdown, dev, "disabling interface\n");
1222
1223        spin_lock_irqsave(&cp->lock, flags);
1224
1225        netif_stop_queue(dev);
1226        netif_carrier_off(dev);
1227
1228        cp_stop_hw(cp);
1229
1230        spin_unlock_irqrestore(&cp->lock, flags);
1231
1232        free_irq(cp->pdev->irq, dev);
1233
1234        cp_free_rings(cp);
1235        return 0;
1236}
1237
1238static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
1239{
1240        struct cp_private *cp = netdev_priv(dev);
1241        unsigned long flags;
1242        int i;
1243
1244        netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1245                    cpr8(Cmd), cpr16(CpCmd),
1246                    cpr16(IntrStatus), cpr16(IntrMask));
1247
1248        spin_lock_irqsave(&cp->lock, flags);
1249
1250        netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
1251                  cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
1252        for (i = 0; i < CP_TX_RING_SIZE; i++) {
1253                netif_dbg(cp, tx_err, cp->dev,
1254                          "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
1255                          i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
1256                          cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
1257                          le64_to_cpu(cp->tx_ring[i].addr),
1258                          cp->tx_skb[i]);
1259        }
1260
1261        cp_stop_hw(cp);
1262        cp_clean_rings(cp);
1263        cp_init_rings(cp);
1264        cp_start_hw(cp);
1265        __cp_set_rx_mode(dev);
1266        cpw16_f(IntrMask, cp_norx_intr_mask);
1267
1268        netif_wake_queue(dev);
1269        napi_schedule_irqoff(&cp->napi);
1270
1271        spin_unlock_irqrestore(&cp->lock, flags);
1272}
1273
1274static int cp_change_mtu(struct net_device *dev, int new_mtu)
1275{
1276        struct cp_private *cp = netdev_priv(dev);
1277
1278        /* if network interface not up, no need for complexity */
1279        if (!netif_running(dev)) {
1280                dev->mtu = new_mtu;
1281                cp_set_rxbufsize(cp);   /* set new rx buf size */
1282                return 0;
1283        }
1284
1285        /* network IS up, close it, reset MTU, and come up again. */
1286        cp_close(dev);
1287        dev->mtu = new_mtu;
1288        cp_set_rxbufsize(cp);
1289        return cp_open(dev);
1290}
1291
1292static const char mii_2_8139_map[8] = {
1293        BasicModeCtrl,
1294        BasicModeStatus,
1295        0,
1296        0,
1297        NWayAdvert,
1298        NWayLPAR,
1299        NWayExpansion,
1300        0
1301};
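/*
 * Added note: the internal PHY has no real MDIO bus, so generic MII register
 * numbers are translated to memory-mapped chip registers instead: 0 (BMCR)
 * -> BasicModeCtrl, 1 (BMSR) -> BasicModeStatus, 4 -> NWayAdvert,
 * 5 -> NWayLPAR, 6 -> NWayExpansion.  Anything else reads back as zero in
 * mdio_read() below.
 */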
1302
1303static int mdio_read(struct net_device *dev, int phy_id, int location)
1304{
1305        struct cp_private *cp = netdev_priv(dev);
1306
1307        return location < 8 && mii_2_8139_map[location] ?
1308               readw(cp->regs + mii_2_8139_map[location]) : 0;
1309}
1310
1311
1312static void mdio_write(struct net_device *dev, int phy_id, int location,
1313                       int value)
1314{
1315        struct cp_private *cp = netdev_priv(dev);
1316
1317        if (location == 0) {
1318                cpw8(Cfg9346, Cfg9346_Unlock);
1319                cpw16(BasicModeCtrl, value);
1320                cpw8(Cfg9346, Cfg9346_Lock);
1321        } else if (location < 8 && mii_2_8139_map[location])
1322                cpw16(mii_2_8139_map[location], value);
1323}
1324
1325/* Set the ethtool Wake-on-LAN settings */
1326static int netdev_set_wol (struct cp_private *cp,
1327                           const struct ethtool_wolinfo *wol)
1328{
1329        u8 options;
1330
1331        options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1332        /* If WOL is being disabled, no need for complexity */
1333        if (wol->wolopts) {
1334                if (wol->wolopts & WAKE_PHY)    options |= LinkUp;
1335                if (wol->wolopts & WAKE_MAGIC)  options |= MagicPacket;
1336        }
1337
1338        cpw8 (Cfg9346, Cfg9346_Unlock);
1339        cpw8 (Config3, options);
1340        cpw8 (Cfg9346, Cfg9346_Lock);
1341
1342        options = 0; /* Paranoia setting */
1343        options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1344        /* If WOL is being disabled, no need for complexity */
1345        if (wol->wolopts) {
1346                if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1347                if (wol->wolopts & WAKE_BCAST)  options |= BWF;
1348                if (wol->wolopts & WAKE_MCAST)  options |= MWF;
1349        }
1350
1351        cpw8 (Config5, options);
1352
1353        cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1354
1355        return 0;
1356}
1357
1358/* Get the ethtool Wake-on-LAN settings */
1359static void netdev_get_wol (struct cp_private *cp,
1360                     struct ethtool_wolinfo *wol)
1361{
1362        u8 options;
1363
1364        wol->wolopts   = 0; /* Start from scratch */
1365        wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1366                         WAKE_MCAST | WAKE_UCAST;
1367        /* We don't need to go on if WOL is disabled */
1368        if (!cp->wol_enabled) return;
1369
1370        options        = cpr8 (Config3);
1371        if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1372        if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1373
1374        options        = 0; /* Paranoia setting */
1375        options        = cpr8 (Config5);
1376        if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1377        if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1378        if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1379}
1380
1381static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1382{
1383        struct cp_private *cp = netdev_priv(dev);
1384
1385        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1386        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1387        strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1388}
1389
1390static void cp_get_ringparam(struct net_device *dev,
1391                                struct ethtool_ringparam *ring)
1392{
1393        ring->rx_max_pending = CP_RX_RING_SIZE;
1394        ring->tx_max_pending = CP_TX_RING_SIZE;
1395        ring->rx_pending = CP_RX_RING_SIZE;
1396        ring->tx_pending = CP_TX_RING_SIZE;
1397}
1398
1399static int cp_get_regs_len(struct net_device *dev)
1400{
1401        return CP_REGS_SIZE;
1402}
1403
1404static int cp_get_sset_count (struct net_device *dev, int sset)
1405{
1406        switch (sset) {
1407        case ETH_SS_STATS:
1408                return CP_NUM_STATS;
1409        default:
1410                return -EOPNOTSUPP;
1411        }
1412}
1413
1414static int cp_get_link_ksettings(struct net_device *dev,
1415                                 struct ethtool_link_ksettings *cmd)
1416{
1417        struct cp_private *cp = netdev_priv(dev);
1418        unsigned long flags;
1419
1420        spin_lock_irqsave(&cp->lock, flags);
1421        mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
1422        spin_unlock_irqrestore(&cp->lock, flags);
1423
1424        return 0;
1425}
1426
1427static int cp_set_link_ksettings(struct net_device *dev,
1428                                 const struct ethtool_link_ksettings *cmd)
1429{
1430        struct cp_private *cp = netdev_priv(dev);
1431        int rc;
1432        unsigned long flags;
1433
1434        spin_lock_irqsave(&cp->lock, flags);
1435        rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd);
1436        spin_unlock_irqrestore(&cp->lock, flags);
1437
1438        return rc;
1439}
1440
1441static int cp_nway_reset(struct net_device *dev)
1442{
1443        struct cp_private *cp = netdev_priv(dev);
1444        return mii_nway_restart(&cp->mii_if);
1445}
1446
1447static u32 cp_get_msglevel(struct net_device *dev)
1448{
1449        struct cp_private *cp = netdev_priv(dev);
1450        return cp->msg_enable;
1451}
1452
1453static void cp_set_msglevel(struct net_device *dev, u32 value)
1454{
1455        struct cp_private *cp = netdev_priv(dev);
1456        cp->msg_enable = value;
1457}
1458
1459static int cp_set_features(struct net_device *dev, netdev_features_t features)
1460{
1461        struct cp_private *cp = netdev_priv(dev);
1462        unsigned long flags;
1463
1464        if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1465                return 0;
1466
1467        spin_lock_irqsave(&cp->lock, flags);
1468
1469        if (features & NETIF_F_RXCSUM)
1470                cp->cpcmd |= RxChkSum;
1471        else
1472                cp->cpcmd &= ~RxChkSum;
1473
1474        if (features & NETIF_F_HW_VLAN_CTAG_RX)
1475                cp->cpcmd |= RxVlanOn;
1476        else
1477                cp->cpcmd &= ~RxVlanOn;
1478
1479        cpw16_f(CpCmd, cp->cpcmd);
1480        spin_unlock_irqrestore(&cp->lock, flags);
1481
1482        return 0;
1483}
1484
1485static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1486                        void *p)
1487{
1488        struct cp_private *cp = netdev_priv(dev);
1489        unsigned long flags;
1490
1491        if (regs->len < CP_REGS_SIZE)
1492                return /* -EINVAL */;
1493
1494        regs->version = CP_REGS_VER;
1495
1496        spin_lock_irqsave(&cp->lock, flags);
1497        memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1498        spin_unlock_irqrestore(&cp->lock, flags);
1499}
1500
1501static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1502{
1503        struct cp_private *cp = netdev_priv(dev);
1504        unsigned long flags;
1505
1506        spin_lock_irqsave (&cp->lock, flags);
1507        netdev_get_wol (cp, wol);
1508        spin_unlock_irqrestore (&cp->lock, flags);
1509}
1510
1511static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1512{
1513        struct cp_private *cp = netdev_priv(dev);
1514        unsigned long flags;
1515        int rc;
1516
1517        spin_lock_irqsave (&cp->lock, flags);
1518        rc = netdev_set_wol (cp, wol);
1519        spin_unlock_irqrestore (&cp->lock, flags);
1520
1521        return rc;
1522}
1523
1524static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1525{
1526        switch (stringset) {
1527        case ETH_SS_STATS:
1528                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1529                break;
1530        default:
1531                BUG();
1532                break;
1533        }
1534}
1535
1536static void cp_get_ethtool_stats (struct net_device *dev,
1537                                  struct ethtool_stats *estats, u64 *tmp_stats)
1538{
1539        struct cp_private *cp = netdev_priv(dev);
1540        struct cp_dma_stats *nic_stats;
1541        dma_addr_t dma;
1542        int i;
1543
1544        nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1545                                       &dma, GFP_KERNEL);
1546        if (!nic_stats)
1547                return;
1548
1549        /* begin NIC statistics dump; the chip clears DumpStats when the dump completes */
1550        cpw32(StatsAddr + 4, (u64)dma >> 32);
1551        cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1552        cpr32(StatsAddr);
1553
1554        for (i = 0; i < 1000; i++) {
1555                if ((cpr32(StatsAddr) & DumpStats) == 0)
1556                        break;
1557                udelay(10);
1558        }
1559        cpw32(StatsAddr, 0);
1560        cpw32(StatsAddr + 4, 0);
1561        cpr32(StatsAddr);
1562
1563        i = 0;
1564        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1565        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1566        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1567        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1568        tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1569        tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1570        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1571        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1572        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1573        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1574        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1575        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1576        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1577        tmp_stats[i++] = cp->cp_stats.rx_frags;
1578        BUG_ON(i != CP_NUM_STATS);
1579
1580        dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1581}
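/*
 * The order of the values filled into tmp_stats above must match
 * ethtool_stats_keys[] (returned by cp_get_strings()), and their number must
 * equal CP_NUM_STATS; the BUG_ON() above enforces the latter.
 */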
1582
1583static const struct ethtool_ops cp_ethtool_ops = {
1584        .get_drvinfo            = cp_get_drvinfo,
1585        .get_regs_len           = cp_get_regs_len,
1586        .get_sset_count         = cp_get_sset_count,
1587        .nway_reset             = cp_nway_reset,
1588        .get_link               = ethtool_op_get_link,
1589        .get_msglevel           = cp_get_msglevel,
1590        .set_msglevel           = cp_set_msglevel,
1591        .get_regs               = cp_get_regs,
1592        .get_wol                = cp_get_wol,
1593        .set_wol                = cp_set_wol,
1594        .get_strings            = cp_get_strings,
1595        .get_ethtool_stats      = cp_get_ethtool_stats,
1596        .get_eeprom_len         = cp_get_eeprom_len,
1597        .get_eeprom             = cp_get_eeprom,
1598        .set_eeprom             = cp_set_eeprom,
1599        .get_ringparam          = cp_get_ringparam,
1600        .get_link_ksettings     = cp_get_link_ksettings,
1601        .set_link_ksettings     = cp_set_link_ksettings,
1602};
1603
1604static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1605{
1606        struct cp_private *cp = netdev_priv(dev);
1607        int rc;
1608        unsigned long flags;
1609
1610        if (!netif_running(dev))
1611                return -EINVAL;
1612
1613        spin_lock_irqsave(&cp->lock, flags);
1614        rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1615        spin_unlock_irqrestore(&cp->lock, flags);
1616        return rc;
1617}
1618
1619static int cp_set_mac_address(struct net_device *dev, void *p)
1620{
1621        struct cp_private *cp = netdev_priv(dev);
1622        struct sockaddr *addr = p;
1623
1624        if (!is_valid_ether_addr(addr->sa_data))
1625                return -EADDRNOTAVAIL;
1626
1627        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1628
1629        spin_lock_irq(&cp->lock);
1630
1631        cpw8_f(Cfg9346, Cfg9346_Unlock);
1632        cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1633        cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1634        cpw8_f(Cfg9346, Cfg9346_Lock);
1635
1636        spin_unlock_irq(&cp->lock);
1637
1638        return 0;
1639}
1640
1641/* Serial EEPROM section. */
1642
1643/*  EEPROM_Ctrl bits. */
1644#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
1645#define EE_CS                   0x08    /* EEPROM chip select. */
1646#define EE_DATA_WRITE   0x02    /* EEPROM chip data in. */
1647#define EE_WRITE_0              0x00
1648#define EE_WRITE_1              0x02
1649#define EE_DATA_READ    0x01    /* EEPROM chip data out. */
1650#define EE_ENB                  (0x80 | EE_CS)
1651
1652/* Delay between EEPROM clock transitions.
1653   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1654 */
1655
1656#define eeprom_delay()  readb(ee_addr)
1657
1658/* The EEPROM commands include the always-set leading bit. */
1659#define EE_EXTEND_CMD   (4)
1660#define EE_WRITE_CMD    (5)
1661#define EE_READ_CMD             (6)
1662#define EE_ERASE_CMD    (7)
1663
1664#define EE_EWDS_ADDR    (0)
1665#define EE_WRAL_ADDR    (1)
1666#define EE_ERAL_ADDR    (2)
1667#define EE_EWEN_ADDR    (3)
1668
1669#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
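/*
 * Command framing, illustratively: each command is shifted out MSB first as
 * the always-set start bit folded into the opcode values above, followed by
 * the opcode and an address of addr_len bits (8 when the ID word at location
 * 0 reads 0x8129, 6 otherwise).  For instance, reading word 7 of a
 * 6-bit-address part, as done by read_eeprom() below:
 *
 *	read_cmd = 7 | (EE_READ_CMD << 6);	// bits: 1 10 000111
 *	eeprom_cmd(ee_addr, read_cmd, 3 + 6);	// start + opcode + address
 *
 * The extended commands (EWEN/EWDS/ERAL/WRAL) share the EE_EXTEND_CMD opcode
 * and use the two uppermost address bits to select the sub-operation, as
 * assembled in eeprom_extend_cmd().
 */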
1670
1671static void eeprom_cmd_start(void __iomem *ee_addr)
1672{
1673        writeb (EE_ENB & ~EE_CS, ee_addr);
1674        writeb (EE_ENB, ee_addr);
1675        eeprom_delay ();
1676}
1677
1678static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1679{
1680        int i;
1681
1682        /* Shift the command bits out. */
1683        for (i = cmd_len - 1; i >= 0; i--) {
1684                int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1685                writeb (EE_ENB | dataval, ee_addr);
1686                eeprom_delay ();
1687                writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1688                eeprom_delay ();
1689        }
1690        writeb (EE_ENB, ee_addr);
1691        eeprom_delay ();
1692}
1693
1694static void eeprom_cmd_end(void __iomem *ee_addr)
1695{
1696        writeb(0, ee_addr);
1697        eeprom_delay ();
1698}
1699
1700static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1701                              int addr_len)
1702{
1703        int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1704
1705        eeprom_cmd_start(ee_addr);
1706        eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1707        eeprom_cmd_end(ee_addr);
1708}
1709
1710static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1711{
1712        int i;
1713        u16 retval = 0;
1714        void __iomem *ee_addr = ioaddr + Cfg9346;
1715        int read_cmd = location | (EE_READ_CMD << addr_len);
1716
1717        eeprom_cmd_start(ee_addr);
1718        eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1719
1720        for (i = 16; i > 0; i--) {
1721                writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1722                eeprom_delay ();
1723                retval =
1724                    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1725                                     0);
1726                writeb (EE_ENB, ee_addr);
1727                eeprom_delay ();
1728        }
1729
1730        eeprom_cmd_end(ee_addr);
1731
1732        return retval;
1733}
1734
1735static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1736                         int addr_len)
1737{
1738        int i;
1739        void __iomem *ee_addr = ioaddr + Cfg9346;
1740        int write_cmd = location | (EE_WRITE_CMD << addr_len);
1741
1742        eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1743
1744        eeprom_cmd_start(ee_addr);
1745        eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1746        eeprom_cmd(ee_addr, val, 16);
1747        eeprom_cmd_end(ee_addr);
1748
1749        eeprom_cmd_start(ee_addr);
1750        for (i = 0; i < 20000; i++)
1751                if (readb(ee_addr) & EE_DATA_READ)
1752                        break;
1753        eeprom_cmd_end(ee_addr);
1754
1755        eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1756}
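/*
 * write_eeprom() above follows the usual 93Cxx programming sequence: EWEN to
 * unlock the part, then the write opcode, address and 16 data bits, then a
 * poll of the data-out line (EE_DATA_READ) until the device signals that its
 * internal write cycle has completed, and finally EWDS to write-protect the
 * array again.
 */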
1757
1758static int cp_get_eeprom_len(struct net_device *dev)
1759{
1760        struct cp_private *cp = netdev_priv(dev);
1761        int size;
1762
1763        spin_lock_irq(&cp->lock);
1764        size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1765        spin_unlock_irq(&cp->lock);
1766
1767        return size;
1768}
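/*
 * The EEPROM is organized as 16-bit words, while the ethtool EEPROM ioctls
 * deal in byte offsets.  cp_get_eeprom() and cp_set_eeprom() below therefore
 * convert the byte offset to a word address (offset >> 1) and handle odd
 * boundaries by reading, or read-modify-writing, the partial word at either
 * end.  For example, a 3-byte read starting at byte offset 1 returns the
 * high byte of word 0 followed by both bytes of word 1.
 */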
1769
1770static int cp_get_eeprom(struct net_device *dev,
1771                         struct ethtool_eeprom *eeprom, u8 *data)
1772{
1773        struct cp_private *cp = netdev_priv(dev);
1774        unsigned int addr_len;
1775        u16 val;
1776        u32 offset = eeprom->offset >> 1;
1777        u32 len = eeprom->len;
1778        u32 i = 0;
1779
1780        eeprom->magic = CP_EEPROM_MAGIC;
1781
1782        spin_lock_irq(&cp->lock);
1783
1784        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1785
1786        if (eeprom->offset & 1) {
1787                val = read_eeprom(cp->regs, offset, addr_len);
1788                data[i++] = (u8)(val >> 8);
1789                offset++;
1790        }
1791
1792        while (i < len - 1) {
1793                val = read_eeprom(cp->regs, offset, addr_len);
1794                data[i++] = (u8)val;
1795                data[i++] = (u8)(val >> 8);
1796                offset++;
1797        }
1798
1799        if (i < len) {
1800                val = read_eeprom(cp->regs, offset, addr_len);
1801                data[i] = (u8)val;
1802        }
1803
1804        spin_unlock_irq(&cp->lock);
1805        return 0;
1806}
1807
1808static int cp_set_eeprom(struct net_device *dev,
1809                         struct ethtool_eeprom *eeprom, u8 *data)
1810{
1811        struct cp_private *cp = netdev_priv(dev);
1812        unsigned int addr_len;
1813        u16 val;
1814        u32 offset = eeprom->offset >> 1;
1815        u32 len = eeprom->len;
1816        u32 i = 0;
1817
1818        if (eeprom->magic != CP_EEPROM_MAGIC)
1819                return -EINVAL;
1820
1821        spin_lock_irq(&cp->lock);
1822
1823        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1824
1825        if (eeprom->offset & 1) {
1826                val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1827                val |= (u16)data[i++] << 8;
1828                write_eeprom(cp->regs, offset, val, addr_len);
1829                offset++;
1830        }
1831
1832        while (i < len - 1) {
1833                val = (u16)data[i++];
1834                val |= (u16)data[i++] << 8;
1835                write_eeprom(cp->regs, offset, val, addr_len);
1836                offset++;
1837        }
1838
1839        if (i < len) {
1840                val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1841                val |= (u16)data[i];
1842                write_eeprom(cp->regs, offset, val, addr_len);
1843        }
1844
1845        spin_unlock_irq(&cp->lock);
1846        return 0;
1847}
1848
1849/* Put the board into the D3hot low-power state and wait for a wake-up (PME#) signal */
1850static void cp_set_d3_state (struct cp_private *cp)
1851{
1852        pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */
1853        pci_set_power_state (cp->pdev, PCI_D3hot);
1854}
1855
1856static netdev_features_t cp_features_check(struct sk_buff *skb,
1857                                           struct net_device *dev,
1858                                           netdev_features_t features)
1859{
1860        if (skb_shinfo(skb)->gso_size > MSSMask)
1861                features &= ~NETIF_F_TSO;
1862
1863        return vlan_features_check(skb, features);
1864}
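/*
 * cp_features_check() keeps hardware TSO within what the Tx descriptor can
 * express: if the requested MSS does not fit in the descriptor's large-send
 * MSS field (gso_size > MSSMask), NETIF_F_TSO is cleared for this skb and
 * the stack falls back to software segmentation.  VLAN-tagged frames are
 * additionally constrained by vlan_features_check().
 */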
1865static const struct net_device_ops cp_netdev_ops = {
1866        .ndo_open               = cp_open,
1867        .ndo_stop               = cp_close,
1868        .ndo_validate_addr      = eth_validate_addr,
1869        .ndo_set_mac_address    = cp_set_mac_address,
1870        .ndo_set_rx_mode        = cp_set_rx_mode,
1871        .ndo_get_stats          = cp_get_stats,
1872        .ndo_eth_ioctl          = cp_ioctl,
1873        .ndo_start_xmit         = cp_start_xmit,
1874        .ndo_tx_timeout         = cp_tx_timeout,
1875        .ndo_set_features       = cp_set_features,
1876        .ndo_change_mtu         = cp_change_mtu,
1877        .ndo_features_check     = cp_features_check,
1878
1879#ifdef CONFIG_NET_POLL_CONTROLLER
1880        .ndo_poll_controller    = cp_poll_controller,
1881#endif
1882};
1883
1884static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1885{
1886        struct net_device *dev;
1887        struct cp_private *cp;
1888        int rc;
1889        void __iomem *regs;
1890        resource_size_t pciaddr;
1891        unsigned int addr_len, i, pci_using_dac;
1892
1893        pr_info_once("%s", version);
1894
1895        if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1896            pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1897                dev_info(&pdev->dev,
1898                         "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1899                         pdev->vendor, pdev->device, pdev->revision);
1900                return -ENODEV;
1901        }
1902
1903        dev = alloc_etherdev(sizeof(struct cp_private));
1904        if (!dev)
1905                return -ENOMEM;
1906        SET_NETDEV_DEV(dev, &pdev->dev);
1907
1908        cp = netdev_priv(dev);
1909        cp->pdev = pdev;
1910        cp->dev = dev;
1911        cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1912        spin_lock_init (&cp->lock);
1913        cp->mii_if.dev = dev;
1914        cp->mii_if.mdio_read = mdio_read;
1915        cp->mii_if.mdio_write = mdio_write;
1916        cp->mii_if.phy_id = CP_INTERNAL_PHY;
1917        cp->mii_if.phy_id_mask = 0x1f;
1918        cp->mii_if.reg_num_mask = 0x1f;
1919        cp_set_rxbufsize(cp);
1920
1921        rc = pci_enable_device(pdev);
1922        if (rc)
1923                goto err_out_free;
1924
1925        rc = pci_set_mwi(pdev);
1926        if (rc)
1927                goto err_out_disable;
1928
1929        rc = pci_request_regions(pdev, DRV_NAME);
1930        if (rc)
1931                goto err_out_mwi;
1932
1933        pciaddr = pci_resource_start(pdev, 1);
1934        if (!pciaddr) {
1935                rc = -EIO;
1936                dev_err(&pdev->dev, "no MMIO resource\n");
1937                goto err_out_res;
1938        }
1939        if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1940                rc = -EIO;
1941                dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1942                       (unsigned long long)pci_resource_len(pdev, 1));
1943                goto err_out_res;
1944        }
1945
1946        /* Configure DMA attributes. */
1947        if ((sizeof(dma_addr_t) > 4) &&
1948            !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1949                pci_using_dac = 1;
1950        } else {
1951                pci_using_dac = 0;
1952
1953                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1954                if (rc) {
1955                        dev_err(&pdev->dev,
1956                                "No usable DMA configuration, aborting\n");
1957                        goto err_out_res;
1958                }
1959        }
1960
1961        cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1962                    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1963
1964        dev->features |= NETIF_F_RXCSUM;
1965        dev->hw_features |= NETIF_F_RXCSUM;
1966
1967        regs = ioremap(pciaddr, CP_REGS_SIZE);
1968        if (!regs) {
1969                rc = -EIO;
1970                dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1971                        (unsigned long long)pci_resource_len(pdev, 1),
1972                       (unsigned long long)pciaddr);
1973                goto err_out_res;
1974        }
1975        cp->regs = regs;
1976
1977        cp_stop_hw(cp);
1978
1979        /* read MAC address from EEPROM */
1980        addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1981        for (i = 0; i < 3; i++)
1982                ((__le16 *) (dev->dev_addr))[i] =
1983                    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1984
1985        dev->netdev_ops = &cp_netdev_ops;
1986        netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1987        dev->ethtool_ops = &cp_ethtool_ops;
1988        dev->watchdog_timeo = TX_TIMEOUT;
1989
1990        dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1991                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1992
1993        if (pci_using_dac)
1994                dev->features |= NETIF_F_HIGHDMA;
1995
1996        dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1997                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1998        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1999                NETIF_F_HIGHDMA;
2000
2001        /* MTU range: 60 - 4096 */
2002        dev->min_mtu = CP_MIN_MTU;
2003        dev->max_mtu = CP_MAX_MTU;
2004
2005        rc = register_netdev(dev);
2006        if (rc)
2007                goto err_out_iomap;
2008
2009        netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
2010                    regs, dev->dev_addr, pdev->irq);
2011
2012        pci_set_drvdata(pdev, dev);
2013
2014        /* enable PCI bus mastering (MWI was already requested via pci_set_mwi above) */
2015        pci_set_master(pdev);
2016
2017        if (cp->wol_enabled)
2018                cp_set_d3_state (cp);
2019
2020        return 0;
2021
2022err_out_iomap:
2023        iounmap(regs);
2024err_out_res:
2025        pci_release_regions(pdev);
2026err_out_mwi:
2027        pci_clear_mwi(pdev);
2028err_out_disable:
2029        pci_disable_device(pdev);
2030err_out_free:
2031        free_netdev(dev);
2032        return rc;
2033}
2034
2035static void cp_remove_one (struct pci_dev *pdev)
2036{
2037        struct net_device *dev = pci_get_drvdata(pdev);
2038        struct cp_private *cp = netdev_priv(dev);
2039
2040        unregister_netdev(dev);
2041        iounmap(cp->regs);
2042        if (cp->wol_enabled)
2043                pci_set_power_state (pdev, PCI_D0);
2044        pci_release_regions(pdev);
2045        pci_clear_mwi(pdev);
2046        pci_disable_device(pdev);
2047        free_netdev(dev);
2048}
2049
2050static int __maybe_unused cp_suspend(struct device *device)
2051{
2052        struct net_device *dev = dev_get_drvdata(device);
2053        struct cp_private *cp = netdev_priv(dev);
2054        unsigned long flags;
2055
2056        if (!netif_running(dev))
2057                return 0;
2058
2059        netif_device_detach (dev);
2060        netif_stop_queue (dev);
2061
2062        spin_lock_irqsave (&cp->lock, flags);
2063
2064        /* Disable Rx and Tx */
2065        cpw16 (IntrMask, 0);
2066        cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2067
2068        spin_unlock_irqrestore (&cp->lock, flags);
2069
2070        device_set_wakeup_enable(device, cp->wol_enabled);
2071
2072        return 0;
2073}
2074
2075static int __maybe_unused cp_resume(struct device *device)
2076{
2077        struct net_device *dev = dev_get_drvdata(device);
2078        struct cp_private *cp = netdev_priv(dev);
2079        unsigned long flags;
2080
2081        if (!netif_running(dev))
2082                return 0;
2083
2084        netif_device_attach (dev);
2085
2086        /* FIXME: breakage may occur if the Rx ring buffer is depleted */
2087        cp_init_rings_index (cp);
2088        cp_init_hw (cp);
2089        cp_enable_irq(cp);
2090        netif_start_queue (dev);
2091
2092        spin_lock_irqsave (&cp->lock, flags);
2093
2094        mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2095
2096        spin_unlock_irqrestore (&cp->lock, flags);
2097
2098        return 0;
2099}
2100
2101static const struct pci_device_id cp_pci_tbl[] = {
2102        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
2103        { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
2104        { },
2105};
2106MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
2107
2108static SIMPLE_DEV_PM_OPS(cp_pm_ops, cp_suspend, cp_resume);
2109
2110static struct pci_driver cp_driver = {
2111        .name         = DRV_NAME,
2112        .id_table     = cp_pci_tbl,
2113        .probe        = cp_init_one,
2114        .remove       = cp_remove_one,
2115        .driver.pm    = &cp_pm_ops,
2116};
2117
2118module_pci_driver(cp_driver);
2119