linux/drivers/net/via-rhine.c
   1/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
   2/*
   3        Written 1998-2001 by Donald Becker.
   4
   5        Current Maintainer: Roger Luethi <rl@hellgate.ch>
   6
   7        This software may be used and distributed according to the terms of
   8        the GNU General Public License (GPL), incorporated herein by reference.
   9        Drivers based on or derived from this code fall under the GPL and must
  10        retain the authorship, copyright and license notice.  This file is not
  11        a complete program and may only be used when the entire operating
  12        system is licensed under the GPL.
  13
  14        This driver is designed for the VIA VT86C100A Rhine-I.
  15        It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
  16        and management NIC 6105M).
  17
  18        The author may be reached as becker@scyld.com, or C/O
  19        Scyld Computing Corporation
  20        410 Severn Ave., Suite 210
  21        Annapolis MD 21403
  22
  23
  24        This driver contains some changes from the original Donald Becker
  25        version. He may or may not be interested in bug reports on this
  26        code. You can find his versions at:
  27        http://www.scyld.com/network/via-rhine.html
  28        [link no longer provides useful info -jgarzik]
  29
  30*/
  31
  32#define DRV_NAME        "via-rhine"
  33#define DRV_VERSION     "1.4.3"
  34#define DRV_RELDATE     "2007-03-06"
  35
  36
  37/* A few user-configurable values.
  38   These may be modified when a driver module is loaded. */
  39
  40static int debug = 1;   /* 1 normal messages, 0 quiet .. 7 verbose. */
  41static int max_interrupt_work = 20;
  42
  43/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  44   Setting to > 1518 effectively disables this feature. */
  45#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
  46       || defined(CONFIG_SPARC) || defined(__ia64__) \
  47       || defined(__sh__) || defined(__mips__)
  48static int rx_copybreak = 1518;
  49#else
  50static int rx_copybreak;
  51#endif
  52
  53/* Work-around for broken BIOSes: they are unable to get the chip back out of
  54   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
  55static int avoid_D3;
  56
  57/*
  58 * In case you are looking for 'options[]' or 'full_duplex[]', they
  59 * are gone. Use ethtool(8) instead.
  60 */
  61
  62/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  63   The Rhine has a 64 element 8390-like hash table. */
  64static const int multicast_filter_limit = 32;
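/*
 * Sketch of the usual 8390-style mapping from a multicast address to a
 * hash bit, as used by this driver's rhine_set_rx_mode() (not shown in
 * this excerpt): the top six bits of the Ethernet CRC of the address
 * select one of the 64 filter bits in MulticastFilter0/1:
 *
 *	int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */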
  65
  66
  67/* Operational parameters that are set at compile time. */
  68
  69/* Keep the ring sizes a power of two for compile efficiency.
  70   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  71   Making the Tx ring too large decreases the effectiveness of channel
  72   bonding and packet priority.
  73   There are no ill effects from too-large receive rings. */
  74#define TX_RING_SIZE    16
  75#define TX_QUEUE_LEN    10      /* Limit ring entries actually used. */
  76#define RX_RING_SIZE    64
  77
  78/* Operational parameters that usually are not changed. */
  79
  80/* Time in jiffies before concluding the transmitter is hung. */
  81#define TX_TIMEOUT      (2*HZ)
  82
  83#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer.*/
  84
  85#include <linux/module.h>
  86#include <linux/moduleparam.h>
  87#include <linux/kernel.h>
  88#include <linux/string.h>
  89#include <linux/timer.h>
  90#include <linux/errno.h>
  91#include <linux/ioport.h>
  92#include <linux/slab.h>
  93#include <linux/interrupt.h>
  94#include <linux/pci.h>
  95#include <linux/dma-mapping.h>
  96#include <linux/netdevice.h>
  97#include <linux/etherdevice.h>
  98#include <linux/skbuff.h>
  99#include <linux/init.h>
 100#include <linux/delay.h>
 101#include <linux/mii.h>
 102#include <linux/ethtool.h>
 103#include <linux/crc32.h>
 104#include <linux/bitops.h>
 105#include <asm/processor.h>      /* Processor type for cache alignment. */
 106#include <asm/io.h>
 107#include <asm/irq.h>
 108#include <asm/uaccess.h>
 109#include <linux/dmi.h>
 110
 111/* These identify the driver base version and may not be removed. */
 112static const char version[] __devinitconst =
 113        KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE
 114        " Written by Donald Becker\n";
 115
 116/* This driver was written to use PCI memory space. Some early versions
 117   of the Rhine may only work correctly with I/O space accesses. */
 118#ifdef CONFIG_VIA_RHINE_MMIO
 119#define USE_MMIO
 120#endif
 122
 123MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 124MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 125MODULE_LICENSE("GPL");
 126
 127module_param(max_interrupt_work, int, 0);
 128module_param(debug, int, 0);
 129module_param(rx_copybreak, int, 0);
 130module_param(avoid_D3, bool, 0);
 131MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
 132MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
 133MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 134MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
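
/*
 * Example (illustrative only) of setting the above parameters at module
 * load time:
 *
 *	modprobe via-rhine debug=3 rx_copybreak=200 avoid_D3=1
 */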
 135
 136/*
 137                Theory of Operation
 138
 139I. Board Compatibility
 140
 141This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
 142controller.
 143
 144II. Board-specific settings
 145
 146Boards with this chip are functional only in a bus-master PCI slot.
 147
 148Many operational settings are loaded from the EEPROM to the Config word at
 149offset 0x78. For most of these settings, this driver assumes that they are
 150correct.
 151If this driver is compiled to use PCI memory space operations the EEPROM
 152must be configured to enable memory ops.
 153
 154III. Driver operation
 155
 156IIIa. Ring buffers
 157
 158This driver uses two statically allocated fixed-size descriptor lists
 159formed into rings by a branch from the final descriptor to the beginning of
 160the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
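
As a sketch of the index arithmetic this implies (both sizes are powers
of two, so the '%' compiles down to a bit mask):

        entry = rp->cur_tx % TX_RING_SIZE;      (see rhine_start_tx)
        entry = rp->cur_rx % RX_RING_SIZE;      (see rhine_rx)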
 161
 162IIIb/c. Transmit/Receive Structure
 163
 164This driver attempts to use a zero-copy receive and transmit scheme.
 165
 166Alas, all data buffers are required to start on a 32 bit boundary, so
 167the driver must often copy transmit packets into bounce buffers.
 168
 169The driver allocates full frame size skbuffs for the Rx ring buffers at
 170open() time and passes the skb->data field to the chip as receive data
 171buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
 172a fresh skbuff is allocated and the frame is copied to the new skbuff.
 173When the incoming frame is larger, the skbuff is passed directly up the
 174protocol stack. Buffers consumed this way are replaced by newly allocated
 175skbuffs in the last phase of rhine_rx().
 176
 177The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 178using a full-sized skbuff for small frames vs. the copying costs of larger
 179frames. New boards are typically used in generously configured machines
 180and the underfilled buffers have negligible impact compared to the benefit of
 181a single allocation size, so the default value of zero results in never
 182copying packets. When copying is done, the cost is usually mitigated by using
 183a combined copy/checksum routine. Copying also preloads the cache, which is
 184most useful with small frames.
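
In code, the receive path reduces to a test of this shape (a sketch;
the real logic lives in rhine_rx()):

        if (pkt_len < rx_copybreak)
                copy the frame into a freshly allocated small skb
        else
                pass the full-sized ring skb up and refill the slot later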
 185
 186Since the VIA chips are only able to transfer data to buffers on 32 bit
 187boundaries, the IP header at offset 14 in an ethernet frame isn't
 188longword aligned for further processing. Copying these unaligned buffers
 189has the beneficial effect of 16-byte aligning the IP header.
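
(Sketch of the usual idiom in that copy path: skb_reserve(skb, 2) on the
fresh skb offsets the 14-byte Ethernet header so that the IP header
behind it lands on an aligned boundary.)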
 190
 191IIId. Synchronization
 192
 193The driver runs as two independent, single-threaded flows of control. One
 194is the send-packet routine, which enforces single-threaded use by the
 195netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
 196which is single threaded by the hardware and interrupt handling software.
 197
 198The send packet thread has partial control over the Tx ring. It locks the
 199netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
 200the ring is not available it stops the transmit queue by
 201calling netif_stop_queue.
 202
 203The interrupt handler has exclusive control over the Rx ring and records stats
 204from the Tx ring. After reaping the stats, it marks the Tx queue entry as
 205empty by incrementing the dirty_tx mark. If at least half of the entries in
 206the Tx ring are available, the transmit queue is woken up if it was stopped.
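
The Tx side thus follows this pattern (a sketch; see rhine_start_tx()
and rhine_tx() below):

        spin_lock_irqsave(&rp->lock, flags);
        ... fill tx_ring[entry], then hand it to the chip by setting
            DescOwn last, with write barriers (wmb) in between ...
        spin_unlock_irqrestore(&rp->lock, flags);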
 207
 208IV. Notes
 209
 210IVb. References
 211
 212Preliminary VT86C100A manual from http://www.via.com.tw/
 213http://www.scyld.com/expert/100mbps.html
 214http://www.scyld.com/expert/NWay.html
 215ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
 216ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
 217
 218
 219IVc. Errata
 220
 221The VT86C100A manual is not a reliable source of information.
 222The 3043 chip does not handle unaligned transmit or receive buffers, resulting
 223in significant performance degradation for bounce buffer copies on transmit
 224and unaligned IP headers on receive.
 225The chip does not pad to minimum transmit length.
 226
 227*/
 228
 229
 230/* This table drives the PCI probe routines. It's mostly boilerplate in all
 231   of the drivers, and will likely be provided by some future kernel.
 232   Note the matching code -- the first table entry matches all 56** cards but
 233   the second only the 1234 card.
 234*/
 235
 236enum rhine_revs {
 237        VT86C100A       = 0x00,
 238        VTunknown0      = 0x20,
 239        VT6102          = 0x40,
 240        VT8231          = 0x50, /* Integrated MAC */
 241        VT8233          = 0x60, /* Integrated MAC */
 242        VT8235          = 0x74, /* Integrated MAC */
 243        VT8237          = 0x78, /* Integrated MAC */
 244        VTunknown1      = 0x7C,
 245        VT6105          = 0x80,
 246        VT6105_B0       = 0x83,
 247        VT6105L         = 0x8A,
 248        VT6107          = 0x8C,
 249        VTunknown2      = 0x8E,
 250        VT6105M         = 0x90, /* Management adapter */
 251};
 252
 253enum rhine_quirks {
 254        rqWOL           = 0x0001,       /* Wake-On-LAN support */
 255        rqForceReset    = 0x0002,
 256        rq6patterns     = 0x0040,       /* 6 instead of 4 patterns for WOL */
 257        rqStatusWBRace  = 0x0080,       /* Tx Status Writeback Error possible */
 258        rqRhineI        = 0x0100,       /* See comment below */
 259};
 260/*
 261 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 262 * MMIO as well as for the collision counter and the Tx FIFO underflow
 263 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 264 */
 265
 266/* Beware of PCI posted writes */
 267#define IOSYNC  do { ioread8(ioaddr + StationAddr); } while (0)
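
/*
 * Typical use (sketch, cf. rhine_chip_reset() below): flush a write that
 * must reach the chip before execution continues by following it with a
 * harmless read:
 *
 *	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *	IOSYNC;
 */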
 268
 269static const struct pci_device_id rhine_pci_tbl[] = {
 270        { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },    /* VT86C100A */
 271        { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },    /* VT6102 */
 272        { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },    /* 6105{,L,LOM} */
 273        { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },    /* VT6105M */
 274        { }     /* terminate list */
 275};
 276MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 277
 278
 279/* Offsets to the device registers. */
 280enum register_offsets {
 281        StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
 282        ChipCmd1=0x09,
 283        IntrStatus=0x0C, IntrEnable=0x0E,
 284        MulticastFilter0=0x10, MulticastFilter1=0x14,
 285        RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
 286        MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
 287        MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
 288        ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
 289        RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
 290        StickyHW=0x83, IntrStatus2=0x84,
 291        WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
 292        WOLcrClr1=0xA6, WOLcgClr=0xA7,
 293        PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
 294};
 295
 296/* Bits in ConfigD */
 297enum backoff_bits {
 298        BackOptional=0x01, BackModify=0x02,
 299        BackCaptureEffect=0x04, BackRandom=0x08
 300};
 301
 302#ifdef USE_MMIO
 303/* Registers we check that mmio and reg are the same. */
 304static const int mmio_verify_registers[] = {
 305        RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
 306        0
 307};
 308#endif
 309
 310/* Bits in the interrupt status/mask registers. */
 311enum intr_status_bits {
 312        IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
 313        IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
 314        IntrPCIErr=0x0040,
 315        IntrStatsMax=0x0080, IntrRxEarly=0x0100,
 316        IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
 317        IntrTxAborted=0x2000, IntrLinkChange=0x4000,
 318        IntrRxWakeUp=0x8000,
 319        IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
 320        IntrTxDescRace=0x080000,        /* mapped from IntrStatus2 */
 321        IntrTxErrSummary=0x082218,
 322};
 323
 324/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
 325enum wol_bits {
 326        WOLucast        = 0x10,
 327        WOLmagic        = 0x20,
 328        WOLbmcast       = 0x30,
 329        WOLlnkon        = 0x40,
 330        WOLlnkoff       = 0x80,
 331};
 332
 333/* The Rx and Tx buffer descriptors. */
 334struct rx_desc {
 335        __le32 rx_status;
 336        __le32 desc_length; /* Chain flag, Buffer/frame length */
 337        __le32 addr;
 338        __le32 next_desc;
 339};
 340struct tx_desc {
 341        __le32 tx_status;
 342        __le32 desc_length; /* Chain flag, Tx Config, Frame length */
 343        __le32 addr;
 344        __le32 next_desc;
 345};
 346
 347/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
 348#define TXDESC          0x00e08000
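
/*
 * Sketch of how a Tx descriptor is armed (cf. rhine_start_tx()): the
 * frame length (clamped to at least ETH_ZLEN) is OR-ed into the low
 * bits, and ownership is transferred to the chip last:
 *
 *	desc->desc_length = cpu_to_le32(TXDESC | skb->len);
 *	wmb();
 *	desc->tx_status = cpu_to_le32(DescOwn);
 */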
 349
 350enum rx_status_bits {
 351        RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
 352};
 353
 354/* Bits in *_desc.*_status */
 355enum desc_status_bits {
 356        DescOwn=0x80000000
 357};
 358
 359/* Bits in ChipCmd. */
 360enum chip_cmd_bits {
 361        CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
 362        CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
 363        Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
 364        Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
 365};
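
/*
 * ChipCmd1 (0x09) directly follows ChipCmd (0x08), so a single 16-bit
 * write at ChipCmd can program both, with the Cmd1* bits shifted into
 * the high byte, as init_registers() does:
 *
 *	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
 *		  ioaddr + ChipCmd);
 */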
 366
 367struct rhine_private {
 368        /* Descriptor rings */
 369        struct rx_desc *rx_ring;
 370        struct tx_desc *tx_ring;
 371        dma_addr_t rx_ring_dma;
 372        dma_addr_t tx_ring_dma;
 373
 374        /* The addresses of receive-in-place skbuffs. */
 375        struct sk_buff *rx_skbuff[RX_RING_SIZE];
 376        dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
 377
 378        /* The saved address of a sent-in-place packet/buffer, for later free(). */
 379        struct sk_buff *tx_skbuff[TX_RING_SIZE];
 380        dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
 381
 382        /* Tx bounce buffers (Rhine-I only) */
 383        unsigned char *tx_buf[TX_RING_SIZE];
 384        unsigned char *tx_bufs;
 385        dma_addr_t tx_bufs_dma;
 386
 387        struct pci_dev *pdev;
 388        long pioaddr;
 389        struct net_device *dev;
 390        struct napi_struct napi;
 391        spinlock_t lock;
 392
 393        /* Frequently used values: keep some adjacent for cache effect. */
 394        u32 quirks;
 395        struct rx_desc *rx_head_desc;
 396        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
 397        unsigned int cur_tx, dirty_tx;
 398        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
 399        u8 wolopts;
 400
 401        u8 tx_thresh, rx_thresh;
 402
 403        struct mii_if_info mii_if;
 404        void __iomem *base;
 405};
 406
 407static int  mdio_read(struct net_device *dev, int phy_id, int location);
 408static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 409static int  rhine_open(struct net_device *dev);
 410static void rhine_tx_timeout(struct net_device *dev);
 411static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 412                                  struct net_device *dev);
 413static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
 414static void rhine_tx(struct net_device *dev);
 415static int rhine_rx(struct net_device *dev, int limit);
 416static void rhine_error(struct net_device *dev, int intr_status);
 417static void rhine_set_rx_mode(struct net_device *dev);
 418static struct net_device_stats *rhine_get_stats(struct net_device *dev);
 419static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 420static const struct ethtool_ops netdev_ethtool_ops;
 421static int  rhine_close(struct net_device *dev);
 422static void rhine_shutdown (struct pci_dev *pdev);
 423
 424#define RHINE_WAIT_FOR(condition) do {                                  \
 425        int i=1024;                                                     \
 426        while (!(condition) && --i)                                     \
 427                ;                                                       \
 428        if (debug > 1 && i < 512)                                       \
 429                printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n",       \
 430                                DRV_NAME, 1024-i, __func__, __LINE__);  \
 431} while(0)
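
/*
 * Example use (from rhine_chip_reset() below): busy-wait until the chip
 * clears its self-clearing reset bit:
 *
 *	RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
 */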
 432
 433static inline u32 get_intr_status(struct net_device *dev)
 434{
 435        struct rhine_private *rp = netdev_priv(dev);
 436        void __iomem *ioaddr = rp->base;
 437        u32 intr_status;
 438
 439        intr_status = ioread16(ioaddr + IntrStatus);
 440        /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
 441        if (rp->quirks & rqStatusWBRace)
 442                intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
 443        return intr_status;
 444}
 445
 446/*
 447 * Get power related registers into sane state.
 448 * Notify user about past WOL event.
 449 */
 450static void rhine_power_init(struct net_device *dev)
 451{
 452        struct rhine_private *rp = netdev_priv(dev);
 453        void __iomem *ioaddr = rp->base;
 454        u16 wolstat;
 455
 456        if (rp->quirks & rqWOL) {
 457                /* Make sure chip is in power state D0 */
 458                iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
 459
 460                /* Disable "force PME-enable" */
 461                iowrite8(0x80, ioaddr + WOLcgClr);
 462
 463                /* Clear power-event config bits (WOL) */
 464                iowrite8(0xFF, ioaddr + WOLcrClr);
 465                /* More recent cards can manage two additional patterns */
 466                if (rp->quirks & rq6patterns)
 467                        iowrite8(0x03, ioaddr + WOLcrClr1);
 468
 469                /* Save power-event status bits */
 470                wolstat = ioread8(ioaddr + PwrcsrSet);
 471                if (rp->quirks & rq6patterns)
 472                        wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
 473
 474                /* Clear power-event status bits */
 475                iowrite8(0xFF, ioaddr + PwrcsrClr);
 476                if (rp->quirks & rq6patterns)
 477                        iowrite8(0x03, ioaddr + PwrcsrClr1);
 478
 479                if (wolstat) {
 480                        char *reason;
 481                        switch (wolstat) {
 482                        case WOLmagic:
 483                                reason = "Magic packet";
 484                                break;
 485                        case WOLlnkon:
 486                                reason = "Link went up";
 487                                break;
 488                        case WOLlnkoff:
 489                                reason = "Link went down";
 490                                break;
 491                        case WOLucast:
 492                                reason = "Unicast packet";
 493                                break;
 494                        case WOLbmcast:
 495                                reason = "Multicast/broadcast packet";
 496                                break;
 497                        default:
 498                                reason = "Unknown";
 499                        }
 500                        printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
 501                               DRV_NAME, reason);
 502                }
 503        }
 504}
 505
 506static void rhine_chip_reset(struct net_device *dev)
 507{
 508        struct rhine_private *rp = netdev_priv(dev);
 509        void __iomem *ioaddr = rp->base;
 510
 511        iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 512        IOSYNC;
 513
 514        if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
 515                printk(KERN_INFO "%s: Reset not complete yet. "
 516                        "Trying harder.\n", DRV_NAME);
 517
 518                /* Force reset */
 519                if (rp->quirks & rqForceReset)
 520                        iowrite8(0x40, ioaddr + MiscCmd);
 521
 522                /* Reset can take somewhat longer (rare) */
 523                RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
 524        }
 525
 526        if (debug > 1)
 527                printk(KERN_INFO "%s: Reset %s.\n", dev->name,
 528                        (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
 529                        "failed" : "succeeded");
 530}
 531
 532#ifdef USE_MMIO
 533static void enable_mmio(long pioaddr, u32 quirks)
 534{
 535        int n;
 536        if (quirks & rqRhineI) {
 537                /* More recent docs say that this bit is reserved ... */
 538                n = inb(pioaddr + ConfigA) | 0x20;
 539                outb(n, pioaddr + ConfigA);
 540        } else {
 541                n = inb(pioaddr + ConfigD) | 0x80;
 542                outb(n, pioaddr + ConfigD);
 543        }
 544}
 545#endif
 546
 547/*
 548 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 549 * (plus 0x6C for Rhine-I/II)
 550 */
 551static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 552{
 553        struct rhine_private *rp = netdev_priv(dev);
 554        void __iomem *ioaddr = rp->base;
 555
 556        outb(0x20, pioaddr + MACRegEEcsr);
 557        RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
 558
 559#ifdef USE_MMIO
 560        /*
 561         * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
 562         * MMIO. If reloading EEPROM was done first this could be avoided, but
 563         * it is not known if that still works with the "win98-reboot" problem.
 564         */
 565        enable_mmio(pioaddr, rp->quirks);
 566#endif
 567
 568        /* Turn off EEPROM-controlled wake-up (magic packet) */
 569        if (rp->quirks & rqWOL)
 570                iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
 571
 572}
 573
 574#ifdef CONFIG_NET_POLL_CONTROLLER
 575static void rhine_poll(struct net_device *dev)
 576{
 577        disable_irq(dev->irq);
 578        rhine_interrupt(dev->irq, (void *)dev);
 579        enable_irq(dev->irq);
 580}
 581#endif
 582
 583static int rhine_napipoll(struct napi_struct *napi, int budget)
 584{
 585        struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
 586        struct net_device *dev = rp->dev;
 587        void __iomem *ioaddr = rp->base;
 588        int work_done;
 589
 590        work_done = rhine_rx(dev, budget);
 591
 592        if (work_done < budget) {
 593                napi_complete(napi);
 594
 595                iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
 596                          IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
 597                          IntrTxDone | IntrTxError | IntrTxUnderrun |
 598                          IntrPCIErr | IntrStatsMax | IntrLinkChange,
 599                          ioaddr + IntrEnable);
 600        }
 601        return work_done;
 602}
 603
 604static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
 605{
 606        struct rhine_private *rp = netdev_priv(dev);
 607
 608        /* Reset the chip to erase previous misconfiguration. */
 609        rhine_chip_reset(dev);
 610
 611        /* Rhine-I needs extra time to recuperate before EEPROM reload */
 612        if (rp->quirks & rqRhineI)
 613                msleep(5);
 614
 615        /* Reload EEPROM controlled bytes cleared by soft reset */
 616        rhine_reload_eeprom(pioaddr, dev);
 617}
 618
 619static const struct net_device_ops rhine_netdev_ops = {
 620        .ndo_open                = rhine_open,
 621        .ndo_stop                = rhine_close,
 622        .ndo_start_xmit          = rhine_start_tx,
 623        .ndo_get_stats           = rhine_get_stats,
 624        .ndo_set_multicast_list  = rhine_set_rx_mode,
 625        .ndo_change_mtu          = eth_change_mtu,
 626        .ndo_validate_addr       = eth_validate_addr,
 627        .ndo_set_mac_address     = eth_mac_addr,
 628        .ndo_do_ioctl            = netdev_ioctl,
 629        .ndo_tx_timeout          = rhine_tx_timeout,
 630#ifdef CONFIG_NET_POLL_CONTROLLER
 631        .ndo_poll_controller     = rhine_poll,
 632#endif
 633};
 634
 635static int __devinit rhine_init_one(struct pci_dev *pdev,
 636                                    const struct pci_device_id *ent)
 637{
 638        struct net_device *dev;
 639        struct rhine_private *rp;
 640        int i, rc;
 641        u32 quirks;
 642        long pioaddr;
 643        long memaddr;
 644        void __iomem *ioaddr;
 645        int io_size, phy_id;
 646        const char *name;
 647#ifdef USE_MMIO
 648        int bar = 1;
 649#else
 650        int bar = 0;
 651#endif
 652
 653/* when built into the kernel, we only print version if device is found */
 654#ifndef MODULE
 655        static int printed_version;
 656        if (!printed_version++)
 657                printk(version);
 658#endif
 659
 660        io_size = 256;
 661        phy_id = 0;
 662        quirks = 0;
 663        name = "Rhine";
 664        if (pdev->revision < VTunknown0) {
 665                quirks = rqRhineI;
 666                io_size = 128;
 667        }
 668        else if (pdev->revision >= VT6102) {
 669                quirks = rqWOL | rqForceReset;
 670                if (pdev->revision < VT6105) {
 671                        name = "Rhine II";
 672                        quirks |= rqStatusWBRace;       /* Rhine-II exclusive */
 673                }
 674                else {
 675                        phy_id = 1;     /* Integrated PHY, phy_id fixed to 1 */
 676                        if (pdev->revision >= VT6105_B0)
 677                                quirks |= rq6patterns;
 678                        if (pdev->revision < VT6105M)
 679                                name = "Rhine III";
 680                        else
 681                                name = "Rhine III (Management Adapter)";
 682                }
 683        }
 684
 685        rc = pci_enable_device(pdev);
 686        if (rc)
 687                goto err_out;
 688
 689        /* this should always be supported */
 690        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 691        if (rc) {
 692                printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
 693                       "the card!?\n");
 694                goto err_out;
 695        }
 696
 697        /* sanity check */
 698        if ((pci_resource_len(pdev, 0) < io_size) ||
 699            (pci_resource_len(pdev, 1) < io_size)) {
 700                rc = -EIO;
 701                printk(KERN_ERR "Insufficient PCI resources, aborting\n");
 702                goto err_out;
 703        }
 704
 705        pioaddr = pci_resource_start(pdev, 0);
 706        memaddr = pci_resource_start(pdev, 1);
 707
 708        pci_set_master(pdev);
 709
 710        dev = alloc_etherdev(sizeof(struct rhine_private));
 711        if (!dev) {
 712                rc = -ENOMEM;
 713                printk(KERN_ERR "alloc_etherdev failed\n");
 714                goto err_out;
 715        }
 716        SET_NETDEV_DEV(dev, &pdev->dev);
 717
 718        rp = netdev_priv(dev);
 719        rp->dev = dev;
 720        rp->quirks = quirks;
 721        rp->pioaddr = pioaddr;
 722        rp->pdev = pdev;
 723
 724        rc = pci_request_regions(pdev, DRV_NAME);
 725        if (rc)
 726                goto err_out_free_netdev;
 727
 728        ioaddr = pci_iomap(pdev, bar, io_size);
 729        if (!ioaddr) {
 730                rc = -EIO;
 731                printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
 732                       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
 733                goto err_out_free_res;
 734        }
 735
 736#ifdef USE_MMIO
 737        enable_mmio(pioaddr, quirks);
 738
 739        /* Check that selected MMIO registers match the PIO ones */
 740        i = 0;
 741        while (mmio_verify_registers[i]) {
 742                int reg = mmio_verify_registers[i++];
 743                unsigned char a = inb(pioaddr+reg);
 744                unsigned char b = readb(ioaddr+reg);
 745                if (a != b) {
 746                        rc = -EIO;
 747                        printk(KERN_ERR "MMIO does not match PIO [%02x] "
 748                               "(%02x != %02x)\n", reg, a, b);
 749                        goto err_out_unmap;
 750                }
 751        }
 752#endif /* USE_MMIO */
 753
 754        dev->base_addr = (unsigned long)ioaddr;
 755        rp->base = ioaddr;
 756
 757        /* Get chip registers into a sane state */
 758        rhine_power_init(dev);
 759        rhine_hw_init(dev, pioaddr);
 760
 761        for (i = 0; i < 6; i++)
 762                dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
 763        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 764
 765        if (!is_valid_ether_addr(dev->perm_addr)) {
 766                rc = -EIO;
 767                printk(KERN_ERR "Invalid MAC address\n");
 768                goto err_out_unmap;
 769        }
 770
 771        /* For Rhine-I/II, phy_id is loaded from EEPROM */
 772        if (!phy_id)
 773                phy_id = ioread8(ioaddr + 0x6C);
 774
 775        dev->irq = pdev->irq;
 776
 777        spin_lock_init(&rp->lock);
 778        rp->mii_if.dev = dev;
 779        rp->mii_if.mdio_read = mdio_read;
 780        rp->mii_if.mdio_write = mdio_write;
 781        rp->mii_if.phy_id_mask = 0x1f;
 782        rp->mii_if.reg_num_mask = 0x1f;
 783
 784        /* The chip-specific entries in the device structure. */
 785        dev->netdev_ops = &rhine_netdev_ops;
 786        dev->ethtool_ops = &netdev_ethtool_ops;
 787        dev->watchdog_timeo = TX_TIMEOUT;
 788
 789        netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
 790
 791        if (rp->quirks & rqRhineI)
 792                dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
 793
 794        /* dev->name not defined before register_netdev()! */
 795        rc = register_netdev(dev);
 796        if (rc)
 797                goto err_out_unmap;
 798
 799        printk(KERN_INFO "%s: VIA %s at 0x%lx, %pM, IRQ %d.\n",
 800               dev->name, name,
 801#ifdef USE_MMIO
 802               memaddr,
 803#else
 804               (long)ioaddr,
 805#endif
 806               dev->dev_addr, pdev->irq);
 807
 808        pci_set_drvdata(pdev, dev);
 809
 810        {
 811                u16 mii_cmd;
 812                int mii_status = mdio_read(dev, phy_id, 1);
 813                mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
 814                mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
 815                if (mii_status != 0xffff && mii_status != 0x0000) {
 816                        rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
 817                        printk(KERN_INFO "%s: MII PHY found at address "
 818                               "%d, status 0x%4.4x advertising %4.4x "
 819                               "Link %4.4x.\n", dev->name, phy_id,
 820                               mii_status, rp->mii_if.advertising,
 821                               mdio_read(dev, phy_id, 5));
 822
 823                        /* set IFF_RUNNING */
 824                        if (mii_status & BMSR_LSTATUS)
 825                                netif_carrier_on(dev);
 826                        else
 827                                netif_carrier_off(dev);
 828
 829                }
 830        }
 831        rp->mii_if.phy_id = phy_id;
 832        if (debug > 1 && avoid_D3)
 833                printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
 834                       dev->name);
 835
 836        return 0;
 837
 838err_out_unmap:
 839        pci_iounmap(pdev, ioaddr);
 840err_out_free_res:
 841        pci_release_regions(pdev);
 842err_out_free_netdev:
 843        free_netdev(dev);
 844err_out:
 845        return rc;
 846}
 847
 848static int alloc_ring(struct net_device* dev)
 849{
 850        struct rhine_private *rp = netdev_priv(dev);
 851        void *ring;
 852        dma_addr_t ring_dma;
 853
 854        ring = pci_alloc_consistent(rp->pdev,
 855                                    RX_RING_SIZE * sizeof(struct rx_desc) +
 856                                    TX_RING_SIZE * sizeof(struct tx_desc),
 857                                    &ring_dma);
 858        if (!ring) {
 859                printk(KERN_ERR "Could not allocate DMA memory.\n");
 860                return -ENOMEM;
 861        }
 862        if (rp->quirks & rqRhineI) {
 863                rp->tx_bufs = pci_alloc_consistent(rp->pdev,
 864                                                   PKT_BUF_SZ * TX_RING_SIZE,
 865                                                   &rp->tx_bufs_dma);
 866                if (rp->tx_bufs == NULL) {
 867                        pci_free_consistent(rp->pdev,
 868                                    RX_RING_SIZE * sizeof(struct rx_desc) +
 869                                    TX_RING_SIZE * sizeof(struct tx_desc),
 870                                    ring, ring_dma);
 871                        return -ENOMEM;
 872                }
 873        }
 874
 875        rp->rx_ring = ring;
 876        rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
 877        rp->rx_ring_dma = ring_dma;
 878        rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
 879
 880        return 0;
 881}
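
/*
 * Layout of the single DMA-coherent block allocated above (sketch):
 *
 *	rx_ring_dma                            tx_ring_dma
 *	|<-- RX_RING_SIZE * sizeof(rx_desc) -->|<-- TX_RING_SIZE * sizeof(tx_desc) -->|
 */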
 882
 883static void free_ring(struct net_device* dev)
 884{
 885        struct rhine_private *rp = netdev_priv(dev);
 886
 887        pci_free_consistent(rp->pdev,
 888                            RX_RING_SIZE * sizeof(struct rx_desc) +
 889                            TX_RING_SIZE * sizeof(struct tx_desc),
 890                            rp->rx_ring, rp->rx_ring_dma);
 891        rp->tx_ring = NULL;
 892
 893        if (rp->tx_bufs)
 894                pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
 895                                    rp->tx_bufs, rp->tx_bufs_dma);
 896
 897        rp->tx_bufs = NULL;
 898
 899}
 900
 901static void alloc_rbufs(struct net_device *dev)
 902{
 903        struct rhine_private *rp = netdev_priv(dev);
 904        dma_addr_t next;
 905        int i;
 906
 907        rp->dirty_rx = rp->cur_rx = 0;
 908
 909        rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
 910        rp->rx_head_desc = &rp->rx_ring[0];
 911        next = rp->rx_ring_dma;
 912
 913        /* Init the ring entries */
 914        for (i = 0; i < RX_RING_SIZE; i++) {
 915                rp->rx_ring[i].rx_status = 0;
 916                rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
 917                next += sizeof(struct rx_desc);
 918                rp->rx_ring[i].next_desc = cpu_to_le32(next);
 919                rp->rx_skbuff[i] = NULL;
 920        }
 921        /* Mark the last entry as wrapping the ring. */
 922        rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
 923
 924        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 925        for (i = 0; i < RX_RING_SIZE; i++) {
 926                struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
 927                rp->rx_skbuff[i] = skb;
 928                if (skb == NULL)
 929                        break;
 930                skb->dev = dev;                 /* Mark as being used by this device. */
 931
 932                rp->rx_skbuff_dma[i] =
 933                        pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
 934                                       PCI_DMA_FROMDEVICE);
 935
 936                rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
 937                rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
 938        }
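        /* Set dirty_rx so that cur_rx - dirty_rx counts the ring slots
           still lacking a buffer (zero when every allocation above
           succeeded); the refill loop in rhine_rx() works from dirty_rx
           onward. */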
 939        rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 940}
 941
 942static void free_rbufs(struct net_device* dev)
 943{
 944        struct rhine_private *rp = netdev_priv(dev);
 945        int i;
 946
 947        /* Free all the skbuffs in the Rx queue. */
 948        for (i = 0; i < RX_RING_SIZE; i++) {
 949                rp->rx_ring[i].rx_status = 0;
 950                rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
 951                if (rp->rx_skbuff[i]) {
 952                        pci_unmap_single(rp->pdev,
 953                                         rp->rx_skbuff_dma[i],
 954                                         rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 955                        dev_kfree_skb(rp->rx_skbuff[i]);
 956                }
 957                rp->rx_skbuff[i] = NULL;
 958        }
 959}
 960
 961static void alloc_tbufs(struct net_device* dev)
 962{
 963        struct rhine_private *rp = netdev_priv(dev);
 964        dma_addr_t next;
 965        int i;
 966
 967        rp->dirty_tx = rp->cur_tx = 0;
 968        next = rp->tx_ring_dma;
 969        for (i = 0; i < TX_RING_SIZE; i++) {
 970                rp->tx_skbuff[i] = NULL;
 971                rp->tx_ring[i].tx_status = 0;
 972                rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
 973                next += sizeof(struct tx_desc);
 974                rp->tx_ring[i].next_desc = cpu_to_le32(next);
 975                if (rp->quirks & rqRhineI)
 976                        rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
 977        }
 978        rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
 979
 980}
 981
 982static void free_tbufs(struct net_device* dev)
 983{
 984        struct rhine_private *rp = netdev_priv(dev);
 985        int i;
 986
 987        for (i = 0; i < TX_RING_SIZE; i++) {
 988                rp->tx_ring[i].tx_status = 0;
 989                rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
 990                rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
 991                if (rp->tx_skbuff[i]) {
 992                        if (rp->tx_skbuff_dma[i]) {
 993                                pci_unmap_single(rp->pdev,
 994                                                 rp->tx_skbuff_dma[i],
 995                                                 rp->tx_skbuff[i]->len,
 996                                                 PCI_DMA_TODEVICE);
 997                        }
 998                        dev_kfree_skb(rp->tx_skbuff[i]);
 999                }
1000                rp->tx_skbuff[i] = NULL;
1001                rp->tx_buf[i] = NULL;
1002        }
1003}
1004
1005static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1006{
1007        struct rhine_private *rp = netdev_priv(dev);
1008        void __iomem *ioaddr = rp->base;
1009
1010        mii_check_media(&rp->mii_if, debug, init_media);
1011
1012        if (rp->mii_if.full_duplex)
1013            iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1014                   ioaddr + ChipCmd1);
1015        else
1016            iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1017                   ioaddr + ChipCmd1);
1018        if (debug > 1)
1019                printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
1020                        rp->mii_if.force_media, netif_carrier_ok(dev));
1021}
1022
1023/* Called after status of force_media possibly changed */
1024static void rhine_set_carrier(struct mii_if_info *mii)
1025{
1026        if (mii->force_media) {
1027                /* autoneg is off: Link is always assumed to be up */
1028                if (!netif_carrier_ok(mii->dev))
1029                        netif_carrier_on(mii->dev);
1030        }
1031        else    /* Let MII library update carrier status */
1032                rhine_check_media(mii->dev, 0);
1033        if (debug > 1)
1034                printk(KERN_INFO "%s: force_media %d, carrier %d\n",
1035                       mii->dev->name, mii->force_media,
1036                       netif_carrier_ok(mii->dev));
1037}
1038
1039static void init_registers(struct net_device *dev)
1040{
1041        struct rhine_private *rp = netdev_priv(dev);
1042        void __iomem *ioaddr = rp->base;
1043        int i;
1044
1045        for (i = 0; i < 6; i++)
1046                iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1047
1048        /* Initialize other registers. */
1049        iowrite16(0x0006, ioaddr + PCIBusConfig);       /* Tune configuration??? */
1050        /* Configure initial FIFO thresholds. */
1051        iowrite8(0x20, ioaddr + TxConfig);
1052        rp->tx_thresh = 0x20;
1053        rp->rx_thresh = 0x60;           /* Written in rhine_set_rx_mode(). */
1054
1055        iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1056        iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1057
1058        rhine_set_rx_mode(dev);
1059
1060        napi_enable(&rp->napi);
1061
1062        /* Enable interrupts by setting the interrupt mask. */
1063        iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
1064               IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1065               IntrTxDone | IntrTxError | IntrTxUnderrun |
1066               IntrPCIErr | IntrStatsMax | IntrLinkChange,
1067               ioaddr + IntrEnable);
1068
1069        iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1070               ioaddr + ChipCmd);
1071        rhine_check_media(dev, 1);
1072}
1073
1074/* Enable MII link status auto-polling (required for IntrLinkChange) */
1075static void rhine_enable_linkmon(void __iomem *ioaddr)
1076{
1077        iowrite8(0, ioaddr + MIICmd);
1078        iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1079        iowrite8(0x80, ioaddr + MIICmd);
1080
1081        RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
1082
1083        iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1084}
1085
1086/* Disable MII link status auto-polling (required for MDIO access) */
1087static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1088{
1089        iowrite8(0, ioaddr + MIICmd);
1090
1091        if (quirks & rqRhineI) {
1092                iowrite8(0x01, ioaddr + MIIRegAddr);    /* MII_BMSR */
1093
1094                /* Can be called from ISR. Evil. */
1095                mdelay(1);
1096
1097                /* 0x80 must be set immediately before turning it off */
1098                iowrite8(0x80, ioaddr + MIICmd);
1099
1100                RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
1101
1102                /* Heh. Now clear 0x80 again. */
1103                iowrite8(0, ioaddr + MIICmd);
1104        }
1105        else
1106                RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
1107}
1108
1109/* Read and write over the MII Management Data I/O (MDIO) interface. */
1110
1111static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1112{
1113        struct rhine_private *rp = netdev_priv(dev);
1114        void __iomem *ioaddr = rp->base;
1115        int result;
1116
1117        rhine_disable_linkmon(ioaddr, rp->quirks);
1118
1119        /* rhine_disable_linkmon already cleared MIICmd */
1120        iowrite8(phy_id, ioaddr + MIIPhyAddr);
1121        iowrite8(regnum, ioaddr + MIIRegAddr);
1122        iowrite8(0x40, ioaddr + MIICmd);                /* Trigger read */
1123        RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
1124        result = ioread16(ioaddr + MIIData);
1125
1126        rhine_enable_linkmon(ioaddr);
1127        return result;
1128}
1129
1130static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1131{
1132        struct rhine_private *rp = netdev_priv(dev);
1133        void __iomem *ioaddr = rp->base;
1134
1135        rhine_disable_linkmon(ioaddr, rp->quirks);
1136
1137        /* rhine_disable_linkmon already cleared MIICmd */
1138        iowrite8(phy_id, ioaddr + MIIPhyAddr);
1139        iowrite8(regnum, ioaddr + MIIRegAddr);
1140        iowrite16(value, ioaddr + MIIData);
1141        iowrite8(0x20, ioaddr + MIICmd);                /* Trigger write */
1142        RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
1143
1144        rhine_enable_linkmon(ioaddr);
1145}
1146
1147static int rhine_open(struct net_device *dev)
1148{
1149        struct rhine_private *rp = netdev_priv(dev);
1150        void __iomem *ioaddr = rp->base;
1151        int rc;
1152
1153        rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
1154                        dev);
1155        if (rc)
1156                return rc;
1157
1158        if (debug > 1)
1159                printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1160                       dev->name, rp->pdev->irq);
1161
1162        rc = alloc_ring(dev);
1163        if (rc) {
1164                free_irq(rp->pdev->irq, dev);
1165                return rc;
1166        }
1167        alloc_rbufs(dev);
1168        alloc_tbufs(dev);
1169        rhine_chip_reset(dev);
1170        init_registers(dev);
1171        if (debug > 2)
1172                printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1173                       "MII status: %4.4x.\n",
1174                       dev->name, ioread16(ioaddr + ChipCmd),
1175                       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1176
1177        netif_start_queue(dev);
1178
1179        return 0;
1180}
1181
1182static void rhine_tx_timeout(struct net_device *dev)
1183{
1184        struct rhine_private *rp = netdev_priv(dev);
1185        void __iomem *ioaddr = rp->base;
1186
1187        printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1188               "%4.4x, resetting...\n",
1189               dev->name, ioread16(ioaddr + IntrStatus),
1190               mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1191
1192        /* protect against concurrent rx interrupts */
1193        disable_irq(rp->pdev->irq);
1194
1195        napi_disable(&rp->napi);
1196
1197        spin_lock(&rp->lock);
1198
1199        /* clear all descriptors */
1200        free_tbufs(dev);
1201        free_rbufs(dev);
1202        alloc_tbufs(dev);
1203        alloc_rbufs(dev);
1204
1205        /* Reinitialize the hardware. */
1206        rhine_chip_reset(dev);
1207        init_registers(dev);
1208
1209        spin_unlock(&rp->lock);
1210        enable_irq(rp->pdev->irq);
1211
1212        dev->trans_start = jiffies;
1213        dev->stats.tx_errors++;
1214        netif_wake_queue(dev);
1215}
1216
1217static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1218                                  struct net_device *dev)
1219{
1220        struct rhine_private *rp = netdev_priv(dev);
1221        void __iomem *ioaddr = rp->base;
1222        unsigned entry;
1223        unsigned long flags;
1224
1225        /* Caution: the write order is important here, set the field
1226           with the "ownership" bits last. */
1227
1228        /* Calculate the next Tx descriptor entry. */
1229        entry = rp->cur_tx % TX_RING_SIZE;
1230
1231        if (skb_padto(skb, ETH_ZLEN))
1232                return NETDEV_TX_OK;
1233
1234        rp->tx_skbuff[entry] = skb;
1235
1236        if ((rp->quirks & rqRhineI) &&
1237            (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1238                /* Must use alignment buffer. */
1239                if (skb->len > PKT_BUF_SZ) {
1240                        /* packet too long, drop it */
1241                        dev_kfree_skb(skb);
1242                        rp->tx_skbuff[entry] = NULL;
1243                        dev->stats.tx_dropped++;
1244                        return NETDEV_TX_OK;
1245                }
1246
1247                /* Padding is not copied and so must be redone. */
1248                skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1249                if (skb->len < ETH_ZLEN)
1250                        memset(rp->tx_buf[entry] + skb->len, 0,
1251                               ETH_ZLEN - skb->len);
1252                rp->tx_skbuff_dma[entry] = 0;
1253                rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1254                                                      (rp->tx_buf[entry] -
1255                                                       rp->tx_bufs));
1256        } else {
1257                rp->tx_skbuff_dma[entry] =
1258                        pci_map_single(rp->pdev, skb->data, skb->len,
1259                                       PCI_DMA_TODEVICE);
1260                rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1261        }
1262
1263        rp->tx_ring[entry].desc_length =
1264                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1265
1266        /* lock eth irq */
1267        spin_lock_irqsave(&rp->lock, flags);
1268        wmb();
1269        rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1270        wmb();
1271
1272        rp->cur_tx++;
1273
1274        /* Non-x86 Todo: explicitly flush cache lines here. */
1275
1276        /* Wake the potentially-idle transmit channel */
1277        iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1278               ioaddr + ChipCmd1);
1279        IOSYNC;
1280
1281        if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1282                netif_stop_queue(dev);
1283
1284        dev->trans_start = jiffies;
1285
1286        spin_unlock_irqrestore(&rp->lock, flags);
1287
1288        if (debug > 4) {
1289                printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1290                       dev->name, rp->cur_tx-1, entry);
1291        }
1292        return NETDEV_TX_OK;
1293}
1294
1295/* The interrupt handler does all of the Rx thread work and cleans up
1296   after the Tx thread. */
1297static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1298{
1299        struct net_device *dev = dev_instance;
1300        struct rhine_private *rp = netdev_priv(dev);
1301        void __iomem *ioaddr = rp->base;
1302        u32 intr_status;
1303        int boguscnt = max_interrupt_work;
1304        int handled = 0;
1305
1306        while ((intr_status = get_intr_status(dev))) {
1307                handled = 1;
1308
1309                /* Acknowledge all of the current interrupt sources ASAP. */
1310                if (intr_status & IntrTxDescRace)
1311                        iowrite8(0x08, ioaddr + IntrStatus2);
1312                iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1313                IOSYNC;
1314
1315                if (debug > 4)
1316                        printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1317                               dev->name, intr_status);
1318
1319                if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1320                                   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
1321                        iowrite16(IntrTxAborted |
1322                                  IntrTxDone | IntrTxError | IntrTxUnderrun |
1323                                  IntrPCIErr | IntrStatsMax | IntrLinkChange,
1324                                  ioaddr + IntrEnable);
1325
1326                        napi_schedule(&rp->napi);
1327                }
1328
1329                if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1330                        if (intr_status & IntrTxErrSummary) {
1331                                /* Avoid scavenging before Tx engine turned off */
1332                                RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1333                                if (debug > 2 &&
1334                                    ioread8(ioaddr+ChipCmd) & CmdTxOn)
1335                                        printk(KERN_WARNING "%s: "
1336                                               "rhine_interrupt() Tx engine "
1337                                               "still on.\n", dev->name);
1338                        }
1339                        rhine_tx(dev);
1340                }
1341
1342                /* Abnormal error summary/uncommon events handlers. */
1343                if (intr_status & (IntrPCIErr | IntrLinkChange |
1344                                   IntrStatsMax | IntrTxError | IntrTxAborted |
1345                                   IntrTxUnderrun | IntrTxDescRace))
1346                        rhine_error(dev, intr_status);
1347
1348                if (--boguscnt < 0) {
1349                        printk(KERN_WARNING "%s: Too much work at interrupt, "
1350                               "status=%#8.8x.\n",
1351                               dev->name, intr_status);
1352                        break;
1353                }
1354        }
1355
1356        if (debug > 3)
1357                printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1358                       dev->name, ioread16(ioaddr + IntrStatus));
1359        return IRQ_RETVAL(handled);
1360}
1361
1362/* This routine is logically part of the interrupt handler, but isolated
1363   for clarity. */
1364static void rhine_tx(struct net_device *dev)
1365{
1366        struct rhine_private *rp = netdev_priv(dev);
1367        int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1368
1369        spin_lock(&rp->lock);
1370
1371        /* find and cleanup dirty tx descriptors */
1372        while (rp->dirty_tx != rp->cur_tx) {
1373                txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1374                if (debug > 6)
1375                        printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
1376                               entry, txstatus);
1377                if (txstatus & DescOwn)
1378                        break;
1379                if (txstatus & 0x8000) {
1380                        if (debug > 1)
1381                                printk(KERN_DEBUG "%s: Transmit error, "
1382                                       "Tx status %8.8x.\n",
1383                                       dev->name, txstatus);
1384                        dev->stats.tx_errors++;
1385                        if (txstatus & 0x0400)
1386                                dev->stats.tx_carrier_errors++;
1387                        if (txstatus & 0x0200)
1388                                dev->stats.tx_window_errors++;
1389                        if (txstatus & 0x0100)
1390                                dev->stats.tx_aborted_errors++;
1391                        if (txstatus & 0x0080)
1392                                dev->stats.tx_heartbeat_errors++;
1393                        if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1394                            (txstatus & 0x0800) || (txstatus & 0x1000)) {
1395                                dev->stats.tx_fifo_errors++;
1396                                rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1397                                break; /* Keep the skb - we try again */
1398                        }
1399                        /* Transmitter restarted in 'abnormal' handler. */
1400                } else {
1401                        if (rp->quirks & rqRhineI)
1402                                dev->stats.collisions += (txstatus >> 3) & 0x0F;
1403                        else
1404                                dev->stats.collisions += txstatus & 0x0F;
1405                        if (debug > 6)
1406                                printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1407                                       (txstatus >> 3) & 0xF,
1408                                       txstatus & 0xF);
1409                        dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1410                        dev->stats.tx_packets++;
1411                }
1412                /* Free the original skb. */
1413                if (rp->tx_skbuff_dma[entry]) {
1414                        pci_unmap_single(rp->pdev,
1415                                         rp->tx_skbuff_dma[entry],
1416                                         rp->tx_skbuff[entry]->len,
1417                                         PCI_DMA_TODEVICE);
1418                }
1419                dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1420                rp->tx_skbuff[entry] = NULL;
1421                entry = (++rp->dirty_tx) % TX_RING_SIZE;
1422        }
1423        if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1424                netif_wake_queue(dev);
1425
1426        spin_unlock(&rp->lock);
1427}
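
/*
 * Illustrative sketch, not part of the driver: rhine_tx() above relies
 * on cur_tx and dirty_tx being free-running counters. Their difference
 * is the number of descriptors still outstanding, and a ring index is
 * obtained by reducing a counter modulo TX_RING_SIZE.
 */
static inline unsigned int rhine_tx_inflight_sketch(const struct rhine_private *rp)
{
        return rp->cur_tx - rp->dirty_tx;       /* at most TX_QUEUE_LEN */
}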
1428
1429/* Process up to limit frames from receive ring */
1430static int rhine_rx(struct net_device *dev, int limit)
1431{
1432        struct rhine_private *rp = netdev_priv(dev);
1433        int count;
1434        int entry = rp->cur_rx % RX_RING_SIZE;
1435
1436        if (debug > 4) {
1437                printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1438                       dev->name, entry,
1439                       le32_to_cpu(rp->rx_head_desc->rx_status));
1440        }
1441
1442        /* If EOP is set on the next entry, it's a new packet. Send it up. */
1443        for (count = 0; count < limit; ++count) {
1444                struct rx_desc *desc = rp->rx_head_desc;
1445                u32 desc_status = le32_to_cpu(desc->rx_status);
1446                int data_size = desc_status >> 16;
1447
1448                if (desc_status & DescOwn)
1449                        break;
1450
1451                if (debug > 4)
1452                        printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
1453                               desc_status);
1454
1455                if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1456                        if ((desc_status & RxWholePkt) != RxWholePkt) {
1457                                printk(KERN_WARNING "%s: Oversized Ethernet "
1458                                       "frame spanned multiple buffers, entry "
1459                                       "%#x length %d status %8.8x!\n",
1460                                       dev->name, entry, data_size,
1461                                       desc_status);
1462                                printk(KERN_WARNING "%s: Oversized Ethernet "
1463                                       "frame %p vs %p.\n", dev->name,
1464                                       rp->rx_head_desc, &rp->rx_ring[entry]);
1465                                dev->stats.rx_length_errors++;
1466                        } else if (desc_status & RxErr) {
1467                                /* There was an error. */
1468                                if (debug > 2)
1469                                        printk(KERN_DEBUG "rhine_rx() Rx "
1470                                               "error was %8.8x.\n",
1471                                               desc_status);
1472                                dev->stats.rx_errors++;
1473                                if (desc_status & 0x0030)
1474                                        dev->stats.rx_length_errors++;
1475                                if (desc_status & 0x0048)
1476                                        dev->stats.rx_fifo_errors++;
1477                                if (desc_status & 0x0004)
1478                                        dev->stats.rx_frame_errors++;
1479                                if (desc_status & 0x0002) {
1480                                        /* This statistic can also be updated outside the interrupt handler. */
1481                                        spin_lock(&rp->lock);
1482                                        dev->stats.rx_crc_errors++;
1483                                        spin_unlock(&rp->lock);
1484                                }
1485                        }
1486                } else {
1487                        struct sk_buff *skb;
1488                        /* Length should omit the CRC */
1489                        int pkt_len = data_size - 4;
1490
1491                        /* Check if the packet is long enough to accept without
1492                           copying to a minimally-sized skbuff. */
1493                        if (pkt_len < rx_copybreak &&
1494                                (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
1495                                skb_reserve(skb, NET_IP_ALIGN); /* 16 byte align the IP header */
1496                                pci_dma_sync_single_for_cpu(rp->pdev,
1497                                                            rp->rx_skbuff_dma[entry],
1498                                                            rp->rx_buf_sz,
1499                                                            PCI_DMA_FROMDEVICE);
1500
1501                                skb_copy_to_linear_data(skb,
1502                                                 rp->rx_skbuff[entry]->data,
1503                                                 pkt_len);
1504                                skb_put(skb, pkt_len);
1505                                pci_dma_sync_single_for_device(rp->pdev,
1506                                                               rp->rx_skbuff_dma[entry],
1507                                                               rp->rx_buf_sz,
1508                                                               PCI_DMA_FROMDEVICE);
1509                        } else {
1510                                skb = rp->rx_skbuff[entry];
1511                                if (skb == NULL) {
1512                                        printk(KERN_ERR "%s: Inconsistent Rx "
1513                                               "descriptor chain.\n",
1514                                               dev->name);
1515                                        break;
1516                                }
1517                                rp->rx_skbuff[entry] = NULL;
1518                                skb_put(skb, pkt_len);
1519                                pci_unmap_single(rp->pdev,
1520                                                 rp->rx_skbuff_dma[entry],
1521                                                 rp->rx_buf_sz,
1522                                                 PCI_DMA_FROMDEVICE);
1523                        }
1524                        skb->protocol = eth_type_trans(skb, dev);
1525                        netif_receive_skb(skb);
1526                        dev->stats.rx_bytes += pkt_len;
1527                        dev->stats.rx_packets++;
1528                }
1529                entry = (++rp->cur_rx) % RX_RING_SIZE;
1530                rp->rx_head_desc = &rp->rx_ring[entry];
1531        }
1532
1533        /* Refill the Rx ring buffers. */
1534        for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1535                struct sk_buff *skb;
1536                entry = rp->dirty_rx % RX_RING_SIZE;
1537                if (rp->rx_skbuff[entry] == NULL) {
1538                        skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1539                        rp->rx_skbuff[entry] = skb;
1540                        if (skb == NULL)
1541                                break;  /* Better luck next round. */
1542                        skb->dev = dev; /* Mark as being used by this device. */
1543                        rp->rx_skbuff_dma[entry] =
1544                                pci_map_single(rp->pdev, skb->data,
1545                                               rp->rx_buf_sz,
1546                                               PCI_DMA_FROMDEVICE);
1547                        rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1548                }
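                /* Hand the descriptor back to the chip only after its
                   buffer address is in place; setting DescOwn releases
                   it for DMA. */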
1549                rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1550        }
1551
1552        return count;
1553}
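
/*
 * Illustrative sketch of the rx_copybreak policy used in rhine_rx()
 * (this helper is an assumption for exposition, not a driver function):
 * frames shorter than rx_copybreak are copied into a right-sized skb so
 * the large ring buffer stays mapped and is reused immediately; longer
 * frames donate the ring buffer itself to the stack, forcing a fresh
 * allocation at refill time.
 */
static inline int rhine_copybreak_sketch(int pkt_len)
{
        return pkt_len < rx_copybreak;
}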
1554
1555/*
1556 * Clears the "tally counters" for CRC errors and missed frames(?).
1557 * It has been reported that some chips need a write of 0 to clear
1558 * these, for others the counters are set to 1 when written to and
1559 * instead cleared when read. So we clear them both ways ...
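 * Callers fold the counters into dev->stats before calling this; see
 * rhine_error() and rhine_get_stats().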
1560 */
1561static inline void clear_tally_counters(void __iomem *ioaddr)
1562{
1563        iowrite32(0, ioaddr + RxMissed);
1564        ioread16(ioaddr + RxCRCErrs);
1565        ioread16(ioaddr + RxMissed);
1566}
1567
1568static void rhine_restart_tx(struct net_device *dev)
{
1569        struct rhine_private *rp = netdev_priv(dev);
1570        void __iomem *ioaddr = rp->base;
1571        int entry = rp->dirty_tx % TX_RING_SIZE;
1572        u32 intr_status;
1573
1574        /*
1575         * If new errors occurred, we need to sort them out before doing Tx.
1576         * In that case the ISR will be back here soon anyway.
1577         */
1578        intr_status = get_intr_status(dev);
1579
1580        if ((intr_status & IntrTxErrSummary) == 0) {
1582                /* We know better than the chip where it should continue. */
1583                iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1584                       ioaddr + TxRingPtr);
1585
1586                iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1587                       ioaddr + ChipCmd);
1588                iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1589                       ioaddr + ChipCmd1);
1590                IOSYNC;
1591        } else {
1593                /* This should never happen */
1594                if (debug > 1)
1595                        printk(KERN_WARNING "%s: rhine_restart_tx() "
1596                               "Another error occurred %8.8x.\n",
1597                               dev->name, intr_status);
1598        }
1600}
1601
1602static void rhine_error(struct net_device *dev, int intr_status)
1603{
1604        struct rhine_private *rp = netdev_priv(dev);
1605        void __iomem *ioaddr = rp->base;
1606
1607        spin_lock(&rp->lock);
1608
1609        if (intr_status & IntrLinkChange)
1610                rhine_check_media(dev, 0);
1611        if (intr_status & IntrStatsMax) {
1612                dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1613                dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1614                clear_tally_counters(ioaddr);
1615        }
1616        if (intr_status & IntrTxAborted) {
1617                if (debug > 1)
1618                        printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1619                               dev->name, intr_status);
1620        }
1621        if (intr_status & IntrTxUnderrun) {
1622                if (rp->tx_thresh < 0xE0)
1623                        iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1624                if (debug > 1)
1625                        printk(KERN_INFO "%s: Transmitter underrun, Tx "
1626                               "threshold now %2.2x.\n",
1627                               dev->name, rp->tx_thresh);
1628        }
1629        if (intr_status & IntrTxDescRace) {
1630                if (debug > 2)
1631                        printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
1632                               dev->name);
1633        }
1634        if ((intr_status & IntrTxError) &&
1635            (intr_status & (IntrTxAborted |
1636             IntrTxUnderrun | IntrTxDescRace)) == 0) {
1637                if (rp->tx_thresh < 0xE0) {
1638                        iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1639                }
1640                if (debug > 1)
1641                        printk(KERN_INFO "%s: Unspecified error. Tx "
1642                               "threshold now %2.2x.\n",
1643                               dev->name, rp->tx_thresh);
1644        }
1645        if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1646                           IntrTxError))
1647                rhine_restart_tx(dev);
1648
1649        if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1650                            IntrTxError | IntrTxAborted | IntrNormalSummary |
1651                            IntrTxDescRace)) {
1652                if (debug > 1)
1653                        printk(KERN_ERR "%s: Something Wicked happened! "
1654                               "%8.8x.\n", dev->name, intr_status);
1655        }
1656
1657        spin_unlock(&rp->lock);
1658}
1659
1660static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1661{
1662        struct rhine_private *rp = netdev_priv(dev);
1663        void __iomem *ioaddr = rp->base;
1664        unsigned long flags;
1665
1666        spin_lock_irqsave(&rp->lock, flags);
1667        dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1668        dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1669        clear_tally_counters(ioaddr);
1670        spin_unlock_irqrestore(&rp->lock, flags);
1671
1672        return &dev->stats;
1673}
1674
1675static void rhine_set_rx_mode(struct net_device *dev)
1676{
1677        struct rhine_private *rp = netdev_priv(dev);
1678        void __iomem *ioaddr = rp->base;
1679        u32 mc_filter[2];       /* Multicast hash filter */
1680        u8 rx_mode;             /* Note: 0x02=accept runt, 0x01=accept errs */
1681
1682        if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
1683                rx_mode = 0x1C;
1684                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1685                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1686        } else if ((dev->mc_count > multicast_filter_limit) ||
1687                   (dev->flags & IFF_ALLMULTI)) {
1688                /* Too many to match, or accept all multicasts. */
1689                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1690                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1691                rx_mode = 0x0C;
1692        } else {
1693                struct dev_mc_list *mclist;
1694                int i;
1695                memset(mc_filter, 0, sizeof(mc_filter));
1696                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1697                     i++, mclist = mclist->next) {
1698                        int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1699
1700                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1701                }
1702                iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1703                iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1704                rx_mode = 0x0C;
1705        }
1706        iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
1707}
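
/*
 * Illustrative sketch of the hash step above (an assumption for
 * exposition, not a function the driver defines): the top six bits of
 * the Ethernet CRC select one of the 64 filter bits; bit 5 of that
 * value picks MulticastFilter0 vs MulticastFilter1, and bits 0-4 give
 * the position within the chosen 32-bit register.
 */
static inline void rhine_hash_bit_sketch(const u8 *addr, u32 mc_filter[2])
{
        int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;   /* 0..63 */

        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}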
1708
1709static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1710{
1711        struct rhine_private *rp = netdev_priv(dev);
1712
1713        strcpy(info->driver, DRV_NAME);
1714        strcpy(info->version, DRV_VERSION);
1715        strcpy(info->bus_info, pci_name(rp->pdev));
1716}
1717
1718static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1719{
1720        struct rhine_private *rp = netdev_priv(dev);
1721        int rc;
1722
1723        spin_lock_irq(&rp->lock);
1724        rc = mii_ethtool_gset(&rp->mii_if, cmd);
1725        spin_unlock_irq(&rp->lock);
1726
1727        return rc;
1728}
1729
1730static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1731{
1732        struct rhine_private *rp = netdev_priv(dev);
1733        int rc;
1734
1735        spin_lock_irq(&rp->lock);
1736        rc = mii_ethtool_sset(&rp->mii_if, cmd);
1737        spin_unlock_irq(&rp->lock);
1738        rhine_set_carrier(&rp->mii_if);
1739
1740        return rc;
1741}
1742
1743static int netdev_nway_reset(struct net_device *dev)
1744{
1745        struct rhine_private *rp = netdev_priv(dev);
1746
1747        return mii_nway_restart(&rp->mii_if);
1748}
1749
1750static u32 netdev_get_link(struct net_device *dev)
1751{
1752        struct rhine_private *rp = netdev_priv(dev);
1753
1754        return mii_link_ok(&rp->mii_if);
1755}
1756
1757static u32 netdev_get_msglevel(struct net_device *dev)
1758{
1759        return debug;
1760}
1761
1762static void netdev_set_msglevel(struct net_device *dev, u32 value)
1763{
1764        debug = value;
1765}
1766
1767static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1768{
1769        struct rhine_private *rp = netdev_priv(dev);
1770
1771        if (!(rp->quirks & rqWOL))
1772                return;
1773
1774        spin_lock_irq(&rp->lock);
1775        wol->supported = WAKE_PHY | WAKE_MAGIC |
1776                         WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;  /* Untested */
1777        wol->wolopts = rp->wolopts;
1778        spin_unlock_irq(&rp->lock);
1779}
1780
1781static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1782{
1783        struct rhine_private *rp = netdev_priv(dev);
1784        u32 support = WAKE_PHY | WAKE_MAGIC |
1785                      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;     /* Untested */
1786
1787        if (!(rp->quirks & rqWOL))
1788                return -EINVAL;
1789
1790        if (wol->wolopts & ~support)
1791                return -EINVAL;
1792
1793        spin_lock_irq(&rp->lock);
1794        rp->wolopts = wol->wolopts;
1795        spin_unlock_irq(&rp->lock);
1796
1797        return 0;
1798}
1799
1800static const struct ethtool_ops netdev_ethtool_ops = {
1801        .get_drvinfo            = netdev_get_drvinfo,
1802        .get_settings           = netdev_get_settings,
1803        .set_settings           = netdev_set_settings,
1804        .nway_reset             = netdev_nway_reset,
1805        .get_link               = netdev_get_link,
1806        .get_msglevel           = netdev_get_msglevel,
1807        .set_msglevel           = netdev_set_msglevel,
1808        .get_wol                = rhine_get_wol,
1809        .set_wol                = rhine_set_wol,
1810};
1811
1812static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1813{
1814        struct rhine_private *rp = netdev_priv(dev);
1815        int rc;
1816
1817        if (!netif_running(dev))
1818                return -EINVAL;
1819
1820        spin_lock_irq(&rp->lock);
1821        rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1822        spin_unlock_irq(&rp->lock);
1823        rhine_set_carrier(&rp->mii_if);
1824
1825        return rc;
1826}
1827
1828static int rhine_close(struct net_device *dev)
1829{
1830        struct rhine_private *rp = netdev_priv(dev);
1831        void __iomem *ioaddr = rp->base;
1832
1833        spin_lock_irq(&rp->lock);
1834
1835        netif_stop_queue(dev);
1836        napi_disable(&rp->napi);
1837
1838        if (debug > 1)
1839                printk(KERN_DEBUG "%s: Shutting down ethercard, "
1840                       "status was %4.4x.\n",
1841                       dev->name, ioread16(ioaddr + ChipCmd));
1842
1843        /* Switch to loopback mode to avoid hardware races. */
1844        iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1845
1846        /* Disable interrupts by clearing the interrupt mask. */
1847        iowrite16(0x0000, ioaddr + IntrEnable);
1848
1849        /* Stop the chip's Tx and Rx processes. */
1850        iowrite16(CmdStop, ioaddr + ChipCmd);
1851
1852        spin_unlock_irq(&rp->lock);
1853
1854        free_irq(rp->pdev->irq, dev);
1855        free_rbufs(dev);
1856        free_tbufs(dev);
1857        free_ring(dev);
1858
1859        return 0;
1860}
1861
1862
1863static void __devexit rhine_remove_one(struct pci_dev *pdev)
1864{
1865        struct net_device *dev = pci_get_drvdata(pdev);
1866        struct rhine_private *rp = netdev_priv(dev);
1867
1868        unregister_netdev(dev);
1869
1870        pci_iounmap(pdev, rp->base);
1871        pci_release_regions(pdev);
1872
1873        free_netdev(dev);
1874        pci_disable_device(pdev);
1875        pci_set_drvdata(pdev, NULL);
1876}
1877
1878static void rhine_shutdown(struct pci_dev *pdev)
1879{
1880        struct net_device *dev = pci_get_drvdata(pdev);
1881        struct rhine_private *rp = netdev_priv(dev);
1882        void __iomem *ioaddr = rp->base;
1883
1884        if (!(rp->quirks & rqWOL))
1885                return; /* Nothing to do for non-WOL adapters */
1886
1887        rhine_power_init(dev);
1888
1889        /* Make sure we use pattern 0, 1 and not 4, 5 */
1890        if (rp->quirks & rq6patterns)
1891                iowrite8(0x04, ioaddr + WOLcgClr);
1892
1893        if (rp->wolopts & WAKE_MAGIC) {
1894                iowrite8(WOLmagic, ioaddr + WOLcrSet);
1895                /*
1896                 * Turn EEPROM-controlled wake-up back on -- some hardware may
1897                 * not cooperate otherwise.
1898                 */
1899                iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
1900        }
1901
1902        if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
1903                iowrite8(WOLbmcast, ioaddr + WOLcgSet);
1904
1905        if (rp->wolopts & WAKE_PHY)
1906                iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
1907
1908        if (rp->wolopts & WAKE_UCAST)
1909                iowrite8(WOLucast, ioaddr + WOLcrSet);
1910
1911        if (rp->wolopts) {
1912                /* Enable legacy WOL (for old motherboards) */
1913                iowrite8(0x01, ioaddr + PwcfgSet);
1914                iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
1915        }
1916
1917        /* Hit power state D3 (sleep) */
1918        if (!avoid_D3)
1919                iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1920
1921        /* TODO: Check use of pci_enable_wake() */
1923}
1924
1925#ifdef CONFIG_PM
1926static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
1927{
1928        struct net_device *dev = pci_get_drvdata(pdev);
1929        struct rhine_private *rp = netdev_priv(dev);
1930        unsigned long flags;
1931
1932        if (!netif_running(dev))
1933                return 0;
1934
1935        napi_disable(&rp->napi);
1936
1937        netif_device_detach(dev);
1938        pci_save_state(pdev);
1939
1940        spin_lock_irqsave(&rp->lock, flags);
1941        rhine_shutdown(pdev);
1942        spin_unlock_irqrestore(&rp->lock, flags);
1943
1944        free_irq(dev->irq, dev);
1945        return 0;
1946}
1947
1948static int rhine_resume(struct pci_dev *pdev)
1949{
1950        struct net_device *dev = pci_get_drvdata(pdev);
1951        struct rhine_private *rp = netdev_priv(dev);
1952        unsigned long flags;
1953        int ret;
1954
1955        if (!netif_running(dev))
1956                return 0;
1957
1958        if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
1959                printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
1960
1961        ret = pci_set_power_state(pdev, PCI_D0);
1962        if (debug > 1)
1963                printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
1964                        dev->name, ret ? "failed" : "succeeded", ret);
1965
1966        pci_restore_state(pdev);
1967
1968        spin_lock_irqsave(&rp->lock, flags);
1969#ifdef USE_MMIO
1970        enable_mmio(rp->pioaddr, rp->quirks);
1971#endif
1972        rhine_power_init(dev);
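        /*
         * Rebuild the descriptor rings from scratch rather than trusting
         * their pre-suspend contents.
         */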
1973        free_tbufs(dev);
1974        free_rbufs(dev);
1975        alloc_tbufs(dev);
1976        alloc_rbufs(dev);
1977        init_registers(dev);
1978        spin_unlock_irqrestore(&rp->lock, flags);
1979
1980        netif_device_attach(dev);
1981
1982        return 0;
1983}
1984#endif /* CONFIG_PM */
1985
1986static struct pci_driver rhine_driver = {
1987        .name           = DRV_NAME,
1988        .id_table       = rhine_pci_tbl,
1989        .probe          = rhine_init_one,
1990        .remove         = __devexit_p(rhine_remove_one),
1991#ifdef CONFIG_PM
1992        .suspend        = rhine_suspend,
1993        .resume         = rhine_resume,
1994#endif /* CONFIG_PM */
1995        .shutdown       = rhine_shutdown,
1996};
1997
1998static struct dmi_system_id __initdata rhine_dmi_table[] = {
1999        {
2000                .ident = "EPIA-M",
2001                .matches = {
2002                        DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2003                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2004                },
2005        },
2006        {
2007                .ident = "KV7",
2008                .matches = {
2009                        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2010                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2011                },
2012        },
2013        { NULL }
2014};
2015
2016static int __init rhine_init(void)
2017{
2018/* When built as a module, this is printed whether or not devices are found in probe. */
2019#ifdef MODULE
2020        printk(version);
2021#endif
2022        if (dmi_check_system(rhine_dmi_table)) {
2023                /* these BIOSes fail at PXE boot if chip is in D3 */
2024                avoid_D3 = 1;
2025                printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
2026                                    "enabled.\n",
2027                       DRV_NAME);
2028        } else if (avoid_D3)
2030                printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);
2031
2032        return pci_register_driver(&rhine_driver);
2033}
2034
2035
2036static void __exit rhine_cleanup(void)
2037{
2038        pci_unregister_driver(&rhine_driver);
2039}
2040
2041
2042module_init(rhine_init);
2043module_exit(rhine_cleanup);
2044