linux/drivers/net/ethernet/via/via-rhine.c
   1/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
   2/*
   3        Written 1998-2001 by Donald Becker.
   4
   5        Current Maintainer: Roger Luethi <rl@hellgate.ch>
   6
   7        This software may be used and distributed according to the terms of
   8        the GNU General Public License (GPL), incorporated herein by reference.
   9        Drivers based on or derived from this code fall under the GPL and must
  10        retain the authorship, copyright and license notice.  This file is not
  11        a complete program and may only be used when the entire operating
  12        system is licensed under the GPL.
  13
  14        This driver is designed for the VIA VT86C100A Rhine-I.
  15        It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
  16        and management NIC 6105M).
  17
  18        The author may be reached as becker@scyld.com, or C/O
  19        Scyld Computing Corporation
  20        410 Severn Ave., Suite 210
  21        Annapolis MD 21403
  22
  23
  24        This driver contains some changes from the original Donald Becker
  25        version. He may or may not be interested in bug reports on this
  26        code. You can find his versions at:
  27        http://www.scyld.com/network/via-rhine.html
  28        [link no longer provides useful info -jgarzik]
  29
  30*/
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#define DRV_NAME        "via-rhine"
  35#define DRV_VERSION     "1.5.0"
  36#define DRV_RELDATE     "2010-10-09"
  37
  38#include <linux/types.h>
  39
  40/* A few user-configurable values.
  41   These may be modified when a driver module is loaded. */
  42static int debug = 0;
  43#define RHINE_MSG_DEFAULT \
  44        (0x0000)
  45
  46/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  47   Setting to > 1518 effectively disables this feature. */
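/* On architectures where unaligned accesses are expensive, copying small
   frames also realigns the IP header (see "Theory of Operation" below), so
   rx_copybreak defaults to the full frame size there and to 0 elsewhere. */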
  48#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
  49        defined(CONFIG_SPARC) || defined(__ia64__) ||              \
  50        defined(__sh__) || defined(__mips__)
  51static int rx_copybreak = 1518;
  52#else
  53static int rx_copybreak;
  54#endif
  55
  56/* Work-around for broken BIOSes: they are unable to get the chip back out of
  57   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
  58static bool avoid_D3;
  59
  60/*
  61 * In case you are looking for 'options[]' or 'full_duplex[]', they
  62 * are gone. Use ethtool(8) instead.
  63 */
  64
  65/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  66   The Rhine has a 64 element 8390-like hash table. */
  67static const int multicast_filter_limit = 32;
  68
  69
  70/* Operational parameters that are set at compile time. */
  71
  72/* Keep the ring sizes a power of two for compile efficiency.
  73   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  74   Making the Tx ring too large decreases the effectiveness of channel
  75   bonding and packet priority.
  76   There are no ill effects from too-large receive rings. */
  77#define TX_RING_SIZE    16
  78#define TX_QUEUE_LEN    10      /* Limit ring entries actually used. */
  79#define RX_RING_SIZE    64
  80
  81/* Operational parameters that usually are not changed. */
  82
  83/* Time in jiffies before concluding the transmitter is hung. */
  84#define TX_TIMEOUT      (2*HZ)
  85
  86#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer.*/
  87
  88#include <linux/module.h>
  89#include <linux/moduleparam.h>
  90#include <linux/kernel.h>
  91#include <linux/string.h>
  92#include <linux/timer.h>
  93#include <linux/errno.h>
  94#include <linux/ioport.h>
  95#include <linux/interrupt.h>
  96#include <linux/pci.h>
  97#include <linux/dma-mapping.h>
  98#include <linux/netdevice.h>
  99#include <linux/etherdevice.h>
 100#include <linux/skbuff.h>
 101#include <linux/init.h>
 102#include <linux/delay.h>
 103#include <linux/mii.h>
 104#include <linux/ethtool.h>
 105#include <linux/crc32.h>
 106#include <linux/if_vlan.h>
 107#include <linux/bitops.h>
 108#include <linux/workqueue.h>
 109#include <asm/processor.h>      /* Processor type for cache alignment. */
 110#include <asm/io.h>
 111#include <asm/irq.h>
 112#include <asm/uaccess.h>
 113#include <linux/dmi.h>
 114
 115/* These identify the driver base version and may not be removed. */
 116static const char version[] =
 117        "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
 118
 119/* This driver was written to use PCI memory space. Some early versions
 120   of the Rhine may only work correctly with I/O space accesses. */
 121#ifdef CONFIG_VIA_RHINE_MMIO
 122#define USE_MMIO
 124#endif
 125
 126MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 127MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 128MODULE_LICENSE("GPL");
 129
 130module_param(debug, int, 0);
 131module_param(rx_copybreak, int, 0);
 132module_param(avoid_D3, bool, 0);
 133MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
 134MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 135MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
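
/* Example (illustrative only): loading the module with driver/probe messages
 * (NETIF_MSG_DRV | NETIF_MSG_PROBE), tiny-frame copying enabled and the D3
 * work-around:
 *
 *     modprobe via-rhine debug=0x3 rx_copybreak=1518 avoid_D3=1
 *
 * When the driver is built in, the same parameters go on the kernel command
 * line, e.g. via-rhine.avoid_D3=1 as noted above.
 */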
 136
 137#define MCAM_SIZE       32
 138#define VCAM_SIZE       32
 139
 140/*
 141                Theory of Operation
 142
 143I. Board Compatibility
 144
  145This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
  146controller and also supports the later Rhine-II and Rhine-III chips.
 147
 148II. Board-specific settings
 149
 150Boards with this chip are functional only in a bus-master PCI slot.
 151
 152Many operational settings are loaded from the EEPROM to the Config word at
 153offset 0x78. For most of these settings, this driver assumes that they are
 154correct.
 155If this driver is compiled to use PCI memory space operations the EEPROM
 156must be configured to enable memory ops.
 157
 158III. Driver operation
 159
 160IIIa. Ring buffers
 161
 162This driver uses two statically allocated fixed-size descriptor lists
 163formed into rings by a branch from the final descriptor to the beginning of
 164the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
 165
 166IIIb/c. Transmit/Receive Structure
 167
 168This driver attempts to use a zero-copy receive and transmit scheme.
 169
 170Alas, all data buffers are required to start on a 32 bit boundary, so
 171the driver must often copy transmit packets into bounce buffers.
 172
 173The driver allocates full frame size skbuffs for the Rx ring buffers at
 174open() time and passes the skb->data field to the chip as receive data
 175buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
 176a fresh skbuff is allocated and the frame is copied to the new skbuff.
 177When the incoming frame is larger, the skbuff is passed directly up the
 178protocol stack. Buffers consumed this way are replaced by newly allocated
 179skbuffs in the last phase of rhine_rx().
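
As a rough sketch of that decision (illustrative pseudo-code only, not the
literal body of rhine_rx()):

        if (pkt_len < rx_copybreak)
                copy the frame into a fresh small skb; the ring skb stays put;
        else
                pass the ring skb up the stack and refill the slot later;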
 180
 181The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 182using a full-sized skbuff for small frames vs. the copying costs of larger
 183frames. New boards are typically used in generously configured machines
 184and the underfilled buffers have negligible impact compared to the benefit of
 185a single allocation size, so the default value of zero results in never
 186copying packets. When copying is done, the cost is usually mitigated by using
 187a combined copy/checksum routine. Copying also preloads the cache, which is
 188most useful with small frames.
 189
 190Since the VIA chips are only able to transfer data to buffers on 32 bit
 191boundaries, the IP header at offset 14 in an ethernet frame isn't
 192longword aligned for further processing. Copying these unaligned buffers
 193has the beneficial effect of 16-byte aligning the IP header.
 194
 195IIId. Synchronization
 196
 197The driver runs as two independent, single-threaded flows of control. One
 198is the send-packet routine, which enforces single-threaded use by the
 199netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
 200which is single threaded by the hardware and interrupt handling software.
 201
 202The send packet thread has partial control over the Tx ring. It locks the
 203netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
 204the ring is not available it stops the transmit queue by
 205calling netif_stop_queue.
 206
 207The interrupt handler has exclusive control over the Rx ring and records stats
 208from the Tx ring. After reaping the stats, it marks the Tx queue entry as
  209empty by incrementing the dirty_tx mark. If at least half of the entries in
  210the Tx ring are available the transmit queue is woken up if it was stopped.
 211
 212IV. Notes
 213
 214IVb. References
 215
 216Preliminary VT86C100A manual from http://www.via.com.tw/
 217http://www.scyld.com/expert/100mbps.html
 218http://www.scyld.com/expert/NWay.html
 219ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
 220ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
 221
 222
 223IVc. Errata
 224
  225The VT86C100A manual is not a reliable source of information.
 226The 3043 chip does not handle unaligned transmit or receive buffers, resulting
 227in significant performance degradation for bounce buffer copies on transmit
 228and unaligned IP headers on receive.
 229The chip does not pad to minimum transmit length.
 230
 231*/
 232
 233
 234/* This table drives the PCI probe routines. It's mostly boilerplate in all
 235   of the drivers, and will likely be provided by some future kernel.
  236   Note the matching code -- each entry matches one specific VIA device ID
  237   and accepts any subsystem vendor/device IDs (PCI_ANY_ID).
 238*/
 239
 240enum rhine_revs {
 241        VT86C100A       = 0x00,
 242        VTunknown0      = 0x20,
 243        VT6102          = 0x40,
 244        VT8231          = 0x50, /* Integrated MAC */
 245        VT8233          = 0x60, /* Integrated MAC */
 246        VT8235          = 0x74, /* Integrated MAC */
 247        VT8237          = 0x78, /* Integrated MAC */
 248        VTunknown1      = 0x7C,
 249        VT6105          = 0x80,
 250        VT6105_B0       = 0x83,
 251        VT6105L         = 0x8A,
 252        VT6107          = 0x8C,
 253        VTunknown2      = 0x8E,
 254        VT6105M         = 0x90, /* Management adapter */
 255};
 256
 257enum rhine_quirks {
 258        rqWOL           = 0x0001,       /* Wake-On-LAN support */
 259        rqForceReset    = 0x0002,
 260        rq6patterns     = 0x0040,       /* 6 instead of 4 patterns for WOL */
 261        rqStatusWBRace  = 0x0080,       /* Tx Status Writeback Error possible */
 262        rqRhineI        = 0x0100,       /* See comment below */
 263};
 264/*
 265 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 266 * MMIO as well as for the collision counter and the Tx FIFO underflow
  267 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 268 */
 269
 270/* Beware of PCI posted writes */
 271#define IOSYNC  do { ioread8(ioaddr + StationAddr); } while (0)
 272
 273static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
 274        { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },    /* VT86C100A */
 275        { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },    /* VT6102 */
 276        { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },    /* 6105{,L,LOM} */
 277        { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },    /* VT6105M */
 278        { }     /* terminate list */
 279};
 280MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 281
 282
 283/* Offsets to the device registers. */
 284enum register_offsets {
 285        StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
 286        ChipCmd1=0x09, TQWake=0x0A,
 287        IntrStatus=0x0C, IntrEnable=0x0E,
 288        MulticastFilter0=0x10, MulticastFilter1=0x14,
 289        RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
 290        MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
 291        MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
 292        ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
 293        RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
 294        StickyHW=0x83, IntrStatus2=0x84,
 295        CamMask=0x88, CamCon=0x92, CamAddr=0x93,
 296        WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
 297        WOLcrClr1=0xA6, WOLcgClr=0xA7,
 298        PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
 299};
 300
 301/* Bits in ConfigD */
 302enum backoff_bits {
 303        BackOptional=0x01, BackModify=0x02,
 304        BackCaptureEffect=0x04, BackRandom=0x08
 305};
 306
 307/* Bits in the TxConfig (TCR) register */
 308enum tcr_bits {
 309        TCR_PQEN=0x01,
 310        TCR_LB0=0x02,           /* loopback[0] */
 311        TCR_LB1=0x04,           /* loopback[1] */
 312        TCR_OFSET=0x08,
 313        TCR_RTGOPT=0x10,
 314        TCR_RTFT0=0x20,
 315        TCR_RTFT1=0x40,
 316        TCR_RTSF=0x80,
 317};
 318
 319/* Bits in the CamCon (CAMC) register */
 320enum camcon_bits {
 321        CAMC_CAMEN=0x01,
 322        CAMC_VCAMSL=0x02,
 323        CAMC_CAMWR=0x04,
 324        CAMC_CAMRD=0x08,
 325};
 326
 327/* Bits in the PCIBusConfig1 (BCR1) register */
 328enum bcr1_bits {
 329        BCR1_POT0=0x01,
 330        BCR1_POT1=0x02,
 331        BCR1_POT2=0x04,
 332        BCR1_CTFT0=0x08,
 333        BCR1_CTFT1=0x10,
 334        BCR1_CTSF=0x20,
 335        BCR1_TXQNOBK=0x40,      /* for VT6105 */
 336        BCR1_VIDFR=0x80,        /* for VT6105 */
 337        BCR1_MED0=0x40,         /* for VT6102 */
 338        BCR1_MED1=0x80,         /* for VT6102 */
 339};
 340
 341#ifdef USE_MMIO
  342/* Registers verified at probe time to read identically via MMIO and PIO. */
 343static const int mmio_verify_registers[] = {
 344        RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
 345        0
 346};
 347#endif
 348
 349/* Bits in the interrupt status/mask registers. */
 350enum intr_status_bits {
 351        IntrRxDone      = 0x0001,
 352        IntrTxDone      = 0x0002,
 353        IntrRxErr       = 0x0004,
 354        IntrTxError     = 0x0008,
 355        IntrRxEmpty     = 0x0020,
 356        IntrPCIErr      = 0x0040,
 357        IntrStatsMax    = 0x0080,
 358        IntrRxEarly     = 0x0100,
 359        IntrTxUnderrun  = 0x0210,
 360        IntrRxOverflow  = 0x0400,
 361        IntrRxDropped   = 0x0800,
 362        IntrRxNoBuf     = 0x1000,
 363        IntrTxAborted   = 0x2000,
 364        IntrLinkChange  = 0x4000,
 365        IntrRxWakeUp    = 0x8000,
 366        IntrTxDescRace          = 0x080000,     /* mapped from IntrStatus2 */
 367        IntrNormalSummary       = IntrRxDone | IntrTxDone,
 368        IntrTxErrSummary        = IntrTxDescRace | IntrTxAborted | IntrTxError |
 369                                  IntrTxUnderrun,
 370};
 371
 372/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
 373enum wol_bits {
 374        WOLucast        = 0x10,
 375        WOLmagic        = 0x20,
 376        WOLbmcast       = 0x30,
 377        WOLlnkon        = 0x40,
 378        WOLlnkoff       = 0x80,
 379};
 380
 381/* The Rx and Tx buffer descriptors. */
 382struct rx_desc {
 383        __le32 rx_status;
 384        __le32 desc_length; /* Chain flag, Buffer/frame length */
 385        __le32 addr;
 386        __le32 next_desc;
 387};
 388struct tx_desc {
 389        __le32 tx_status;
 390        __le32 desc_length; /* Chain flag, Tx Config, Frame length */
 391        __le32 addr;
 392        __le32 next_desc;
 393};
 394
 395/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
 396#define TXDESC          0x00e08000
 397
 398enum rx_status_bits {
 399        RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
 400};
 401
 402/* Bits in *_desc.*_status */
 403enum desc_status_bits {
 404        DescOwn=0x80000000
 405};
 406
 407/* Bits in *_desc.*_length */
 408enum desc_length_bits {
 409        DescTag=0x00010000
 410};
 411
 412/* Bits in ChipCmd. */
 413enum chip_cmd_bits {
 414        CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
 415        CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
 416        Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
 417        Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
 418};
 419
 420struct rhine_stats {
 421        u64             packets;
 422        u64             bytes;
 423        struct u64_stats_sync syncp;
 424};
 425
 426struct rhine_private {
 427        /* Bit mask for configured VLAN ids */
 428        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 429
 430        /* Descriptor rings */
 431        struct rx_desc *rx_ring;
 432        struct tx_desc *tx_ring;
 433        dma_addr_t rx_ring_dma;
 434        dma_addr_t tx_ring_dma;
 435
 436        /* The addresses of receive-in-place skbuffs. */
 437        struct sk_buff *rx_skbuff[RX_RING_SIZE];
 438        dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
 439
 440        /* The saved address of a sent-in-place packet/buffer, for later free(). */
 441        struct sk_buff *tx_skbuff[TX_RING_SIZE];
 442        dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
 443
 444        /* Tx bounce buffers (Rhine-I only) */
 445        unsigned char *tx_buf[TX_RING_SIZE];
 446        unsigned char *tx_bufs;
 447        dma_addr_t tx_bufs_dma;
 448
 449        struct pci_dev *pdev;
 450        long pioaddr;
 451        struct net_device *dev;
 452        struct napi_struct napi;
 453        spinlock_t lock;
 454        struct mutex task_lock;
 455        bool task_enable;
 456        struct work_struct slow_event_task;
 457        struct work_struct reset_task;
 458
 459        u32 msg_enable;
 460
 461        /* Frequently used values: keep some adjacent for cache effect. */
 462        u32 quirks;
 463        struct rx_desc *rx_head_desc;
 464        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
 465        unsigned int cur_tx, dirty_tx;
 466        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
 467        struct rhine_stats rx_stats;
 468        struct rhine_stats tx_stats;
 469        u8 wolopts;
 470
 471        u8 tx_thresh, rx_thresh;
 472
 473        struct mii_if_info mii_if;
 474        void __iomem *base;
 475};
 476
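/* Read-modify-write register helpers: set, clear, test or merge (under a
 * mask) bits in an 8/16/32-bit register via the ioread/iowrite accessors. */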
 477#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
 478#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
 479#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
 480
 481#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
 482#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
 483#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
 484
 485#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
 486#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
 487#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
 488
 489#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
 490#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
 491#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
 492
 493
 494static int  mdio_read(struct net_device *dev, int phy_id, int location);
 495static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 496static int  rhine_open(struct net_device *dev);
 497static void rhine_reset_task(struct work_struct *work);
 498static void rhine_slow_event_task(struct work_struct *work);
 499static void rhine_tx_timeout(struct net_device *dev);
 500static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 501                                  struct net_device *dev);
 502static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
 503static void rhine_tx(struct net_device *dev);
 504static int rhine_rx(struct net_device *dev, int limit);
 505static void rhine_set_rx_mode(struct net_device *dev);
 506static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
 507               struct rtnl_link_stats64 *stats);
 508static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 509static const struct ethtool_ops netdev_ethtool_ops;
 510static int  rhine_close(struct net_device *dev);
 511static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
 512static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
 513static void rhine_restart_tx(struct net_device *dev);
 514
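/* Poll an 8-bit register until (reg & mask) becomes non-zero (low == false)
 * or zero (low == true), waiting up to ~10 ms; log a debug message if it
 * took more than 64 polling cycles. */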
 515static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
 516{
 517        void __iomem *ioaddr = rp->base;
 518        int i;
 519
 520        for (i = 0; i < 1024; i++) {
 521                bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
 522
 523                if (low ^ has_mask_bits)
 524                        break;
 525                udelay(10);
 526        }
 527        if (i > 64) {
 528                netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
 529                          "count: %04d\n", low ? "low" : "high", reg, mask, i);
 530        }
 531}
 532
 533static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
 534{
 535        rhine_wait_bit(rp, reg, mask, false);
 536}
 537
 538static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
 539{
 540        rhine_wait_bit(rp, reg, mask, true);
 541}
 542
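/* Collect pending interrupt events. On chips with the Tx write-back race
 * quirk, the IntrStatus2 bits are folded in above bit 15 (IntrTxDescRace). */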
 543static u32 rhine_get_events(struct rhine_private *rp)
 544{
 545        void __iomem *ioaddr = rp->base;
 546        u32 intr_status;
 547
 548        intr_status = ioread16(ioaddr + IntrStatus);
 549        /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
 550        if (rp->quirks & rqStatusWBRace)
 551                intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
 552        return intr_status;
 553}
 554
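/* Acknowledge (clear) the interrupt events in mask, writing the high bits
 * back to IntrStatus2 where that register exists. */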
 555static void rhine_ack_events(struct rhine_private *rp, u32 mask)
 556{
 557        void __iomem *ioaddr = rp->base;
 558
 559        if (rp->quirks & rqStatusWBRace)
 560                iowrite8(mask >> 16, ioaddr + IntrStatus2);
 561        iowrite16(mask, ioaddr + IntrStatus);
 562        mmiowb();
 563}
 564
 565/*
 566 * Get power related registers into sane state.
 567 * Notify user about past WOL event.
 568 */
 569static void rhine_power_init(struct net_device *dev)
 570{
 571        struct rhine_private *rp = netdev_priv(dev);
 572        void __iomem *ioaddr = rp->base;
 573        u16 wolstat;
 574
 575        if (rp->quirks & rqWOL) {
 576                /* Make sure chip is in power state D0 */
 577                iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
 578
 579                /* Disable "force PME-enable" */
 580                iowrite8(0x80, ioaddr + WOLcgClr);
 581
 582                /* Clear power-event config bits (WOL) */
 583                iowrite8(0xFF, ioaddr + WOLcrClr);
 584                /* More recent cards can manage two additional patterns */
 585                if (rp->quirks & rq6patterns)
 586                        iowrite8(0x03, ioaddr + WOLcrClr1);
 587
 588                /* Save power-event status bits */
 589                wolstat = ioread8(ioaddr + PwrcsrSet);
 590                if (rp->quirks & rq6patterns)
 591                        wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
 592
 593                /* Clear power-event status bits */
 594                iowrite8(0xFF, ioaddr + PwrcsrClr);
 595                if (rp->quirks & rq6patterns)
 596                        iowrite8(0x03, ioaddr + PwrcsrClr1);
 597
 598                if (wolstat) {
 599                        char *reason;
 600                        switch (wolstat) {
 601                        case WOLmagic:
 602                                reason = "Magic packet";
 603                                break;
 604                        case WOLlnkon:
 605                                reason = "Link went up";
 606                                break;
 607                        case WOLlnkoff:
 608                                reason = "Link went down";
 609                                break;
 610                        case WOLucast:
 611                                reason = "Unicast packet";
 612                                break;
 613                        case WOLbmcast:
 614                                reason = "Multicast/broadcast packet";
 615                                break;
 616                        default:
 617                                reason = "Unknown";
 618                        }
 619                        netdev_info(dev, "Woke system up. Reason: %s\n",
 620                                    reason);
 621                }
 622        }
 623}
 624
 625static void rhine_chip_reset(struct net_device *dev)
 626{
 627        struct rhine_private *rp = netdev_priv(dev);
 628        void __iomem *ioaddr = rp->base;
 629        u8 cmd1;
 630
 631        iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 632        IOSYNC;
 633
 634        if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
 635                netdev_info(dev, "Reset not complete yet. Trying harder.\n");
 636
 637                /* Force reset */
 638                if (rp->quirks & rqForceReset)
 639                        iowrite8(0x40, ioaddr + MiscCmd);
 640
 641                /* Reset can take somewhat longer (rare) */
 642                rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
 643        }
 644
 645        cmd1 = ioread8(ioaddr + ChipCmd1);
 646        netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
 647                   "failed" : "succeeded");
 648}
 649
 650#ifdef USE_MMIO
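/* Switch the chip from I/O-port to memory-mapped register access; the enable
 * bit lives in ConfigA on Rhine-I and in ConfigD on later chips. */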
 651static void enable_mmio(long pioaddr, u32 quirks)
 652{
 653        int n;
 654        if (quirks & rqRhineI) {
 655                /* More recent docs say that this bit is reserved ... */
 656                n = inb(pioaddr + ConfigA) | 0x20;
 657                outb(n, pioaddr + ConfigA);
 658        } else {
 659                n = inb(pioaddr + ConfigD) | 0x80;
 660                outb(n, pioaddr + ConfigD);
 661        }
 662}
 663#endif
 664
 665/*
 666 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 667 * (plus 0x6C for Rhine-I/II)
 668 */
 669static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 670{
 671        struct rhine_private *rp = netdev_priv(dev);
 672        void __iomem *ioaddr = rp->base;
 673        int i;
 674
 675        outb(0x20, pioaddr + MACRegEEcsr);
 676        for (i = 0; i < 1024; i++) {
 677                if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
 678                        break;
 679        }
 680        if (i > 512)
 681                pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
 682
 683#ifdef USE_MMIO
 684        /*
 685         * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
 686         * MMIO. If reloading EEPROM was done first this could be avoided, but
 687         * it is not known if that still works with the "win98-reboot" problem.
 688         */
 689        enable_mmio(pioaddr, rp->quirks);
 690#endif
 691
 692        /* Turn off EEPROM-controlled wake-up (magic packet) */
 693        if (rp->quirks & rqWOL)
 694                iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
 695
 696}
 697
 698#ifdef CONFIG_NET_POLL_CONTROLLER
 699static void rhine_poll(struct net_device *dev)
 700{
 701        struct rhine_private *rp = netdev_priv(dev);
 702        const int irq = rp->pdev->irq;
 703
 704        disable_irq(irq);
 705        rhine_interrupt(irq, dev);
 706        enable_irq(irq);
 707}
 708#endif
 709
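/* Raise the Tx FIFO threshold by 32 bytes (capped at 0xe0), typically in
 * response to a transmit underrun. */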
 710static void rhine_kick_tx_threshold(struct rhine_private *rp)
 711{
 712        if (rp->tx_thresh < 0xe0) {
 713                void __iomem *ioaddr = rp->base;
 714
 715                rp->tx_thresh += 0x20;
 716                BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 717        }
 718}
 719
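/* Report the Tx error causes carried in the interrupt status, bump the FIFO
 * threshold on underrun, and restart the transmitter. */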
 720static void rhine_tx_err(struct rhine_private *rp, u32 status)
 721{
 722        struct net_device *dev = rp->dev;
 723
 724        if (status & IntrTxAborted) {
 725                netif_info(rp, tx_err, dev,
 726                           "Abort %08x, frame dropped\n", status);
 727        }
 728
 729        if (status & IntrTxUnderrun) {
 730                rhine_kick_tx_threshold(rp);
  731                netif_info(rp, tx_err, dev, "Transmitter underrun, "
  732                           "Tx threshold now %02x\n", rp->tx_thresh);
 733        }
 734
 735        if (status & IntrTxDescRace)
 736                netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
 737
 738        if ((status & IntrTxError) &&
 739            (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
 740                rhine_kick_tx_threshold(rp);
 741                netif_info(rp, tx_err, dev, "Unspecified error. "
 742                           "Tx threshold now %02x\n", rp->tx_thresh);
 743        }
 744
 745        rhine_restart_tx(dev);
 746}
 747
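/* Fold the chip's CRC-error and missed-frame tally counters into the netdev
 * stats, then clear them both by writing zero and by reading them back. */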
 748static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
 749{
 750        void __iomem *ioaddr = rp->base;
 751        struct net_device_stats *stats = &rp->dev->stats;
 752
 753        stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
 754        stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
 755
 756        /*
 757         * Clears the "tally counters" for CRC errors and missed frames(?).
 758         * It has been reported that some chips need a write of 0 to clear
 759         * these, for others the counters are set to 1 when written to and
 760         * instead cleared when read. So we clear them both ways ...
 761         */
 762        iowrite32(0, ioaddr + RxMissed);
 763        ioread16(ioaddr + RxCRCErrs);
 764        ioread16(ioaddr + RxMissed);
 765}
 766
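/* Interrupt event groups: the RHINE_EVENT_NAPI_* events are handled in NAPI
 * context by rhine_napipoll(); RHINE_EVENT_SLOW events are deferred to
 * slow_event_task. */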
 767#define RHINE_EVENT_NAPI_RX     (IntrRxDone | \
 768                                 IntrRxErr | \
 769                                 IntrRxEmpty | \
 770                                 IntrRxOverflow | \
 771                                 IntrRxDropped | \
 772                                 IntrRxNoBuf | \
 773                                 IntrRxWakeUp)
 774
 775#define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
 776                                 IntrTxAborted | \
 777                                 IntrTxUnderrun | \
 778                                 IntrTxDescRace)
 779#define RHINE_EVENT_NAPI_TX     (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
 780
 781#define RHINE_EVENT_NAPI        (RHINE_EVENT_NAPI_RX | \
 782                                 RHINE_EVENT_NAPI_TX | \
 783                                 IntrStatsMax)
 784#define RHINE_EVENT_SLOW        (IntrPCIErr | IntrLinkChange)
 785#define RHINE_EVENT             (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
 786
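/* NAPI poll handler: acknowledge and service Rx/Tx work and the statistics
 * overflow event, defer slow events to the work queue, and re-enable
 * interrupts (minus any deferred events) once under budget. */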
 787static int rhine_napipoll(struct napi_struct *napi, int budget)
 788{
 789        struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
 790        struct net_device *dev = rp->dev;
 791        void __iomem *ioaddr = rp->base;
 792        u16 enable_mask = RHINE_EVENT & 0xffff;
 793        int work_done = 0;
 794        u32 status;
 795
 796        status = rhine_get_events(rp);
 797        rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
 798
 799        if (status & RHINE_EVENT_NAPI_RX)
 800                work_done += rhine_rx(dev, budget);
 801
 802        if (status & RHINE_EVENT_NAPI_TX) {
 803                if (status & RHINE_EVENT_NAPI_TX_ERR) {
 804                        /* Avoid scavenging before Tx engine turned off */
 805                        rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
 806                        if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
 807                                netif_warn(rp, tx_err, dev, "Tx still on\n");
 808                }
 809
 810                rhine_tx(dev);
 811
 812                if (status & RHINE_EVENT_NAPI_TX_ERR)
 813                        rhine_tx_err(rp, status);
 814        }
 815
 816        if (status & IntrStatsMax) {
 817                spin_lock(&rp->lock);
 818                rhine_update_rx_crc_and_missed_errord(rp);
 819                spin_unlock(&rp->lock);
 820        }
 821
 822        if (status & RHINE_EVENT_SLOW) {
 823                enable_mask &= ~RHINE_EVENT_SLOW;
 824                schedule_work(&rp->slow_event_task);
 825        }
 826
 827        if (work_done < budget) {
 828                napi_complete(napi);
 829                iowrite16(enable_mask, ioaddr + IntrEnable);
 830                mmiowb();
 831        }
 832        return work_done;
 833}
 834
 835static void rhine_hw_init(struct net_device *dev, long pioaddr)
 836{
 837        struct rhine_private *rp = netdev_priv(dev);
 838
 839        /* Reset the chip to erase previous misconfiguration. */
 840        rhine_chip_reset(dev);
 841
 842        /* Rhine-I needs extra time to recuperate before EEPROM reload */
 843        if (rp->quirks & rqRhineI)
 844                msleep(5);
 845
 846        /* Reload EEPROM controlled bytes cleared by soft reset */
 847        rhine_reload_eeprom(pioaddr, dev);
 848}
 849
 850static const struct net_device_ops rhine_netdev_ops = {
 851        .ndo_open                = rhine_open,
 852        .ndo_stop                = rhine_close,
 853        .ndo_start_xmit          = rhine_start_tx,
 854        .ndo_get_stats64         = rhine_get_stats64,
 855        .ndo_set_rx_mode         = rhine_set_rx_mode,
 856        .ndo_change_mtu          = eth_change_mtu,
 857        .ndo_validate_addr       = eth_validate_addr,
 858        .ndo_set_mac_address     = eth_mac_addr,
 859        .ndo_do_ioctl            = netdev_ioctl,
 860        .ndo_tx_timeout          = rhine_tx_timeout,
 861        .ndo_vlan_rx_add_vid     = rhine_vlan_rx_add_vid,
 862        .ndo_vlan_rx_kill_vid    = rhine_vlan_rx_kill_vid,
 863#ifdef CONFIG_NET_POLL_CONTROLLER
 864        .ndo_poll_controller     = rhine_poll,
 865#endif
 866};
 867
 868static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 869{
 870        struct net_device *dev;
 871        struct rhine_private *rp;
 872        int i, rc;
 873        u32 quirks;
 874        long pioaddr;
 875        long memaddr;
 876        void __iomem *ioaddr;
 877        int io_size, phy_id;
 878        const char *name;
 879#ifdef USE_MMIO
 880        int bar = 1;
 881#else
 882        int bar = 0;
 883#endif
 884
 885/* when built into the kernel, we only print version if device is found */
 886#ifndef MODULE
 887        pr_info_once("%s\n", version);
 888#endif
 889
 890        io_size = 256;
 891        phy_id = 0;
 892        quirks = 0;
 893        name = "Rhine";
 894        if (pdev->revision < VTunknown0) {
 895                quirks = rqRhineI;
 896                io_size = 128;
 897        }
 898        else if (pdev->revision >= VT6102) {
 899                quirks = rqWOL | rqForceReset;
 900                if (pdev->revision < VT6105) {
 901                        name = "Rhine II";
 902                        quirks |= rqStatusWBRace;       /* Rhine-II exclusive */
 903                }
 904                else {
 905                        phy_id = 1;     /* Integrated PHY, phy_id fixed to 1 */
 906                        if (pdev->revision >= VT6105_B0)
 907                                quirks |= rq6patterns;
 908                        if (pdev->revision < VT6105M)
 909                                name = "Rhine III";
 910                        else
 911                                name = "Rhine III (Management Adapter)";
 912                }
 913        }
 914
 915        rc = pci_enable_device(pdev);
 916        if (rc)
 917                goto err_out;
 918
 919        /* this should always be supported */
 920        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 921        if (rc) {
 922                dev_err(&pdev->dev,
 923                        "32-bit PCI DMA addresses not supported by the card!?\n");
 924                goto err_out;
 925        }
 926
 927        /* sanity check */
 928        if ((pci_resource_len(pdev, 0) < io_size) ||
 929            (pci_resource_len(pdev, 1) < io_size)) {
 930                rc = -EIO;
 931                dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
 932                goto err_out;
 933        }
 934
 935        pioaddr = pci_resource_start(pdev, 0);
 936        memaddr = pci_resource_start(pdev, 1);
 937
 938        pci_set_master(pdev);
 939
 940        dev = alloc_etherdev(sizeof(struct rhine_private));
 941        if (!dev) {
 942                rc = -ENOMEM;
 943                goto err_out;
 944        }
 945        SET_NETDEV_DEV(dev, &pdev->dev);
 946
 947        rp = netdev_priv(dev);
 948        rp->dev = dev;
 949        rp->quirks = quirks;
 950        rp->pioaddr = pioaddr;
 951        rp->pdev = pdev;
 952        rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 953
 954        rc = pci_request_regions(pdev, DRV_NAME);
 955        if (rc)
 956                goto err_out_free_netdev;
 957
 958        ioaddr = pci_iomap(pdev, bar, io_size);
 959        if (!ioaddr) {
 960                rc = -EIO;
 961                dev_err(&pdev->dev,
 962                        "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
 963                        pci_name(pdev), io_size, memaddr);
 964                goto err_out_free_res;
 965        }
 966
 967#ifdef USE_MMIO
 968        enable_mmio(pioaddr, quirks);
 969
 970        /* Check that selected MMIO registers match the PIO ones */
 971        i = 0;
 972        while (mmio_verify_registers[i]) {
 973                int reg = mmio_verify_registers[i++];
 974                unsigned char a = inb(pioaddr+reg);
 975                unsigned char b = readb(ioaddr+reg);
 976                if (a != b) {
 977                        rc = -EIO;
 978                        dev_err(&pdev->dev,
  979                                "MMIO does not match PIO [%02x] (%02x != %02x)\n",
 980                                reg, a, b);
 981                        goto err_out_unmap;
 982                }
 983        }
 984#endif /* USE_MMIO */
 985
 986        rp->base = ioaddr;
 987
 988        /* Get chip registers into a sane state */
 989        rhine_power_init(dev);
 990        rhine_hw_init(dev, pioaddr);
 991
 992        for (i = 0; i < 6; i++)
 993                dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
 994
 995        if (!is_valid_ether_addr(dev->dev_addr)) {
 996                /* Report it and use a random ethernet address instead */
 997                netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
 998                eth_hw_addr_random(dev);
 999                netdev_info(dev, "Using random MAC address: %pM\n",
1000                            dev->dev_addr);
1001        }
1002
1003        /* For Rhine-I/II, phy_id is loaded from EEPROM */
1004        if (!phy_id)
1005                phy_id = ioread8(ioaddr + 0x6C);
1006
1007        spin_lock_init(&rp->lock);
1008        mutex_init(&rp->task_lock);
1009        INIT_WORK(&rp->reset_task, rhine_reset_task);
1010        INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1011
1012        rp->mii_if.dev = dev;
1013        rp->mii_if.mdio_read = mdio_read;
1014        rp->mii_if.mdio_write = mdio_write;
1015        rp->mii_if.phy_id_mask = 0x1f;
1016        rp->mii_if.reg_num_mask = 0x1f;
1017
1018        /* The chip-specific entries in the device structure. */
1019        dev->netdev_ops = &rhine_netdev_ops;
 1020        dev->ethtool_ops = &netdev_ethtool_ops;
1021        dev->watchdog_timeo = TX_TIMEOUT;
1022
1023        netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1024
1025        if (rp->quirks & rqRhineI)
1026                dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1027
1028        if (pdev->revision >= VT6105M)
1029                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1030                NETIF_F_HW_VLAN_FILTER;
1031
1032        /* dev->name not defined before register_netdev()! */
1033        rc = register_netdev(dev);
1034        if (rc)
1035                goto err_out_unmap;
1036
1037        netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1038                    name,
1039#ifdef USE_MMIO
1040                    memaddr,
1041#else
1042                    (long)ioaddr,
1043#endif
1044                    dev->dev_addr, pdev->irq);
1045
1046        pci_set_drvdata(pdev, dev);
1047
1048        {
1049                u16 mii_cmd;
1050                int mii_status = mdio_read(dev, phy_id, 1);
1051                mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1052                mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1053                if (mii_status != 0xffff && mii_status != 0x0000) {
1054                        rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1055                        netdev_info(dev,
1056                                    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1057                                    phy_id,
1058                                    mii_status, rp->mii_if.advertising,
1059                                    mdio_read(dev, phy_id, 5));
1060
1061                        /* set IFF_RUNNING */
1062                        if (mii_status & BMSR_LSTATUS)
1063                                netif_carrier_on(dev);
1064                        else
1065                                netif_carrier_off(dev);
1066
1067                }
1068        }
1069        rp->mii_if.phy_id = phy_id;
1070        if (avoid_D3)
1071                netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1072
1073        return 0;
1074
1075err_out_unmap:
1076        pci_iounmap(pdev, ioaddr);
1077err_out_free_res:
1078        pci_release_regions(pdev);
1079err_out_free_netdev:
1080        free_netdev(dev);
1081err_out:
1082        return rc;
1083}
1084
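/* Allocate one DMA-coherent block holding both descriptor rings (Rx ring
 * first, Tx ring right behind it), plus the Tx bounce buffers that Rhine-I
 * needs for its alignment restrictions. */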
1085static int alloc_ring(struct net_device* dev)
1086{
1087        struct rhine_private *rp = netdev_priv(dev);
1088        void *ring;
1089        dma_addr_t ring_dma;
1090
1091        ring = pci_alloc_consistent(rp->pdev,
1092                                    RX_RING_SIZE * sizeof(struct rx_desc) +
1093                                    TX_RING_SIZE * sizeof(struct tx_desc),
1094                                    &ring_dma);
1095        if (!ring) {
1096                netdev_err(dev, "Could not allocate DMA memory\n");
1097                return -ENOMEM;
1098        }
1099        if (rp->quirks & rqRhineI) {
1100                rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1101                                                   PKT_BUF_SZ * TX_RING_SIZE,
1102                                                   &rp->tx_bufs_dma);
1103                if (rp->tx_bufs == NULL) {
1104                        pci_free_consistent(rp->pdev,
1105                                    RX_RING_SIZE * sizeof(struct rx_desc) +
1106                                    TX_RING_SIZE * sizeof(struct tx_desc),
1107                                    ring, ring_dma);
1108                        return -ENOMEM;
1109                }
1110        }
1111
1112        rp->rx_ring = ring;
1113        rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1114        rp->rx_ring_dma = ring_dma;
1115        rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1116
1117        return 0;
1118}
1119
1120static void free_ring(struct net_device* dev)
1121{
1122        struct rhine_private *rp = netdev_priv(dev);
1123
1124        pci_free_consistent(rp->pdev,
1125                            RX_RING_SIZE * sizeof(struct rx_desc) +
1126                            TX_RING_SIZE * sizeof(struct tx_desc),
1127                            rp->rx_ring, rp->rx_ring_dma);
1128        rp->tx_ring = NULL;
1129
1130        if (rp->tx_bufs)
1131                pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1132                                    rp->tx_bufs, rp->tx_bufs_dma);
1133
1134        rp->tx_bufs = NULL;
1135
1136}
1137
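/* Set up the Rx ring: chain the descriptors into a ring, attach a freshly
 * allocated and DMA-mapped skb to each one, and hand the descriptors to the
 * chip by setting DescOwn. */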
1138static void alloc_rbufs(struct net_device *dev)
1139{
1140        struct rhine_private *rp = netdev_priv(dev);
1141        dma_addr_t next;
1142        int i;
1143
1144        rp->dirty_rx = rp->cur_rx = 0;
1145
1146        rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1147        rp->rx_head_desc = &rp->rx_ring[0];
1148        next = rp->rx_ring_dma;
1149
1150        /* Init the ring entries */
1151        for (i = 0; i < RX_RING_SIZE; i++) {
1152                rp->rx_ring[i].rx_status = 0;
1153                rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1154                next += sizeof(struct rx_desc);
1155                rp->rx_ring[i].next_desc = cpu_to_le32(next);
1156                rp->rx_skbuff[i] = NULL;
1157        }
1158        /* Mark the last entry as wrapping the ring. */
1159        rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1160
1161        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1162        for (i = 0; i < RX_RING_SIZE; i++) {
1163                struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1164                rp->rx_skbuff[i] = skb;
1165                if (skb == NULL)
1166                        break;
1167
1168                rp->rx_skbuff_dma[i] =
1169                        pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1170                                       PCI_DMA_FROMDEVICE);
1171
1172                rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1173                rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1174        }
1175        rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1176}
1177
1178static void free_rbufs(struct net_device* dev)
1179{
1180        struct rhine_private *rp = netdev_priv(dev);
1181        int i;
1182
1183        /* Free all the skbuffs in the Rx queue. */
1184        for (i = 0; i < RX_RING_SIZE; i++) {
1185                rp->rx_ring[i].rx_status = 0;
1186                rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1187                if (rp->rx_skbuff[i]) {
1188                        pci_unmap_single(rp->pdev,
1189                                         rp->rx_skbuff_dma[i],
1190                                         rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1191                        dev_kfree_skb(rp->rx_skbuff[i]);
1192                }
1193                rp->rx_skbuff[i] = NULL;
1194        }
1195}
1196
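/* Set up the Tx ring; on Rhine-I each slot also gets its bounce buffer
 * carved out of tx_bufs. */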
1197static void alloc_tbufs(struct net_device* dev)
1198{
1199        struct rhine_private *rp = netdev_priv(dev);
1200        dma_addr_t next;
1201        int i;
1202
1203        rp->dirty_tx = rp->cur_tx = 0;
1204        next = rp->tx_ring_dma;
1205        for (i = 0; i < TX_RING_SIZE; i++) {
1206                rp->tx_skbuff[i] = NULL;
1207                rp->tx_ring[i].tx_status = 0;
1208                rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1209                next += sizeof(struct tx_desc);
1210                rp->tx_ring[i].next_desc = cpu_to_le32(next);
1211                if (rp->quirks & rqRhineI)
1212                        rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1213        }
1214        rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1215
1216}
1217
1218static void free_tbufs(struct net_device* dev)
1219{
1220        struct rhine_private *rp = netdev_priv(dev);
1221        int i;
1222
1223        for (i = 0; i < TX_RING_SIZE; i++) {
1224                rp->tx_ring[i].tx_status = 0;
1225                rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1226                rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1227                if (rp->tx_skbuff[i]) {
1228                        if (rp->tx_skbuff_dma[i]) {
1229                                pci_unmap_single(rp->pdev,
1230                                                 rp->tx_skbuff_dma[i],
1231                                                 rp->tx_skbuff[i]->len,
1232                                                 PCI_DMA_TODEVICE);
1233                        }
1234                        dev_kfree_skb(rp->tx_skbuff[i]);
1235                }
1236                rp->tx_skbuff[i] = NULL;
1237                rp->tx_buf[i] = NULL;
1238        }
1239}
1240
1241static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1242{
1243        struct rhine_private *rp = netdev_priv(dev);
1244        void __iomem *ioaddr = rp->base;
1245
1246        mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1247
1248        if (rp->mii_if.full_duplex)
1249            iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1250                   ioaddr + ChipCmd1);
1251        else
1252            iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1253                   ioaddr + ChipCmd1);
1254
1255        netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1256                   rp->mii_if.force_media, netif_carrier_ok(dev));
1257}
1258
1259/* Called after status of force_media possibly changed */
1260static void rhine_set_carrier(struct mii_if_info *mii)
1261{
1262        struct net_device *dev = mii->dev;
1263        struct rhine_private *rp = netdev_priv(dev);
1264
1265        if (mii->force_media) {
1266                /* autoneg is off: Link is always assumed to be up */
1267                if (!netif_carrier_ok(dev))
1268                        netif_carrier_on(dev);
 1269        } else  /* Let MII library update carrier status */
1270                rhine_check_media(dev, 0);
1271
1272        netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1273                   mii->force_media, netif_carrier_ok(dev));
1274}
1275
1276/**
1277 * rhine_set_cam - set CAM multicast filters
1278 * @ioaddr: register block of this Rhine
1279 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1280 * @addr: multicast address (6 bytes)
1281 *
1282 * Load addresses into multicast filters.
1283 */
1284static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1285{
1286        int i;
1287
1288        iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1289        wmb();
1290
1291        /* Paranoid -- idx out of range should never happen */
1292        idx &= (MCAM_SIZE - 1);
1293
1294        iowrite8((u8) idx, ioaddr + CamAddr);
1295
1296        for (i = 0; i < 6; i++, addr++)
1297                iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1298        udelay(10);
1299        wmb();
1300
1301        iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1302        udelay(10);
1303
1304        iowrite8(0, ioaddr + CamCon);
1305}
1306
1307/**
1308 * rhine_set_vlan_cam - set CAM VLAN filters
1309 * @ioaddr: register block of this Rhine
1310 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1311 * @addr: VLAN ID (2 bytes)
1312 *
1313 * Load addresses into VLAN filters.
1314 */
1315static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1316{
1317        iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1318        wmb();
1319
1320        /* Paranoid -- idx out of range should never happen */
1321        idx &= (VCAM_SIZE - 1);
1322
1323        iowrite8((u8) idx, ioaddr + CamAddr);
1324
1325        iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1326        udelay(10);
1327        wmb();
1328
1329        iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1330        udelay(10);
1331
1332        iowrite8(0, ioaddr + CamCon);
1333}
1334
1335/**
1336 * rhine_set_cam_mask - set multicast CAM mask
1337 * @ioaddr: register block of this Rhine
1338 * @mask: multicast CAM mask
1339 *
1340 * Mask sets multicast filters active/inactive.
1341 */
1342static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1343{
1344        iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1345        wmb();
1346
1347        /* write mask */
1348        iowrite32(mask, ioaddr + CamMask);
1349
1350        /* disable CAMEN */
1351        iowrite8(0, ioaddr + CamCon);
1352}
1353
1354/**
1355 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1356 * @ioaddr: register block of this Rhine
1357 * @mask: VLAN CAM mask
1358 *
1359 * Mask sets VLAN filters active/inactive.
1360 */
1361static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1362{
1363        iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1364        wmb();
1365
1366        /* write mask */
1367        iowrite32(mask, ioaddr + CamMask);
1368
1369        /* disable CAMEN */
1370        iowrite8(0, ioaddr + CamCon);
1371}
1372
1373/**
1374 * rhine_init_cam_filter - initialize CAM filters
1375 * @dev: network device
1376 *
1377 * Initialize (disable) hardware VLAN and multicast support on this
1378 * Rhine.
1379 */
1380static void rhine_init_cam_filter(struct net_device *dev)
1381{
1382        struct rhine_private *rp = netdev_priv(dev);
1383        void __iomem *ioaddr = rp->base;
1384
1385        /* Disable all CAMs */
1386        rhine_set_vlan_cam_mask(ioaddr, 0);
1387        rhine_set_cam_mask(ioaddr, 0);
1388
1389        /* disable hardware VLAN support */
1390        BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1391        BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1392}
1393
1394/**
1395 * rhine_update_vcam - update VLAN CAM filters
 1396 * @dev: network device
1397 *
1398 * Update VLAN CAM filters to match configuration change.
1399 */
1400static void rhine_update_vcam(struct net_device *dev)
1401{
1402        struct rhine_private *rp = netdev_priv(dev);
1403        void __iomem *ioaddr = rp->base;
1404        u16 vid;
1405        u32 vCAMmask = 0;       /* 32 vCAMs (6105M and better) */
1406        unsigned int i = 0;
1407
1408        for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1409                rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1410                vCAMmask |= 1 << i;
1411                if (++i >= VCAM_SIZE)
1412                        break;
1413        }
1414        rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1415}
1416
1417static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1418{
1419        struct rhine_private *rp = netdev_priv(dev);
1420
1421        spin_lock_bh(&rp->lock);
1422        set_bit(vid, rp->active_vlans);
1423        rhine_update_vcam(dev);
1424        spin_unlock_bh(&rp->lock);
1425        return 0;
1426}
1427
1428static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1429{
1430        struct rhine_private *rp = netdev_priv(dev);
1431
1432        spin_lock_bh(&rp->lock);
1433        clear_bit(vid, rp->active_vlans);
1434        rhine_update_vcam(dev);
1435        spin_unlock_bh(&rp->lock);
1436        return 0;
1437}
1438
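/* Program the chip for operation: station address, FIFO thresholds, ring
 * base addresses, Rx mode, CAM filters (6105M and later), and the interrupt
 * mask; then start the transmitter/receiver and check the media state. */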
1439static void init_registers(struct net_device *dev)
1440{
1441        struct rhine_private *rp = netdev_priv(dev);
1442        void __iomem *ioaddr = rp->base;
1443        int i;
1444
1445        for (i = 0; i < 6; i++)
1446                iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1447
1448        /* Initialize other registers. */
1449        iowrite16(0x0006, ioaddr + PCIBusConfig);       /* Tune configuration??? */
1450        /* Configure initial FIFO thresholds. */
1451        iowrite8(0x20, ioaddr + TxConfig);
1452        rp->tx_thresh = 0x20;
1453        rp->rx_thresh = 0x60;           /* Written in rhine_set_rx_mode(). */
1454
1455        iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1456        iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1457
1458        rhine_set_rx_mode(dev);
1459
1460        if (rp->pdev->revision >= VT6105M)
1461                rhine_init_cam_filter(dev);
1462
1463        napi_enable(&rp->napi);
1464
1465        iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1466
1467        iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1468               ioaddr + ChipCmd);
1469        rhine_check_media(dev, 1);
1470}
1471
1472/* Enable MII link status auto-polling (required for IntrLinkChange) */
1473static void rhine_enable_linkmon(struct rhine_private *rp)
1474{
1475        void __iomem *ioaddr = rp->base;
1476
1477        iowrite8(0, ioaddr + MIICmd);
1478        iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1479        iowrite8(0x80, ioaddr + MIICmd);
1480
1481        rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1482
1483        iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1484}
1485
1486/* Disable MII link status auto-polling (required for MDIO access) */
1487static void rhine_disable_linkmon(struct rhine_private *rp)
1488{
1489        void __iomem *ioaddr = rp->base;
1490
1491        iowrite8(0, ioaddr + MIICmd);
1492
1493        if (rp->quirks & rqRhineI) {
1494                iowrite8(0x01, ioaddr + MIIRegAddr);    /* MII_BMSR */
1495
1496                /* Can be called from ISR. Evil. */
1497                mdelay(1);
1498
1499                /* 0x80 must be set immediately before turning it off */
1500                iowrite8(0x80, ioaddr + MIICmd);
1501
1502                rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1503
1504                /* Heh. Now clear 0x80 again. */
1505                iowrite8(0, ioaddr + MIICmd);
1506        } else {
1507                rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1508        }
1509}
1510
1511/* Read and write over the MII Management Data I/O (MDIO) interface. */
1512
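    /*
     * Auto-polling is paused around each access because it would otherwise
     * race with a manually triggered MDIO cycle.  Typical use mirrors
     * rhine_open(), e.g.:
     *
     *     bmsr = mdio_read(dev, rp->mii_if.phy_id, MII_BMSR);
     */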
1513static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1514{
1515        struct rhine_private *rp = netdev_priv(dev);
1516        void __iomem *ioaddr = rp->base;
1517        int result;
1518
1519        rhine_disable_linkmon(rp);
1520
1521        /* rhine_disable_linkmon already cleared MIICmd */
1522        iowrite8(phy_id, ioaddr + MIIPhyAddr);
1523        iowrite8(regnum, ioaddr + MIIRegAddr);
1524        iowrite8(0x40, ioaddr + MIICmd);                /* Trigger read */
1525        rhine_wait_bit_low(rp, MIICmd, 0x40);
1526        result = ioread16(ioaddr + MIIData);
1527
1528        rhine_enable_linkmon(rp);
1529        return result;
1530}
1531
1532static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1533{
1534        struct rhine_private *rp = netdev_priv(dev);
1535        void __iomem *ioaddr = rp->base;
1536
1537        rhine_disable_linkmon(rp);
1538
1539        /* rhine_disable_linkmon already cleared MIICmd */
1540        iowrite8(phy_id, ioaddr + MIIPhyAddr);
1541        iowrite8(regnum, ioaddr + MIIRegAddr);
1542        iowrite16(value, ioaddr + MIIData);
1543        iowrite8(0x20, ioaddr + MIICmd);                /* Trigger write */
1544        rhine_wait_bit_low(rp, MIICmd, 0x20);
1545
1546        rhine_enable_linkmon(rp);
1547}
1548
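    /*
     * reset_task and slow_event_task re-check task_enable under task_lock
     * before touching the device, so after rhine_task_disable() has cleared
     * the flag and synced both works, neither task can run against a closed
     * or suspended device.
     */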
1549static void rhine_task_disable(struct rhine_private *rp)
1550{
1551        mutex_lock(&rp->task_lock);
1552        rp->task_enable = false;
1553        mutex_unlock(&rp->task_lock);
1554
1555        cancel_work_sync(&rp->slow_event_task);
1556        cancel_work_sync(&rp->reset_task);
1557}
1558
1559static void rhine_task_enable(struct rhine_private *rp)
1560{
1561        mutex_lock(&rp->task_lock);
1562        rp->task_enable = true;
1563        mutex_unlock(&rp->task_lock);
1564}
1565
1566static int rhine_open(struct net_device *dev)
1567{
1568        struct rhine_private *rp = netdev_priv(dev);
1569        void __iomem *ioaddr = rp->base;
1570        int rc;
1571
1572        rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1573                        dev);
1574        if (rc)
1575                return rc;
1576
1577        netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1578
1579        rc = alloc_ring(dev);
1580        if (rc) {
1581                free_irq(rp->pdev->irq, dev);
1582                return rc;
1583        }
1584        alloc_rbufs(dev);
1585        alloc_tbufs(dev);
1586        rhine_chip_reset(dev);
1587        rhine_task_enable(rp);
1588        init_registers(dev);
1589
1590        netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1591                  __func__, ioread16(ioaddr + ChipCmd),
1592                  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1593
1594        netif_start_queue(dev);
1595
1596        return 0;
1597}
1598
1599static void rhine_reset_task(struct work_struct *work)
1600{
1601        struct rhine_private *rp = container_of(work, struct rhine_private,
1602                                                reset_task);
1603        struct net_device *dev = rp->dev;
1604
1605        mutex_lock(&rp->task_lock);
1606
1607        if (!rp->task_enable)
1608                goto out_unlock;
1609
1610        napi_disable(&rp->napi);
1611        spin_lock_bh(&rp->lock);
1612
1613        /* clear all descriptors */
1614        free_tbufs(dev);
1615        free_rbufs(dev);
1616        alloc_tbufs(dev);
1617        alloc_rbufs(dev);
1618
1619        /* Reinitialize the hardware. */
1620        rhine_chip_reset(dev);
1621        init_registers(dev);
1622
1623        spin_unlock_bh(&rp->lock);
1624
1625        dev->trans_start = jiffies; /* prevent tx timeout */
1626        dev->stats.tx_errors++;
1627        netif_wake_queue(dev);
1628
1629out_unlock:
1630        mutex_unlock(&rp->task_lock);
1631}
1632
1633static void rhine_tx_timeout(struct net_device *dev)
1634{
1635        struct rhine_private *rp = netdev_priv(dev);
1636        void __iomem *ioaddr = rp->base;
1637
1638        netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1639                    ioread16(ioaddr + IntrStatus),
1640                    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1641
1642        schedule_work(&rp->reset_task);
1643}
1644
1645static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1646                                  struct net_device *dev)
1647{
1648        struct rhine_private *rp = netdev_priv(dev);
1649        void __iomem *ioaddr = rp->base;
1650        unsigned entry;
1651
1652        /* Caution: the write order is important here, set the field
1653           with the "ownership" bits last. */
1654
1655        /* Calculate the next Tx descriptor entry. */
1656        entry = rp->cur_tx % TX_RING_SIZE;
1657
1658        if (skb_padto(skb, ETH_ZLEN))
1659                return NETDEV_TX_OK;
1660
1661        rp->tx_skbuff[entry] = skb;
1662
1663        if ((rp->quirks & rqRhineI) &&
1664            (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1665                /* Must use alignment buffer. */
1666                if (skb->len > PKT_BUF_SZ) {
1667                        /* packet too long, drop it */
1668                        dev_kfree_skb(skb);
1669                        rp->tx_skbuff[entry] = NULL;
1670                        dev->stats.tx_dropped++;
1671                        return NETDEV_TX_OK;
1672                }
1673
1674                /* Padding is not copied and so must be redone. */
1675                skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1676                if (skb->len < ETH_ZLEN)
1677                        memset(rp->tx_buf[entry] + skb->len, 0,
1678                               ETH_ZLEN - skb->len);
1679                rp->tx_skbuff_dma[entry] = 0;
1680                rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1681                                                      (rp->tx_buf[entry] -
1682                                                       rp->tx_bufs));
1683        } else {
1684                rp->tx_skbuff_dma[entry] =
1685                        pci_map_single(rp->pdev, skb->data, skb->len,
1686                                       PCI_DMA_TODEVICE);
1687                rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1688        }
1689
1690        rp->tx_ring[entry].desc_length =
1691                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1692
1693        if (unlikely(vlan_tx_tag_present(skb))) {
1694                rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1695                /* request tagging */
1696                rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1697        } else {
1698                rp->tx_ring[entry].tx_status = 0;
1699        }
1700
1701        /* Ensure all other descriptor fields are visible before setting DescOwn. */
1702        wmb();
1703        rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1704        wmb();
1705
1706        rp->cur_tx++;
1707
1708        /* Non-x86 Todo: explicitly flush cache lines here. */
1709
1710        if (vlan_tx_tag_present(skb))
1711                /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1712                BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1713
1714        /* Wake the potentially-idle transmit channel */
1715        iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1716               ioaddr + ChipCmd1);
1717        IOSYNC;
1718
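            /*
             * Keep at most TX_QUEUE_LEN descriptors in flight even though the
             * ring holds TX_RING_SIZE entries; rhine_tx() wakes the queue
             * again once enough of them have been reclaimed.
             */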
1719        if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1720                netif_stop_queue(dev);
1721
1722        netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1723                  rp->cur_tx - 1, entry);
1724
1725        return NETDEV_TX_OK;
1726}
1727
1728static void rhine_irq_disable(struct rhine_private *rp)
1729{
1730        iowrite16(0x0000, rp->base + IntrEnable);
1731        mmiowb();
1732}
1733
1734/* The interrupt handler itself does minimal work: it masks further
1735   interrupts and schedules NAPI, which performs the Rx and Tx processing. */
1736static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1737{
1738        struct net_device *dev = dev_instance;
1739        struct rhine_private *rp = netdev_priv(dev);
1740        u32 status;
1741        int handled = 0;
1742
1743        status = rhine_get_events(rp);
1744
1745        netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1746
1747        if (status & RHINE_EVENT) {
1748                handled = 1;
1749
1750                rhine_irq_disable(rp);
1751                napi_schedule(&rp->napi);
1752        }
1753
1754        if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1755                netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1756                          status);
1757        }
1758
1759        return IRQ_RETVAL(handled);
1760}
1761
1762/* This routine is logically part of the interrupt handler, but isolated
1763   for clarity. */
1764static void rhine_tx(struct net_device *dev)
1765{
1766        struct rhine_private *rp = netdev_priv(dev);
1767        int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1768
1769        /* find and cleanup dirty tx descriptors */
1770        while (rp->dirty_tx != rp->cur_tx) {
1771                txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1772                netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1773                          entry, txstatus);
1774                if (txstatus & DescOwn)
1775                        break;
1776                if (txstatus & 0x8000) {
1777                        netif_dbg(rp, tx_done, dev,
1778                                  "Transmit error, Tx status %08x\n", txstatus);
1779                        dev->stats.tx_errors++;
1780                        if (txstatus & 0x0400)
1781                                dev->stats.tx_carrier_errors++;
1782                        if (txstatus & 0x0200)
1783                                dev->stats.tx_window_errors++;
1784                        if (txstatus & 0x0100)
1785                                dev->stats.tx_aborted_errors++;
1786                        if (txstatus & 0x0080)
1787                                dev->stats.tx_heartbeat_errors++;
1788                        if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1789                            (txstatus & 0x0800) || (txstatus & 0x1000)) {
1790                                dev->stats.tx_fifo_errors++;
1791                                rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1792                                break; /* Keep the skb - we try again */
1793                        }
1794                        /* Transmitter restarted in 'abnormal' handler. */
1795                } else {
1796                        if (rp->quirks & rqRhineI)
1797                                dev->stats.collisions += (txstatus >> 3) & 0x0F;
1798                        else
1799                                dev->stats.collisions += txstatus & 0x0F;
1800                        netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1801                                  (txstatus >> 3) & 0xF, txstatus & 0xF);
1802
1803                        u64_stats_update_begin(&rp->tx_stats.syncp);
1804                        rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1805                        rp->tx_stats.packets++;
1806                        u64_stats_update_end(&rp->tx_stats.syncp);
1807                }
1808                /* Free the original skb. */
1809                if (rp->tx_skbuff_dma[entry]) {
1810                        pci_unmap_single(rp->pdev,
1811                                         rp->tx_skbuff_dma[entry],
1812                                         rp->tx_skbuff[entry]->len,
1813                                         PCI_DMA_TODEVICE);
1814                }
1815                dev_kfree_skb(rp->tx_skbuff[entry]);
1816                rp->tx_skbuff[entry] = NULL;
1817                entry = (++rp->dirty_tx) % TX_RING_SIZE;
1818        }
1819        if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1820                netif_wake_queue(dev);
1821}
1822
1823/**
1824 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1825 * @skb: pointer to sk_buff
1826 * @data_size: used data area of the buffer including CRC
1827 *
1828 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1829 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1830 * aligned following the CRC.
1831 */
1832static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1833{
1834        u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1835        return be16_to_cpup((__be16 *)trailer);
1836}
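    /*
     * Worked example: with data_size = 66 (62 data bytes plus 4 bytes CRC)
     * the used area is rounded up to 68 bytes, the TPID occupies offsets
     * 68-69 and the TCI returned above sits at offsets 70-71.
     */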
1837
1838/* Process up to limit frames from receive ring */
1839static int rhine_rx(struct net_device *dev, int limit)
1840{
1841        struct rhine_private *rp = netdev_priv(dev);
1842        int count;
1843        int entry = rp->cur_rx % RX_RING_SIZE;
1844
1845        netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1846                  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1847
1848        /* If EOP is set on the next entry, it's a new packet. Send it up. */
1849        for (count = 0; count < limit; ++count) {
1850                struct rx_desc *desc = rp->rx_head_desc;
1851                u32 desc_status = le32_to_cpu(desc->rx_status);
1852                u32 desc_length = le32_to_cpu(desc->desc_length);
1853                int data_size = desc_status >> 16;
1854
1855                if (desc_status & DescOwn)
1856                        break;
1857
1858                netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1859                          desc_status);
1860
1861                if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1862                        if ((desc_status & RxWholePkt) != RxWholePkt) {
1863                                netdev_warn(dev,
1864                                            "Oversized Ethernet frame spanned multiple buffers, "
1865                                            "entry %#x length %d status %08x!\n",
1866                                            entry, data_size,
1867                                            desc_status);
1868                                netdev_warn(dev,
1869                                            "Oversized Ethernet frame %p vs %p\n",
1870                                            rp->rx_head_desc,
1871                                            &rp->rx_ring[entry]);
1872                                dev->stats.rx_length_errors++;
1873                        } else if (desc_status & RxErr) {
1874                                /* There was an error. */
1875                                netif_dbg(rp, rx_err, dev,
1876                                          "%s() Rx error %08x\n", __func__,
1877                                          desc_status);
1878                                dev->stats.rx_errors++;
1879                                if (desc_status & 0x0030)
1880                                        dev->stats.rx_length_errors++;
1881                                if (desc_status & 0x0048)
1882                                        dev->stats.rx_fifo_errors++;
1883                                if (desc_status & 0x0004)
1884                                        dev->stats.rx_frame_errors++;
1885                                if (desc_status & 0x0002) {
1886                                        /* this can also be updated outside the interrupt handler */
1887                                        spin_lock(&rp->lock);
1888                                        dev->stats.rx_crc_errors++;
1889                                        spin_unlock(&rp->lock);
1890                                }
1891                        }
1892                } else {
1893                        struct sk_buff *skb = NULL;
1894                        /* Length should omit the CRC */
1895                        int pkt_len = data_size - 4;
1896                        u16 vlan_tci = 0;
1897
1898                        /* Check if the packet is long enough to accept without
1899                           copying to a minimally-sized skbuff. */
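                            /*
                             * Copying small frames leaves the original DMA
                             * buffer in place for reuse; larger frames are
                             * passed up as-is and the ring slot is refilled
                             * in the loop below.
                             */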
1900                        if (pkt_len < rx_copybreak)
1901                                skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1902                        if (skb) {
1903                                pci_dma_sync_single_for_cpu(rp->pdev,
1904                                                            rp->rx_skbuff_dma[entry],
1905                                                            rp->rx_buf_sz,
1906                                                            PCI_DMA_FROMDEVICE);
1907
1908                                skb_copy_to_linear_data(skb,
1909                                                 rp->rx_skbuff[entry]->data,
1910                                                 pkt_len);
1911                                skb_put(skb, pkt_len);
1912                                pci_dma_sync_single_for_device(rp->pdev,
1913                                                               rp->rx_skbuff_dma[entry],
1914                                                               rp->rx_buf_sz,
1915                                                               PCI_DMA_FROMDEVICE);
1916                        } else {
1917                                skb = rp->rx_skbuff[entry];
1918                                if (skb == NULL) {
1919                                        netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1920                                        break;
1921                                }
1922                                rp->rx_skbuff[entry] = NULL;
1923                                skb_put(skb, pkt_len);
1924                                pci_unmap_single(rp->pdev,
1925                                                 rp->rx_skbuff_dma[entry],
1926                                                 rp->rx_buf_sz,
1927                                                 PCI_DMA_FROMDEVICE);
1928                        }
1929
1930                        if (unlikely(desc_length & DescTag))
1931                                vlan_tci = rhine_get_vlan_tci(skb, data_size);
1932
1933                        skb->protocol = eth_type_trans(skb, dev);
1934
1935                        if (unlikely(desc_length & DescTag))
1936                                __vlan_hwaccel_put_tag(skb, vlan_tci);
1937                        netif_receive_skb(skb);
1938
1939                        u64_stats_update_begin(&rp->rx_stats.syncp);
1940                        rp->rx_stats.bytes += pkt_len;
1941                        rp->rx_stats.packets++;
1942                        u64_stats_update_end(&rp->rx_stats.syncp);
1943                }
1944                entry = (++rp->cur_rx) % RX_RING_SIZE;
1945                rp->rx_head_desc = &rp->rx_ring[entry];
1946        }
1947
1948        /* Refill the Rx ring buffers. */
1949        for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1950                struct sk_buff *skb;
1951                entry = rp->dirty_rx % RX_RING_SIZE;
1952                if (rp->rx_skbuff[entry] == NULL) {
1953                        skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1954                        rp->rx_skbuff[entry] = skb;
1955                        if (skb == NULL)
1956                                break;  /* Better luck next round. */
1957                        rp->rx_skbuff_dma[entry] =
1958                                pci_map_single(rp->pdev, skb->data,
1959                                               rp->rx_buf_sz,
1960                                               PCI_DMA_FROMDEVICE);
1961                        rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1962                }
1963                rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1964        }
1965
1966        return count;
1967}
1968
1969static void rhine_restart_tx(struct net_device *dev)
    {
1970        struct rhine_private *rp = netdev_priv(dev);
1971        void __iomem *ioaddr = rp->base;
1972        int entry = rp->dirty_tx % TX_RING_SIZE;
1973        u32 intr_status;
1974
1975        /*
1976         * If new errors occurred, we need to sort them out before doing Tx.
1977         * In that case the interrupt handler will bring us back here soon anyway.
1978         */
1979        intr_status = rhine_get_events(rp);
1980
1981        if ((intr_status & IntrTxErrSummary) == 0) {
1982
1983                /* We know better than the chip where it should continue. */
1984                iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1985                       ioaddr + TxRingPtr);
1986
1987                iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1988                       ioaddr + ChipCmd);
1989
1990                if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1991                        /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1992                        BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1993
1994                iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1995                       ioaddr + ChipCmd1);
1996                IOSYNC;
1997        } else {
1999                /* This should never happen */
2000                netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2001                           intr_status);
2002        }
2003
2004}
2005
2006static void rhine_slow_event_task(struct work_struct *work)
2007{
2008        struct rhine_private *rp =
2009                container_of(work, struct rhine_private, slow_event_task);
2010        struct net_device *dev = rp->dev;
2011        u32 intr_status;
2012
2013        mutex_lock(&rp->task_lock);
2014
2015        if (!rp->task_enable)
2016                goto out_unlock;
2017
2018        intr_status = rhine_get_events(rp);
2019        rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2020
2021        if (intr_status & IntrLinkChange)
2022                rhine_check_media(dev, 0);
2023
2024        if (intr_status & IntrPCIErr)
2025                netif_warn(rp, hw, dev, "PCI error\n");
2026
2027        iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2028
2029out_unlock:
2030        mutex_unlock(&rp->task_lock);
2031}
2032
2033static struct rtnl_link_stats64 *
2034rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2035{
2036        struct rhine_private *rp = netdev_priv(dev);
2037        unsigned int start;
2038
2039        spin_lock_bh(&rp->lock);
2040        rhine_update_rx_crc_and_missed_errord(rp);
2041        spin_unlock_bh(&rp->lock);
2042
2043        netdev_stats_to_stats64(stats, &dev->stats);
2044
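            /*
             * The syncp sequence counts guard against reading a torn or
             * half-updated packet/byte counter pair on 32-bit hosts; retry
             * until a consistent snapshot is seen.
             */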
2045        do {
2046                start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
2047                stats->rx_packets = rp->rx_stats.packets;
2048                stats->rx_bytes = rp->rx_stats.bytes;
2049        } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
2050
2051        do {
2052                start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
2053                stats->tx_packets = rp->tx_stats.packets;
2054                stats->tx_bytes = rp->tx_stats.bytes;
2055        } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
2056
2057        return stats;
2058}
2059
2060static void rhine_set_rx_mode(struct net_device *dev)
2061{
2062        struct rhine_private *rp = netdev_priv(dev);
2063        void __iomem *ioaddr = rp->base;
2064        u32 mc_filter[2];       /* Multicast hash filter */
2065        u8 rx_mode = 0x0C;      /* Note: 0x02=accept runt, 0x01=accept errs */
2066        struct netdev_hw_addr *ha;
2067
2068        if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
2069                rx_mode = 0x1C;
2070                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2071                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2072        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2073                   (dev->flags & IFF_ALLMULTI)) {
2074                /* Too many to match, or accept all multicasts. */
2075                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2076                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2077        } else if (rp->pdev->revision >= VT6105M) {
2078                int i = 0;
2079                u32 mCAMmask = 0;       /* 32 mCAMs (6105M and better) */
2080                netdev_for_each_mc_addr(ha, dev) {
2081                        if (i == MCAM_SIZE)
2082                                break;
2083                        rhine_set_cam(ioaddr, i, ha->addr);
2084                        mCAMmask |= 1 << i;
2085                        i++;
2086                }
2087                rhine_set_cam_mask(ioaddr, mCAMmask);
2088        } else {
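                    /*
                     * Older chips fall back to the 64-bit multicast hash: the
                     * top six bits of each address's Ethernet CRC select one
                     * of 64 filter bits split across MulticastFilter0/1.
                     */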
2089                memset(mc_filter, 0, sizeof(mc_filter));
2090                netdev_for_each_mc_addr(ha, dev) {
2091                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2092
2093                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2094                }
2095                iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2096                iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2097        }
2098        /* enable/disable VLAN receive filtering */
2099        if (rp->pdev->revision >= VT6105M) {
2100                if (dev->flags & IFF_PROMISC)
2101                        BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2102                else
2103                        BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2104        }
2105        BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2106}
2107
2108static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2109{
2110        struct rhine_private *rp = netdev_priv(dev);
2111
2112        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2113        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2114        strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2115}
2116
2117static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2118{
2119        struct rhine_private *rp = netdev_priv(dev);
2120        int rc;
2121
2122        mutex_lock(&rp->task_lock);
2123        rc = mii_ethtool_gset(&rp->mii_if, cmd);
2124        mutex_unlock(&rp->task_lock);
2125
2126        return rc;
2127}
2128
2129static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2130{
2131        struct rhine_private *rp = netdev_priv(dev);
2132        int rc;
2133
2134        mutex_lock(&rp->task_lock);
2135        rc = mii_ethtool_sset(&rp->mii_if, cmd);
2136        rhine_set_carrier(&rp->mii_if);
2137        mutex_unlock(&rp->task_lock);
2138
2139        return rc;
2140}
2141
2142static int netdev_nway_reset(struct net_device *dev)
2143{
2144        struct rhine_private *rp = netdev_priv(dev);
2145
2146        return mii_nway_restart(&rp->mii_if);
2147}
2148
2149static u32 netdev_get_link(struct net_device *dev)
2150{
2151        struct rhine_private *rp = netdev_priv(dev);
2152
2153        return mii_link_ok(&rp->mii_if);
2154}
2155
2156static u32 netdev_get_msglevel(struct net_device *dev)
2157{
2158        struct rhine_private *rp = netdev_priv(dev);
2159
2160        return rp->msg_enable;
2161}
2162
2163static void netdev_set_msglevel(struct net_device *dev, u32 value)
2164{
2165        struct rhine_private *rp = netdev_priv(dev);
2166
2167        rp->msg_enable = value;
2168}
2169
2170static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2171{
2172        struct rhine_private *rp = netdev_priv(dev);
2173
2174        if (!(rp->quirks & rqWOL))
2175                return;
2176
2177        spin_lock_irq(&rp->lock);
2178        wol->supported = WAKE_PHY | WAKE_MAGIC |
2179                         WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;  /* Untested */
2180        wol->wolopts = rp->wolopts;
2181        spin_unlock_irq(&rp->lock);
2182}
2183
2184static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2185{
2186        struct rhine_private *rp = netdev_priv(dev);
2187        u32 support = WAKE_PHY | WAKE_MAGIC |
2188                      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;     /* Untested */
2189
2190        if (!(rp->quirks & rqWOL))
2191                return -EINVAL;
2192
2193        if (wol->wolopts & ~support)
2194                return -EINVAL;
2195
2196        spin_lock_irq(&rp->lock);
2197        rp->wolopts = wol->wolopts;
2198        spin_unlock_irq(&rp->lock);
2199
2200        return 0;
2201}
2202
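    /*
     * Rough mapping to ethtool(8) usage (illustrative, not exhaustive):
     *   get_drvinfo                 ethtool -i ethX
     *   get_settings/set_settings   ethtool ethX / ethtool -s ethX speed ...
     *   nway_reset                  ethtool -r ethX
     *   get_msglevel/set_msglevel   ethtool -s ethX msglvl N
     *   get_wol/set_wol             ethtool ethX / ethtool -s ethX wol g
     */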
2203static const struct ethtool_ops netdev_ethtool_ops = {
2204        .get_drvinfo            = netdev_get_drvinfo,
2205        .get_settings           = netdev_get_settings,
2206        .set_settings           = netdev_set_settings,
2207        .nway_reset             = netdev_nway_reset,
2208        .get_link               = netdev_get_link,
2209        .get_msglevel           = netdev_get_msglevel,
2210        .set_msglevel           = netdev_set_msglevel,
2211        .get_wol                = rhine_get_wol,
2212        .set_wol                = rhine_set_wol,
2213};
2214
2215static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2216{
2217        struct rhine_private *rp = netdev_priv(dev);
2218        int rc;
2219
2220        if (!netif_running(dev))
2221                return -EINVAL;
2222
2223        mutex_lock(&rp->task_lock);
2224        rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2225        rhine_set_carrier(&rp->mii_if);
2226        mutex_unlock(&rp->task_lock);
2227
2228        return rc;
2229}
2230
2231static int rhine_close(struct net_device *dev)
2232{
2233        struct rhine_private *rp = netdev_priv(dev);
2234        void __iomem *ioaddr = rp->base;
2235
2236        rhine_task_disable(rp);
2237        napi_disable(&rp->napi);
2238        netif_stop_queue(dev);
2239
2240        netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2241                  ioread16(ioaddr + ChipCmd));
2242
2243        /* Switch to loopback mode to avoid hardware races. */
2244        iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2245
2246        rhine_irq_disable(rp);
2247
2248        /* Stop the chip's Tx and Rx processes. */
2249        iowrite16(CmdStop, ioaddr + ChipCmd);
2250
2251        free_irq(rp->pdev->irq, dev);
2252        free_rbufs(dev);
2253        free_tbufs(dev);
2254        free_ring(dev);
2255
2256        return 0;
2257}
2258
2259
2260static void rhine_remove_one(struct pci_dev *pdev)
2261{
2262        struct net_device *dev = pci_get_drvdata(pdev);
2263        struct rhine_private *rp = netdev_priv(dev);
2264
2265        unregister_netdev(dev);
2266
2267        pci_iounmap(pdev, rp->base);
2268        pci_release_regions(pdev);
2269
2270        free_netdev(dev);
2271        pci_disable_device(pdev);
2272        pci_set_drvdata(pdev, NULL);
2273}
2274
2275static void rhine_shutdown(struct pci_dev *pdev)
2276{
2277        struct net_device *dev = pci_get_drvdata(pdev);
2278        struct rhine_private *rp = netdev_priv(dev);
2279        void __iomem *ioaddr = rp->base;
2280
2281        if (!(rp->quirks & rqWOL))
2282                return; /* Nothing to do for non-WOL adapters */
2283
2284        rhine_power_init(dev);
2285
2286        /* Make sure we use pattern 0, 1 and not 4, 5 */
2287        if (rp->quirks & rq6patterns)
2288                iowrite8(0x04, ioaddr + WOLcgClr);
2289
2290        spin_lock(&rp->lock);
2291
2292        if (rp->wolopts & WAKE_MAGIC) {
2293                iowrite8(WOLmagic, ioaddr + WOLcrSet);
2294                /*
2295                 * Turn EEPROM-controlled wake-up back on -- some hardware may
2296                 * not cooperate otherwise.
2297                 */
2298                iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2299        }
2300
2301        if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2302                iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2303
2304        if (rp->wolopts & WAKE_PHY)
2305                iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2306
2307        if (rp->wolopts & WAKE_UCAST)
2308                iowrite8(WOLucast, ioaddr + WOLcrSet);
2309
2310        if (rp->wolopts) {
2311                /* Enable legacy WOL (for old motherboards) */
2312                iowrite8(0x01, ioaddr + PwcfgSet);
2313                iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2314        }
2315
2316        spin_unlock(&rp->lock);
2317
2318        if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2319                iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2320
2321                pci_wake_from_d3(pdev, true);
2322                pci_set_power_state(pdev, PCI_D3hot);
2323        }
2324}
2325
2326#ifdef CONFIG_PM_SLEEP
2327static int rhine_suspend(struct device *device)
2328{
2329        struct pci_dev *pdev = to_pci_dev(device);
2330        struct net_device *dev = pci_get_drvdata(pdev);
2331        struct rhine_private *rp = netdev_priv(dev);
2332
2333        if (!netif_running(dev))
2334                return 0;
2335
2336        rhine_task_disable(rp);
2337        rhine_irq_disable(rp);
2338        napi_disable(&rp->napi);
2339
2340        netif_device_detach(dev);
2341
2342        rhine_shutdown(pdev);
2343
2344        return 0;
2345}
2346
2347static int rhine_resume(struct device *device)
2348{
2349        struct pci_dev *pdev = to_pci_dev(device);
2350        struct net_device *dev = pci_get_drvdata(pdev);
2351        struct rhine_private *rp = netdev_priv(dev);
2352
2353        if (!netif_running(dev))
2354                return 0;
2355
2356#ifdef USE_MMIO
2357        enable_mmio(rp->pioaddr, rp->quirks);
2358#endif
2359        rhine_power_init(dev);
2360        free_tbufs(dev);
2361        free_rbufs(dev);
2362        alloc_tbufs(dev);
2363        alloc_rbufs(dev);
2364        rhine_task_enable(rp);
2365        spin_lock_bh(&rp->lock);
2366        init_registers(dev);
2367        spin_unlock_bh(&rp->lock);
2368
2369        netif_device_attach(dev);
2370
2371        return 0;
2372}
2373
2374static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2375#define RHINE_PM_OPS    (&rhine_pm_ops)
2376
2377#else
2378
2379#define RHINE_PM_OPS    NULL
2380
2381#endif /* !CONFIG_PM_SLEEP */
2382
2383static struct pci_driver rhine_driver = {
2384        .name           = DRV_NAME,
2385        .id_table       = rhine_pci_tbl,
2386        .probe          = rhine_init_one,
2387        .remove         = rhine_remove_one,
2388        .shutdown       = rhine_shutdown,
2389        .driver.pm      = RHINE_PM_OPS,
2390};
2391
2392static struct dmi_system_id __initdata rhine_dmi_table[] = {
2393        {
2394                .ident = "EPIA-M",
2395                .matches = {
2396                        DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2397                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2398                },
2399        },
2400        {
2401                .ident = "KV7",
2402                .matches = {
2403                        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2404                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2405                },
2406        },
2407        { NULL }
2408};
2409
2410static int __init rhine_init(void)
2411{
2412/* when a module, this is printed whether or not devices are found in probe */
2413#ifdef MODULE
2414        pr_info("%s\n", version);
2415#endif
2416        if (dmi_check_system(rhine_dmi_table)) {
2417                /* these BIOSes fail at PXE boot if chip is in D3 */
2418                avoid_D3 = true;
2419                pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2420        } else if (avoid_D3) {
2421                pr_info("avoid_D3 set\n");
2422        }
2423
2424        return pci_register_driver(&rhine_driver);
2425}
2426
2427
2428static void __exit rhine_cleanup(void)
2429{
2430        pci_unregister_driver(&rhine_driver);
2431}
2432
2433
2434module_init(rhine_init);
2435module_exit(rhine_cleanup);
2436