linux/drivers/net/ethernet/via/via-rhine.c
   1/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
   2/*
   3        Written 1998-2001 by Donald Becker.
   4
   5        Current Maintainer: Kevin Brace <kevinbrace@bracecomputerlab.com>
   6
   7        This software may be used and distributed according to the terms of
   8        the GNU General Public License (GPL), incorporated herein by reference.
   9        Drivers based on or derived from this code fall under the GPL and must
  10        retain the authorship, copyright and license notice.  This file is not
  11        a complete program and may only be used when the entire operating
  12        system is licensed under the GPL.
  13
  14        This driver is designed for the VIA VT86C100A Rhine-I.
  15        It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
  16        and management NIC 6105M).
  17
  18        The author may be reached as becker@scyld.com, or C/O
  19        Scyld Computing Corporation
  20        410 Severn Ave., Suite 210
  21        Annapolis MD 21403
  22
  23
  24        This driver contains some changes from the original Donald Becker
  25        version. He may or may not be interested in bug reports on this
  26        code. You can find his versions at:
  27        http://www.scyld.com/network/via-rhine.html
  28        [link no longer provides useful info -jgarzik]
  29
  30*/
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#define DRV_NAME        "via-rhine"
  35
  36#include <linux/types.h>
  37
  38/* A few user-configurable values.
  39   These may be modified when a driver module is loaded. */
  40static int debug = 0;
  41#define RHINE_MSG_DEFAULT \
  42        (0x0000)
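/*
 * Note: 'debug' is handed to netif_msg_init() together with RHINE_MSG_DEFAULT
 * in rhine_init_one_common() below.  netif_msg_init() treats it as a message
 * level and enables the lowest N NETIF_MSG_* bits, so e.g. loading the module
 * with debug=3 enables driver, probe and link messages.
 */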
  43
  44/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  45   Setting to > 1518 effectively disables this feature. */
  46#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
  47        defined(CONFIG_SPARC) || defined(__ia64__) ||              \
  48        defined(__sh__) || defined(__mips__)
  49static int rx_copybreak = 1518;
  50#else
  51static int rx_copybreak;
  52#endif
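/*
 * For example, rx_copybreak=256 makes rhine_rx() copy any frame shorter than
 * 256 bytes into a freshly allocated skb and recycle the original ring buffer;
 * larger frames are handed up without copying.  See "IIIb/c. Transmit/Receive
 * Structure" in the Theory of Operation below.
 */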
  53
  54/* Work-around for broken BIOSes: they are unable to get the chip back out of
  55   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
  56static bool avoid_D3;
  57
  58/*
  59 * In case you are looking for 'options[]' or 'full_duplex[]', they
  60 * are gone. Use ethtool(8) instead.
  61 */
  62
  63/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  64   The Rhine has a 64 element 8390-like hash table. */
  65static const int multicast_filter_limit = 32;
  66
  67
  68/* Operational parameters that are set at compile time. */
  69
  70/* Keep the ring sizes a power of two for compile efficiency.
  71 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  72 * Making the Tx ring too large decreases the effectiveness of channel
  73 * bonding and packet priority.
  74 * With BQL support, we can increase TX ring safely.
  75 * There are no ill effects from too-large receive rings.
  76 */
  77#define TX_RING_SIZE    64
  78#define TX_QUEUE_LEN    (TX_RING_SIZE - 6)      /* Limit ring entries actually used. */
  79#define RX_RING_SIZE    64
  80
  81/* Operational parameters that usually are not changed. */
  82
  83/* Time in jiffies before concluding the transmitter is hung. */
  84#define TX_TIMEOUT      (2*HZ)
  85
  86#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer.*/
  87
  88#include <linux/module.h>
  89#include <linux/moduleparam.h>
  90#include <linux/kernel.h>
  91#include <linux/string.h>
  92#include <linux/timer.h>
  93#include <linux/errno.h>
  94#include <linux/ioport.h>
  95#include <linux/interrupt.h>
  96#include <linux/pci.h>
  97#include <linux/of_device.h>
  98#include <linux/of_irq.h>
  99#include <linux/platform_device.h>
 100#include <linux/dma-mapping.h>
 101#include <linux/netdevice.h>
 102#include <linux/etherdevice.h>
 103#include <linux/skbuff.h>
 104#include <linux/init.h>
 105#include <linux/delay.h>
 106#include <linux/mii.h>
 107#include <linux/ethtool.h>
 108#include <linux/crc32.h>
 109#include <linux/if_vlan.h>
 110#include <linux/bitops.h>
 111#include <linux/workqueue.h>
 112#include <asm/processor.h>      /* Processor type for cache alignment. */
 113#include <asm/io.h>
 114#include <asm/irq.h>
 115#include <linux/uaccess.h>
 116#include <linux/dmi.h>
 117
 118MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 119MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 120MODULE_LICENSE("GPL");
 121
 122module_param(debug, int, 0);
 123module_param(rx_copybreak, int, 0);
 124module_param(avoid_D3, bool, 0);
 125MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
 126MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 127MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
 128
 129#define MCAM_SIZE       32
 130#define VCAM_SIZE       32
 131
 132/*
 133                Theory of Operation
 134
 135I. Board Compatibility
 136
  137This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
  138controller and also supports the later Rhine-II and Rhine-III chips.
 139
 140II. Board-specific settings
 141
 142Boards with this chip are functional only in a bus-master PCI slot.
 143
 144Many operational settings are loaded from the EEPROM to the Config word at
 145offset 0x78. For most of these settings, this driver assumes that they are
 146correct.
 147If this driver is compiled to use PCI memory space operations the EEPROM
 148must be configured to enable memory ops.
 149
 150III. Driver operation
 151
 152IIIa. Ring buffers
 153
 154This driver uses two statically allocated fixed-size descriptor lists
 155formed into rings by a branch from the final descriptor to the beginning of
 156the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
 157
 158IIIb/c. Transmit/Receive Structure
 159
 160This driver attempts to use a zero-copy receive and transmit scheme.
 161
 162Alas, all data buffers are required to start on a 32 bit boundary, so
 163the driver must often copy transmit packets into bounce buffers.
 164
 165The driver allocates full frame size skbuffs for the Rx ring buffers at
 166open() time and passes the skb->data field to the chip as receive data
 167buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
 168a fresh skbuff is allocated and the frame is copied to the new skbuff.
 169When the incoming frame is larger, the skbuff is passed directly up the
 170protocol stack. Buffers consumed this way are replaced by newly allocated
 171skbuffs in the last phase of rhine_rx().
 172
 173The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 174using a full-sized skbuff for small frames vs. the copying costs of larger
 175frames. New boards are typically used in generously configured machines
 176and the underfilled buffers have negligible impact compared to the benefit of
  177a single allocation size, so the default value of zero (used on most
  178architectures) results in never copying packets. When copying is done, the
  179cost is usually mitigated by using a combined copy/checksum routine.
  180Copying also preloads the cache, which is most useful with small frames.
 181
 182Since the VIA chips are only able to transfer data to buffers on 32 bit
 183boundaries, the IP header at offset 14 in an ethernet frame isn't
 184longword aligned for further processing. Copying these unaligned buffers
 185has the beneficial effect of 16-byte aligning the IP header.
 186
 187IIId. Synchronization
 188
 189The driver runs as two independent, single-threaded flows of control. One
 190is the send-packet routine, which enforces single-threaded use by the
 191netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
 192which is single threaded by the hardware and interrupt handling software.
 193
 194The send packet thread has partial control over the Tx ring. It locks the
 195netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
 196the ring is not available it stops the transmit queue by
 197calling netif_stop_queue.
 198
 199The interrupt handler has exclusive control over the Rx ring and records stats
 200from the Tx ring. After reaping the stats, it marks the Tx queue entry as
 201empty by incrementing the dirty_tx mark. If at least half of the entries in
  202the Tx ring are available, the transmit queue is woken up if it was stopped.
 203
 204IV. Notes
 205
 206IVb. References
 207
 208Preliminary VT86C100A manual from http://www.via.com.tw/
 209http://www.scyld.com/expert/100mbps.html
 210http://www.scyld.com/expert/NWay.html
 211ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
 212ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
 213
 214
 215IVc. Errata
 216
  217The VT86C100A manual is not a reliable source of information.
 218The 3043 chip does not handle unaligned transmit or receive buffers, resulting
 219in significant performance degradation for bounce buffer copies on transmit
 220and unaligned IP headers on receive.
 221The chip does not pad to minimum transmit length.
 222
 223*/
 224
 225
 226/* This table drives the PCI probe routines. It's mostly boilerplate in all
 227   of the drivers, and will likely be provided by some future kernel.
  228   Note the matching code -- each entry matches one VIA (vendor 0x1106)
  229   device ID and accepts any subsystem vendor/device IDs.
  230*/
 231
 232enum rhine_revs {
 233        VT86C100A       = 0x00,
 234        VTunknown0      = 0x20,
 235        VT6102          = 0x40,
 236        VT8231          = 0x50, /* Integrated MAC */
 237        VT8233          = 0x60, /* Integrated MAC */
 238        VT8235          = 0x74, /* Integrated MAC */
 239        VT8237          = 0x78, /* Integrated MAC */
 240        VT8251          = 0x7C, /* Integrated MAC */
 241        VT6105          = 0x80,
 242        VT6105_B0       = 0x83,
 243        VT6105L         = 0x8A,
 244        VT6107          = 0x8C,
 245        VTunknown2      = 0x8E,
 246        VT6105M         = 0x90, /* Management adapter */
 247};
 248
 249enum rhine_quirks {
 250        rqWOL           = 0x0001,       /* Wake-On-LAN support */
 251        rqForceReset    = 0x0002,
 252        rq6patterns     = 0x0040,       /* 6 instead of 4 patterns for WOL */
 253        rqStatusWBRace  = 0x0080,       /* Tx Status Writeback Error possible */
 254        rqRhineI        = 0x0100,       /* See comment below */
 255        rqIntPHY        = 0x0200,       /* Integrated PHY */
 256        rqMgmt          = 0x0400,       /* Management adapter */
 257        rqNeedEnMMIO    = 0x0800,       /* Whether the core needs to be
 258                                         * switched from PIO mode to MMIO
 259                                         * (only applies to PCI)
 260                                         */
 261};
 262/*
 263 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 264 * MMIO as well as for the collision counter and the Tx FIFO underflow
  265 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
 266 */
 267
 268/* Beware of PCI posted writes */
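/* (a dummy read from the chip forces earlier posted writes to complete) */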
 269#define IOSYNC  do { ioread8(ioaddr + StationAddr); } while (0)
 270
 271static const struct pci_device_id rhine_pci_tbl[] = {
 272        { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },    /* VT86C100A */
 273        { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },    /* VT6102 */
 274        { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },    /* 6105{,L,LOM} */
 275        { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },    /* VT6105M */
 276        { }     /* terminate list */
 277};
 278MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 279
 280/* OpenFirmware identifiers for platform-bus devices
 281 * The .data field is currently only used to store quirks
 282 */
 283static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
 284static const struct of_device_id rhine_of_tbl[] = {
 285        { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
 286        { }     /* terminate list */
 287};
 288MODULE_DEVICE_TABLE(of, rhine_of_tbl);
 289
 290/* Offsets to the device registers. */
 291enum register_offsets {
 292        StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
 293        ChipCmd1=0x09, TQWake=0x0A,
 294        IntrStatus=0x0C, IntrEnable=0x0E,
 295        MulticastFilter0=0x10, MulticastFilter1=0x14,
 296        RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
 297        MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
 298        MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
 299        ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
 300        RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
 301        StickyHW=0x83, IntrStatus2=0x84,
 302        CamMask=0x88, CamCon=0x92, CamAddr=0x93,
 303        WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
 304        WOLcrClr1=0xA6, WOLcgClr=0xA7,
 305        PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
 306};
 307
 308/* Bits in ConfigD */
 309enum backoff_bits {
 310        BackOptional=0x01, BackModify=0x02,
 311        BackCaptureEffect=0x04, BackRandom=0x08
 312};
 313
 314/* Bits in the TxConfig (TCR) register */
 315enum tcr_bits {
 316        TCR_PQEN=0x01,
 317        TCR_LB0=0x02,           /* loopback[0] */
 318        TCR_LB1=0x04,           /* loopback[1] */
 319        TCR_OFSET=0x08,
 320        TCR_RTGOPT=0x10,
 321        TCR_RTFT0=0x20,
 322        TCR_RTFT1=0x40,
 323        TCR_RTSF=0x80,
 324};
 325
 326/* Bits in the CamCon (CAMC) register */
 327enum camcon_bits {
 328        CAMC_CAMEN=0x01,
 329        CAMC_VCAMSL=0x02,
 330        CAMC_CAMWR=0x04,
 331        CAMC_CAMRD=0x08,
 332};
 333
 334/* Bits in the PCIBusConfig1 (BCR1) register */
 335enum bcr1_bits {
 336        BCR1_POT0=0x01,
 337        BCR1_POT1=0x02,
 338        BCR1_POT2=0x04,
 339        BCR1_CTFT0=0x08,
 340        BCR1_CTFT1=0x10,
 341        BCR1_CTSF=0x20,
 342        BCR1_TXQNOBK=0x40,      /* for VT6105 */
 343        BCR1_VIDFR=0x80,        /* for VT6105 */
 344        BCR1_MED0=0x40,         /* for VT6102 */
 345        BCR1_MED1=0x80,         /* for VT6102 */
 346};
 347
  348/* Registers checked to verify that MMIO and PIO reads return the same value. */
 349static const int mmio_verify_registers[] = {
 350        RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
 351        0
 352};
 353
 354/* Bits in the interrupt status/mask registers. */
 355enum intr_status_bits {
 356        IntrRxDone      = 0x0001,
 357        IntrTxDone      = 0x0002,
 358        IntrRxErr       = 0x0004,
 359        IntrTxError     = 0x0008,
 360        IntrRxEmpty     = 0x0020,
 361        IntrPCIErr      = 0x0040,
 362        IntrStatsMax    = 0x0080,
 363        IntrRxEarly     = 0x0100,
 364        IntrTxUnderrun  = 0x0210,
 365        IntrRxOverflow  = 0x0400,
 366        IntrRxDropped   = 0x0800,
 367        IntrRxNoBuf     = 0x1000,
 368        IntrTxAborted   = 0x2000,
 369        IntrLinkChange  = 0x4000,
 370        IntrRxWakeUp    = 0x8000,
 371        IntrTxDescRace          = 0x080000,     /* mapped from IntrStatus2 */
 372        IntrNormalSummary       = IntrRxDone | IntrTxDone,
 373        IntrTxErrSummary        = IntrTxDescRace | IntrTxAborted | IntrTxError |
 374                                  IntrTxUnderrun,
 375};
 376
 377/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
 378enum wol_bits {
 379        WOLucast        = 0x10,
 380        WOLmagic        = 0x20,
 381        WOLbmcast       = 0x30,
 382        WOLlnkon        = 0x40,
 383        WOLlnkoff       = 0x80,
 384};
 385
 386/* The Rx and Tx buffer descriptors. */
 387struct rx_desc {
 388        __le32 rx_status;
 389        __le32 desc_length; /* Chain flag, Buffer/frame length */
 390        __le32 addr;
 391        __le32 next_desc;
 392};
 393struct tx_desc {
 394        __le32 tx_status;
 395        __le32 desc_length; /* Chain flag, Tx Config, Frame length */
 396        __le32 addr;
 397        __le32 next_desc;
 398};
 399
 400/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
 401#define TXDESC          0x00e08000
 402
 403enum rx_status_bits {
 404        RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
 405};
 406
 407/* Bits in *_desc.*_status */
 408enum desc_status_bits {
 409        DescOwn=0x80000000
 410};
 411
 412/* Bits in *_desc.*_length */
 413enum desc_length_bits {
 414        DescTag=0x00010000
 415};
 416
 417/* Bits in ChipCmd. */
 418enum chip_cmd_bits {
 419        CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
 420        CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
 421        Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
 422        Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
 423};
 424
 425struct rhine_stats {
 426        u64             packets;
 427        u64             bytes;
 428        struct u64_stats_sync syncp;
 429};
 430
 431struct rhine_private {
 432        /* Bit mask for configured VLAN ids */
 433        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 434
 435        /* Descriptor rings */
 436        struct rx_desc *rx_ring;
 437        struct tx_desc *tx_ring;
 438        dma_addr_t rx_ring_dma;
 439        dma_addr_t tx_ring_dma;
 440
 441        /* The addresses of receive-in-place skbuffs. */
 442        struct sk_buff *rx_skbuff[RX_RING_SIZE];
 443        dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
 444
 445        /* The saved address of a sent-in-place packet/buffer, for later free(). */
 446        struct sk_buff *tx_skbuff[TX_RING_SIZE];
 447        dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
 448
 449        /* Tx bounce buffers (Rhine-I only) */
 450        unsigned char *tx_buf[TX_RING_SIZE];
 451        unsigned char *tx_bufs;
 452        dma_addr_t tx_bufs_dma;
 453
 454        int irq;
 455        long pioaddr;
 456        struct net_device *dev;
 457        struct napi_struct napi;
 458        spinlock_t lock;
 459        struct mutex task_lock;
 460        bool task_enable;
 461        struct work_struct slow_event_task;
 462        struct work_struct reset_task;
 463
 464        u32 msg_enable;
 465
 466        /* Frequently used values: keep some adjacent for cache effect. */
 467        u32 quirks;
 468        unsigned int cur_rx;
 469        unsigned int cur_tx, dirty_tx;
 470        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
 471        struct rhine_stats rx_stats;
 472        struct rhine_stats tx_stats;
 473        u8 wolopts;
 474
 475        u8 tx_thresh, rx_thresh;
 476
 477        struct mii_if_info mii_if;
 478        void __iomem *base;
 479};
 480
 481#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
 482#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
 483#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
 484
 485#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
 486#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
 487#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
 488
 489#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
 490#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
 491#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
 492
 493#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
 494#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
 495#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
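/*
 * Usage example: BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig) performs a
 * read-modify-write that sets TCR_PQEN without disturbing the other TxConfig
 * bits; the _OFF and _SET variants clear or replace masked bits in the same
 * way (see rhine_init_cam_filter() for callers).
 */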
 496
 497
 498static int  mdio_read(struct net_device *dev, int phy_id, int location);
 499static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 500static int  rhine_open(struct net_device *dev);
 501static void rhine_reset_task(struct work_struct *work);
 502static void rhine_slow_event_task(struct work_struct *work);
 503static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
 504static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 505                                  struct net_device *dev);
 506static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
 507static void rhine_tx(struct net_device *dev);
 508static int rhine_rx(struct net_device *dev, int limit);
 509static void rhine_set_rx_mode(struct net_device *dev);
 510static void rhine_get_stats64(struct net_device *dev,
 511                              struct rtnl_link_stats64 *stats);
 512static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 513static const struct ethtool_ops netdev_ethtool_ops;
 514static int  rhine_close(struct net_device *dev);
 515static int rhine_vlan_rx_add_vid(struct net_device *dev,
 516                                 __be16 proto, u16 vid);
 517static int rhine_vlan_rx_kill_vid(struct net_device *dev,
 518                                  __be16 proto, u16 vid);
 519static void rhine_restart_tx(struct net_device *dev);
 520
 521static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
 522{
 523        void __iomem *ioaddr = rp->base;
 524        int i;
 525
 526        for (i = 0; i < 1024; i++) {
 527                bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
 528
 529                if (low ^ has_mask_bits)
 530                        break;
 531                udelay(10);
 532        }
 533        if (i > 64) {
 534                netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
 535                          "count: %04d\n", low ? "low" : "high", reg, mask, i);
 536        }
 537}
 538
 539static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
 540{
 541        rhine_wait_bit(rp, reg, mask, false);
 542}
 543
 544static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
 545{
 546        rhine_wait_bit(rp, reg, mask, true);
 547}
 548
 549static u32 rhine_get_events(struct rhine_private *rp)
 550{
 551        void __iomem *ioaddr = rp->base;
 552        u32 intr_status;
 553
 554        intr_status = ioread16(ioaddr + IntrStatus);
 555        /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
 556        if (rp->quirks & rqStatusWBRace)
 557                intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
 558        return intr_status;
 559}
 560
 561static void rhine_ack_events(struct rhine_private *rp, u32 mask)
 562{
 563        void __iomem *ioaddr = rp->base;
 564
 565        if (rp->quirks & rqStatusWBRace)
 566                iowrite8(mask >> 16, ioaddr + IntrStatus2);
 567        iowrite16(mask, ioaddr + IntrStatus);
 568}
 569
 570/*
 571 * Get power related registers into sane state.
 572 * Notify user about past WOL event.
 573 */
 574static void rhine_power_init(struct net_device *dev)
 575{
 576        struct rhine_private *rp = netdev_priv(dev);
 577        void __iomem *ioaddr = rp->base;
 578        u16 wolstat;
 579
 580        if (rp->quirks & rqWOL) {
 581                /* Make sure chip is in power state D0 */
 582                iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
 583
 584                /* Disable "force PME-enable" */
 585                iowrite8(0x80, ioaddr + WOLcgClr);
 586
 587                /* Clear power-event config bits (WOL) */
 588                iowrite8(0xFF, ioaddr + WOLcrClr);
 589                /* More recent cards can manage two additional patterns */
 590                if (rp->quirks & rq6patterns)
 591                        iowrite8(0x03, ioaddr + WOLcrClr1);
 592
 593                /* Save power-event status bits */
 594                wolstat = ioread8(ioaddr + PwrcsrSet);
 595                if (rp->quirks & rq6patterns)
 596                        wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
 597
 598                /* Clear power-event status bits */
 599                iowrite8(0xFF, ioaddr + PwrcsrClr);
 600                if (rp->quirks & rq6patterns)
 601                        iowrite8(0x03, ioaddr + PwrcsrClr1);
 602
 603                if (wolstat) {
 604                        char *reason;
 605                        switch (wolstat) {
 606                        case WOLmagic:
 607                                reason = "Magic packet";
 608                                break;
 609                        case WOLlnkon:
 610                                reason = "Link went up";
 611                                break;
 612                        case WOLlnkoff:
 613                                reason = "Link went down";
 614                                break;
 615                        case WOLucast:
 616                                reason = "Unicast packet";
 617                                break;
 618                        case WOLbmcast:
 619                                reason = "Multicast/broadcast packet";
 620                                break;
 621                        default:
 622                                reason = "Unknown";
 623                        }
 624                        netdev_info(dev, "Woke system up. Reason: %s\n",
 625                                    reason);
 626                }
 627        }
 628}
 629
 630static void rhine_chip_reset(struct net_device *dev)
 631{
 632        struct rhine_private *rp = netdev_priv(dev);
 633        void __iomem *ioaddr = rp->base;
 634        u8 cmd1;
 635
 636        iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 637        IOSYNC;
 638
 639        if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
 640                netdev_info(dev, "Reset not complete yet. Trying harder.\n");
 641
 642                /* Force reset */
 643                if (rp->quirks & rqForceReset)
 644                        iowrite8(0x40, ioaddr + MiscCmd);
 645
 646                /* Reset can take somewhat longer (rare) */
 647                rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
 648        }
 649
 650        cmd1 = ioread8(ioaddr + ChipCmd1);
 651        netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
 652                   "failed" : "succeeded");
 653}
 654
 655static void enable_mmio(long pioaddr, u32 quirks)
 656{
 657        int n;
 658
 659        if (quirks & rqNeedEnMMIO) {
 660                if (quirks & rqRhineI) {
 661                        /* More recent docs say that this bit is reserved */
 662                        n = inb(pioaddr + ConfigA) | 0x20;
 663                        outb(n, pioaddr + ConfigA);
 664                } else {
 665                        n = inb(pioaddr + ConfigD) | 0x80;
 666                        outb(n, pioaddr + ConfigD);
 667                }
 668        }
 669}
 670
 671static inline int verify_mmio(struct device *hwdev,
 672                              long pioaddr,
 673                              void __iomem *ioaddr,
 674                              u32 quirks)
 675{
 676        if (quirks & rqNeedEnMMIO) {
 677                int i = 0;
 678
 679                /* Check that selected MMIO registers match the PIO ones */
 680                while (mmio_verify_registers[i]) {
 681                        int reg = mmio_verify_registers[i++];
 682                        unsigned char a = inb(pioaddr+reg);
 683                        unsigned char b = readb(ioaddr+reg);
 684
 685                        if (a != b) {
 686                                dev_err(hwdev,
  687                                        "MMIO does not match PIO [%02x] (%02x != %02x)\n",
 688                                        reg, a, b);
 689                                return -EIO;
 690                        }
 691                }
 692        }
 693        return 0;
 694}
 695
 696/*
 697 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 698 * (plus 0x6C for Rhine-I/II)
 699 */
 700static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 701{
 702        struct rhine_private *rp = netdev_priv(dev);
 703        void __iomem *ioaddr = rp->base;
 704        int i;
 705
 706        outb(0x20, pioaddr + MACRegEEcsr);
 707        for (i = 0; i < 1024; i++) {
 708                if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
 709                        break;
 710        }
 711        if (i > 512)
 712                pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
 713
 714        /*
 715         * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
 716         * MMIO. If reloading EEPROM was done first this could be avoided, but
 717         * it is not known if that still works with the "win98-reboot" problem.
 718         */
 719        enable_mmio(pioaddr, rp->quirks);
 720
 721        /* Turn off EEPROM-controlled wake-up (magic packet) */
 722        if (rp->quirks & rqWOL)
 723                iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
 724
 725}
 726
 727#ifdef CONFIG_NET_POLL_CONTROLLER
 728static void rhine_poll(struct net_device *dev)
 729{
 730        struct rhine_private *rp = netdev_priv(dev);
 731        const int irq = rp->irq;
 732
 733        disable_irq(irq);
 734        rhine_interrupt(irq, dev);
 735        enable_irq(irq);
 736}
 737#endif
 738
 739static void rhine_kick_tx_threshold(struct rhine_private *rp)
 740{
 741        if (rp->tx_thresh < 0xe0) {
 742                void __iomem *ioaddr = rp->base;
 743
 744                rp->tx_thresh += 0x20;
 745                BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 746        }
 747}
 748
 749static void rhine_tx_err(struct rhine_private *rp, u32 status)
 750{
 751        struct net_device *dev = rp->dev;
 752
 753        if (status & IntrTxAborted) {
 754                netif_info(rp, tx_err, dev,
 755                           "Abort %08x, frame dropped\n", status);
 756        }
 757
 758        if (status & IntrTxUnderrun) {
 759                rhine_kick_tx_threshold(rp);
  760                netif_info(rp, tx_err, dev, "Transmitter underrun, "
 761                           "Tx threshold now %02x\n", rp->tx_thresh);
 762        }
 763
 764        if (status & IntrTxDescRace)
 765                netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
 766
 767        if ((status & IntrTxError) &&
 768            (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
 769                rhine_kick_tx_threshold(rp);
 770                netif_info(rp, tx_err, dev, "Unspecified error. "
 771                           "Tx threshold now %02x\n", rp->tx_thresh);
 772        }
 773
 774        rhine_restart_tx(dev);
 775}
 776
 777static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
 778{
 779        void __iomem *ioaddr = rp->base;
 780        struct net_device_stats *stats = &rp->dev->stats;
 781
 782        stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
 783        stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
 784
 785        /*
 786         * Clears the "tally counters" for CRC errors and missed frames(?).
 787         * It has been reported that some chips need a write of 0 to clear
 788         * these, for others the counters are set to 1 when written to and
 789         * instead cleared when read. So we clear them both ways ...
 790         */
 791        iowrite32(0, ioaddr + RxMissed);
 792        ioread16(ioaddr + RxCRCErrs);
 793        ioread16(ioaddr + RxMissed);
 794}
 795
 796#define RHINE_EVENT_NAPI_RX     (IntrRxDone | \
 797                                 IntrRxErr | \
 798                                 IntrRxEmpty | \
 799                                 IntrRxOverflow | \
 800                                 IntrRxDropped | \
 801                                 IntrRxNoBuf | \
 802                                 IntrRxWakeUp)
 803
 804#define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
 805                                 IntrTxAborted | \
 806                                 IntrTxUnderrun | \
 807                                 IntrTxDescRace)
 808#define RHINE_EVENT_NAPI_TX     (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
 809
 810#define RHINE_EVENT_NAPI        (RHINE_EVENT_NAPI_RX | \
 811                                 RHINE_EVENT_NAPI_TX | \
 812                                 IntrStatsMax)
 813#define RHINE_EVENT_SLOW        (IntrPCIErr | IntrLinkChange)
 814#define RHINE_EVENT             (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
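/*
 * rhine_napipoll() acknowledges and handles the RHINE_EVENT_NAPI sources
 * directly; RHINE_EVENT_SLOW sources (PCI error, link change) are left
 * unacknowledged, masked out of IntrEnable and deferred to slow_event_task,
 * which runs in process context.
 */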
 815
 816static int rhine_napipoll(struct napi_struct *napi, int budget)
 817{
 818        struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
 819        struct net_device *dev = rp->dev;
 820        void __iomem *ioaddr = rp->base;
 821        u16 enable_mask = RHINE_EVENT & 0xffff;
 822        int work_done = 0;
 823        u32 status;
 824
 825        status = rhine_get_events(rp);
 826        rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
 827
 828        if (status & RHINE_EVENT_NAPI_RX)
 829                work_done += rhine_rx(dev, budget);
 830
 831        if (status & RHINE_EVENT_NAPI_TX) {
 832                if (status & RHINE_EVENT_NAPI_TX_ERR) {
 833                        /* Avoid scavenging before Tx engine turned off */
 834                        rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
 835                        if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
 836                                netif_warn(rp, tx_err, dev, "Tx still on\n");
 837                }
 838
 839                rhine_tx(dev);
 840
 841                if (status & RHINE_EVENT_NAPI_TX_ERR)
 842                        rhine_tx_err(rp, status);
 843        }
 844
 845        if (status & IntrStatsMax) {
 846                spin_lock(&rp->lock);
 847                rhine_update_rx_crc_and_missed_errord(rp);
 848                spin_unlock(&rp->lock);
 849        }
 850
 851        if (status & RHINE_EVENT_SLOW) {
 852                enable_mask &= ~RHINE_EVENT_SLOW;
 853                schedule_work(&rp->slow_event_task);
 854        }
 855
 856        if (work_done < budget) {
 857                napi_complete_done(napi, work_done);
 858                iowrite16(enable_mask, ioaddr + IntrEnable);
 859        }
 860        return work_done;
 861}
 862
 863static void rhine_hw_init(struct net_device *dev, long pioaddr)
 864{
 865        struct rhine_private *rp = netdev_priv(dev);
 866
 867        /* Reset the chip to erase previous misconfiguration. */
 868        rhine_chip_reset(dev);
 869
 870        /* Rhine-I needs extra time to recuperate before EEPROM reload */
 871        if (rp->quirks & rqRhineI)
 872                msleep(5);
 873
 874        /* Reload EEPROM controlled bytes cleared by soft reset */
 875        if (dev_is_pci(dev->dev.parent))
 876                rhine_reload_eeprom(pioaddr, dev);
 877}
 878
 879static const struct net_device_ops rhine_netdev_ops = {
 880        .ndo_open                = rhine_open,
 881        .ndo_stop                = rhine_close,
 882        .ndo_start_xmit          = rhine_start_tx,
 883        .ndo_get_stats64         = rhine_get_stats64,
 884        .ndo_set_rx_mode         = rhine_set_rx_mode,
 885        .ndo_validate_addr       = eth_validate_addr,
 886        .ndo_set_mac_address     = eth_mac_addr,
 887        .ndo_eth_ioctl           = netdev_ioctl,
 888        .ndo_tx_timeout          = rhine_tx_timeout,
 889        .ndo_vlan_rx_add_vid     = rhine_vlan_rx_add_vid,
 890        .ndo_vlan_rx_kill_vid    = rhine_vlan_rx_kill_vid,
 891#ifdef CONFIG_NET_POLL_CONTROLLER
 892        .ndo_poll_controller     = rhine_poll,
 893#endif
 894};
 895
 896static int rhine_init_one_common(struct device *hwdev, u32 quirks,
 897                                 long pioaddr, void __iomem *ioaddr, int irq)
 898{
 899        struct net_device *dev;
 900        struct rhine_private *rp;
 901        int i, rc, phy_id;
 902        const char *name;
 903
 904        /* this should always be supported */
 905        rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
 906        if (rc) {
 907                dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
 908                goto err_out;
 909        }
 910
 911        dev = alloc_etherdev(sizeof(struct rhine_private));
 912        if (!dev) {
 913                rc = -ENOMEM;
 914                goto err_out;
 915        }
 916        SET_NETDEV_DEV(dev, hwdev);
 917
 918        rp = netdev_priv(dev);
 919        rp->dev = dev;
 920        rp->quirks = quirks;
 921        rp->pioaddr = pioaddr;
 922        rp->base = ioaddr;
 923        rp->irq = irq;
 924        rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 925
 926        phy_id = rp->quirks & rqIntPHY ? 1 : 0;
 927
 928        u64_stats_init(&rp->tx_stats.syncp);
 929        u64_stats_init(&rp->rx_stats.syncp);
 930
 931        /* Get chip registers into a sane state */
 932        rhine_power_init(dev);
 933        rhine_hw_init(dev, pioaddr);
 934
 935        for (i = 0; i < 6; i++)
 936                dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
 937
 938        if (!is_valid_ether_addr(dev->dev_addr)) {
 939                /* Report it and use a random ethernet address instead */
 940                netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
 941                eth_hw_addr_random(dev);
 942                netdev_info(dev, "Using random MAC address: %pM\n",
 943                            dev->dev_addr);
 944        }
 945
 946        /* For Rhine-I/II, phy_id is loaded from EEPROM */
 947        if (!phy_id)
 948                phy_id = ioread8(ioaddr + 0x6C);
 949
 950        spin_lock_init(&rp->lock);
 951        mutex_init(&rp->task_lock);
 952        INIT_WORK(&rp->reset_task, rhine_reset_task);
 953        INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
 954
 955        rp->mii_if.dev = dev;
 956        rp->mii_if.mdio_read = mdio_read;
 957        rp->mii_if.mdio_write = mdio_write;
 958        rp->mii_if.phy_id_mask = 0x1f;
 959        rp->mii_if.reg_num_mask = 0x1f;
 960
 961        /* The chip-specific entries in the device structure. */
 962        dev->netdev_ops = &rhine_netdev_ops;
 963        dev->ethtool_ops = &netdev_ethtool_ops;
 964        dev->watchdog_timeo = TX_TIMEOUT;
 965
 966        netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
 967
 968        if (rp->quirks & rqRhineI)
 969                dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 970
 971        if (rp->quirks & rqMgmt)
 972                dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
 973                                 NETIF_F_HW_VLAN_CTAG_RX |
 974                                 NETIF_F_HW_VLAN_CTAG_FILTER;
 975
 976        /* dev->name not defined before register_netdev()! */
 977        rc = register_netdev(dev);
 978        if (rc)
 979                goto err_out_free_netdev;
 980
 981        if (rp->quirks & rqRhineI)
 982                name = "Rhine";
 983        else if (rp->quirks & rqStatusWBRace)
 984                name = "Rhine II";
 985        else if (rp->quirks & rqMgmt)
 986                name = "Rhine III (Management Adapter)";
 987        else
 988                name = "Rhine III";
 989
 990        netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
 991                    name, ioaddr, dev->dev_addr, rp->irq);
 992
 993        dev_set_drvdata(hwdev, dev);
 994
 995        {
 996                u16 mii_cmd;
 997                int mii_status = mdio_read(dev, phy_id, 1);
 998                mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
 999                mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1000                if (mii_status != 0xffff && mii_status != 0x0000) {
1001                        rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1002                        netdev_info(dev,
1003                                    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1004                                    phy_id,
1005                                    mii_status, rp->mii_if.advertising,
1006                                    mdio_read(dev, phy_id, 5));
1007
1008                        /* set IFF_RUNNING */
1009                        if (mii_status & BMSR_LSTATUS)
1010                                netif_carrier_on(dev);
1011                        else
1012                                netif_carrier_off(dev);
1013
1014                }
1015        }
1016        rp->mii_if.phy_id = phy_id;
1017        if (avoid_D3)
1018                netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1019
1020        return 0;
1021
1022err_out_free_netdev:
1023        free_netdev(dev);
1024err_out:
1025        return rc;
1026}
1027
1028static int rhine_init_one_pci(struct pci_dev *pdev,
1029                              const struct pci_device_id *ent)
1030{
1031        struct device *hwdev = &pdev->dev;
1032        int rc;
1033        long pioaddr, memaddr;
1034        void __iomem *ioaddr;
1035        int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1036
1037/* This driver was written to use PCI memory space. Some early versions
1038 * of the Rhine may only work correctly with I/O space accesses.
1039 * TODO: determine for which revisions this is true and assign the flag
1040 *       in code as opposed to this Kconfig option (???)
1041 */
1042#ifdef CONFIG_VIA_RHINE_MMIO
1043        u32 quirks = rqNeedEnMMIO;
1044#else
1045        u32 quirks = 0;
1046#endif
1047
1048        rc = pci_enable_device(pdev);
1049        if (rc)
1050                goto err_out;
1051
1052        if (pdev->revision < VTunknown0) {
1053                quirks |= rqRhineI;
1054        } else if (pdev->revision >= VT6102) {
1055                quirks |= rqWOL | rqForceReset;
1056                if (pdev->revision < VT6105) {
1057                        quirks |= rqStatusWBRace;
1058                } else {
1059                        quirks |= rqIntPHY;
1060                        if (pdev->revision >= VT6105_B0)
1061                                quirks |= rq6patterns;
1062                        if (pdev->revision >= VT6105M)
1063                                quirks |= rqMgmt;
1064                }
1065        }
1066
1067        /* sanity check */
1068        if ((pci_resource_len(pdev, 0) < io_size) ||
1069            (pci_resource_len(pdev, 1) < io_size)) {
1070                rc = -EIO;
1071                dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1072                goto err_out_pci_disable;
1073        }
1074
1075        pioaddr = pci_resource_start(pdev, 0);
1076        memaddr = pci_resource_start(pdev, 1);
1077
1078        pci_set_master(pdev);
1079
1080        rc = pci_request_regions(pdev, DRV_NAME);
1081        if (rc)
1082                goto err_out_pci_disable;
1083
1084        ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1085        if (!ioaddr) {
1086                rc = -EIO;
1087                dev_err(hwdev,
1088                        "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1089                        dev_name(hwdev), io_size, memaddr);
1090                goto err_out_free_res;
1091        }
1092
1093        enable_mmio(pioaddr, quirks);
1094
1095        rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1096        if (rc)
1097                goto err_out_unmap;
1098
1099        rc = rhine_init_one_common(&pdev->dev, quirks,
1100                                   pioaddr, ioaddr, pdev->irq);
1101        if (!rc)
1102                return 0;
1103
1104err_out_unmap:
1105        pci_iounmap(pdev, ioaddr);
1106err_out_free_res:
1107        pci_release_regions(pdev);
1108err_out_pci_disable:
1109        pci_disable_device(pdev);
1110err_out:
1111        return rc;
1112}
1113
1114static int rhine_init_one_platform(struct platform_device *pdev)
1115{
1116        const u32 *quirks;
1117        int irq;
1118        void __iomem *ioaddr;
1119
1120        quirks = of_device_get_match_data(&pdev->dev);
1121        if (!quirks)
1122                return -EINVAL;
1123
1124        ioaddr = devm_platform_ioremap_resource(pdev, 0);
1125        if (IS_ERR(ioaddr))
1126                return PTR_ERR(ioaddr);
1127
1128        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1129        if (!irq)
1130                return -EINVAL;
1131
1132        return rhine_init_one_common(&pdev->dev, *quirks,
1133                                     (long)ioaddr, ioaddr, irq);
1134}
1135
1136static int alloc_ring(struct net_device* dev)
1137{
1138        struct rhine_private *rp = netdev_priv(dev);
1139        struct device *hwdev = dev->dev.parent;
1140        void *ring;
1141        dma_addr_t ring_dma;
1142
1143        ring = dma_alloc_coherent(hwdev,
1144                                  RX_RING_SIZE * sizeof(struct rx_desc) +
1145                                  TX_RING_SIZE * sizeof(struct tx_desc),
1146                                  &ring_dma,
1147                                  GFP_ATOMIC);
1148        if (!ring) {
1149                netdev_err(dev, "Could not allocate DMA memory\n");
1150                return -ENOMEM;
1151        }
1152        if (rp->quirks & rqRhineI) {
1153                rp->tx_bufs = dma_alloc_coherent(hwdev,
1154                                                 PKT_BUF_SZ * TX_RING_SIZE,
1155                                                 &rp->tx_bufs_dma,
1156                                                 GFP_ATOMIC);
1157                if (rp->tx_bufs == NULL) {
1158                        dma_free_coherent(hwdev,
1159                                          RX_RING_SIZE * sizeof(struct rx_desc) +
1160                                          TX_RING_SIZE * sizeof(struct tx_desc),
1161                                          ring, ring_dma);
1162                        return -ENOMEM;
1163                }
1164        }
1165
1166        rp->rx_ring = ring;
1167        rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1168        rp->rx_ring_dma = ring_dma;
1169        rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1170
1171        return 0;
1172}
1173
1174static void free_ring(struct net_device* dev)
1175{
1176        struct rhine_private *rp = netdev_priv(dev);
1177        struct device *hwdev = dev->dev.parent;
1178
1179        dma_free_coherent(hwdev,
1180                          RX_RING_SIZE * sizeof(struct rx_desc) +
1181                          TX_RING_SIZE * sizeof(struct tx_desc),
1182                          rp->rx_ring, rp->rx_ring_dma);
1183        rp->tx_ring = NULL;
1184
1185        if (rp->tx_bufs)
1186                dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1187                                  rp->tx_bufs, rp->tx_bufs_dma);
1188
1189        rp->tx_bufs = NULL;
1190
1191}
1192
1193struct rhine_skb_dma {
1194        struct sk_buff *skb;
1195        dma_addr_t dma;
1196};
1197
1198static inline int rhine_skb_dma_init(struct net_device *dev,
1199                                     struct rhine_skb_dma *sd)
1200{
1201        struct rhine_private *rp = netdev_priv(dev);
1202        struct device *hwdev = dev->dev.parent;
1203        const int size = rp->rx_buf_sz;
1204
1205        sd->skb = netdev_alloc_skb(dev, size);
1206        if (!sd->skb)
1207                return -ENOMEM;
1208
1209        sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1210        if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1211                netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1212                dev_kfree_skb_any(sd->skb);
1213                return -EIO;
1214        }
1215
1216        return 0;
1217}
1218
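/*
 * Hand every Rx descriptor back to the NIC (DescOwn) and restart the
 * software read pointer at the head of the ring.
 */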
1219static void rhine_reset_rbufs(struct rhine_private *rp)
1220{
1221        int i;
1222
1223        rp->cur_rx = 0;
1224
1225        for (i = 0; i < RX_RING_SIZE; i++)
1226                rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1227}
1228
1229static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1230                                           struct rhine_skb_dma *sd, int entry)
1231{
1232        rp->rx_skbuff_dma[entry] = sd->dma;
1233        rp->rx_skbuff[entry] = sd->skb;
1234
1235        rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
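        /* Ensure the buffer address is visible to the device before the
         * caller hands the descriptor over by setting DescOwn in rx_status.
         */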
1236        dma_wmb();
1237}
1238
1239static void free_rbufs(struct net_device* dev);
1240
1241static int alloc_rbufs(struct net_device *dev)
1242{
1243        struct rhine_private *rp = netdev_priv(dev);
1244        dma_addr_t next;
1245        int rc, i;
1246
1247        rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1248        next = rp->rx_ring_dma;
1249
1250        /* Init the ring entries */
1251        for (i = 0; i < RX_RING_SIZE; i++) {
1252                rp->rx_ring[i].rx_status = 0;
1253                rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1254                next += sizeof(struct rx_desc);
1255                rp->rx_ring[i].next_desc = cpu_to_le32(next);
1256                rp->rx_skbuff[i] = NULL;
1257        }
1258        /* Mark the last entry as wrapping the ring. */
1259        rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1260
1261        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1262        for (i = 0; i < RX_RING_SIZE; i++) {
1263                struct rhine_skb_dma sd;
1264
1265                rc = rhine_skb_dma_init(dev, &sd);
1266                if (rc < 0) {
1267                        free_rbufs(dev);
1268                        goto out;
1269                }
1270
1271                rhine_skb_dma_nic_store(rp, &sd, i);
1272        }
1273
1274        rhine_reset_rbufs(rp);
1275out:
1276        return rc;
1277}
1278
1279static void free_rbufs(struct net_device* dev)
1280{
1281        struct rhine_private *rp = netdev_priv(dev);
1282        struct device *hwdev = dev->dev.parent;
1283        int i;
1284
1285        /* Free all the skbuffs in the Rx queue. */
1286        for (i = 0; i < RX_RING_SIZE; i++) {
1287                rp->rx_ring[i].rx_status = 0;
1288                rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1289                if (rp->rx_skbuff[i]) {
1290                        dma_unmap_single(hwdev,
1291                                         rp->rx_skbuff_dma[i],
1292                                         rp->rx_buf_sz, DMA_FROM_DEVICE);
1293                        dev_kfree_skb(rp->rx_skbuff[i]);
1294                }
1295                rp->rx_skbuff[i] = NULL;
1296        }
1297}
1298
1299static void alloc_tbufs(struct net_device* dev)
1300{
1301        struct rhine_private *rp = netdev_priv(dev);
1302        dma_addr_t next;
1303        int i;
1304
1305        rp->dirty_tx = rp->cur_tx = 0;
1306        next = rp->tx_ring_dma;
1307        for (i = 0; i < TX_RING_SIZE; i++) {
1308                rp->tx_skbuff[i] = NULL;
1309                rp->tx_ring[i].tx_status = 0;
1310                rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1311                next += sizeof(struct tx_desc);
1312                rp->tx_ring[i].next_desc = cpu_to_le32(next);
1313                if (rp->quirks & rqRhineI)
1314                        rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1315        }
1316        rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1317
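        /* Reset BQL (byte queue limits) accounting to match the now-empty
         * Tx ring.
         */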
1318        netdev_reset_queue(dev);
1319}
1320
1321static void free_tbufs(struct net_device* dev)
1322{
1323        struct rhine_private *rp = netdev_priv(dev);
1324        struct device *hwdev = dev->dev.parent;
1325        int i;
1326
1327        for (i = 0; i < TX_RING_SIZE; i++) {
1328                rp->tx_ring[i].tx_status = 0;
1329                rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1330                rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1331                if (rp->tx_skbuff[i]) {
1332                        if (rp->tx_skbuff_dma[i]) {
1333                                dma_unmap_single(hwdev,
1334                                                 rp->tx_skbuff_dma[i],
1335                                                 rp->tx_skbuff[i]->len,
1336                                                 DMA_TO_DEVICE);
1337                        }
1338                        dev_kfree_skb(rp->tx_skbuff[i]);
1339                }
1340                rp->tx_skbuff[i] = NULL;
1341                rp->tx_buf[i] = NULL;
1342        }
1343}
1344
1345static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1346{
1347        struct rhine_private *rp = netdev_priv(dev);
1348        void __iomem *ioaddr = rp->base;
1349
1350        if (!rp->mii_if.force_media)
1351                mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1352
1353        if (rp->mii_if.full_duplex)
1354            iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1355                   ioaddr + ChipCmd1);
1356        else
1357            iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1358                   ioaddr + ChipCmd1);
1359
1360        netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1361                   rp->mii_if.force_media, netif_carrier_ok(dev));
1362}
1363
1364/* Called after status of force_media possibly changed */
1365static void rhine_set_carrier(struct mii_if_info *mii)
1366{
1367        struct net_device *dev = mii->dev;
1368        struct rhine_private *rp = netdev_priv(dev);
1369
1370        if (mii->force_media) {
1371                /* autoneg is off: Link is always assumed to be up */
1372                if (!netif_carrier_ok(dev))
1373                        netif_carrier_on(dev);
1374        }
1375
1376        rhine_check_media(dev, 0);
1377
1378        netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1379                   mii->force_media, netif_carrier_ok(dev));
1380}
1381
1382/**
1383 * rhine_set_cam - set CAM multicast filters
1384 * @ioaddr: register block of this Rhine
1385 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1386 * @addr: multicast address (6 bytes)
1387 *
1388 * Load addresses into multicast filters.
1389 */
1390static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1391{
1392        int i;
1393
1394        iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1395        wmb();
1396
1397        /* Paranoid -- idx out of range should never happen */
1398        idx &= (MCAM_SIZE - 1);
1399
1400        iowrite8((u8) idx, ioaddr + CamAddr);
1401
1402        for (i = 0; i < 6; i++, addr++)
1403                iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1404        udelay(10);
1405        wmb();
1406
1407        iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1408        udelay(10);
1409
1410        iowrite8(0, ioaddr + CamCon);
1411}
1412
1413/**
1414 * rhine_set_vlan_cam - set CAM VLAN filters
1415 * @ioaddr: register block of this Rhine
1416 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1417 * @addr: VLAN ID (2 bytes)
1418 *
1419 * Load addresses into VLAN filters.
1420 */
1421static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1422{
1423        iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1424        wmb();
1425
1426        /* Paranoid -- idx out of range should never happen */
1427        idx &= (VCAM_SIZE - 1);
1428
1429        iowrite8((u8) idx, ioaddr + CamAddr);
1430
1431        iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1432        udelay(10);
1433        wmb();
1434
1435        iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1436        udelay(10);
1437
1438        iowrite8(0, ioaddr + CamCon);
1439}
1440
1441/**
1442 * rhine_set_cam_mask - set multicast CAM mask
1443 * @ioaddr: register block of this Rhine
1444 * @mask: multicast CAM mask
1445 *
1446 * Mask sets multicast filters active/inactive.
1447 */
1448static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1449{
1450        iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1451        wmb();
1452
1453        /* write mask */
1454        iowrite32(mask, ioaddr + CamMask);
1455
1456        /* disable CAMEN */
1457        iowrite8(0, ioaddr + CamCon);
1458}
1459
1460/**
1461 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1462 * @ioaddr: register block of this Rhine
1463 * @mask: VLAN CAM mask
1464 *
1465 * Mask sets VLAN filters active/inactive.
1466 */
1467static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1468{
1469        iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1470        wmb();
1471
1472        /* write mask */
1473        iowrite32(mask, ioaddr + CamMask);
1474
1475        /* disable CAMEN */
1476        iowrite8(0, ioaddr + CamCon);
1477}
1478
1479/**
1480 * rhine_init_cam_filter - initialize CAM filters
1481 * @dev: network device
1482 *
1483 * Initialize (disable) hardware VLAN and multicast support on this
1484 * Rhine.
1485 */
1486static void rhine_init_cam_filter(struct net_device *dev)
1487{
1488        struct rhine_private *rp = netdev_priv(dev);
1489        void __iomem *ioaddr = rp->base;
1490
1491        /* Disable all CAMs */
1492        rhine_set_vlan_cam_mask(ioaddr, 0);
1493        rhine_set_cam_mask(ioaddr, 0);
1494
1495        /* disable hardware VLAN support */
1496        BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1497        BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1498}
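
/*
 * Note: the CAM based filters above exist only on the management chips
 * (rqMgmt, e.g. the VT6105M); init_registers() and rhine_set_rx_mode()
 * check that quirk before using them.  The other Rhine variants rely on
 * the 64-bit multicast hash instead.
 */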
1499
1500/**
1501 * rhine_update_vcam - update VLAN CAM filters
1502 * @dev: network device
1503 *
1504 * Update VLAN CAM filters to match configuration change.
1505 */
1506static void rhine_update_vcam(struct net_device *dev)
1507{
1508        struct rhine_private *rp = netdev_priv(dev);
1509        void __iomem *ioaddr = rp->base;
1510        u16 vid;
1511        u32 vCAMmask = 0;       /* 32 vCAMs (6105M and better) */
1512        unsigned int i = 0;
1513
1514        for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1515                rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1516                vCAMmask |= 1 << i;
1517                if (++i >= VCAM_SIZE)
1518                        break;
1519        }
1520        rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1521}
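
/*
 * Note: only the first VCAM_SIZE active VLAN IDs get a hardware filter
 * entry; with VLAN filtering enabled (BCR1_VIDFR) the chip will presumably
 * drop traffic for any VIDs beyond that limit.
 */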
1522
1523static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1524{
1525        struct rhine_private *rp = netdev_priv(dev);
1526
1527        spin_lock_bh(&rp->lock);
1528        set_bit(vid, rp->active_vlans);
1529        rhine_update_vcam(dev);
1530        spin_unlock_bh(&rp->lock);
1531        return 0;
1532}
1533
1534static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1535{
1536        struct rhine_private *rp = netdev_priv(dev);
1537
1538        spin_lock_bh(&rp->lock);
1539        clear_bit(vid, rp->active_vlans);
1540        rhine_update_vcam(dev);
1541        spin_unlock_bh(&rp->lock);
1542        return 0;
1543}
1544
1545static void init_registers(struct net_device *dev)
1546{
1547        struct rhine_private *rp = netdev_priv(dev);
1548        void __iomem *ioaddr = rp->base;
1549        int i;
1550
1551        for (i = 0; i < 6; i++)
1552                iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1553
1554        /* Initialize other registers. */
1555        iowrite16(0x0006, ioaddr + PCIBusConfig);       /* Tune configuration??? */
1556        /* Configure initial FIFO thresholds. */
1557        iowrite8(0x20, ioaddr + TxConfig);
1558        rp->tx_thresh = 0x20;
1559        rp->rx_thresh = 0x60;           /* Written in rhine_set_rx_mode(). */
1560
1561        iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1562        iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1563
1564        rhine_set_rx_mode(dev);
1565
1566        if (rp->quirks & rqMgmt)
1567                rhine_init_cam_filter(dev);
1568
1569        napi_enable(&rp->napi);
1570
1571        iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1572
1573        iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1574               ioaddr + ChipCmd);
1575        rhine_check_media(dev, 1);
1576}
1577
1578/* Enable MII link status auto-polling (required for IntrLinkChange) */
1579static void rhine_enable_linkmon(struct rhine_private *rp)
1580{
1581        void __iomem *ioaddr = rp->base;
1582
1583        iowrite8(0, ioaddr + MIICmd);
1584        iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1585        iowrite8(0x80, ioaddr + MIICmd);
1586
1587        rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1588
1589        iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1590}
1591
1592/* Disable MII link status auto-polling (required for MDIO access) */
1593static void rhine_disable_linkmon(struct rhine_private *rp)
1594{
1595        void __iomem *ioaddr = rp->base;
1596
1597        iowrite8(0, ioaddr + MIICmd);
1598
1599        if (rp->quirks & rqRhineI) {
1600                iowrite8(0x01, ioaddr + MIIRegAddr);    /* MII_BMSR */
1601
1602                /* Can be called from ISR. Evil. */
1603                mdelay(1);
1604
1605                /* 0x80 must be set immediately before turning it off */
1606                iowrite8(0x80, ioaddr + MIICmd);
1607
1608                rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1609
1610                /* Heh. Now clear 0x80 again. */
1611                iowrite8(0, ioaddr + MIICmd);
1612        } else
1614                rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1615}
1616
1617/* Read and write over the MII Management Data I/O (MDIO) interface. */
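/*
 * Note on the MIICmd bits used here: 0x80 (re)starts link status
 * auto-polling of the selected register, while 0x40 and 0x20 trigger
 * one-shot read and write cycles.  Auto-polling must be stopped via
 * rhine_disable_linkmon() around one-shot accesses, presumably so the
 * poll and the access do not contend for the MII bus.
 */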
1618
1619static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1620{
1621        struct rhine_private *rp = netdev_priv(dev);
1622        void __iomem *ioaddr = rp->base;
1623        int result;
1624
1625        rhine_disable_linkmon(rp);
1626
1627        /* rhine_disable_linkmon already cleared MIICmd */
1628        iowrite8(phy_id, ioaddr + MIIPhyAddr);
1629        iowrite8(regnum, ioaddr + MIIRegAddr);
1630        iowrite8(0x40, ioaddr + MIICmd);                /* Trigger read */
1631        rhine_wait_bit_low(rp, MIICmd, 0x40);
1632        result = ioread16(ioaddr + MIIData);
1633
1634        rhine_enable_linkmon(rp);
1635        return result;
1636}
1637
1638static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1639{
1640        struct rhine_private *rp = netdev_priv(dev);
1641        void __iomem *ioaddr = rp->base;
1642
1643        rhine_disable_linkmon(rp);
1644
1645        /* rhine_disable_linkmon already cleared MIICmd */
1646        iowrite8(phy_id, ioaddr + MIIPhyAddr);
1647        iowrite8(regnum, ioaddr + MIIRegAddr);
1648        iowrite16(value, ioaddr + MIIData);
1649        iowrite8(0x20, ioaddr + MIICmd);                /* Trigger write */
1650        rhine_wait_bit_low(rp, MIICmd, 0x20);
1651
1652        rhine_enable_linkmon(rp);
1653}
1654
1655static void rhine_task_disable(struct rhine_private *rp)
1656{
1657        mutex_lock(&rp->task_lock);
1658        rp->task_enable = false;
1659        mutex_unlock(&rp->task_lock);
1660
1661        cancel_work_sync(&rp->slow_event_task);
1662        cancel_work_sync(&rp->reset_task);
1663}
1664
1665static void rhine_task_enable(struct rhine_private *rp)
1666{
1667        mutex_lock(&rp->task_lock);
1668        rp->task_enable = true;
1669        mutex_unlock(&rp->task_lock);
1670}
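
/*
 * task_enable gates the deferred work handlers: rhine_reset_task() and
 * rhine_slow_event_task() re-check it under task_lock before touching the
 * hardware, while rhine_task_disable() clears it and then waits for any
 * work already in flight via cancel_work_sync().
 */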
1671
1672static int rhine_open(struct net_device *dev)
1673{
1674        struct rhine_private *rp = netdev_priv(dev);
1675        void __iomem *ioaddr = rp->base;
1676        int rc;
1677
1678        rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1679        if (rc)
1680                goto out;
1681
1682        netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1683
1684        rc = alloc_ring(dev);
1685        if (rc < 0)
1686                goto out_free_irq;
1687
1688        rc = alloc_rbufs(dev);
1689        if (rc < 0)
1690                goto out_free_ring;
1691
1692        alloc_tbufs(dev);
1693        enable_mmio(rp->pioaddr, rp->quirks);
1694        rhine_power_init(dev);
1695        rhine_chip_reset(dev);
1696        rhine_task_enable(rp);
1697        init_registers(dev);
1698
1699        netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1700                  __func__, ioread16(ioaddr + ChipCmd),
1701                  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1702
1703        netif_start_queue(dev);
1704
1705out:
1706        return rc;
1707
1708out_free_ring:
1709        free_ring(dev);
1710out_free_irq:
1711        free_irq(rp->irq, dev);
1712        goto out;
1713}
1714
1715static void rhine_reset_task(struct work_struct *work)
1716{
1717        struct rhine_private *rp = container_of(work, struct rhine_private,
1718                                                reset_task);
1719        struct net_device *dev = rp->dev;
1720
1721        mutex_lock(&rp->task_lock);
1722
1723        if (!rp->task_enable)
1724                goto out_unlock;
1725
1726        napi_disable(&rp->napi);
1727        netif_tx_disable(dev);
1728        spin_lock_bh(&rp->lock);
1729
1730        /* clear all descriptors */
1731        free_tbufs(dev);
1732        alloc_tbufs(dev);
1733
1734        rhine_reset_rbufs(rp);
1735
1736        /* Reinitialize the hardware. */
1737        rhine_chip_reset(dev);
1738        init_registers(dev);
1739
1740        spin_unlock_bh(&rp->lock);
1741
1742        netif_trans_update(dev); /* prevent tx timeout */
1743        dev->stats.tx_errors++;
1744        netif_wake_queue(dev);
1745
1746out_unlock:
1747        mutex_unlock(&rp->task_lock);
1748}
1749
1750static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
1751{
1752        struct rhine_private *rp = netdev_priv(dev);
1753        void __iomem *ioaddr = rp->base;
1754
1755        netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1756                    ioread16(ioaddr + IntrStatus),
1757                    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1758
1759        schedule_work(&rp->reset_task);
1760}
1761
1762static inline bool rhine_tx_queue_full(struct rhine_private *rp)
1763{
1764        return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
1765}
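
/*
 * cur_tx and dirty_tx are free-running counters; their difference is the
 * number of descriptors queued but not yet reclaimed.  It is capped at
 * TX_QUEUE_LEN, slightly below TX_RING_SIZE, to keep some headroom in the
 * ring.  rhine_start_tx() and rhine_tx() both re-check this predicate
 * after their memory barriers so that a racing stop or wake-up of the
 * queue is not lost.
 */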
1766
1767static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1768                                  struct net_device *dev)
1769{
1770        struct rhine_private *rp = netdev_priv(dev);
1771        struct device *hwdev = dev->dev.parent;
1772        void __iomem *ioaddr = rp->base;
1773        unsigned entry;
1774
1775        /* Caution: the write order is important here; set the field
1776           with the "ownership" bits last. */
1777
1778        /* Calculate the next Tx descriptor entry. */
1779        entry = rp->cur_tx % TX_RING_SIZE;
1780
1781        if (skb_padto(skb, ETH_ZLEN))
1782                return NETDEV_TX_OK;
1783
1784        rp->tx_skbuff[entry] = skb;
1785
1786        if ((rp->quirks & rqRhineI) &&
1787            (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1788                /* Must use alignment buffer. */
1789                if (skb->len > PKT_BUF_SZ) {
1790                        /* packet too long, drop it */
1791                        dev_kfree_skb_any(skb);
1792                        rp->tx_skbuff[entry] = NULL;
1793                        dev->stats.tx_dropped++;
1794                        return NETDEV_TX_OK;
1795                }
1796
1797                /* Padding is not copied and so must be redone. */
1798                skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1799                if (skb->len < ETH_ZLEN)
1800                        memset(rp->tx_buf[entry] + skb->len, 0,
1801                               ETH_ZLEN - skb->len);
1802                rp->tx_skbuff_dma[entry] = 0;
1803                rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1804                                                      (rp->tx_buf[entry] -
1805                                                       rp->tx_bufs));
1806        } else {
1807                rp->tx_skbuff_dma[entry] =
1808                        dma_map_single(hwdev, skb->data, skb->len,
1809                                       DMA_TO_DEVICE);
1810                if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1811                        dev_kfree_skb_any(skb);
1812                        rp->tx_skbuff_dma[entry] = 0;
1813                        dev->stats.tx_dropped++;
1814                        return NETDEV_TX_OK;
1815                }
1816                rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1817        }
1818
1819        rp->tx_ring[entry].desc_length =
1820                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1821
1822        if (unlikely(skb_vlan_tag_present(skb))) {
1823                u16 vid_pcp = skb_vlan_tag_get(skb);
1824
1825                /* drop CFI/DEI bit, register needs VID and PCP */
1826                vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1827                          ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1828                rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1829                /* request tagging */
1830                rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1831        } else
1833                rp->tx_ring[entry].tx_status = 0;
1834
1835        netdev_sent_queue(dev, skb->len);
1836        /* Make the descriptor writes visible to the NIC before setting DescOwn. */
1837        dma_wmb();
1838        rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1839        wmb();
1840
1841        rp->cur_tx++;
1842        /*
1843         * Do not let the cur_tx update linger after the NIC has seen the
1844         * transmit request: the transmit completion handler (rhine_tx)
1845         * could otherwise miss it (pairs with the smp_rmb() there).
1846         */
1847        smp_wmb();
1848
1849        /* Non-x86 Todo: explicitly flush cache lines here. */
1850
1851        if (skb_vlan_tag_present(skb))
1852                /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1853                BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1854
1855        /* Wake the potentially-idle transmit channel */
1856        iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1857               ioaddr + ChipCmd1);
1858        IOSYNC;
1859
1860        /* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
1861        if (rhine_tx_queue_full(rp)) {
1862                netif_stop_queue(dev);
1863                smp_rmb();
1864                /* Re-check: rhine_tx() may have freed entries meanwhile. */
1865                if (!rhine_tx_queue_full(rp))
1866                        netif_wake_queue(dev);
1867        }
1868
1869        netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1870                  rp->cur_tx - 1, entry);
1871
1872        return NETDEV_TX_OK;
1873}
1874
1875static void rhine_irq_disable(struct rhine_private *rp)
1876{
1877        iowrite16(0x0000, rp->base + IntrEnable);
1878}
1879
1880/* The interrupt handler disables further interrupts and schedules NAPI,
1881   which does the Rx thread work and cleans up after the Tx thread. */
1882static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1883{
1884        struct net_device *dev = dev_instance;
1885        struct rhine_private *rp = netdev_priv(dev);
1886        u32 status;
1887        int handled = 0;
1888
1889        status = rhine_get_events(rp);
1890
1891        netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1892
1893        if (status & RHINE_EVENT) {
1894                handled = 1;
1895
1896                rhine_irq_disable(rp);
1897                napi_schedule(&rp->napi);
1898        }
1899
1900        if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1901                netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1902                          status);
1903        }
1904
1905        return IRQ_RETVAL(handled);
1906}
1907
1908/* This routine is logically part of the interrupt handler, but isolated
1909   for clarity. */
1910static void rhine_tx(struct net_device *dev)
1911{
1912        struct rhine_private *rp = netdev_priv(dev);
1913        struct device *hwdev = dev->dev.parent;
1914        unsigned int pkts_compl = 0, bytes_compl = 0;
1915        unsigned int dirty_tx = rp->dirty_tx;
1916        unsigned int cur_tx;
1917        struct sk_buff *skb;
1918
1919        /*
1920         * The race with rhine_start_tx does not matter here as long as the
1921         * driver enforces a value of cur_tx that was relevant when the
1922         * packet was scheduled to the network chipset.
1923         * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
1924         */
1925        smp_rmb();
1926        cur_tx = rp->cur_tx;
1927        /* find and cleanup dirty tx descriptors */
1928        while (dirty_tx != cur_tx) {
1929                unsigned int entry = dirty_tx % TX_RING_SIZE;
1930                u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1931
1932                netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1933                          entry, txstatus);
1934                if (txstatus & DescOwn)
1935                        break;
1936                skb = rp->tx_skbuff[entry];
1937                if (txstatus & 0x8000) {
1938                        netif_dbg(rp, tx_done, dev,
1939                                  "Transmit error, Tx status %08x\n", txstatus);
1940                        dev->stats.tx_errors++;
1941                        if (txstatus & 0x0400)
1942                                dev->stats.tx_carrier_errors++;
1943                        if (txstatus & 0x0200)
1944                                dev->stats.tx_window_errors++;
1945                        if (txstatus & 0x0100)
1946                                dev->stats.tx_aborted_errors++;
1947                        if (txstatus & 0x0080)
1948                                dev->stats.tx_heartbeat_errors++;
1949                        if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1950                            (txstatus & 0x0800) || (txstatus & 0x1000)) {
1951                                dev->stats.tx_fifo_errors++;
1952                                rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1953                                break; /* Keep the skb - we try again */
1954                        }
1955                        /* Transmitter restarted in 'abnormal' handler. */
1956                } else {
1957                        if (rp->quirks & rqRhineI)
1958                                dev->stats.collisions += (txstatus >> 3) & 0x0F;
1959                        else
1960                                dev->stats.collisions += txstatus & 0x0F;
1961                        netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1962                                  (txstatus >> 3) & 0xF, txstatus & 0xF);
1963
1964                        u64_stats_update_begin(&rp->tx_stats.syncp);
1965                        rp->tx_stats.bytes += skb->len;
1966                        rp->tx_stats.packets++;
1967                        u64_stats_update_end(&rp->tx_stats.syncp);
1968                }
1969                /* Free the original skb. */
1970                if (rp->tx_skbuff_dma[entry]) {
1971                        dma_unmap_single(hwdev,
1972                                         rp->tx_skbuff_dma[entry],
1973                                         skb->len,
1974                                         DMA_TO_DEVICE);
1975                }
1976                bytes_compl += skb->len;
1977                pkts_compl++;
1978                dev_consume_skb_any(skb);
1979                rp->tx_skbuff[entry] = NULL;
1980                dirty_tx++;
1981        }
1982
1983        rp->dirty_tx = dirty_tx;
1984        /* Pity we can't rely on the nearby BQL completion implicit barrier. */
1985        smp_wmb();
1986
1987        netdev_completed_queue(dev, pkts_compl, bytes_compl);
1988
1989        /* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
1990        if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
1991                netif_wake_queue(dev);
1992                smp_rmb();
1993                /* Re-check: rhine_start_tx() may have filled the ring meanwhile. */
1994                if (rhine_tx_queue_full(rp))
1995                        netif_stop_queue(dev);
1996        }
1997}
1998
1999/**
2000 * rhine_get_vlan_tci - extract TCI from Rx data buffer
2001 * @skb: pointer to sk_buff
2002 * @data_size: used data area of the buffer including CRC
2003 *
2004 * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q
2005 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
2006 * aligned following the CRC.
2007 */
2008static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
2009{
2010        u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
2011        return be16_to_cpup((__be16 *)trailer);
2012}
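
/*
 * Rx buffer layout with hardware tag extraction, as implied by the helper
 * above (sizes in bytes):
 *
 *   [ frame data ][ CRC 4 ][ pad to 4-byte boundary ][ TPID 2 ][ TCI 2 ]
 *
 * data_size covers the frame data plus CRC, hence the round-up to a
 * 4-byte boundary and the extra 2 bytes skipped to land on the TCI.
 */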
2013
2014static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
2015                                     int data_size)
2016{
2017        dma_rmb();
2018        if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
2019                u16 vlan_tci;
2020
2021                vlan_tci = rhine_get_vlan_tci(skb, data_size);
2022                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2023        }
2024}
2025
2026/* Process up to limit frames from receive ring */
2027static int rhine_rx(struct net_device *dev, int limit)
2028{
2029        struct rhine_private *rp = netdev_priv(dev);
2030        struct device *hwdev = dev->dev.parent;
2031        int entry = rp->cur_rx % RX_RING_SIZE;
2032        int count;
2033
2034        netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
2035                  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
2036
2037        /* If EOP is set on the next entry, it's a new packet. Send it up. */
2038        for (count = 0; count < limit; ++count) {
2039                struct rx_desc *desc = rp->rx_ring + entry;
2040                u32 desc_status = le32_to_cpu(desc->rx_status);
2041                int data_size = desc_status >> 16;
2042
2043                if (desc_status & DescOwn)
2044                        break;
2045
2046                netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
2047                          desc_status);
2048
2049                if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
2050                        if ((desc_status & RxWholePkt) != RxWholePkt) {
2051                                netdev_warn(dev,
2052        "Oversized Ethernet frame spanned multiple buffers, "
2053        "entry %#x length %d status %08x!\n",
2054                                            entry, data_size,
2055                                            desc_status);
2056                                dev->stats.rx_length_errors++;
2057                        } else if (desc_status & RxErr) {
2058                                /* There was an error. */
2059                                netif_dbg(rp, rx_err, dev,
2060                                          "%s() Rx error %08x\n", __func__,
2061                                          desc_status);
2062                                dev->stats.rx_errors++;
2063                                if (desc_status & 0x0030)
2064                                        dev->stats.rx_length_errors++;
2065                                if (desc_status & 0x0048)
2066                                        dev->stats.rx_fifo_errors++;
2067                                if (desc_status & 0x0004)
2068                                        dev->stats.rx_frame_errors++;
2069                                if (desc_status & 0x0002) {
2070                                        /* this can also be updated outside the interrupt handler */
2071                                        spin_lock(&rp->lock);
2072                                        dev->stats.rx_crc_errors++;
2073                                        spin_unlock(&rp->lock);
2074                                }
2075                        }
2076                } else {
2077                        /* Length should omit the CRC */
2078                        int pkt_len = data_size - 4;
2079                        struct sk_buff *skb;
2080
2081                        /* Check if the packet is long enough to accept without
2082                           copying to a minimally-sized skbuff. */
2083                        if (pkt_len < rx_copybreak) {
2084                                skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2085                                if (unlikely(!skb))
2086                                        goto drop;
2087
2088                                dma_sync_single_for_cpu(hwdev,
2089                                                        rp->rx_skbuff_dma[entry],
2090                                                        rp->rx_buf_sz,
2091                                                        DMA_FROM_DEVICE);
2092
2093                                skb_copy_to_linear_data(skb,
2094                                                 rp->rx_skbuff[entry]->data,
2095                                                 pkt_len);
2096
2097                                dma_sync_single_for_device(hwdev,
2098                                                           rp->rx_skbuff_dma[entry],
2099                                                           rp->rx_buf_sz,
2100                                                           DMA_FROM_DEVICE);
2101                        } else {
2102                                struct rhine_skb_dma sd;
2103
2104                                if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
2105                                        goto drop;
2106
2107                                skb = rp->rx_skbuff[entry];
2108
2109                                dma_unmap_single(hwdev,
2110                                                 rp->rx_skbuff_dma[entry],
2111                                                 rp->rx_buf_sz,
2112                                                 DMA_FROM_DEVICE);
2113                                rhine_skb_dma_nic_store(rp, &sd, entry);
2114                        }
2115
2116                        skb_put(skb, pkt_len);
2117
2118                        rhine_rx_vlan_tag(skb, desc, data_size);
2119
2120                        skb->protocol = eth_type_trans(skb, dev);
2121
2122                        netif_receive_skb(skb);
2123
2124                        u64_stats_update_begin(&rp->rx_stats.syncp);
2125                        rp->rx_stats.bytes += pkt_len;
2126                        rp->rx_stats.packets++;
2127                        u64_stats_update_end(&rp->rx_stats.syncp);
2128                }
2129give_descriptor_to_nic:
2130                desc->rx_status = cpu_to_le32(DescOwn);
2131                entry = (++rp->cur_rx) % RX_RING_SIZE;
2132        }
2133
2134        return count;
2135
2136drop:
2137        dev->stats.rx_dropped++;
2138        goto give_descriptor_to_nic;
2139}
2140
2141static void rhine_restart_tx(struct net_device *dev)
{
2142        struct rhine_private *rp = netdev_priv(dev);
2143        void __iomem *ioaddr = rp->base;
2144        int entry = rp->dirty_tx % TX_RING_SIZE;
2145        u32 intr_status;
2146
2147        /*
2148         * If new errors occurred, we need to sort them out before doing Tx.
2149         * In that case the interrupt path will bring us back here soon anyway.
2150         */
2151        intr_status = rhine_get_events(rp);
2152
2153        if ((intr_status & IntrTxErrSummary) == 0) {
2154
2155                /* We know better than the chip where it should continue. */
2156                iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2157                       ioaddr + TxRingPtr);
2158
2159                iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2160                       ioaddr + ChipCmd);
2161
2162                if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2163                        /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2164                        BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2165
2166                iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2167                       ioaddr + ChipCmd1);
2168                IOSYNC;
2169        } else {
2171                /* This should never happen */
2172                netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2173                           intr_status);
2174        }
2175
2176}
2177
2178static void rhine_slow_event_task(struct work_struct *work)
2179{
2180        struct rhine_private *rp =
2181                container_of(work, struct rhine_private, slow_event_task);
2182        struct net_device *dev = rp->dev;
2183        u32 intr_status;
2184
2185        mutex_lock(&rp->task_lock);
2186
2187        if (!rp->task_enable)
2188                goto out_unlock;
2189
2190        intr_status = rhine_get_events(rp);
2191        rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2192
2193        if (intr_status & IntrLinkChange)
2194                rhine_check_media(dev, 0);
2195
2196        if (intr_status & IntrPCIErr)
2197                netif_warn(rp, hw, dev, "PCI error\n");
2198
2199        iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2200
2201out_unlock:
2202        mutex_unlock(&rp->task_lock);
2203}
2204
2205static void
2206rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2207{
2208        struct rhine_private *rp = netdev_priv(dev);
2209        unsigned int start;
2210
2211        spin_lock_bh(&rp->lock);
2212        rhine_update_rx_crc_and_missed_errord(rp);
2213        spin_unlock_bh(&rp->lock);
2214
2215        netdev_stats_to_stats64(stats, &dev->stats);
2216
2217        do {
2218                start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2219                stats->rx_packets = rp->rx_stats.packets;
2220                stats->rx_bytes = rp->rx_stats.bytes;
2221        } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2222
2223        do {
2224                start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2225                stats->tx_packets = rp->tx_stats.packets;
2226                stats->tx_bytes = rp->tx_stats.bytes;
2227        } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2228}
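
/*
 * The fetch/retry loops above re-read the counters if rhine_rx() or
 * rhine_tx() updated them concurrently; on 64-bit builds they typically
 * reduce to plain loads.
 */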
2229
2230static void rhine_set_rx_mode(struct net_device *dev)
2231{
2232        struct rhine_private *rp = netdev_priv(dev);
2233        void __iomem *ioaddr = rp->base;
2234        u32 mc_filter[2];       /* Multicast hash filter */
2235        u8 rx_mode = 0x0C;      /* Note: 0x02=accept runt, 0x01=accept errs */
2236        struct netdev_hw_addr *ha;
2237
2238        if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
2239                rx_mode = 0x1C;
2240                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2241                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2242        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2243                   (dev->flags & IFF_ALLMULTI)) {
2244                /* Too many to match, or accept all multicasts. */
2245                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2246                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2247        } else if (rp->quirks & rqMgmt) {
2248                int i = 0;
2249                u32 mCAMmask = 0;       /* 32 mCAMs (6105M and better) */
2250                netdev_for_each_mc_addr(ha, dev) {
2251                        if (i == MCAM_SIZE)
2252                                break;
2253                        rhine_set_cam(ioaddr, i, ha->addr);
2254                        mCAMmask |= 1 << i;
2255                        i++;
2256                }
2257                rhine_set_cam_mask(ioaddr, mCAMmask);
2258        } else {
2259                memset(mc_filter, 0, sizeof(mc_filter));
2260                netdev_for_each_mc_addr(ha, dev) {
2261                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2262
2263                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2264                }
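                /*
                 * ether_crc() >> 26 uses the top six CRC bits as an index
                 * into the 64-bit hash (the two 32-bit MulticastFilter
                 * registers).  The hash is imperfect: multicasts that alias
                 * onto a set bit are passed up and filtered by the stack.
                 */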
2265                iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2266                iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2267        }
2268        /* enable/disable VLAN receive filtering */
2269        if (rp->quirks & rqMgmt) {
2270                if (dev->flags & IFF_PROMISC)
2271                        BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2272                else
2273                        BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2274        }
2275        BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2276}
2277
2278static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2279{
2280        struct device *hwdev = dev->dev.parent;
2281
2282        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2283        strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2284}
2285
2286static int netdev_get_link_ksettings(struct net_device *dev,
2287                                     struct ethtool_link_ksettings *cmd)
2288{
2289        struct rhine_private *rp = netdev_priv(dev);
2290
2291        mutex_lock(&rp->task_lock);
2292        mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
2293        mutex_unlock(&rp->task_lock);
2294
2295        return 0;
2296}
2297
2298static int netdev_set_link_ksettings(struct net_device *dev,
2299                                     const struct ethtool_link_ksettings *cmd)
2300{
2301        struct rhine_private *rp = netdev_priv(dev);
2302        int rc;
2303
2304        mutex_lock(&rp->task_lock);
2305        rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
2306        rhine_set_carrier(&rp->mii_if);
2307        mutex_unlock(&rp->task_lock);
2308
2309        return rc;
2310}
2311
2312static int netdev_nway_reset(struct net_device *dev)
2313{
2314        struct rhine_private *rp = netdev_priv(dev);
2315
2316        return mii_nway_restart(&rp->mii_if);
2317}
2318
2319static u32 netdev_get_link(struct net_device *dev)
2320{
2321        struct rhine_private *rp = netdev_priv(dev);
2322
2323        return mii_link_ok(&rp->mii_if);
2324}
2325
2326static u32 netdev_get_msglevel(struct net_device *dev)
2327{
2328        struct rhine_private *rp = netdev_priv(dev);
2329
2330        return rp->msg_enable;
2331}
2332
2333static void netdev_set_msglevel(struct net_device *dev, u32 value)
2334{
2335        struct rhine_private *rp = netdev_priv(dev);
2336
2337        rp->msg_enable = value;
2338}
2339
2340static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2341{
2342        struct rhine_private *rp = netdev_priv(dev);
2343
2344        if (!(rp->quirks & rqWOL))
2345                return;
2346
2347        spin_lock_irq(&rp->lock);
2348        wol->supported = WAKE_PHY | WAKE_MAGIC |
2349                         WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;  /* Untested */
2350        wol->wolopts = rp->wolopts;
2351        spin_unlock_irq(&rp->lock);
2352}
2353
2354static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2355{
2356        struct rhine_private *rp = netdev_priv(dev);
2357        u32 support = WAKE_PHY | WAKE_MAGIC |
2358                      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;     /* Untested */
2359
2360        if (!(rp->quirks & rqWOL))
2361                return -EINVAL;
2362
2363        if (wol->wolopts & ~support)
2364                return -EINVAL;
2365
2366        spin_lock_irq(&rp->lock);
2367        rp->wolopts = wol->wolopts;
2368        spin_unlock_irq(&rp->lock);
2369
2370        return 0;
2371}
2372
2373static const struct ethtool_ops netdev_ethtool_ops = {
2374        .get_drvinfo            = netdev_get_drvinfo,
2375        .nway_reset             = netdev_nway_reset,
2376        .get_link               = netdev_get_link,
2377        .get_msglevel           = netdev_get_msglevel,
2378        .set_msglevel           = netdev_set_msglevel,
2379        .get_wol                = rhine_get_wol,
2380        .set_wol                = rhine_set_wol,
2381        .get_link_ksettings     = netdev_get_link_ksettings,
2382        .set_link_ksettings     = netdev_set_link_ksettings,
2383};
2384
2385static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2386{
2387        struct rhine_private *rp = netdev_priv(dev);
2388        int rc;
2389
2390        if (!netif_running(dev))
2391                return -EINVAL;
2392
2393        mutex_lock(&rp->task_lock);
2394        rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2395        rhine_set_carrier(&rp->mii_if);
2396        mutex_unlock(&rp->task_lock);
2397
2398        return rc;
2399}
2400
2401static int rhine_close(struct net_device *dev)
2402{
2403        struct rhine_private *rp = netdev_priv(dev);
2404        void __iomem *ioaddr = rp->base;
2405
2406        rhine_task_disable(rp);
2407        napi_disable(&rp->napi);
2408        netif_stop_queue(dev);
2409
2410        netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2411                  ioread16(ioaddr + ChipCmd));
2412
2413        /* Switch to loopback mode to avoid hardware races. */
2414        iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2415
2416        rhine_irq_disable(rp);
2417
2418        /* Stop the chip's Tx and Rx processes. */
2419        iowrite16(CmdStop, ioaddr + ChipCmd);
2420
2421        free_irq(rp->irq, dev);
2422        free_rbufs(dev);
2423        free_tbufs(dev);
2424        free_ring(dev);
2425
2426        return 0;
2427}
2428
2429
2430static void rhine_remove_one_pci(struct pci_dev *pdev)
2431{
2432        struct net_device *dev = pci_get_drvdata(pdev);
2433        struct rhine_private *rp = netdev_priv(dev);
2434
2435        unregister_netdev(dev);
2436
2437        pci_iounmap(pdev, rp->base);
2438        pci_release_regions(pdev);
2439
2440        free_netdev(dev);
2441        pci_disable_device(pdev);
2442}
2443
2444static int rhine_remove_one_platform(struct platform_device *pdev)
2445{
2446        struct net_device *dev = platform_get_drvdata(pdev);
2447        struct rhine_private *rp = netdev_priv(dev);
2448
2449        unregister_netdev(dev);
2450
2451        iounmap(rp->base);
2452
2453        free_netdev(dev);
2454
2455        return 0;
2456}
2457
2458static void rhine_shutdown_pci(struct pci_dev *pdev)
2459{
2460        struct net_device *dev = pci_get_drvdata(pdev);
2461        struct rhine_private *rp = netdev_priv(dev);
2462        void __iomem *ioaddr = rp->base;
2463
2464        if (!(rp->quirks & rqWOL))
2465                return; /* Nothing to do for non-WOL adapters */
2466
2467        rhine_power_init(dev);
2468
2469        /* Make sure we use pattern 0, 1 and not 4, 5 */
2470        if (rp->quirks & rq6patterns)
2471                iowrite8(0x04, ioaddr + WOLcgClr);
2472
2473        spin_lock(&rp->lock);
2474
2475        if (rp->wolopts & WAKE_MAGIC) {
2476                iowrite8(WOLmagic, ioaddr + WOLcrSet);
2477                /*
2478                 * Turn EEPROM-controlled wake-up back on -- some hardware may
2479                 * not cooperate otherwise.
2480                 */
2481                iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2482        }
2483
2484        if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2485                iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2486
2487        if (rp->wolopts & WAKE_PHY)
2488                iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2489
2490        if (rp->wolopts & WAKE_UCAST)
2491                iowrite8(WOLucast, ioaddr + WOLcrSet);
2492
2493        if (rp->wolopts) {
2494                /* Enable legacy WOL (for old motherboards) */
2495                iowrite8(0x01, ioaddr + PwcfgSet);
2496                iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2497        }
2498
2499        spin_unlock(&rp->lock);
2500
2501        if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2502                iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2503
2504                pci_wake_from_d3(pdev, true);
2505                pci_set_power_state(pdev, PCI_D3hot);
2506        }
2507}
2508
2509#ifdef CONFIG_PM_SLEEP
2510static int rhine_suspend(struct device *device)
2511{
2512        struct net_device *dev = dev_get_drvdata(device);
2513        struct rhine_private *rp = netdev_priv(dev);
2514
2515        if (!netif_running(dev))
2516                return 0;
2517
2518        rhine_task_disable(rp);
2519        rhine_irq_disable(rp);
2520        napi_disable(&rp->napi);
2521
2522        netif_device_detach(dev);
2523
2524        if (dev_is_pci(device))
2525                rhine_shutdown_pci(to_pci_dev(device));
2526
2527        return 0;
2528}
2529
2530static int rhine_resume(struct device *device)
2531{
2532        struct net_device *dev = dev_get_drvdata(device);
2533        struct rhine_private *rp = netdev_priv(dev);
2534
2535        if (!netif_running(dev))
2536                return 0;
2537
2538        enable_mmio(rp->pioaddr, rp->quirks);
2539        rhine_power_init(dev);
2540        free_tbufs(dev);
2541        alloc_tbufs(dev);
2542        rhine_reset_rbufs(rp);
2543        rhine_task_enable(rp);
2544        spin_lock_bh(&rp->lock);
2545        init_registers(dev);
2546        spin_unlock_bh(&rp->lock);
2547
2548        netif_device_attach(dev);
2549
2550        return 0;
2551}
2552
2553static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2554#define RHINE_PM_OPS    (&rhine_pm_ops)
2555
2556#else
2557
2558#define RHINE_PM_OPS    NULL
2559
2560#endif /* !CONFIG_PM_SLEEP */
2561
2562static struct pci_driver rhine_driver_pci = {
2563        .name           = DRV_NAME,
2564        .id_table       = rhine_pci_tbl,
2565        .probe          = rhine_init_one_pci,
2566        .remove         = rhine_remove_one_pci,
2567        .shutdown       = rhine_shutdown_pci,
2568        .driver.pm      = RHINE_PM_OPS,
2569};
2570
2571static struct platform_driver rhine_driver_platform = {
2572        .probe          = rhine_init_one_platform,
2573        .remove         = rhine_remove_one_platform,
2574        .driver = {
2575                .name   = DRV_NAME,
2576                .of_match_table = rhine_of_tbl,
2577                .pm             = RHINE_PM_OPS,
2578        }
2579};
2580
2581static const struct dmi_system_id rhine_dmi_table[] __initconst = {
2582        {
2583                .ident = "EPIA-M",
2584                .matches = {
2585                        DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2586                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2587                },
2588        },
2589        {
2590                .ident = "KV7",
2591                .matches = {
2592                        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2593                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2594                },
2595        },
2596        { NULL }
2597};
2598
2599static int __init rhine_init(void)
2600{
2601        int ret_pci, ret_platform;
2602
2603/* when a module, the warning below is printed whether or not devices are found in probe */
2604        if (dmi_check_system(rhine_dmi_table)) {
2605                /* these BIOSes fail at PXE boot if chip is in D3 */
2606                avoid_D3 = true;
2607                pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2608        } else if (avoid_D3)
2610                pr_info("avoid_D3 set\n");
2611
2612        ret_pci = pci_register_driver(&rhine_driver_pci);
2613        ret_platform = platform_driver_register(&rhine_driver_platform);
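        /*
         * Both a PCI and a platform driver are registered; module load is
         * treated as successful if at least one registration succeeded, and
         * the PCI error is only propagated when both failed.
         */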
2614        if ((ret_pci < 0) && (ret_platform < 0))
2615                return ret_pci;
2616
2617        return 0;
2618}
2619
2620
2621static void __exit rhine_cleanup(void)
2622{
2623        platform_driver_unregister(&rhine_driver_platform);
2624        pci_unregister_driver(&rhine_driver_pci);
2625}
2626
2627
2628module_init(rhine_init);
2629module_exit(rhine_cleanup);
2630