linux/drivers/net/ethernet/dlink/sundance.c
/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
        Written 1999-2000 by Donald Becker.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Support and updates available at
        http://www.scyld.com/network/sundance.html
        [link no longer provides useful info -jgarzik]
        Archives of the mailing list are still available at
        http://www.beowulf.org/pipermail/netdrivers/

*/

#define DRV_NAME        "sundance"
#define DRV_VERSION     "1.2"
#define DRV_RELDATE     "11-Sep-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC.  */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
                 autosense      Autosensing active media.
                 10mbps_hd      10Mbps half duplex.
                 10mbps_fd      10Mbps full duplex.
                 100mbps_hd     100Mbps half duplex.
                 100mbps_fd     100Mbps full duplex.
                 0              Autosensing active media.
                 1              10Mbps half duplex.
                 2              10Mbps full duplex.
                 3              100Mbps half duplex.
                 4              100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE    32
#define TX_QUEUE_LEN    (TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
#define RX_RING_SIZE    64
#define RX_BUDGET       32
#define TX_TOTAL_SIZE   (TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE   (RX_RING_SIZE * sizeof(struct netdev_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>              /* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
        KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
        " Written by Donald Becker\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");

/*
                                Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.
The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/
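
/* A minimal sketch (illustrative only) of the copybreak decision described
   in IIIb/c above, for a received frame of pkt_len bytes; the real version
   lives in rx_poll() below:

        if (pkt_len < rx_copybreak &&
            (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                skb_reserve(skb, 2);    // re-align the IP header
                // copy the tiny frame out; the ring skb stays in place
        } else {
                // pass the full-sized ring skb straight up the stack
        }
*/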

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static const struct pci_device_id sundance_pci_tbl[] = {
        { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
        { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
        { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
        { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
        { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
        { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
        { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
        { }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
        netdev_io_size = 128
};

struct pci_id_info {
        const char *name;
};
static const struct pci_id_info pci_id_tbl[] = {
        {"D-Link DFE-550TX FAST Ethernet Adapter"},
        {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
        {"D-Link DFE-580TX 4 port Server Adapter"},
        {"D-Link DFE-530TXS FAST Ethernet Adapter"},
        {"D-Link DL10050-based FAST Ethernet Adapter"},
        {"Sundance Technology Alta"},
        {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
        { }     /* terminate list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
        DMACtrl = 0x00,
        TxListPtr = 0x04,
        TxDMABurstThresh = 0x08,
        TxDMAUrgentThresh = 0x09,
        TxDMAPollPeriod = 0x0a,
        RxDMAStatus = 0x0c,
        RxListPtr = 0x10,
        DebugCtrl0 = 0x1a,
        DebugCtrl1 = 0x1c,
        RxDMABurstThresh = 0x14,
        RxDMAUrgentThresh = 0x15,
        RxDMAPollPeriod = 0x16,
        LEDCtrl = 0x1a,
        ASICCtrl = 0x30,
        EEData = 0x34,
        EECtrl = 0x36,
        FlashAddr = 0x40,
        FlashData = 0x44,
        WakeEvent = 0x45,
        TxStatus = 0x46,
        TxFrameId = 0x47,
        DownCounter = 0x18,
        IntrClear = 0x4a,
        IntrEnable = 0x4c,
        IntrStatus = 0x4e,
        MACCtrl0 = 0x50,
        MACCtrl1 = 0x52,
        StationAddr = 0x54,
        MaxFrameSize = 0x5A,
        RxMode = 0x5c,
        MIICtrl = 0x5e,
        MulticastFilter0 = 0x60,
        MulticastFilter1 = 0x64,
        RxOctetsLow = 0x68,
        RxOctetsHigh = 0x6a,
        TxOctetsLow = 0x6c,
        TxOctetsHigh = 0x6e,
        TxFramesOK = 0x70,
        RxFramesOK = 0x72,
        StatsCarrierError = 0x74,
        StatsLateColl = 0x75,
        StatsMultiColl = 0x76,
        StatsOneColl = 0x77,
        StatsTxDefer = 0x78,
        RxMissed = 0x79,
        StatsTxXSDefer = 0x7a,
        StatsTxAbort = 0x7b,
        StatsBcastTx = 0x7c,
        StatsBcastRx = 0x7d,
        StatsMcastTx = 0x7e,
        StatsMcastRx = 0x7f,
        /* Aliased and bogus values! */
        RxStatus = 0x0c,
};

#define ASIC_HI_WORD(x) ((x) + 2)

enum ASICCtrl_HiWord_bit {
        GlobalReset = 0x0001,
        RxReset = 0x0002,
        TxReset = 0x0004,
        DMAReset = 0x0008,
        FIFOReset = 0x0010,
        NetworkReset = 0x0020,
        HostReset = 0x0040,
        ResetBusy = 0x0400,
};
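
/* These reset bits live in the high 16 bits of the 32-bit ASICCtrl register
   (hence ASIC_HI_WORD above): sundance_reset() expects its reset_cmd already
   shifted by 16, and completion is polled via (ResetBusy << 16). */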

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
        IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
        IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
        IntrDrvRqst=0x0040,
        StatsMax=0x0080, LinkChange=0x0100,
        IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
        AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
        AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
        EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
        EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
        StatsEnable=0x0020,     StatsDisable=0x0040, StatsEnabled=0x0080,
        TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
        RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* Bits in WakeEvent register. */
enum wake_event_bits {
        WakePktEnable = 0x01,
        MagicPktEnable = 0x02,
        LinkEventEnable = 0x04,
        WolEnable = 0x80,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
        __le32 next_desc;
        __le32 status;
        struct desc_frag { __le32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
        DescOwn=0x8000,
        DescEndPacket=0x4000,
        DescEndRing=0x2000,
        LastFrag=0x80000000,
        DescIntrOnTx=0x8000,
        DescIntrOnDMADone=0x80000000,
        DisableAlign = 0x00000001,
};
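
/* Several of these bit values are aliased deliberately: the status word is
   interpreted one way for Rx descriptors (DescOwn/DescEndPacket/DescEndRing)
   and another for Tx descriptors (DescIntrOnTx, plus the frame id that
   start_tx() stores in bits 2-9).  LastFrag and DescIntrOnDMADone share
   0x80000000 for the same reason; LastFrag is set in frag[].length, not in
   the status word. */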

#define PRIV_ALIGN      15      /* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT         4
struct netdev_private {
        /* Descriptor rings first for alignment. */
        struct netdev_desc *rx_ring;
        struct netdev_desc *tx_ring;
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        dma_addr_t tx_ring_dma;
        dma_addr_t rx_ring_dma;
        struct timer_list timer;                /* Media monitoring timer. */
        /* ethtool extra stats */
        struct {
                u64 tx_multiple_collisions;
                u64 tx_single_collisions;
                u64 tx_late_collisions;
                u64 tx_deferred;
                u64 tx_deferred_excessive;
                u64 tx_aborted;
                u64 tx_bcasts;
                u64 rx_bcasts;
                u64 tx_mcasts;
                u64 rx_mcasts;
        } xstats;
        /* Frequently used values: keep some adjacent for cache effect. */
        spinlock_t lock;
        int msg_enable;
        int chip_id;
        unsigned int cur_rx, dirty_rx;          /* Producer/consumer ring indices */
        unsigned int rx_buf_sz;                 /* Based on MTU+slack. */
        struct netdev_desc *last_tx;            /* Last Tx descriptor used. */
        unsigned int cur_tx, dirty_tx;
        /* These values keep track of the transceiver/media in use. */
        unsigned int flowctrl:1;
        unsigned int default_port:4;            /* Last dev->if_port value. */
        unsigned int an_enable:1;
        unsigned int speed;
        unsigned int wol_enabled:1;             /* Wake on LAN enabled */
        struct tasklet_struct rx_tasklet;
        struct tasklet_struct tx_tasklet;
        int budget;
        int cur_task;
        /* Multicast and receive mode. */
        spinlock_t mcastlock;                   /* SMP lock multicast updates. */
        u16 mcast_filter[4];
        /* MII transceiver section. */
        struct mii_if_info mii_if;
        int mii_preamble_required;
        unsigned char phys[MII_CNT];            /* MII device addresses, only first one used. */
        struct pci_dev *pci_dev;
        void __iomem *base;
        spinlock_t statlock;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET        0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
                        IntrDrvRqst | IntrTxDone | StatsMax | \
                        LinkChange)
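
/* Note that IntrRxDone is not in DEFAULT_INTR: receive completion is handled
   by the rx_poll() tasklet, and intr_handler() masks IntrRxDone|IntrRxDMADone
   while that tasklet is pending so the chip does not re-interrupt for Rx work
   that is already scheduled. */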

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  mdio_wait_link(struct net_device *dev, int wait);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static int sundance_set_mac_addr(struct net_device *dev, void *data);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base + ASICCtrl;
        int countdown;

        /* ST201 documentation states ASICCtrl is a 32-bit register */
        iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
        /* ST201 documentation states reset can take up to 1 ms */
        countdown = 10 + 1;
        while (ioread32 (ioaddr) & (ResetBusy << 16)) {
                if (--countdown == 0) {
                        printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
                        break;
                }
                udelay(100);
        }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sundance_poll_controller(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);

        disable_irq(np->pci_dev->irq);
        intr_handler(np->pci_dev->irq, dev);
        enable_irq(np->pci_dev->irq);
}
#endif

static const struct net_device_ops netdev_ops = {
        .ndo_open               = netdev_open,
        .ndo_stop               = netdev_close,
        .ndo_start_xmit         = start_tx,
        .ndo_get_stats          = get_stats,
        .ndo_set_rx_mode        = set_rx_mode,
        .ndo_do_ioctl           = netdev_ioctl,
        .ndo_tx_timeout         = tx_timeout,
        .ndo_change_mtu         = change_mtu,
        .ndo_set_mac_address    = sundance_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = sundance_poll_controller,
#endif
};

static int sundance_probe1(struct pci_dev *pdev,
                           const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct netdev_private *np;
        static int card_idx;
        int chip_idx = ent->driver_data;
        int irq;
        int i;
        void __iomem *ioaddr;
        u16 mii_ctl;
        void *ring_space;
        dma_addr_t ring_dma;
#ifdef USE_IO_OPS
        int bar = 0;
#else
        int bar = 1;
#endif
        int phy, phy_end, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        static int printed_version;
        if (!printed_version++)
                printk(version);
#endif

        if (pci_enable_device(pdev))
                return -EIO;
        pci_set_master(pdev);

        irq = pdev->irq;

        dev = alloc_etherdev(sizeof(*np));
        if (!dev)
                return -ENOMEM;
        SET_NETDEV_DEV(dev, &pdev->dev);

        if (pci_request_regions(pdev, DRV_NAME))
                goto err_out_netdev;

        ioaddr = pci_iomap(pdev, bar, netdev_io_size);
        if (!ioaddr)
                goto err_out_res;

        for (i = 0; i < 3; i++)
                ((__le16 *)dev->dev_addr)[i] =
                        cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));

        np = netdev_priv(dev);
        np->base = ioaddr;
        np->pci_dev = pdev;
        np->chip_id = chip_idx;
        np->msg_enable = (1 << debug) - 1;
        spin_lock_init(&np->lock);
        spin_lock_init(&np->statlock);
        tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
        tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

        ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
                        &ring_dma, GFP_KERNEL);
        if (!ring_space)
                goto err_out_cleardev;
        np->tx_ring = (struct netdev_desc *)ring_space;
        np->tx_ring_dma = ring_dma;

        ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
                        &ring_dma, GFP_KERNEL);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = (struct netdev_desc *)ring_space;
        np->rx_ring_dma = ring_dma;

        np->mii_if.dev = dev;
        np->mii_if.mdio_read = mdio_read;
        np->mii_if.mdio_write = mdio_write;
        np->mii_if.phy_id_mask = 0x1f;
        np->mii_if.reg_num_mask = 0x1f;

        /* The chip-specific entries in the device structure. */
        dev->netdev_ops = &netdev_ops;
        dev->ethtool_ops = &ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;

        pci_set_drvdata(pdev, dev);

        i = register_netdev(dev);
        if (i)
                goto err_out_unmap_rx;

        printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
               dev->name, pci_id_tbl[chip_idx].name, ioaddr,
               dev->dev_addr, irq);

        np->phys[0] = 1;                /* Default setting */
        np->mii_preamble_required++;

        /*
         * Some PHYs don't seem to deal well with address 0 being accessed
         * first.
         */
        if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
                phy = 0;
                phy_end = 31;
        } else {
                phy = 1;
                phy_end = 32;   /* wraps to zero, due to 'phy & 0x1f' */
        }
        for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
                int phyx = phy & 0x1f;
                int mii_status = mdio_read(dev, phyx, MII_BMSR);
                if (mii_status != 0xffff  &&  mii_status != 0x0000) {
                        np->phys[phy_idx++] = phyx;
                        np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
                        if ((mii_status & 0x0040) == 0)
                                np->mii_preamble_required++;
                        printk(KERN_INFO "%s: MII PHY found at address %d, status "
                                   "0x%4.4x advertising %4.4x.\n",
                                   dev->name, phyx, mii_status, np->mii_if.advertising);
                }
        }
        np->mii_preamble_required--;

        if (phy_idx == 0) {
                printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
                           dev->name, ioread32(ioaddr + ASICCtrl));
                goto err_out_unregister;
        }

        np->mii_if.phy_id = np->phys[0];

        /* Parse override configuration */
        np->an_enable = 1;
        if (card_idx < MAX_UNITS) {
                if (media[card_idx] != NULL) {
                        np->an_enable = 0;
                        if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
                            strcmp (media[card_idx], "4") == 0) {
                                np->speed = 100;
                                np->mii_if.full_duplex = 1;
                        } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "3") == 0) {
                                np->speed = 100;
                                np->mii_if.full_duplex = 0;
                        } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
                                   strcmp (media[card_idx], "2") == 0) {
                                np->speed = 10;
                                np->mii_if.full_duplex = 1;
                        } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
                                   strcmp (media[card_idx], "1") == 0) {
                                np->speed = 10;
                                np->mii_if.full_duplex = 0;
                        } else {
                                np->an_enable = 1;
                        }
                }
                if (flowctrl == 1)
                        np->flowctrl = 1;
        }

        /* Fibre PHY? */
        if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
                /* Default 100Mbps Full */
                if (np->an_enable) {
                        np->speed = 100;
                        np->mii_if.full_duplex = 1;
                        np->an_enable = 0;
                }
        }
        /* Reset PHY */
        mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
        mdelay (300);
        /* If flow control enabled, we need to advertise it.*/
        if (np->flowctrl)
                mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
        mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
        /* Force media type */
        if (!np->an_enable) {
                mii_ctl = 0;
                mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
                mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
                mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
                printk (KERN_INFO "Override speed=%d, %s duplex\n",
                        np->speed, np->mii_if.full_duplex ? "Full" : "Half");

        }

        /* Perhaps move the reset here? */
        /* Reset the chip to erase previous misconfiguration. */
        if (netif_msg_hw(np))
                printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
        sundance_reset(dev, 0x00ff << 16);
        if (netif_msg_hw(np))
                printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

        card_idx++;
        return 0;

err_out_unregister:
        unregister_netdev(dev);
err_out_unmap_rx:
        dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
                np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
                np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
        pci_iounmap(pdev, ioaddr);
err_out_res:
        pci_release_regions(pdev);
err_out_netdev:
        free_netdev (dev);
        return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
        if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
                return -EINVAL;
        if (netif_running(dev))
                return -EBUSY;
        dev->mtu = new_mtu;
        return 0;
}

#define eeprom_delay(ee_addr)   ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int eeprom_read(void __iomem *ioaddr, int location)
{
        int boguscnt = 10000;           /* Typical 1900 ticks. */
        iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
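        /* Opcode 0x0200 requests a read of the addressed word; the chip
           holds bit 0x8000 of EECtrl high while the access is in flight,
           hence the busy-wait below. */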
        do {
                eeprom_delay(ioaddr + EECtrl);
                if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
                        return ioread16(ioaddr + EEData);
                }
        } while (--boguscnt > 0);
        return 0;
}

/*  MII transceiver control section.
        Read and write the MII registers using software-generated serial
        MDIO protocol.  See the MII specifications or DP83840A data sheet
        for details.

        The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
        met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
        MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
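
/* Management-frame layout bit-banged below (IEEE 802.3 clause 22).  A read
   command is 16 bits: two extra idle '1' bits, start <01>, opcode <10> for
   read, a 5-bit PHY address and a 5-bit register address; mdio_read() packs
   this as (0xf6 << 10) | (phy_id << 5) | location, after which the PHY
   drives a turnaround bit and 16 data bits back.  A write command is 32
   bits: start <01>, opcode <01> for write, the PHY and register addresses,
   a <10> turnaround, then the 16 data bits, packed by mdio_write() as
   (0x5002 << 16) | (phy_id << 23) | (location << 18) | value. */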

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
        int bits = 32;

        /* Establish sync by sending at least 32 logic ones. */
        while (--bits >= 0) {
                iowrite8(MDIO_WRITE1, mdio_addr);
                mdio_delay();
                iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl;
        int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
        int i, retval = 0;

        if (np->mii_preamble_required)
                mdio_sync(mdio_addr);

        /* Shift the read command bits out. */
        for (i = 15; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

                iowrite8(dataval, mdio_addr);
                mdio_delay();
                iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
        /* Read the two transition, 16 data, and wire-idle bits. */
        for (i = 19; i > 0; i--) {
                iowrite8(MDIO_EnbIn, mdio_addr);
                mdio_delay();
                retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
                iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
        return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *mdio_addr = np->base + MIICtrl;
        int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
        int i;

        if (np->mii_preamble_required)
                mdio_sync(mdio_addr);

        /* Shift the command bits out. */
        for (i = 31; i >= 0; i--) {
                int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

                iowrite8(dataval, mdio_addr);
                mdio_delay();
                iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
        /* Clear out extra bits. */
        for (i = 2; i > 0; i--) {
                iowrite8(MDIO_EnbIn, mdio_addr);
                mdio_delay();
                iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
                mdio_delay();
        }
}

static int mdio_wait_link(struct net_device *dev, int wait)
{
        int bmsr;
        int phy_id;
        struct netdev_private *np;

        np = netdev_priv(dev);
        phy_id = np->phys[0];

        do {
                bmsr = mdio_read(dev, phy_id, MII_BMSR);
                if (bmsr & BMSR_LSTATUS)        /* link is up */
                        return 0;
                mdelay(1);
        } while (--wait > 0);
        return -1;
}

static int netdev_open(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        const int irq = np->pci_dev->irq;
        unsigned long flags;
        int i;

        sundance_reset(dev, 0x00ff << 16);

        i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
        if (i)
                return i;

        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);

        init_ring(dev);

        iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
        /* The Tx list pointer is written as packets are queued. */

        /* Initialize other registers. */
        __set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
        iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
        iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
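        /* MaxFrameSize is the MTU plus the 14-byte Ethernet header, plus a
           4-byte 802.1Q tag when VLANs may be in use. */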
        if (dev->mtu > 2047)
                iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

        /* Configure the PCI bus bursts and FIFO thresholds. */

        if (dev->if_port == 0)
                dev->if_port = np->default_port;

        spin_lock_init(&np->mcastlock);

        set_rx_mode(dev);
        iowrite16(0, ioaddr + IntrEnable);
        iowrite16(0, ioaddr + DownCounter);
        /* Set the chip to poll every N*320nsec. */
        iowrite8(100, ioaddr + RxDMAPollPeriod);
        iowrite8(127, ioaddr + TxDMAPollPeriod);
        /* Fix DFE-580TX packet drop issue */
        if (np->pci_dev->revision >= 0x14)
                iowrite8(0x01, ioaddr + DebugCtrl1);
        netif_start_queue(dev);

        spin_lock_irqsave(&np->lock, flags);
        reset_tx(dev);
        spin_unlock_irqrestore(&np->lock, flags);

        iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

        /* Disable WoL: clear all wake-event enables. */
        iowrite8(ioread8(ioaddr + WakeEvent) &
                 ~(WakePktEnable | MagicPktEnable | LinkEventEnable | WolEnable),
                 ioaddr + WakeEvent);
        np->wol_enabled = 0;

        if (netif_msg_ifup(np))
                printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
                           "MAC Control %x, %4.4x %4.4x.\n",
                           dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
                           ioread32(ioaddr + MACCtrl0),
                           ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

        /* Set the timer to check for link beat. */
        init_timer(&np->timer);
        np->timer.expires = jiffies + 3*HZ;
        np->timer.data = (unsigned long)dev;
        np->timer.function = netdev_timer;                              /* timer handler */
        add_timer(&np->timer);

        /* Enable interrupts by setting the interrupt mask. */
        iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

        return 0;
}

static void check_duplex(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
        int negotiated = mii_lpa & np->mii_if.advertising;
        int duplex;

        /* Force media */
        if (!np->an_enable || mii_lpa == 0xffff) {
                if (np->mii_if.full_duplex)
                        iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
                                ioaddr + MACCtrl0);
                return;
        }

        /* Autonegotiation */
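        /* Full duplex if the partner advertised 100FULL (0x0100), or 10FULL
           (0x0040) with no 100 Mbps ability at all (mask 0x01C0). */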
        duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
        if (np->mii_if.full_duplex != duplex) {
                np->mii_if.full_duplex = duplex;
                if (netif_msg_link(np))
                        printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
                                   "negotiated capability %4.4x.\n", dev->name,
                                   duplex ? "full" : "half", np->phys[0], negotiated);
                iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
        }
}

static void netdev_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int next_tick = 10*HZ;

        if (netif_msg_timer(np)) {
                printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
                           "Tx %x Rx %x.\n",
                           dev->name, ioread16(ioaddr + IntrEnable),
                           ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
        }
        check_duplex(dev);
        np->timer.expires = jiffies + next_tick;
        add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        unsigned long flag;

        netif_stop_queue(dev);
        tasklet_disable(&np->tx_tasklet);
        iowrite16(0, ioaddr + IntrEnable);
        printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
                   "TxFrameId %2.2x,"
                   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
                   ioread8(ioaddr + TxFrameId));

        {
                int i;
                for (i = 0; i < TX_RING_SIZE; i++) {
                        printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
                                (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
                                le32_to_cpu(np->tx_ring[i].next_desc),
                                le32_to_cpu(np->tx_ring[i].status),
                                (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
                                le32_to_cpu(np->tx_ring[i].frag[0].length));
                }
                printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
                        ioread32(np->base + TxListPtr),
                        netif_queue_stopped(dev));
                printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
                        np->cur_tx, np->cur_tx % TX_RING_SIZE,
                        np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
                printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
                printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
        }
        spin_lock_irqsave(&np->lock, flag);

        /* Stop and restart the chip's Tx processes. */
        reset_tx(dev);
        spin_unlock_irqrestore(&np->lock, flag);

        dev->if_port = 0;

        dev->trans_start = jiffies; /* prevent tx timeout */
        dev->stats.tx_errors++;
        if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
                netif_wake_queue(dev);
        }
        iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
        tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        int i;

        np->cur_rx = np->cur_tx = 0;
        np->dirty_rx = np->dirty_tx = 0;
        np->cur_task = 0;

        np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

        /* Initialize all Rx descriptors. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
                        ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
                np->rx_ring[i].status = 0;
                np->rx_ring[i].frag[0].length = 0;
                np->rx_skbuff[i] = NULL;
        }

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb =
                        netdev_alloc_skb(dev, np->rx_buf_sz + 2);
                np->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
                np->rx_ring[i].frag[0].addr = cpu_to_le32(
                        dma_map_single(&np->pci_dev->dev, skb->data,
                                np->rx_buf_sz, DMA_FROM_DEVICE));
                if (dma_mapping_error(&np->pci_dev->dev,
                                        np->rx_ring[i].frag[0].addr)) {
                        dev_kfree_skb(skb);
                        np->rx_skbuff[i] = NULL;
                        break;
                }
                np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
        }
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
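        /* dirty_rx now trails cur_rx by the number of slots left unfilled;
           the unsigned wraparound is intentional, since the refill logic
           works on cur_rx - dirty_rx differences rather than raw indices. */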

        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_skbuff[i] = NULL;
                np->tx_ring[i].status = 0;
        }
}

static void tx_poll (unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct netdev_private *np = netdev_priv(dev);
        unsigned head = np->cur_task % TX_RING_SIZE;
        struct netdev_desc *txdesc =
                &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

        /* Chain the next pointer */
        for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
                int entry = np->cur_task % TX_RING_SIZE;
                txdesc = &np->tx_ring[entry];
                if (np->last_tx) {
                        np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
                                entry*sizeof(struct netdev_desc));
                }
                np->last_tx = txdesc;
        }
        /* Indicate the latest descriptor of tx ring */
        txdesc->status |= cpu_to_le32(DescIntrOnTx);

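        /* The chip clears TxListPtr once it has walked off the end of the
           list, so a zero here means Tx DMA is idle and must be restarted at
           the new head of the chain. */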
        if (ioread32 (np->base + TxListPtr) == 0)
                iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
                        np->base + TxListPtr);
}

static netdev_tx_t
start_tx (struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        struct netdev_desc *txdesc;
        unsigned entry;

        /* Calculate the next Tx descriptor entry. */
        entry = np->cur_tx % TX_RING_SIZE;
        np->tx_skbuff[entry] = skb;
        txdesc = &np->tx_ring[entry];

        txdesc->next_desc = 0;
        txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
        txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
                                skb->data, skb->len, DMA_TO_DEVICE));
        if (dma_mapping_error(&np->pci_dev->dev,
                                txdesc->frag[0].addr))
                        goto drop_frame;
        txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

        /* Increment cur_tx before tasklet_schedule() */
        np->cur_tx++;
        mb();
        /* Schedule a tx_poll() task */
        tasklet_schedule(&np->tx_tasklet);

        /* On some architectures: explicitly flush cache lines here. */
        if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
            !netif_queue_stopped(dev)) {
                /* do nothing */
        } else {
                netif_stop_queue (dev);
        }
        if (netif_msg_tx_queued(np)) {
                printk (KERN_DEBUG
                        "%s: Transmit frame #%d queued in slot %d.\n",
                        dev->name, np->cur_tx, entry);
        }
        return NETDEV_TX_OK;

drop_frame:
        dev_kfree_skb_any(skb);
        np->tx_skbuff[entry] = NULL;
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}

/* Reset hardware tx and free all of tx buffers */
static int
reset_tx (struct net_device *dev)
{
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        struct sk_buff *skb;
        int i;

        /* Reset tx logic, TxListPtr will be cleaned */
        iowrite16 (TxDisable, ioaddr + MACCtrl1);
        sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

        /* free all tx skbuff */
        for (i = 0; i < TX_RING_SIZE; i++) {
                np->tx_ring[i].next_desc = 0;

                skb = np->tx_skbuff[i];
                if (skb) {
                        dma_unmap_single(&np->pci_dev->dev,
                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
                                skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
                        np->tx_skbuff[i] = NULL;
                        dev->stats.tx_dropped++;
                }
        }
        np->cur_tx = np->dirty_tx = 0;
        np->cur_task = 0;

        np->last_tx = NULL;
        iowrite8(127, ioaddr + TxDMAPollPeriod);

        iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
        return 0;
}

/* The interrupt handler cleans up after the Tx thread
   and schedules the Rx tasklet work. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
        int hw_frame_id;
        int tx_cnt;
        int tx_status;
        int handled = 0;
        int i;


        do {
                int intr_status = ioread16(ioaddr + IntrStatus);
                iowrite16(intr_status, ioaddr + IntrStatus);

                if (netif_msg_intr(np))
                        printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
                                   dev->name, intr_status);

                if (!(intr_status & DEFAULT_INTR))
                        break;

                handled = 1;

                if (intr_status & (IntrRxDMADone)) {
                        iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
                                        ioaddr + IntrEnable);
                        if (np->budget < 0)
                                np->budget = RX_BUDGET;
                        tasklet_schedule(&np->rx_tasklet);
                }
                if (intr_status & (IntrTxDone | IntrDrvRqst)) {
                        tx_status = ioread16 (ioaddr + TxStatus);
                        for (tx_cnt = 32; tx_status & 0x80; --tx_cnt) {
                                if (netif_msg_tx_done(np))
                                        printk("%s: Transmit status is %2.2x.\n",
                                               dev->name, tx_status);
                                if (tx_status & 0x1e) {
                                        if (netif_msg_tx_err(np))
                                                printk("%s: Transmit error status %4.4x.\n",
                                                           dev->name, tx_status);
                                        dev->stats.tx_errors++;
                                        if (tx_status & 0x10)
                                                dev->stats.tx_fifo_errors++;
                                        if (tx_status & 0x08)
                                                dev->stats.collisions++;
                                        if (tx_status & 0x04)
                                                dev->stats.tx_fifo_errors++;
                                        if (tx_status & 0x02)
                                                dev->stats.tx_window_errors++;

                                        /*
                                        ** This reset has been verified on
                                        ** DFE-580TX boards ! phdm@macqel.be.
                                        */
                                        if (tx_status & 0x10) { /* TxUnderrun */
                                                /* Restart Tx FIFO and transmitter */
                                                sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
                                                /* No need to reset the Tx pointer here */
                                        }
                                        /* Restart the Tx. Need to make sure tx enabled */
                                        i = 10;
                                        do {
                                                iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
                                                if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
                                                        break;
                                                mdelay(1);
                                        } while (--i);
                                }
                                /* Yup, this is a documentation bug.  It cost me *hours*. */
                                iowrite16 (0, ioaddr + TxStatus);
                                if (tx_cnt < 0) {
                                        iowrite32(5000, ioaddr + DownCounter);
                                        break;
                                }
                                tx_status = ioread16 (ioaddr + TxStatus);
                        }
                        hw_frame_id = (tx_status >> 8) & 0xff;
                } else {
                        hw_frame_id = ioread8(ioaddr + TxFrameId);
                }

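                /* Each Tx descriptor carries its ring index in status bits
                   2-9 (set as entry << 2 in start_tx()); the chip reports the
                   id of the frame it last completed, so the loop below can
                   reap every descriptor up to that point. */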
1269                if (np->pci_dev->revision >= 0x14) {
1270                        spin_lock(&np->lock);
1271                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1272                                int entry = np->dirty_tx % TX_RING_SIZE;
1273                                struct sk_buff *skb;
1274                                int sw_frame_id;
1275                                sw_frame_id = (le32_to_cpu(
1276                                        np->tx_ring[entry].status) >> 2) & 0xff;
1277                                if (sw_frame_id == hw_frame_id &&
1278                                        !(le32_to_cpu(np->tx_ring[entry].status)
1279                                        & 0x00010000))
1280                                                break;
1281                                if (sw_frame_id == (hw_frame_id + 1) %
1282                                        TX_RING_SIZE)
1283                                                break;
1284                                skb = np->tx_skbuff[entry];
1285                                /* Free the original skb. */
1286                                dma_unmap_single(&np->pci_dev->dev,
1287                                        le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1288                                        skb->len, DMA_TO_DEVICE);
1289                                dev_kfree_skb_irq (np->tx_skbuff[entry]);
1290                                np->tx_skbuff[entry] = NULL;
1291                                np->tx_ring[entry].frag[0].addr = 0;
1292                                np->tx_ring[entry].frag[0].length = 0;
1293                        }
1294                        spin_unlock(&np->lock);
1295                } else {
1296                        spin_lock(&np->lock);
1297                        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1298                                int entry = np->dirty_tx % TX_RING_SIZE;
1299                                struct sk_buff *skb;
1300                                if (!(le32_to_cpu(np->tx_ring[entry].status)
1301                                                        & 0x00010000))
1302                                        break;
1303                                skb = np->tx_skbuff[entry];
1304                                /* Free the original skb. */
1305                                dma_unmap_single(&np->pci_dev->dev,
1306                                        le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1307                                        skb->len, DMA_TO_DEVICE);
1308                                dev_kfree_skb_irq (np->tx_skbuff[entry]);
1309                                np->tx_skbuff[entry] = NULL;
1310                                np->tx_ring[entry].frag[0].addr = 0;
1311                                np->tx_ring[entry].frag[0].length = 0;
1312                        }
1313                        spin_unlock(&np->lock);
1314                }
1315
1316                if (netif_queue_stopped(dev) &&
1317                        np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1318                        /* The ring is no longer full, clear busy flag. */
1319                        netif_wake_queue (dev);
1320                }
1321                /* Abnormal error summary/uncommon events handlers. */
1322                if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1323                        netdev_error(dev, intr_status);
1324        } while (0);
1325        if (netif_msg_intr(np))
1326                printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1327                           dev->name, ioread16(ioaddr + IntrStatus));
1328        return IRQ_RETVAL(handled);
1329}
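/*
 * Note on the Tx reclaim above: each queued descriptor carries a software
 * frame id in status bits [9:2], and hw_frame_id (read from the chip earlier
 * in the handler) names the frame the hardware is currently processing.  The
 * first loop frees completed entries, stopping either at a descriptor whose
 * id matches the hardware's while its done bit (0x00010000) is still clear,
 * or at the descriptor one id ahead of the hardware's, so a frame still in
 * flight is never unmapped.  (That second comparison is taken modulo
 * TX_RING_SIZE even though the id field is eight bits wide.)  The second
 * loop, used on the other path, simply frees every entry whose done bit is
 * set.
 */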
1330
1331static void rx_poll(unsigned long data)
1332{
1333        struct net_device *dev = (struct net_device *)data;
1334        struct netdev_private *np = netdev_priv(dev);
1335        int entry = np->cur_rx % RX_RING_SIZE;
1336        int boguscnt = np->budget;
1337        void __iomem *ioaddr = np->base;
1338        int received = 0;
1339
1340        /* Drain entries the chip has marked done (DescOwn set) and send the packets up. */
1341        while (1) {
1342                struct netdev_desc *desc = &(np->rx_ring[entry]);
1343                u32 frame_status = le32_to_cpu(desc->status);
1344                int pkt_len;
1345
1346                if (--boguscnt < 0) {
1347                        goto not_done;
1348                }
1349                if (!(frame_status & DescOwn))
1350                        break;
1351                pkt_len = frame_status & 0x1fff;        /* Chip omits the CRC. */
1352                if (netif_msg_rx_status(np))
1353                        printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1354                                   frame_status);
1355                if (frame_status & 0x001f4000) {
1356                        /* There was an error. */
1357                        if (netif_msg_rx_err(np))
1358                                printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1359                                           frame_status);
1360                        dev->stats.rx_errors++;
1361                        if (frame_status & 0x00100000)
1362                                dev->stats.rx_length_errors++;
1363                        if (frame_status & 0x00010000)
1364                                dev->stats.rx_fifo_errors++;
1365                        if (frame_status & 0x00060000)
1366                                dev->stats.rx_frame_errors++;
1367                        if (frame_status & 0x00080000)
1368                                dev->stats.rx_crc_errors++;
1369                        if (frame_status & 0x00100000) {
1370                                printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1371                                           " status %8.8x.\n",
1372                                           dev->name, frame_status);
1373                        }
1374                } else {
1375                        struct sk_buff *skb;
1376#ifndef final_version
1377                        if (netif_msg_rx_status(np))
1378                                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1379                                           ", bogus_cnt %d.\n",
1380                                           pkt_len, boguscnt);
1381#endif
1382                        /* Check if the packet is long enough to accept without copying
1383                           to a minimally-sized skbuff. */
1384                        if (pkt_len < rx_copybreak &&
1385                            (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1386                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
1387                                dma_sync_single_for_cpu(&np->pci_dev->dev,
1388                                                le32_to_cpu(desc->frag[0].addr),
1389                                                np->rx_buf_sz, DMA_FROM_DEVICE);
1390                                skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1391                                dma_sync_single_for_device(&np->pci_dev->dev,
1392                                                le32_to_cpu(desc->frag[0].addr),
1393                                                np->rx_buf_sz, DMA_FROM_DEVICE);
1394                                skb_put(skb, pkt_len);
1395                        } else {
1396                                dma_unmap_single(&np->pci_dev->dev,
1397                                        le32_to_cpu(desc->frag[0].addr),
1398                                        np->rx_buf_sz, DMA_FROM_DEVICE);
1399                                skb_put(skb = np->rx_skbuff[entry], pkt_len);
1400                                np->rx_skbuff[entry] = NULL;
1401                        }
1402                        skb->protocol = eth_type_trans(skb, dev);
1403                        /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1404                        netif_rx(skb);
1405                }
1406                entry = (entry + 1) % RX_RING_SIZE;
1407                received++;
1408        }
1409        np->cur_rx = entry;
1410        refill_rx (dev);
1411        np->budget -= received;
1412        iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1413        return;
1414
1415not_done:
1416        np->cur_rx = entry;
1417        refill_rx (dev);
1418        if (!received)
1419                received = 1;
1420        np->budget -= received;
1421        if (np->budget <= 0)
1422                np->budget = RX_BUDGET;
1423        tasklet_schedule(&np->rx_tasklet);
1424}
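/*
 * Note: rx_poll() is a pre-NAPI software-poll scheme.  The interrupt handler
 * masks the Rx sources and schedules this tasklet; the tasklet drains at
 * most np->budget descriptors and then either re-enables interrupts (all
 * caught up) or reschedules itself (budget exhausted).  A rough sketch of
 * that control flow, with hypothetical helper names:
 *
 *	while (work_done < budget && rx_frame_ready())	// hypothetical
 *		work_done += process_one_frame();	// hypothetical
 *	if (work_done < budget)
 *		iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 *	else
 *		tasklet_schedule(&np->rx_tasklet);
 */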
1425
1426static void refill_rx (struct net_device *dev)
1427{
1428        struct netdev_private *np = netdev_priv(dev);
1429        int entry;
1430        int cnt = 0;
1431
1432        /* Refill the Rx ring buffers. */
1433        for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1434                np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1435                struct sk_buff *skb;
1436                entry = np->dirty_rx % RX_RING_SIZE;
1437                if (np->rx_skbuff[entry] == NULL) {
1438                        skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1439                        np->rx_skbuff[entry] = skb;
1440                        if (skb == NULL)
1441                                break;          /* Better luck next round. */
1442                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
1443                        np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1444                                dma_map_single(&np->pci_dev->dev, skb->data,
1445                                        np->rx_buf_sz, DMA_FROM_DEVICE));
1446                        if (dma_mapping_error(&np->pci_dev->dev,
1447                                    np->rx_ring[entry].frag[0].addr)) {
1448                            dev_kfree_skb_irq(skb);
1449                            np->rx_skbuff[entry] = NULL;
1450                            break;
1451                        }
1452                }
1453                /* Perhaps we need not reset this field. */
1454                np->rx_ring[entry].frag[0].length =
1455                        cpu_to_le32(np->rx_buf_sz | LastFrag);
1456                np->rx_ring[entry].status = 0;
1457                cnt++;
1458        }
1459}
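/*
 * Note: refill_rx() advances dirty_rx toward cur_rx, replacing every buffer
 * that rx_poll() handed up to the stack; the "+ RX_RING_SIZE" keeps the
 * modular ring distance non-negative across index wrap.  The local counter
 * `cnt` is incremented but never read.
 */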
1460static void netdev_error(struct net_device *dev, int intr_status)
1461{
1462        struct netdev_private *np = netdev_priv(dev);
1463        void __iomem *ioaddr = np->base;
1464        u16 mii_ctl, mii_advertise, mii_lpa;
1465        int speed;
1466
1467        if (intr_status & LinkChange) {
1468                if (mdio_wait_link(dev, 10) == 0) {
1469                        printk(KERN_INFO "%s: Link up\n", dev->name);
1470                        if (np->an_enable) {
1471                                mii_advertise = mdio_read(dev, np->phys[0],
1472                                                           MII_ADVERTISE);
1473                                mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1474                                mii_advertise &= mii_lpa;
1475                                printk(KERN_INFO "%s: Link changed: ",
1476                                        dev->name);
1477                                if (mii_advertise & ADVERTISE_100FULL) {
1478                                        np->speed = 100;
1479                                        printk("100Mbps, full duplex\n");
1480                                } else if (mii_advertise & ADVERTISE_100HALF) {
1481                                        np->speed = 100;
1482                                        printk("100Mbps, half duplex\n");
1483                                } else if (mii_advertise & ADVERTISE_10FULL) {
1484                                        np->speed = 10;
1485                                        printk("10Mbps, full duplex\n");
1486                                } else if (mii_advertise & ADVERTISE_10HALF) {
1487                                        np->speed = 10;
1488                                        printk("10Mbps, half duplex\n");
1489                                } else
1490                                        printk("\n");
1491
1492                        } else {
1493                                mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1494                                speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1495                                np->speed = speed;
1496                                printk(KERN_INFO "%s: Link changed: %dMbps, ",
1497                                        dev->name, speed);
1498                                printk("%s duplex.\n",
1499                                        (mii_ctl & BMCR_FULLDPLX) ?
1500                                                "full" : "half");
1501                        }
1502                        check_duplex(dev);
1503                        if (np->flowctrl && np->mii_if.full_duplex) {
1504                                iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1505                                        ioaddr + MulticastFilter1+2);
1506                                iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1507                                        ioaddr + MACCtrl0);
1508                        }
1509                        netif_carrier_on(dev);
1510                } else {
1511                        printk(KERN_INFO "%s: Link down\n", dev->name);
1512                        netif_carrier_off(dev);
1513                }
1514        }
1515        if (intr_status & StatsMax) {
1516                get_stats(dev);
1517        }
1518        if (intr_status & IntrPCIErr) {
1519                printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1520                           dev->name, intr_status);
1521                /* We must do a global reset of DMA to continue. */
1522        }
1523}
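/*
 * Note: the 0x0200 OR-ed into MulticastFilter1+2 above appears to be the
 * same flow-control enable bit that set_rx_mode() folds into mc_filter[3]
 * before writing the filter registers back, assuming MulticastFilter1 sits
 * four bytes above MulticastFilter0.
 */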
1524
1525static struct net_device_stats *get_stats(struct net_device *dev)
1526{
1527        struct netdev_private *np = netdev_priv(dev);
1528        void __iomem *ioaddr = np->base;
1529        unsigned long flags;
1530        u8 late_coll, single_coll, mult_coll;
1531
1532        spin_lock_irqsave(&np->statlock, flags);
1533        /* The chip only needs to report frames silently dropped (missed). */
1534        dev->stats.rx_missed_errors     += ioread8(ioaddr + RxMissed);
1535        dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1536        dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1537        dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1538
1539        mult_coll = ioread8(ioaddr + StatsMultiColl);
1540        np->xstats.tx_multiple_collisions += mult_coll;
1541        single_coll = ioread8(ioaddr + StatsOneColl);
1542        np->xstats.tx_single_collisions += single_coll;
1543        late_coll = ioread8(ioaddr + StatsLateColl);
1544        np->xstats.tx_late_collisions += late_coll;
1545        dev->stats.collisions += mult_coll
1546                + single_coll
1547                + late_coll;
1548
1549        np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1550        np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1551        np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1552        np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1553        np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1554        np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1555        np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1556
1557        dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1558        dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1559        dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1560        dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1561
1562        spin_unlock_irqrestore(&np->statlock, flags);
1563
1564        return &dev->stats;
1565}
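/*
 * Note: the octet counts are 32-bit quantities exposed through two 16-bit
 * registers and reassembled above as low | (high << 16).  With illustrative
 * values (not from any datasheet), TxOctetsLow = 0x2345 and
 * TxOctetsHigh = 0x0001 would add 0x12345 (74565) bytes to tx_bytes.
 */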
1566
1567static void set_rx_mode(struct net_device *dev)
1568{
1569        struct netdev_private *np = netdev_priv(dev);
1570        void __iomem *ioaddr = np->base;
1571        u16 mc_filter[4];                       /* Multicast hash filter */
1572        u32 rx_mode;
1573        int i;
1574
1575        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1576                memset(mc_filter, 0xff, sizeof(mc_filter));
1577                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1578        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1579                   (dev->flags & IFF_ALLMULTI)) {
1580                /* Too many to match, or accept all multicasts. */
1581                memset(mc_filter, 0xff, sizeof(mc_filter));
1582                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1583        } else if (!netdev_mc_empty(dev)) {
1584                struct netdev_hw_addr *ha;
1585                int bit;
1586                int index;
1587                int crc;
1588                memset (mc_filter, 0, sizeof (mc_filter));
1589                netdev_for_each_mc_addr(ha, dev) {
1590                        crc = ether_crc_le(ETH_ALEN, ha->addr);
1591                        for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1592                                if (crc & 0x80000000) index |= 1 << bit;
1593                        mc_filter[index/16] |= (1 << (index % 16));
1594                }
1595                rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1596        } else {
1597                iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1598                return;
1599        }
1600        if (np->mii_if.full_duplex && np->flowctrl)
1601                mc_filter[3] |= 0x0200;
1602
1603        for (i = 0; i < 4; i++)
1604                iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1605        iowrite8(rx_mode, ioaddr + RxMode);
1606}
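/*
 * Note: the hash index computed above is the bit-reversed top six bits of
 * the little-endian CRC-32 of the address, selecting one of 64 filter bits
 * spread across four 16-bit registers.  Worked example with an illustrative
 * CRC: crc = 0xE0000000 has top bits 111000, which reverse to
 * index = 0b000111 = 7, so mc_filter[7 / 16] |= 1 << (7 % 16), i.e.
 * mc_filter[0] |= 0x0080.
 */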
1607
1608static int __set_mac_addr(struct net_device *dev)
1609{
1610        struct netdev_private *np = netdev_priv(dev);
1611        u16 addr16;
1612
1613        addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1614        iowrite16(addr16, np->base + StationAddr);
1615        addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1616        iowrite16(addr16, np->base + StationAddr+2);
1617        addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1618        iowrite16(addr16, np->base + StationAddr+4);
1619        return 0;
1620}
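/*
 * Note: the station address is programmed as three little-endian 16-bit
 * halves.  An illustrative MAC of 00:11:22:33:44:55 would be written as
 * 0x1100 at StationAddr, 0x3322 at StationAddr+2 and 0x5544 at
 * StationAddr+4.
 */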
1621
1622/* Invoked with rtnl_lock held */
1623static int sundance_set_mac_addr(struct net_device *dev, void *data)
1624{
1625        const struct sockaddr *addr = data;
1626
1627        if (!is_valid_ether_addr(addr->sa_data))
1628                return -EADDRNOTAVAIL;
1629        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1630        __set_mac_addr(dev);
1631
1632        return 0;
1633}
1634
1635static const struct {
1636        const char name[ETH_GSTRING_LEN];
1637} sundance_stats[] = {
1638        { "tx_multiple_collisions" },
1639        { "tx_single_collisions" },
1640        { "tx_late_collisions" },
1641        { "tx_deferred" },
1642        { "tx_deferred_excessive" },
1643        { "tx_aborted" },
1644        { "tx_bcasts" },
1645        { "rx_bcasts" },
1646        { "tx_mcasts" },
1647        { "rx_mcasts" },
1648};
1649
1650static int check_if_running(struct net_device *dev)
1651{
1652        if (!netif_running(dev))
1653                return -EINVAL;
1654        return 0;
1655}
1656
1657static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1658{
1659        struct netdev_private *np = netdev_priv(dev);
1660        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1661        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1662        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1663}
1664
1665static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1666{
1667        struct netdev_private *np = netdev_priv(dev);
1668        spin_lock_irq(&np->lock);
1669        mii_ethtool_gset(&np->mii_if, ecmd);
1670        spin_unlock_irq(&np->lock);
1671        return 0;
1672}
1673
1674static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1675{
1676        struct netdev_private *np = netdev_priv(dev);
1677        int res;
1678        spin_lock_irq(&np->lock);
1679        res = mii_ethtool_sset(&np->mii_if, ecmd);
1680        spin_unlock_irq(&np->lock);
1681        return res;
1682}
1683
1684static int nway_reset(struct net_device *dev)
1685{
1686        struct netdev_private *np = netdev_priv(dev);
1687        return mii_nway_restart(&np->mii_if);
1688}
1689
1690static u32 get_link(struct net_device *dev)
1691{
1692        struct netdev_private *np = netdev_priv(dev);
1693        return mii_link_ok(&np->mii_if);
1694}
1695
1696static u32 get_msglevel(struct net_device *dev)
1697{
1698        struct netdev_private *np = netdev_priv(dev);
1699        return np->msg_enable;
1700}
1701
1702static void set_msglevel(struct net_device *dev, u32 val)
1703{
1704        struct netdev_private *np = netdev_priv(dev);
1705        np->msg_enable = val;
1706}
1707
1708static void get_strings(struct net_device *dev, u32 stringset,
1709                u8 *data)
1710{
1711        if (stringset == ETH_SS_STATS)
1712                memcpy(data, sundance_stats, sizeof(sundance_stats));
1713}
1714
1715static int get_sset_count(struct net_device *dev, int sset)
1716{
1717        switch (sset) {
1718        case ETH_SS_STATS:
1719                return ARRAY_SIZE(sundance_stats);
1720        default:
1721                return -EOPNOTSUPP;
1722        }
1723}
1724
1725static void get_ethtool_stats(struct net_device *dev,
1726                struct ethtool_stats *stats, u64 *data)
1727{
1728        struct netdev_private *np = netdev_priv(dev);
1729        int i = 0;
1730
1731        get_stats(dev);
1732        data[i++] = np->xstats.tx_multiple_collisions;
1733        data[i++] = np->xstats.tx_single_collisions;
1734        data[i++] = np->xstats.tx_late_collisions;
1735        data[i++] = np->xstats.tx_deferred;
1736        data[i++] = np->xstats.tx_deferred_excessive;
1737        data[i++] = np->xstats.tx_aborted;
1738        data[i++] = np->xstats.tx_bcasts;
1739        data[i++] = np->xstats.rx_bcasts;
1740        data[i++] = np->xstats.tx_mcasts;
1741        data[i++] = np->xstats.rx_mcasts;
1742}
1743
1744#ifdef CONFIG_PM
1745
1746static void sundance_get_wol(struct net_device *dev,
1747                struct ethtool_wolinfo *wol)
1748{
1749        struct netdev_private *np = netdev_priv(dev);
1750        void __iomem *ioaddr = np->base;
1751        u8 wol_bits;
1752
1753        wol->wolopts = 0;
1754
1755        wol->supported = (WAKE_PHY | WAKE_MAGIC);
1756        if (!np->wol_enabled)
1757                return;
1758
1759        wol_bits = ioread8(ioaddr + WakeEvent);
1760        if (wol_bits & MagicPktEnable)
1761                wol->wolopts |= WAKE_MAGIC;
1762        if (wol_bits & LinkEventEnable)
1763                wol->wolopts |= WAKE_PHY;
1764}
1765
1766static int sundance_set_wol(struct net_device *dev,
1767        struct ethtool_wolinfo *wol)
1768{
1769        struct netdev_private *np = netdev_priv(dev);
1770        void __iomem *ioaddr = np->base;
1771        u8 wol_bits;
1772
1773        if (!device_can_wakeup(&np->pci_dev->dev))
1774                return -EOPNOTSUPP;
1775
1776        np->wol_enabled = !!(wol->wolopts);
1777        wol_bits = ioread8(ioaddr + WakeEvent);
1778        wol_bits &= ~(WakePktEnable | MagicPktEnable |
1779                        LinkEventEnable | WolEnable);
1780
1781        if (np->wol_enabled) {
1782                if (wol->wolopts & WAKE_MAGIC)
1783                        wol_bits |= (MagicPktEnable | WolEnable);
1784                if (wol->wolopts & WAKE_PHY)
1785                        wol_bits |= (LinkEventEnable | WolEnable);
1786        }
1787        iowrite8(wol_bits, ioaddr + WakeEvent);
1788
1789        device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1790
1791        return 0;
1792}
1793#else
1794#define sundance_get_wol NULL
1795#define sundance_set_wol NULL
1796#endif /* CONFIG_PM */
1797
1798static const struct ethtool_ops ethtool_ops = {
1799        .begin = check_if_running,
1800        .get_drvinfo = get_drvinfo,
1801        .get_settings = get_settings,
1802        .set_settings = set_settings,
1803        .nway_reset = nway_reset,
1804        .get_link = get_link,
1805        .get_wol = sundance_get_wol,
1806        .set_wol = sundance_set_wol,
1807        .get_msglevel = get_msglevel,
1808        .set_msglevel = set_msglevel,
1809        .get_strings = get_strings,
1810        .get_sset_count = get_sset_count,
1811        .get_ethtool_stats = get_ethtool_stats,
1812};
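/*
 * Note: these hooks back the standard ethtool interface.  For instance,
 * `ethtool -i ethX` lands in get_drvinfo, `ethtool -S ethX` in
 * get_ethtool_stats (via get_sset_count and get_strings), and, when
 * CONFIG_PM is set, `ethtool -s ethX wol g` reaches sundance_set_wol
 * with WAKE_MAGIC requested.
 */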
1813
1814static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1815{
1816        struct netdev_private *np = netdev_priv(dev);
1817        int rc;
1818
1819        if (!netif_running(dev))
1820                return -EINVAL;
1821
1822        spin_lock_irq(&np->lock);
1823        rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1824        spin_unlock_irq(&np->lock);
1825
1826        return rc;
1827}
1828
1829static int netdev_close(struct net_device *dev)
1830{
1831        struct netdev_private *np = netdev_priv(dev);
1832        void __iomem *ioaddr = np->base;
1833        struct sk_buff *skb;
1834        int i;
1835
1836        /* Wait and kill tasklet */
1837        tasklet_kill(&np->rx_tasklet);
1838        tasklet_kill(&np->tx_tasklet);
1839        np->cur_tx = 0;
1840        np->dirty_tx = 0;
1841        np->cur_task = 0;
1842        np->last_tx = NULL;
1843
1844        netif_stop_queue(dev);
1845
1846        if (netif_msg_ifdown(np)) {
1847                printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1848                           "Rx %4.4x Int %2.2x.\n",
1849                           dev->name, ioread8(ioaddr + TxStatus),
1850                           ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1851                printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1852                           dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1853        }
1854
1855        /* Disable interrupts by clearing the interrupt mask. */
1856        iowrite16(0x0000, ioaddr + IntrEnable);
1857
1858        /* Disable Rx and Tx DMA so resources can be released safely */
1859        iowrite32(0x500, ioaddr + DMACtrl);
1860
1861        /* Stop the chip's Tx and Rx processes. */
1862        iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1863
1864        for (i = 2000; i > 0; i--) {
1865                if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1866                        break;
1867                mdelay(1);
1868        }
1869
1870        iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1871                        ioaddr + ASIC_HI_WORD(ASICCtrl));
1872
1873        for (i = 2000; i > 0; i--) {
1874                if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1875                        break;
1876                mdelay(1);
1877        }
1878
1879#ifdef __i386__
1880        if (netif_msg_hw(np)) {
1881                printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1882                           (int)(np->tx_ring_dma));
1883                for (i = 0; i < TX_RING_SIZE; i++)
1884                        printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1885                                   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1886                                   np->tx_ring[i].frag[0].length);
1887                printk(KERN_DEBUG "  Rx ring at %8.8x:\n",
1888                           (int)(np->rx_ring_dma));
1889                for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1890                        printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1891                                   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1892                                   np->rx_ring[i].frag[0].length);
1893                }
1894        }
1895#endif /* __i386__ debugging only */
1896
1897        free_irq(np->pci_dev->irq, dev);
1898
1899        del_timer_sync(&np->timer);
1900
1901        /* Free all the skbuffs in the Rx queue. */
1902        for (i = 0; i < RX_RING_SIZE; i++) {
1903                np->rx_ring[i].status = 0;
1904                skb = np->rx_skbuff[i];
1905                if (skb) {
1906                        dma_unmap_single(&np->pci_dev->dev,
1907                                le32_to_cpu(np->rx_ring[i].frag[0].addr),
1908                                np->rx_buf_sz, DMA_FROM_DEVICE);
1909                        dev_kfree_skb(skb);
1910                        np->rx_skbuff[i] = NULL;
1911                }
1912                np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1913        }
1914        for (i = 0; i < TX_RING_SIZE; i++) {
1915                np->tx_ring[i].next_desc = 0;
1916                skb = np->tx_skbuff[i];
1917                if (skb) {
1918                        dma_unmap_single(&np->pci_dev->dev,
1919                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
1920                                skb->len, DMA_TO_DEVICE);
1921                        dev_kfree_skb(skb);
1922                        np->tx_skbuff[i] = NULL;
1923                }
1924        }
1925
1926        return 0;
1927}
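/*
 * Note: netdev_close() tears the device down in a deliberate order: mask
 * interrupts, halt the Rx/Tx DMA engines, stop the MAC, issue a global
 * reset and poll ResetBusy until it clears, and only then release the IRQ
 * and unmap/free the ring buffers, so the chip can no longer DMA into
 * memory that is being freed.
 */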
1928
1929static void sundance_remove1(struct pci_dev *pdev)
1930{
1931        struct net_device *dev = pci_get_drvdata(pdev);
1932
1933        if (dev) {
1934            struct netdev_private *np = netdev_priv(dev);
1935            unregister_netdev(dev);
1936            dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1937                    np->rx_ring, np->rx_ring_dma);
1938            dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1939                    np->tx_ring, np->tx_ring_dma);
1940            pci_iounmap(pdev, np->base);
1941            pci_release_regions(pdev);
1942            free_netdev(dev);
1943        }
1944}
1945
1946#ifdef CONFIG_PM
1947
1948static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1949{
1950        struct net_device *dev = pci_get_drvdata(pci_dev);
1951        struct netdev_private *np = netdev_priv(dev);
1952        void __iomem *ioaddr = np->base;
1953
1954        if (!netif_running(dev))
1955                return 0;
1956
1957        netdev_close(dev);
1958        netif_device_detach(dev);
1959
1960        pci_save_state(pci_dev);
1961        if (np->wol_enabled) {
1962                iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1963                iowrite16(RxEnable, ioaddr + MACCtrl1);
1964        }
1965        pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1966                        np->wol_enabled);
1967        pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1968
1969        return 0;
1970}
1971
1972static int sundance_resume(struct pci_dev *pci_dev)
1973{
1974        struct net_device *dev = pci_get_drvdata(pci_dev);
1975        int err = 0;
1976
1977        if (!netif_running(dev))
1978                return 0;
1979
1980        pci_set_power_state(pci_dev, PCI_D0);
1981        pci_restore_state(pci_dev);
1982        pci_enable_wake(pci_dev, PCI_D0, 0);
1983
1984        err = netdev_open(dev);
1985        if (err) {
1986                printk(KERN_ERR "%s: Can't resume interface!\n",
1987                                dev->name);
1988                goto out;
1989        }
1990
1991        netif_device_attach(dev);
1992
1993out:
1994        return err;
1995}
1996
1997#endif /* CONFIG_PM */
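/*
 * Note: these are the legacy PCI power-management callbacks.  Suspend
 * closes the interface and, if wake-on-LAN is armed, leaves the receiver
 * enabled before the PCI core drops the device into a low-power state;
 * resume restores PCI state and reopens the interface.
 */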
1998
1999static struct pci_driver sundance_driver = {
2000        .name           = DRV_NAME,
2001        .id_table       = sundance_pci_tbl,
2002        .probe          = sundance_probe1,
2003        .remove         = sundance_remove1,
2004#ifdef CONFIG_PM
2005        .suspend        = sundance_suspend,
2006        .resume         = sundance_resume,
2007#endif /* CONFIG_PM */
2008};
2009
2010static int __init sundance_init(void)
2011{
2012/* when a module, this is printed whether or not devices are found in probe */
2013#ifdef MODULE
2014        printk(version);
2015#endif
2016        return pci_register_driver(&sundance_driver);
2017}
2018
2019static void __exit sundance_exit(void)
2020{
2021        pci_unregister_driver(&sundance_driver);
2022}
2023
2024module_init(sundance_init);
2025module_exit(sundance_exit);
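/*
 * Note: when built as a module the driver is loaded with e.g.
 * `modprobe sundance`; any options exported with module_param() can be
 * appended as name=value pairs on that command line.
 */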
2026