linux/drivers/net/ethernet/dlink/sundance.c
/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]
	Archives of the mailing list are still available at
	http://www.beowulf.org/pipermail/netdrivers/

*/

#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC.  */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
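
/* Example: loading the module with "modprobe sundance media=100mbps_fd,autosense"
   would force the first card to 100 Mbps full duplex and leave the second one
   autosensing; unset entries default to autonegotiation. */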


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
	" Written by Donald Becker\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static const struct pci_device_id sundance_pci_tbl[] = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	FlashAddr = 0x40,
	FlashData = 0x44,
	WakeEvent = 0x45,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};

#define ASIC_HI_WORD(x)	((x) + 2)
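
/* ASICCtrl is 32 bits wide; the reset request bits below sit in its high
   16 bits, which is why callers of sundance_reset() pass these enum values
   shifted left by 16, and why ASIC_HI_WORD() exists for 16-bit accesses to
   the high half. */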

enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* Bits in WakeEvent register. */
enum wake_event_bits {
	WakePktEnable = 0x01,
	MagicPktEnable = 0x02,
	LinkEventEnable = 0x04,
	WolEnable = 0x80,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	__le32 next_desc;
	__le32 status;
	struct desc_frag { __le32 addr, length; } frag[1];
};
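
/* This driver uses a single fragment per descriptor: frag[0].addr carries
   the buffer's DMA address and frag[0].length the byte count, with LastFrag
   set in the top bit to terminate the fragment list. */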

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct timer_list timer;		/* Media monitoring timer. */
	/* ethtool extra stats */
	struct {
		u64 tx_multiple_collisions;
		u64 tx_single_collisions;
		u64 tx_late_collisions;
		u64 tx_deferred;
		u64 tx_deferred_excessive;
		u64 tx_aborted;
		u64 tx_bcasts;
		u64 rx_bcasts;
		u64 tx_mcasts;
		u64 rx_mcasts;
	} xstats;
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	unsigned int wol_enabled:1;		/* Wake on LAN enabled */
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
	spinlock_t statlock;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)
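/* The working interrupt mask: IntrRxDMADone schedules the Rx tasklet,
   IntrTxDone drives Tx descriptor reaping, and IntrPCIErr, LinkChange and
   StatsMax are routed to netdev_error(). */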

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  mdio_wait_link(struct net_device *dev, int wait);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static int sundance_set_mac_addr(struct net_device *dev, void *data);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32-bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
			break;
		}
		udelay(100);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sundance_poll_controller(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	disable_irq(np->pci_dev->irq);
	intr_handler(np->pci_dev->irq, dev);
	enable_irq(np->pci_dev->irq);
}
#endif

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= change_mtu,
	.ndo_set_mac_address	= sundance_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sundance_poll_controller,
#endif
};

static int sundance_probe1(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_end, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

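	/* The station address is stored as three little-endian 16-bit words
	   starting at EEPROM_SA_OFFSET. */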
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] =
			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	spin_lock_init(&np->statlock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	/* MTU range: 68 - 8191 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = 8191;

	pci_set_drvdata(pdev, dev);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
	       dev->dev_addr, irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;

	/*
	 * It seems some PHYs don't deal well with address 0 being accessed
	 * first
	 */
	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
		phy = 0;
		phy_end = 31;
	} else {
		phy = 1;
		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
	}
	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)	/* PHY cannot suppress preamble */
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it. */
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE,
			    np->mii_if.advertising | ADVERTISE_PAUSE_CAP);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");

	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
		np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
		np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */

	/* Issue a read command for this location and poll the busy bit. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
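
/* Management frames are bit-banged through MIICtrl: each bit is placed on
   MDIO_Data with MDIO_EnbOutput asserted, then latched by toggling
   MDIO_ShiftClk.  A read shifts out the start (01) and read-opcode (10)
   bits plus the PHY and register addresses, releases the bus for the
   turnaround, and clocks in 16 data bits; a write keeps driving the bus
   through the turnaround and data bits. */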

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_wait_link(struct net_device *dev, int wait)
{
	int bmsr;
	int phy_id;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_id = np->phys[0];

	do {
		bmsr = mdio_read(dev, phy_id, MII_BMSR);
		if (bmsr & BMSR_LSTATUS)
			return 0;
		mdelay(1);
	} while (--wait > 0);
	return -1;
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	const int irq = np->pci_dev->irq;
	unsigned long flags;
	int i;

	sundance_reset(dev, 0x00ff << 16);

	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
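	/* Program the hardware maximum frame length: the MTU plus the 14-byte
	   Ethernet header, plus 4 more bytes for an 802.1Q VLAN tag when VLAN
	   support is enabled. */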
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_dev->revision >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	spin_lock_irqsave(&np->lock, flags);
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flags);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	/* Disable WoL by clearing the wake-event enable bits. */
	iowrite8(ioread8(ioaddr + WakeEvent) &
		 ~(WakePktEnable | MagicPktEnable | LinkEventEnable | WolEnable),
		 ioaddr + WakeEvent);
	np->wol_enabled = 0;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = jiffies + 3*HZ;
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation */
	duplex = (negotiated & LPA_100FULL) ||
		 (negotiated & (LPA_100FULL | LPA_100HALF | LPA_10FULL)) == LPA_10FULL;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? EnbFullDuplex : 0),
			  ioaddr + MACCtrl0);
	}
}

static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = np->mii_if.dev;
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb =
			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			dma_map_single(&np->pci_dev->dev, skb->data,
				np->rx_buf_sz, DMA_FROM_DEVICE));
		if (dma_mapping_error(&np->pci_dev->dev,
					np->rx_ring[i].frag[0].addr)) {
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
			break;
		}
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
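	/* If allocation stopped short, i - RX_RING_SIZE wraps "negative" so
	   that cur_rx - dirty_rx counts the descriptors still lacking buffers;
	   refill_rx() tops the ring up later. */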
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}
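
/* Deferred Tx chaining: start_tx() fills in descriptors with next_desc == 0;
   this tasklet then links each new descriptor onto its predecessor, flags the
   newest one to interrupt on completion, and restarts DMA by writing
   TxListPtr only when the engine has gone idle (TxListPtr reads as 0). */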
static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
}

static netdev_tx_t
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
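	/* Encode the ring index as the frame id (status bits 9:2); the Tx
	   completion path matches it against the frame id the chip reports. */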
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
				skb->data, skb->len, DMA_TO_DEVICE));
	if (dma_mapping_error(&np->pci_dev->dev, txdesc->frag[0].addr))
		goto drop_frame;
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
		netif_stop_queue (dev);
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;

drop_frame:
	dev_kfree_skb_any(skb);
	np->tx_skbuff[entry] = NULL;
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* Reset the hardware Tx logic and free all Tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuffs */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			np->tx_skbuff[i] = NULL;
			dev->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}

/* The interrupt handler cleans up after the Tx thread
   and schedules the Rx tasklet. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;

	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk("%s: Transmit status is %2.2x.\n",
					       dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					dev->stats.tx_errors++;
					if (tx_status & 0x10)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						dev->stats.collisions++;
					if (tx_status & 0x04)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						dev->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. Need to make sure tx enabled */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

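		/* Reap completed Tx descriptors.  On rev >= 0x14 parts the
		   loop also compares each descriptor's software frame id with
		   the frame id the chip last reported before trusting the
		   descriptor's completion bit. */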
		if (np->pci_dev->revision >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
							& 0x00010000))
					break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
1330
static void rx_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->budget;
	void __iomem *ioaddr = np->base;
	int received = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct netdev_desc *desc = &(np->rx_ring[entry]);
		u32 frame_status = le32_to_cpu(desc->status);
		int pkt_len;

		if (--boguscnt < 0)
			goto not_done;
		if (!(frame_status & DescOwn))
			break;
		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   frame_status);
		if (frame_status & 0x001f4000) {
			/* There was an error. */
			if (netif_msg_rx_err(np))
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   frame_status);
			dev->stats.rx_errors++;
			if (frame_status & 0x00100000)
				dev->stats.rx_length_errors++;
			if (frame_status & 0x00010000)
				dev->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000)
				dev->stats.rx_frame_errors++;
			if (frame_status & 0x00080000)
				dev->stats.rx_crc_errors++;
			if (frame_status & 0x00100000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
					   " status %8.8x.\n",
					   dev->name, frame_status);
			}
		} else {
			struct sk_buff *skb;
#ifndef final_version
			if (netif_msg_rx_status(np))
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   ", bogus_cnt %d.\n",
					   pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
						le32_to_cpu(desc->frag[0].addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb, pkt_len);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
					le32_to_cpu(desc->frag[0].addr),
					np->rx_buf_sz, DMA_FROM_DEVICE);
				skb = np->rx_skbuff[entry];
				skb_put(skb, pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
		received++;
	}
	np->cur_rx = entry;
	refill_rx(dev);
	np->budget -= received;
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	return;

not_done:
	np->cur_rx = entry;
	refill_rx(dev);
	if (!received)
		received = 1;
	np->budget -= received;
	if (np->budget <= 0)
		np->budget = RX_BUDGET;
	tasklet_schedule(&np->rx_tasklet);
}

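/*
 * Replenish receive buffers between dirty_rx and cur_rx: any slot whose
 * skb was handed to the stack by rx_poll() gets a fresh buffer, and the
 * descriptor is returned to the chip by clearing its status word.
 */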
static void refill_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry;

	/* Refill the Rx ring buffers. */
	for (; (np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;		/* Better luck next round. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
				dma_map_single(&np->pci_dev->dev, skb->data,
					np->rx_buf_sz, DMA_FROM_DEVICE));
			if (dma_mapping_error(&np->pci_dev->dev,
				    np->rx_ring[entry].frag[0].addr)) {
				dev_kfree_skb_irq(skb);
				np->rx_skbuff[entry] = NULL;
				break;
			}
		}
		/* Perhaps we need not reset this field. */
		np->rx_ring[entry].frag[0].length =
			cpu_to_le32(np->rx_buf_sz | LastFrag);
		np->rx_ring[entry].status = 0;
	}
}
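
/*
 * Handle the uncommon interrupt sources.  On LinkChange the PHY is
 * polled until the link settles; speed and duplex come either from the
 * intersection of our advertisement with the link partner's (autoneg)
 * or from the forced BMCR bits, after which flow control and the
 * carrier state are updated to match.
 */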
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mii_ctl, mii_advertise, mii_lpa;
	int speed;

	if (intr_status & LinkChange) {
		if (mdio_wait_link(dev, 10) == 0) {
			printk(KERN_INFO "%s: Link up\n", dev->name);
			if (np->an_enable) {
				mii_advertise = mdio_read(dev, np->phys[0],
							   MII_ADVERTISE);
				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
				mii_advertise &= mii_lpa;
				printk(KERN_INFO "%s: Link changed: ",
					dev->name);
				if (mii_advertise & ADVERTISE_100FULL) {
					np->speed = 100;
					printk("100Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_100HALF) {
					np->speed = 100;
					printk("100Mbps, half duplex\n");
				} else if (mii_advertise & ADVERTISE_10FULL) {
					np->speed = 10;
					printk("10Mbps, full duplex\n");
				} else if (mii_advertise & ADVERTISE_10HALF) {
					np->speed = 10;
					printk("10Mbps, half duplex\n");
				} else
					printk("\n");

			} else {
				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps, ",
					dev->name, speed);
				printk("%s duplex.\n",
					(mii_ctl & BMCR_FULLDPLX) ?
						"full" : "half");
			}
			check_duplex(dev);
			if (np->flowctrl && np->mii_if.full_duplex) {
				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
					ioaddr + MulticastFilter1+2);
				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
					ioaddr + MACCtrl0);
			}
			netif_carrier_on(dev);
		} else {
			printk(KERN_INFO "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		}
	}
	if (intr_status & StatsMax) {
		get_stats(dev);
	}
	if (intr_status & IntrPCIErr) {
		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
			   dev->name, intr_status);
		/* We must do a global reset of DMA to continue. */
	}
}

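/*
 * The chip's statistics registers appear to clear on read (the StatsMax
 * interrupt fires as they approach overflow), so every read below is
 * accumulated into the software counters rather than assigned.
 */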
static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	u8 late_coll, single_coll, mult_coll;

	spin_lock_irqsave(&np->statlock, flags);
	/* The chip only needs to report frames it silently dropped. */
	dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);

	mult_coll = ioread8(ioaddr + StatsMultiColl);
	np->xstats.tx_multiple_collisions += mult_coll;
	single_coll = ioread8(ioaddr + StatsOneColl);
	np->xstats.tx_single_collisions += single_coll;
	late_coll = ioread8(ioaddr + StatsLateColl);
	np->xstats.tx_late_collisions += late_coll;
	dev->stats.collisions += mult_coll
		+ single_coll
		+ late_coll;

	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);

	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;

	spin_unlock_irqrestore(&np->statlock, flags);

	return &dev->stats;
}

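/*
 * Multicast filtering uses a 64-bin hash: the top six bits of the
 * little-endian CRC of each address, bit-reversed, index one bit in the
 * four 16-bit MulticastFilter words (index/16 selects the word,
 * index%16 the bit).  E.g. top six CRC bits 100000b give index
 * 000001b = bin 1, i.e. word 0, bit 1.
 */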
static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u16 mc_filter[4];			/* Multicast hash filter */
	u32 rx_mode;
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		int bit;
		int index;
		int crc;
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			for (index = 0, bit = 0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000)
					index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
		}
		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
	} else {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		return;
	}
	if (np->mii_if.full_duplex && np->flowctrl)
		mc_filter[3] |= 0x0200;

	for (i = 0; i < 4; i++)
		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
	iowrite8(rx_mode, ioaddr + RxMode);
}

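/*
 * The station address lives in three consecutive 16-bit registers at
 * StationAddr, each word taken little-endian from the MAC address
 * bytes, so the 6-byte address is programmed with three iowrite16()s.
 */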
static int __set_mac_addr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 addr16;

	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
	iowrite16(addr16, np->base + StationAddr);
	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
	iowrite16(addr16, np->base + StationAddr+2);
	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
	iowrite16(addr16, np->base + StationAddr+4);
	return 0;
}

/* Invoked with rtnl_lock held */
static int sundance_set_mac_addr(struct net_device *dev, void *data)
{
	const struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	__set_mac_addr(dev);

	return 0;
}

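/*
 * ethtool statistics names; the order here must match the order in
 * which get_ethtool_stats() fills its data array.
 */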
static const struct {
	const char name[ETH_GSTRING_LEN];
} sundance_stats[] = {
	{ "tx_multiple_collisions" },
	{ "tx_single_collisions" },
	{ "tx_late_collisions" },
	{ "tx_deferred" },
	{ "tx_deferred_excessive" },
	{ "tx_aborted" },
	{ "tx_bcasts" },
	{ "rx_bcasts" },
	{ "tx_mcasts" },
	{ "rx_mcasts" },
};

static int check_if_running(struct net_device *dev)
{
	if (!netif_running(dev))
		return -EINVAL;
	return 0;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int res;
	spin_lock_irq(&np->lock);
	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	return res;
}

static int nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 get_msglevel(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct netdev_private *np = netdev_priv(dev);
	np->msg_enable = val;
}

static void get_strings(struct net_device *dev, u32 stringset,
		u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, sundance_stats, sizeof(sundance_stats));
}

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(sundance_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct netdev_private *np = netdev_priv(dev);
	int i = 0;

	get_stats(dev);
	data[i++] = np->xstats.tx_multiple_collisions;
	data[i++] = np->xstats.tx_single_collisions;
	data[i++] = np->xstats.tx_late_collisions;
	data[i++] = np->xstats.tx_deferred;
	data[i++] = np->xstats.tx_deferred_excessive;
	data[i++] = np->xstats.tx_aborted;
	data[i++] = np->xstats.tx_bcasts;
	data[i++] = np->xstats.rx_bcasts;
	data[i++] = np->xstats.tx_mcasts;
	data[i++] = np->xstats.rx_mcasts;
}

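/*
 * Wake-on-LAN: the WakeEvent register selects magic-packet and/or
 * link-event wake sources.  The get/set handlers below mirror those
 * bits into the ethtool WAKE_MAGIC and WAKE_PHY flags; without
 * CONFIG_PM the ethtool_ops entries are simply left NULL.
 */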
#ifdef CONFIG_PM

static void sundance_get_wol(struct net_device *dev,
		struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u8 wol_bits;

	wol->wolopts = 0;

	wol->supported = (WAKE_PHY | WAKE_MAGIC);
	if (!np->wol_enabled)
		return;

	wol_bits = ioread8(ioaddr + WakeEvent);
	if (wol_bits & MagicPktEnable)
		wol->wolopts |= WAKE_MAGIC;
	if (wol_bits & LinkEventEnable)
		wol->wolopts |= WAKE_PHY;
}

static int sundance_set_wol(struct net_device *dev,
	struct ethtool_wolinfo *wol)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	u8 wol_bits;

	if (!device_can_wakeup(&np->pci_dev->dev))
		return -EOPNOTSUPP;

	np->wol_enabled = !!(wol->wolopts);
	wol_bits = ioread8(ioaddr + WakeEvent);
	wol_bits &= ~(WakePktEnable | MagicPktEnable |
			LinkEventEnable | WolEnable);

	if (np->wol_enabled) {
		if (wol->wolopts & WAKE_MAGIC)
			wol_bits |= (MagicPktEnable | WolEnable);
		if (wol->wolopts & WAKE_PHY)
			wol_bits |= (LinkEventEnable | WolEnable);
	}
	iowrite8(wol_bits, ioaddr + WakeEvent);

	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);

	return 0;
}
#else
#define sundance_get_wol NULL
#define sundance_set_wol NULL
#endif /* CONFIG_PM */

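/* .begin gates every ethtool operation on the interface being up. */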
static const struct ethtool_ops ethtool_ops = {
	.begin = check_if_running,
	.get_drvinfo = get_drvinfo,
	.nway_reset = nway_reset,
	.get_link = get_link,
	.get_wol = sundance_get_wol,
	.set_wol = sundance_set_wol,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_strings = get_strings,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_ethtool_stats,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}

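/*
 * Shutdown order matters: kill the tasklets and stop the queue first,
 * then mask interrupts, halt Rx/Tx DMA and the MAC, wait for the DMA
 * engines to go idle, and issue a global reset before the IRQ, timer
 * and ring buffers are released.
 */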
static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;

	/* Wait for and kill the tasklets */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
	np->cur_tx = 0;
	np->dirty_tx = 0;
	np->cur_task = 0;
	np->last_tx = NULL;

	netif_stop_queue(dev);

	if (netif_msg_ifdown(np)) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
			   "Rx %4.4x Int %2.2x.\n",
			   dev->name, ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Disable Rx and Tx DMA so resources can be released safely. */
	iowrite32(0x500, ioaddr + DMACtrl);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

	for (i = 2000; i > 0; i--) {
		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
			break;
		mdelay(1);
	}

	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASIC_HI_WORD(ASICCtrl));

	for (i = 2000; i > 0; i--) {
		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
			break;
		mdelay(1);
	}

#ifdef __i386__
	if (netif_msg_hw(np)) {
		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
			   (int)(np->tx_ring_dma));
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
				   np->tx_ring[i].frag[0].length);
		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
			   (int)(np->rx_ring_dma));
		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
				   np->rx_ring[i].frag[0].length);
		}
	}
#endif /* __i386__ debugging only */

	free_irq(np->pci_dev->irq, dev);

	del_timer_sync(&np->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		skb = np->rx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->rx_ring[i].frag[0].addr),
				np->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;
		skb = np->tx_skbuff[i];
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}

	return 0;
}

static void sundance_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
			np->rx_ring, np->rx_ring_dma);
		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
			np->tx_ring, np->tx_ring_dma);
		pci_iounmap(pdev, np->base);
		pci_release_regions(pdev);
		free_netdev(dev);
	}
}

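/*
 * Suspend closes the interface, but when Wake-on-LAN is armed it
 * re-enables the receiver (unicast and broadcast only) so the chip can
 * still see wake packets in the low-power state; resume restores PCI
 * state and simply reopens the device.
 */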
#ifdef CONFIG_PM

static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;

	if (!netif_running(dev))
		return 0;

	netdev_close(dev);
	netif_device_detach(dev);

	pci_save_state(pci_dev);
	if (np->wol_enabled) {
		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
		iowrite16(RxEnable, ioaddr + MACCtrl1);
	}
	pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
			np->wol_enabled);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	return 0;
}

static int sundance_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	int err = 0;

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);
	pci_enable_wake(pci_dev, PCI_D0, 0);

	err = netdev_open(dev);
	if (err) {
		printk(KERN_ERR "%s: Can't resume interface!\n",
				dev->name);
		goto out;
	}

	netif_device_attach(dev);

out:
	return err;
}

#endif /* CONFIG_PM */

static struct pci_driver sundance_driver = {
	.name		= DRV_NAME,
	.id_table	= sundance_pci_tbl,
	.probe		= sundance_probe1,
	.remove		= sundance_remove1,
#ifdef CONFIG_PM
	.suspend	= sundance_suspend,
	.resume		= sundance_resume,
#endif /* CONFIG_PM */
};

static int __init sundance_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&sundance_driver);
}

static void __exit sundance_exit(void)
{
	pci_unregister_driver(&sundance_driver);
}

module_init(sundance_init);
module_exit(sundance_exit);